Crawling Jobbole (伯乐在线) Articles with Scrapy
First set up a virtual environment, then create the project and generate the spider:
scrapy startproject ArticleSpider
cd ArticleSpider
scrapy genspider jobbole blog.jobbole.com
In the generated spider, change start_urls = ['http://blog.jobbole.com/all-posts/'] so it starts from the article list page.
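To run the spider from an IDE while developing, a small launcher script is convenient. This main.py is not part of the generated project; it is a common pattern, sketched here as an assumption:

```python
# main.py -- hypothetical debug launcher placed in the project root
import os
import sys

from scrapy.cmdline import execute

# make sure the project root is on sys.path so ArticleSpider can be imported
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
execute(["scrapy", "crawl", "jobbole"])
```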
Extracting the page data
ArticleSpider/spiders/jobbole.py
# -*- coding: utf-8 -*-
import datetime
import re
from urllib import parse

import scrapy
from scrapy import Request

from ArticleSpider.items import JobBoleArticleItem
from ArticleSpider.utils.common import get_md5


class JobboleSpider(scrapy.Spider):
    name = 'jobbole'
    allowed_domains = ['blog.jobbole.com']
    start_urls = ['http://blog.jobbole.com/all-posts/']

    def parse(self, response):
        """
        1. Extract the article URLs from the list page and hand them to scrapy to download and parse.
        2. Extract the next-page URL and hand it to scrapy; once downloaded, it is parsed by this same parse method.
        :param response:
        :return:
        """
        post_nodes = response.css("#archive .floated-thumb .post-thumb a")
        for post_node in post_nodes:
            image_url = post_node.css("img::attr(src)").extract_first("")
            post_url = post_node.css("::attr(href)").extract_first("")
            yield Request(url=parse.urljoin(response.url, post_url),
                          meta={"front_image_url": image_url},
                          callback=self.parse_detail)
        # Extract the next page
        next_url = response.css(".next.page-numbers::attr(href)").extract_first()
        if next_url:
            yield Request(url=parse.urljoin(response.url, next_url), callback=self.parse)

    def parse_detail(self, response):
        """
        Extract the individual article fields
        :param response:
        :return:
        """
        article_item = JobBoleArticleItem()
        # Extract the fields with CSS selectors
        front_image_url = response.meta.get("front_image_url", "")  # article cover image
        title = response.css(".entry-header h1::text").extract()[0]
        create_date = response.css("p.entry-meta-hide-on-mobile::text").extract()[0].strip().replace("·", "").strip()
        praise_nums = response.css(".vote-post-up h10::text").extract()[0]
        fav_nums = response.css(".bookmark-btn::text").extract()[0]
        match_re = re.match(r".*?(\d+).*", fav_nums)
        if match_re:
            fav_nums = int(match_re.group(1))
        else:
            fav_nums = 0
        comment_nums = response.css("a[href='#article-comment'] span::text").extract()[0]
        match_re = re.match(r".*?(\d+).*", comment_nums)
        if match_re:
            comment_nums = int(match_re.group(1))
        else:
            comment_nums = 0
        content = response.css("div.entry").extract()[0]
        tag_list = response.css("p.entry-meta-hide-on-mobile a::text").extract()
        # drop the comment-count link ("... 评论") that is extracted along with the tags
        tag_list = [element for element in tag_list if not element.strip().endswith("评论")]
        tags = ",".join(tag_list)

        article_item["url_object_id"] = get_md5(response.url)
        article_item["title"] = title
        article_item["url"] = response.url
        try:
            create_date = datetime.datetime.strptime(create_date, "%Y/%m/%d").date()
        except Exception:
            create_date = datetime.datetime.now().date()
        article_item["create_date"] = create_date
        article_item["front_image_url"] = [front_image_url]
        article_item["praise_nums"] = praise_nums
        article_item["comment_nums"] = comment_nums
        article_item["fav_nums"] = fav_nums
        article_item["tags"] = tags
        article_item["content"] = content

        yield article_item
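The spider imports a get_md5 helper from ArticleSpider/utils/common.py that is not shown above; it hashes the URL into a fixed-length id so it can serve as the primary key. A minimal sketch, assuming the helper simply wraps hashlib (the original implementation may differ):

```python
# ArticleSpider/utils/common.py -- sketch of the md5 helper the spider imports
import hashlib


def get_md5(url):
    # hashlib works on bytes, so encode str input first
    if isinstance(url, str):
        url = url.encode("utf-8")
    return hashlib.md5(url).hexdigest()
```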
ArticleSpider/items.py
class JobBoleArticleItem(scrapy.Item):
title = scrapy.Field()
create_date = scrapy.Field()
url = scrapy.Field()
url_object_id = scrapy.Field()
front_image_url = scrapy.Field()
front_image_path = scrapy.Field()
praise_nums = scrapy.Field()
comment_nums = scrapy.Field()
fav_nums = scrapy.Field()
tags = scrapy.Field()
content = scrapy.Field()
ArticleSpider/pipelines.py
from scrapy.pipelines.images import ImagesPipeline


class ArticlespiderPipeline(object):
    def process_item(self, item, spider):
        return item


class ArticleImagePipeline(ImagesPipeline):
    def item_completed(self, results, item, info):
        # results is a list of (success, info) tuples produced by the images
        # pipeline; record the local path of the downloaded cover image
        if "front_image_url" in item:
            for ok, value in results:
                image_file_path = value["path"]
            item["front_image_path"] = image_file_path
        return item
ArticleSpider/settings.py
Create a folder ArticleSpider/images to store the downloaded images, then register the pipelines. The values in ITEM_PIPELINES are priorities: pipelines with lower values run first.
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'ArticleSpider.pipelines.ArticlespiderPipeline': 300,
# 'scrapy.pipelines.images.ImagesPipeline': 1,
'ArticleSpider.pipelines.ArticleImagePipeline': 1,
}
IMAGES_URLS_FIELD = "front_image_url"

import os  # needed below to build the absolute path to the images folder
project_dir = os.path.abspath(os.path.dirname(__file__))
IMAGES_STORE = os.path.join(project_dir, 'images')
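Note that the images pipeline depends on Pillow for image processing, so install it first (pip install pillow) if it is not already in the virtual environment.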
Saving the data to MySQL
First create the jobbole_article table in the article_spider database:
-- ----------------------------
-- Table structure for jobbole_article
-- ----------------------------
DROP TABLE IF EXISTS `jobbole_article`;
CREATE TABLE `jobbole_article` (
`title` varchar(255) NOT NULL,
`create_date` date DEFAULT NULL,
`url` varchar(255) NOT NULL,
`url_object_id` varchar(50) NOT NULL,
`front_image_url` varchar(255) DEFAULT NULL,
`front_image_path` varchar(255) DEFAULT NULL,
`praise_nums` int(11) NOT NULL DEFAULT '0',
`comment_nums` int(11) NOT NULL DEFAULT '0',
`fav_nums` int(11) NOT NULL DEFAULT '0',
`tags` varchar(255) DEFAULT NULL,
`content` longtext NOT NULL,
PRIMARY KEY (`url_object_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
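url_object_id, the md5 hash of the article URL (32 hex characters, so varchar(50) is plenty), serves as the primary key; crawled URLs can be arbitrarily long, which makes them a poor key on their own.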
Writing the pipelines (ArticleSpider/pipelines.py):
import MySQLdb
import MySQLdb.cursors
from scrapy.pipelines.images import ImagesPipeline
from twisted.enterprise import adbapi

from ArticleSpider.settings import MYSQL_DBNAME, MYSQL_PASSWORD, MYSQL_USER, MYSQL_HOST


class ArticlespiderPipeline(object):
    def process_item(self, item, spider):
        return item


class ArticleImagePipeline(ImagesPipeline):
    def item_completed(self, results, item, info):
        if "front_image_url" in item:
            for ok, value in results:
                image_file_path = value["path"]
            item["front_image_path"] = image_file_path
        return item


class MysqlPipeline(object):
    # Write to MySQL synchronously
    def __init__(self):
        self.conn = MySQLdb.connect(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DBNAME,
                                    charset="utf8", use_unicode=True)
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        insert_sql = """
            insert into jobbole_article(title, url, url_object_id, create_date, fav_nums)
            VALUES (%s, %s, %s, %s, %s)
        """
        self.cursor.execute(insert_sql,
                            (item["title"], item["url"], item["url_object_id"],
                             item["create_date"], item["fav_nums"]))
        self.conn.commit()
        return item


class MysqlTwistedPipline(object):
    def __init__(self, dbpool):
        self.dbpool = dbpool

    @classmethod
    def from_settings(cls, settings):
        dbparms = dict(
            host=settings["MYSQL_HOST"],
            db=settings["MYSQL_DBNAME"],
            user=settings["MYSQL_USER"],
            passwd=settings["MYSQL_PASSWORD"],
            charset='utf8',
            cursorclass=MySQLdb.cursors.DictCursor,
            use_unicode=True,
        )
        dbpool = adbapi.ConnectionPool("MySQLdb", **dbparms)
        return cls(dbpool)

    def process_item(self, item, spider):
        # Use twisted to run the MySQL insert asynchronously
        query = self.dbpool.runInteraction(self.do_insert, item)
        query.addErrback(self.handle_error, item, spider)  # handle exceptions
        return item

    def handle_error(self, failure, item, spider):
        # Handle exceptions raised by the asynchronous insert
        print(failure)

    def do_insert(self, cursor, item):
        # Run the actual insert; different items could build different SQL here
        insert_sql = """
            insert into jobbole_article(title, url, url_object_id, create_date, front_image_url,
                front_image_path, praise_nums, comment_nums, fav_nums, tags, content)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
        """
        # note: item["front_image_url"] is a list, so it may need to be joined
        # into a string before being bound to the varchar column
        cursor.execute(insert_sql,
                       (item["title"], item["url"], item["url_object_id"], item["create_date"],
                        item["front_image_url"], item["front_image_path"], item["praise_nums"],
                        item["comment_nums"], item["fav_nums"], item["tags"], item["content"]))
ArticleSpider/settings.py
ITEM_PIPELINES = {
# 'ArticleSpider.pipelines.ArticlespiderPipeline': 300,
# 'scrapy.pipelines.images.ImagesPipeline': 1,
'ArticleSpider.pipelines.ArticleImagePipeline': 1,
'ArticleSpider.pipelines.MysqlTwistedPipline': 2
}
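The pipelines import MYSQL_HOST and friends from settings.py, but those values are not shown in the original; a hypothetical example (replace with your own connection details):

```python
# Hypothetical MySQL connection settings -- adjust to your own database
MYSQL_HOST = "127.0.0.1"
MYSQL_DBNAME = "article_spider"
MYSQL_USER = "root"
MYSQL_PASSWORD = "root"
```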
Item Loader
With an ItemLoader, the CSS rules in the spider become declarative add_css/add_value calls, and the field cleanup moves into reusable processors defined on the Item.
ArticleSpider/spiders/jobbole.py
# Load the item through an ItemLoader instead of extracting by hand
front_image_url = response.meta.get("front_image_url", "")  # article cover image
item_loader = ArticleItemLoader(item=JobBoleArticleItem(), response=response)
item_loader.add_css("title", ".entry-header h1::text")
item_loader.add_value("url", response.url)
item_loader.add_value("url_object_id", get_md5(response.url))
item_loader.add_css("create_date", "p.entry-meta-hide-on-mobile::text")
item_loader.add_value("front_image_url", [front_image_url])
item_loader.add_css("praise_nums", ".vote-post-up h10::text")
item_loader.add_css("comment_nums", "a[href='#article-comment'] span::text")
item_loader.add_css("fav_nums", ".bookmark-btn::text")
item_loader.add_css("tags", "p.entry-meta-hide-on-mobile a::text")
item_loader.add_css("content", "div.entry")

article_item = item_loader.load_item()
yield article_item
ArticleSpider/items.py
import datetime
import re

import scrapy
from scrapy.loader import ItemLoader
from scrapy.loader.processors import MapCompose, TakeFirst, Join


class ArticleItemLoader(ItemLoader):
    # Custom ItemLoader: take the first extracted value for every field by default
    default_output_processor = TakeFirst()


def date_convert(value):
    # Parse the date string; fall back to today on failure
    try:
        create_date = datetime.datetime.strptime(value, "%Y/%m/%d").date()
    except Exception:
        create_date = datetime.datetime.now().date()
    return create_date


def get_nums(value):
    # Pull the first integer out of strings like " 2 收藏"
    match_re = re.match(r".*?(\d+).*", value)
    if match_re:
        nums = int(match_re.group(1))
    else:
        nums = 0
    return nums


def remove_comment_tags(value):
    # Drop the comment-count entry ("... 评论") extracted along with the tags
    if "评论" in value:
        return ""
    else:
        return value


def return_value(value):
    # Identity processor: overrides the default TakeFirst so the value stays a list
    return value


class JobBoleArticleItem(scrapy.Item):
    title = scrapy.Field()
    create_date = scrapy.Field(
        input_processor=MapCompose(date_convert),
    )
    url = scrapy.Field()
    url_object_id = scrapy.Field()
    front_image_url = scrapy.Field(
        # keep this field a list, because the images pipeline expects a list of URLs
        output_processor=MapCompose(return_value)
    )
    front_image_path = scrapy.Field()
    praise_nums = scrapy.Field(
        input_processor=MapCompose(get_nums)
    )
    comment_nums = scrapy.Field(
        input_processor=MapCompose(get_nums)
    )
    fav_nums = scrapy.Field(
        input_processor=MapCompose(get_nums)
    )
    tags = scrapy.Field(
        input_processor=MapCompose(remove_comment_tags),
        output_processor=Join(",")
    )
    content = scrapy.Field()
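As a quick sanity check, the standalone processor functions can be exercised directly (hypothetical inputs):

```python
# Quick check of the processor functions with hypothetical inputs
print(get_nums(" 2 收藏"))         # -> 2
print(date_convert("2018/10/15"))  # -> 2018-10-15 (a datetime.date)
```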