First, set up a virtual environment and create the project:

scrapy startproject ArticleSpider
cd ArticleSpider
scrapy genspider jobbole blog.jobbole.com

Then change the start URL: start_urls = ['http://blog.jobbole.com/all-posts/']
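To run or debug the spider from an IDE instead of the shell, a small launcher script is handy. This is a sketch: the file name main.py and its location at the project root (next to scrapy.cfg) are conventions, not something the commands above create.

import os
import sys

from scrapy.cmdline import execute

# Make the project importable no matter where the script is launched from
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# Equivalent to running "scrapy crawl jobbole" on the command line
execute(["scrapy", "crawl", "jobbole"])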

Extracting the page data

ArticleSpider/spiders/jobbole.py

# -*- coding: utf-8 -*-
import datetime
import re
from urllib import parse

import scrapy
from scrapy import Request

from ArticleSpider.items import JobBoleArticleItem
from ArticleSpider.utils.common import get_md5


class JobboleSpider(scrapy.Spider):
    name = 'jobbole'
    allowed_domains = ['blog.jobbole.com']
    start_urls = ['http://blog.jobbole.com/all-posts/']

    def parse(self, response):
        """
        1. Extract the article URLs on the list page and hand them to scrapy to download and parse.
        2. Extract the next-page URL and hand it to scrapy; once downloaded it is parsed by this same parse method.
        :param response:
        :return:
        """
        post_nodes = response.css("#archive .floated-thumb .post-thumb a")
        for post_node in post_nodes:
            image_url = post_node.css("img::attr(src)").extract_first("")
            post_url = post_node.css("::attr(href)").extract_first("")
            yield Request(url=parse.urljoin(response.url, post_url),
                          meta={"front_image_url": image_url},
                          callback=self.parse_detail)

        # Extract the next page
        next_url = response.css(".next.page-numbers::attr(href)").extract_first()
        if next_url:
            yield Request(url=parse.urljoin(response.url, next_url), callback=self.parse)

    def parse_detail(self, response):
        """
        Extract the individual fields.
        :param response:
        :return:
        """
        article_item = JobBoleArticleItem()

        # Extract the fields with CSS selectors
        front_image_url = response.meta.get("front_image_url", "")  # article cover image
        title = response.css(".entry-header h1::text").extract()[0]
        create_date = response.css("p.entry-meta-hide-on-mobile::text").extract()[0].strip().replace("·", "").strip()
        praise_nums = response.css(".vote-post-up h10::text").extract()[0]

        fav_nums = response.css(".bookmark-btn::text").extract()[0]
        match_re = re.match(r".*?(\d+).*", fav_nums)
        if match_re:
            fav_nums = int(match_re.group(1))
        else:
            fav_nums = 0

        comment_nums = response.css("a[href='#article-comment'] span::text").extract()[0]
        match_re = re.match(r".*?(\d+).*", comment_nums)
        if match_re:
            comment_nums = int(match_re.group(1))
        else:
            comment_nums = 0

        content = response.css("div.entry").extract()[0]

        tag_list = response.css("p.entry-meta-hide-on-mobile a::text").extract()
        # Drop the comment-count entry that this selector also matches
        tag_list = [element for element in tag_list if not element.strip().endswith("评论")]
        tags = ",".join(tag_list)

        article_item["url_object_id"] = get_md5(response.url)
        article_item["title"] = title
        article_item["url"] = response.url
        try:
            create_date = datetime.datetime.strptime(create_date, "%Y/%m/%d").date()
        except Exception as e:
            create_date = datetime.datetime.now().date()
        article_item["create_date"] = create_date
        article_item["front_image_url"] = [front_image_url]
        article_item["praise_nums"] = praise_nums
        article_item["comment_nums"] = comment_nums
        article_item["fav_nums"] = fav_nums
        article_item["tags"] = tags
        article_item["content"] = content

        yield article_item
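parse_detail calls get_md5 from ArticleSpider/utils/common.py, which the post never lists. A minimal implementation consistent with how it is used here (hashing the URL down to a fixed-length string for the primary key):

import hashlib


def get_md5(url):
    # md5 operates on bytes, so encode a str URL first
    if isinstance(url, str):
        url = url.encode("utf-8")
    m = hashlib.md5()
    m.update(url)
    return m.hexdigest()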

ArticleSpider/items.py

import scrapy


class JobBoleArticleItem(scrapy.Item):
    title = scrapy.Field()
    create_date = scrapy.Field()
    url = scrapy.Field()
    url_object_id = scrapy.Field()
    front_image_url = scrapy.Field()
    front_image_path = scrapy.Field()
    praise_nums = scrapy.Field()
    comment_nums = scrapy.Field()
    fav_nums = scrapy.Field()
    tags = scrapy.Field()
    content = scrapy.Field()

ArticleSpider/pipelines.py

from scrapy.pipelines.images import ImagesPipeline


class ArticlespiderPipeline(object):
    def process_item(self, item, spider):
        return item


class ArticleImagePipeline(ImagesPipeline):
    def item_completed(self, results, item, info):
        if "front_image_url" in item:
            for ok, value in results:
                image_file_path = value["path"]
            item["front_image_path"] = image_file_path
        return item

ArticleSpider/settings.py

Create a folder ArticleSpider/images to hold the downloaded images.

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'ArticleSpider.pipelines.ArticlespiderPipeline': 300,
    # 'scrapy.pipelines.images.ImagesPipeline': 1,
    'ArticleSpider.pipelines.ArticleImagePipeline': 1,
}

import os  # needed for the path handling below

# Which item field holds the image URLs, and where to store the files
IMAGES_URLS_FIELD = "front_image_url"
project_dir = os.path.abspath(os.path.dirname(__file__))
IMAGES_STORE = os.path.join(project_dir, 'images')
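The images pipeline needs Pillow installed to process the downloads. Optionally, Scrapy's IMAGES_MIN_HEIGHT and IMAGES_MIN_WIDTH settings can filter out tiny thumbnails; the 100-pixel threshold below is just an example value:

# Skip any downloaded image smaller than 100x100 pixels
IMAGES_MIN_HEIGHT = 100
IMAGES_MIN_WIDTH = 100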

Saving the data to the database

/*
Navicat MySQL Data Transfer

Source Server         : 2233
Source Server Version : 50723
Source Host           : localhost:3306
Source Database       : article_spider

Target Server Type    : MYSQL
Target Server Version : 50723
File Encoding         : 65001

Date: 2018-10-15 11:19:07
*/

SET FOREIGN_KEY_CHECKS=0;

-- ----------------------------
-- Table structure for jobbole_article
-- ----------------------------
DROP TABLE IF EXISTS `jobbole_article`;
CREATE TABLE `jobbole_article` (
`title` varchar(255) NOT NULL,
`create_date` date DEFAULT NULL,
`url` varchar(255) NOT NULL,
`url_object_id` varchar(50) NOT NULL,
`front_image_url` varchar(255) DEFAULT NULL,
`front_image_path` varchar(255) DEFAULT NULL,
`praise_nums` int(11) NOT NULL DEFAULT '0',
`comment_nums` int(11) NOT NULL DEFAULT '0',
`fav_nums` int(11) NOT NULL DEFAULT '0',
`tags` varchar(255) DEFAULT NULL,
`content` longtext NOT NULL,
PRIMARY KEY (`url_object_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

Writing the pipelines

import MySQLdb
import MySQLdb.cursors
from scrapy.pipelines.images import ImagesPipeline
from twisted.enterprise import adbapi

from ArticleSpider import settings
from ArticleSpider.settings import MYSQL_DBNAME, MYSQL_PASSWORD, MYSQL_USER, MYSQL_HOST


class ArticlespiderPipeline(object):
    def process_item(self, item, spider):
        return item


class ArticleImagePipeline(ImagesPipeline):
    def item_completed(self, results, item, info):
        if "front_image_url" in item:
            for ok, value in results:
                image_file_path = value["path"]
            item["front_image_path"] = image_file_path
        return item


class MysqlPipeline(object):
    # Write to MySQL synchronously
    def __init__(self):
        self.conn = MySQLdb.connect(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DBNAME,
                                    charset="utf8", use_unicode=True)
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        insert_sql = """
            insert into jobbole_article(title, url, url_object_id, create_date, fav_nums)
            VALUES (%s, %s, %s, %s, %s)
        """
        self.cursor.execute(insert_sql,
                            (item["title"], item["url"], item["url_object_id"],
                             item["create_date"], item["fav_nums"]))
        self.conn.commit()
        return item


class MysqlTwistedPipline(object):
    def __init__(self, dbpool):
        self.dbpool = dbpool

    @classmethod
    def from_settings(cls, settings):
        dbparms = dict(
            host=settings["MYSQL_HOST"],
            db=settings["MYSQL_DBNAME"],
            user=settings["MYSQL_USER"],
            passwd=settings["MYSQL_PASSWORD"],
            charset='utf8',
            cursorclass=MySQLdb.cursors.DictCursor,
            use_unicode=True,
        )
        dbpool = adbapi.ConnectionPool("MySQLdb", **dbparms)
        return cls(dbpool)

    def process_item(self, item, spider):
        # Use twisted to make the MySQL insert asynchronous
        query = self.dbpool.runInteraction(self.do_insert, item)
        query.addErrback(self.handle_error, item, spider)  # handle exceptions
        return item

    def handle_error(self, failure, item, spider):
        # Handle exceptions raised by the asynchronous insert
        print(failure)

    def do_insert(self, cursor, item):
        # Perform the actual insert
        # Build a different SQL statement for each kind of item and insert it into MySQL
        insert_sql = """
            insert into jobbole_article(title, url, url_object_id, create_date, front_image_url,
                front_image_path, praise_nums, comment_nums, fav_nums, tags, content)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
        """
        cursor.execute(insert_sql,
                       (item["title"], item["url"], item["url_object_id"], item["create_date"],
                        item["front_image_url"][0],  # front_image_url is a list; store the first URL
                        item["front_image_path"], item["praise_nums"], item["comment_nums"],
                        item["fav_nums"], item["tags"], item["content"]))

settings.py

ITEM_PIPELINES = {
    # 'ArticleSpider.pipelines.ArticlespiderPipeline': 300,
    # 'scrapy.pipelines.images.ImagesPipeline': 1,
    'ArticleSpider.pipelines.ArticleImagePipeline': 1,
    'ArticleSpider.pipelines.MysqlTwistedPipline': 2,
}
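Both MySQL pipelines read their connection parameters from settings.py; the post never shows these entries, so the values below are placeholders to adapt to your environment (the database name matches the SQL dump above):

MYSQL_HOST = "localhost"        # placeholder
MYSQL_DBNAME = "article_spider"
MYSQL_USER = "root"             # placeholder
MYSQL_PASSWORD = "root"         # placeholder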

item loader

ArticleSpider/spiders/jobbole.py

        # Populate the item through an item loader
        front_image_url = response.meta.get("front_image_url", "")  # article cover image
        item_loader = ArticleItemLoader(item=JobBoleArticleItem(), response=response)
        item_loader.add_css("title", ".entry-header h1::text")
        item_loader.add_value("url", response.url)
        item_loader.add_value("url_object_id", get_md5(response.url))
        item_loader.add_css("create_date", "p.entry-meta-hide-on-mobile::text")
        item_loader.add_value("front_image_url", [front_image_url])
        item_loader.add_css("praise_nums", ".vote-post-up h10::text")
        item_loader.add_css("comment_nums", "a[href='#article-comment'] span::text")
        item_loader.add_css("fav_nums", ".bookmark-btn::text")
        item_loader.add_css("tags", "p.entry-meta-hide-on-mobile a::text")
        item_loader.add_css("content", "div.entry")

        article_item = item_loader.load_item()
        yield article_item

ArticleSpider/items.py

import datetime
import re

import scrapy
from scrapy.loader import ItemLoader
from scrapy.loader.processors import Join, MapCompose, TakeFirst


class ArticleItemLoader(ItemLoader):
    # Custom item loader: by default take only the first extracted value for every field
    default_output_processor = TakeFirst()


def date_convert(value):
    try:
        create_date = datetime.datetime.strptime(value, "%Y/%m/%d").date()
    except Exception as e:
        create_date = datetime.datetime.now().date()
    return create_date


def get_nums(value):
    match_re = re.match(r".*?(\d+).*", value)
    if match_re:
        nums = int(match_re.group(1))
    else:
        nums = 0
    return nums


def remove_comment_tags(value):
    # Drop the comment-count entry extracted together with the tags
    if "评论" in value:
        return ""
    else:
        return value


def return_value(value):
    # Identity processor: keeps front_image_url as a list for the images pipeline
    return value


class JobBoleArticleItem(scrapy.Item):
    title = scrapy.Field()
    create_date = scrapy.Field(
        input_processor=MapCompose(date_convert),
    )
    url = scrapy.Field()
    url_object_id = scrapy.Field()
    front_image_url = scrapy.Field(
        output_processor=MapCompose(return_value)
    )
    front_image_path = scrapy.Field()
    praise_nums = scrapy.Field(
        input_processor=MapCompose(get_nums)
    )
    comment_nums = scrapy.Field(
        input_processor=MapCompose(get_nums)
    )
    fav_nums = scrapy.Field(
        input_processor=MapCompose(get_nums)
    )
    tags = scrapy.Field(
        input_processor=MapCompose(remove_comment_tags),
        output_processor=Join(",")
    )
    content = scrapy.Field()
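To see how the processors combine, trace one field through the loader: add_css collects a list of raw strings, the field's input_processor runs on each value, and the output processor reduces the list when load_item() is called. Assuming the praise_nums selector matched a single string like " 2 ":

# praise_nums: [" 2 "]
#   input_processor  MapCompose(get_nums) -> [2]
#   output_processor TakeFirst()          -> 2   (class default)
# tags defines its own output_processor, so Join(",") overrides TakeFirst().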
