# The spider's main program: quotes.py
# -*- coding: utf-8 -*-
import scrapy
from quotetutorial.items import QuoteItem


# Starting the spider: the requests themselves use Scrapy's defaults, so we don't have to manage
# them; we only need to care about how each response is parsed.
class QuotesSpider(scrapy.Spider):
    name = "quotes"
    allowed_domains = ["quotes.toscrape.com"]
    start_urls = ['http://quotes.toscrape.com/']

    # Scrapy does a fair amount of work by default: given start_urls, the default start_requests
    # method iterates over the URLs and turns each one into a Request (via make_requests_from_url
    # in older versions), and each response is handed to parse as the callback.
    # In other words, the result of every request comes back here; response is that result.
    def parse(self, response):
        quotes = response.css('.quote')
        for quote in quotes:
            # Use extract_first() when you only want a single value, extract() to pull out all matches as a list.
            # You can experiment interactively with "scrapy shell quotes.toscrape.com"
            # (a sample session is sketched after this file).
            item = QuoteItem()
            text = quote.css('.text::text').extract_first()
            author = quote.css('.author::text').extract_first()
            tags = quote.css('.tags .tag::text').extract()
            item['text'] = text
            item['author'] = author
            item['tags'] = tags
            yield item
        # Get the relative URL of the next page
        next = response.css('.pager .next a::attr(href)').extract_first()
        # Join it into an absolute URL
        url = response.urljoin(next)
        # Recursive call: keep requesting the next page, one after another
        yield scrapy.Request(url=url, callback=self.parse)

# Export the results with "scrapy crawl quotes -o <filename.extension>":
# quotes.json / quotes.jl / quotes.csv / quotes.pickle / quotes.xml / quotes.marshal produce the corresponding formats,
# and ftp://user:pass@ftp.example.com/path/quotes.csv saves to a remote FTP server.
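
# A rough sketch of the interactive shell session mentioned inside parse() above; the command and
# the selectors are the same ones the spider uses, and the sample output is abbreviated/illustrative
# rather than copied verbatim from a run:
#   $ scrapy shell quotes.toscrape.com
#   >>> quotes = response.css('.quote')
#   >>> len(quotes)                                   # the site shows 10 quotes per page
#   10
#   >>> quotes[0].css('.text::text').extract_first()
#   '“The world as we have created it is a process of our thinking. ...”'
#   >>> quotes[0].css('.tags .tag::text').extract()
#   ['change', 'deep-thoughts', 'thinking', 'world']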
# items.py defines the data structure for the scraped items
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

# An Item is essentially a data structure: you declare a Field for each piece of data you want,
# e.g. a title, an image, or the text content.
import scrapy


class QuoteItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    text = scrapy.Field()
    author = scrapy.Field()
    tags = scrapy.Field()
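
# A minimal sketch (not from the original post) of how the Item behaves once the fields are declared:
# it works like a dict, but only the declared fields are accepted, which catches typos early.
from quotetutorial.items import QuoteItem

item = QuoteItem(text='...', author='Albert Einstein', tags=['change'])
print(item['author'])             # -> Albert Einstein
try:
    item['birthday'] = '1879'     # a field that was never declared
except KeyError as e:
    print(e)                      # -> 'QuoteItem does not support field: birthday'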
# middlewares.py: the middleware stage
# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals


class QuotetutorialSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
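
# The class above is just the template that "scrapy startproject" generates, and it is not enabled
# by default. As a hedged illustration of what this stage is for (not part of the original project),
# a minimal downloader middleware that stamps every outgoing request with a User-Agent picked from a
# small list might look like this:
import random

class RandomUserAgentMiddleware(object):
    def __init__(self):
        # a couple of example User-Agent strings; in practice you would keep a longer list
        self.user_agents = [
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64)',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6)',
        ]

    def process_request(self, request, spider):
        # downloader middlewares see every request before it is downloaded;
        # returning None lets Scrapy continue with the modified request
        request.headers['User-Agent'] = random.choice(self.user_agents)
        return None

# To use it, it would be registered under DOWNLOADER_MIDDLEWARES in settings.py, e.g.
# {'quotetutorial.middlewares.RandomUserAgentMiddleware': 543}.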
# pipelines.py: the item pipeline stage, where you decide how the scraped data is processed
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html

from scrapy.exceptions import DropItem
import pymongo


# Check whether the text exceeds a length limit and truncate it if it does
class TextPipeline(object):
    def __init__(self):
        self.limit = 50

    def process_item(self, item, spider):
        if item['text']:
            if len(item['text']) > self.limit:
                item['text'] = item['text'][0:self.limit].rstrip() + '...'
            return item
        else:
            raise DropItem('Missing Text')


class MongoPipeline(object):
    def __init__(self, mongo_url, mongo_db):
        self.mongo_url = mongo_url
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        return cls(
            mongo_url=crawler.settings.get('MONGO_URL'),
            mongo_db=crawler.settings.get('MONGO_DB')
        )

    # Initialise the MongoDB client when the spider opens
    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_url)
        self.db = self.client[self.mongo_db]

    # Save each item to MongoDB
    def process_item(self, item, spider):
        name = item.__class__.__name__
        self.db[name].insert(dict(item))  # insert_one with pymongo >= 4
        return item  # be careful not to forget this return, otherwise later stages receive None

    def close_spider(self, spider):
        self.client.close()
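
# Note on ordering: pipelines run in ascending ITEM_PIPELINES priority, so if both were enabled with
# TextPipeline at 300 and MongoPipeline at 400, the text would be truncated before being stored.
# A quick way to check what ended up in MongoDB (a sketch, assuming MongoDB is running locally with
# the settings below):
import pymongo

client = pymongo.MongoClient('localhost')
db = client['quotetutorial']
print(db['QuoteItem'].count_documents({}))   # number of stored quotes
print(db['QuoteItem'].find_one())            # one document with text / author / tags fields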
# settings.py: the project configuration file
# -*- coding: utf-8 -*-

# Scrapy settings for quotetutorial project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'quotetutorial'

SPIDER_MODULES = ['quotetutorial.spiders']
NEWSPIDER_MODULE = 'quotetutorial.spiders'

MONGO_URL = 'localhost'
MONGO_DB = 'quotetutorial'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'quotetutorial (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'quotetutorial.middlewares.QuotetutorialSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'quotetutorial.middlewares.MyCustomDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    # 'quotetutorial.pipelines.TextPipeline': 300,
    'quotetutorial.pipelines.MongoPipeline': 400,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
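
# MONGO_URL and MONGO_DB are ordinary settings, so they can also be overridden per run from the
# command line, e.g. (an illustrative invocation, not from the original post):
#   scrapy crawl quotes -s MONGO_DB=quotes_test -o quotes.json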
# Using Scrapy to crawl quotes.toscrape.com
# Step 1: analyse the target site and decide on the data structure, i.e. the fields that make up each item; define them in items.py
# Step 2: crawl the front page, parse it, and extract the items
# Step 3: from each page grab the next-page link, then recursively issue Requests to crawl page after page
# Step 4: adjust settings.py, pipelines.py and so on to modify, clean, organise and store the items
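
# Recapping the commands for the whole workflow (the project and spider names match the code above):
#   scrapy startproject quotetutorial                # create the project, then define QuoteItem in items.py
#   cd quotetutorial
#   scrapy genspider quotes quotes.toscrape.com      # generate the spider skeleton, then write parse()
#   scrapy crawl quotes -o quotes.json               # run it; the pipelines clean and store the items as configured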
