scrapy中deferred的回调
def _next_request_from_scheduler(self, spider):
    """Pop the next request from the scheduler and start downloading it.

    Returns a Deferred that fires once the response has been handled,
    the request removed from the slot, and the next engine tick scheduled
    (or ``None`` if the scheduler is empty).
    """
    slot = self.slot
    request = slot.scheduler.next_request()
    if not request:
        # Scheduler queue is empty: nothing to do on this tick.
        return
    d = self._download(request, spider)  # returns a Deferred for the download
    # Runs on both success and failure: feed the result (Response, Request
    # or Failure) back into the engine for spider-output processing.
    d.addBoth(self._handle_downloader_output, request, spider)
    d.addErrback(lambda f: logger.info('Error while handling downloader output',
                                       exc_info=failure_to_exc_info(f),
                                       extra={'spider': spider}))
    # Bookkeeping: the request is no longer in flight for this slot.
    d.addBoth(lambda _: slot.remove_request(request))
    d.addErrback(lambda f: logger.info('Error while removing request from slot',
                                       exc_info=failure_to_exc_info(f),
                                       extra={'spider': spider}))
    # Kick the engine loop again so the next queued request is processed.
    d.addBoth(lambda _: slot.nextcall.schedule())
    d.addErrback(lambda f: logger.info('Error while scheduling new request',
                                       exc_info=failure_to_exc_info(f),
                                       extra={'spider': spider}))
    return d

def _download(self, request, spider):
    """Hand *request* to the downloader and return its Deferred.

    Side effects: registers the request on the engine slot, logs the
    crawl and emits ``response_received`` on success, and schedules the
    next engine tick when the download completes either way.
    """
    slot = self.slot
    slot.add_request(request)

    def _on_success(response):
        # The downloader may yield either a Response or a new Request
        # (e.g. produced by downloader middleware such as redirects).
        assert isinstance(response, (Response, Request))
        if isinstance(response, Response):
            response.request = request  # tie request to response received
            logkws = self.logformatter.crawled(request, response, spider)
            logger.log(*logformatter_adapter(logkws), extra={'spider': spider})
            self.signals.send_catch_log(signal=signals.response_received,
                                        response=response, request=request,
                                        spider=spider)
        return response

    def _on_complete(_):
        # Always reschedule the engine loop, success or failure,
        # passing the result through unchanged.
        slot.nextcall.schedule()
        return _

    dwld = self.downloader.fetch(request, spider)  # downloader fetch -> Deferred
    dwld.addCallbacks(_on_success)
    dwld.addBoth(_on_complete)
    return dwld
在HTTP11处理器中
class HTTP11DownloadHandler(object):
    """Download handler for HTTP/1.1 requests, backed by a Twisted Agent."""

    def download_request(self, request, spider):
        """Return a deferred for the HTTP download."""
        # Per-spider size limits override the handler-wide defaults.
        agent = ScrapyAgent(contextFactory=self._contextFactory, pool=self._pool,
                            maxsize=getattr(spider, 'download_maxsize', self._default_maxsize),
                            warnsize=getattr(spider, 'download_warnsize', self._default_warnsize),
                            fail_on_dataloss=self._fail_on_dataloss)
        return agent.download_request(request)


class ScrapyAgent(object):
    """Thin wrapper that adapts a Scrapy Request to a Twisted Agent request."""

    def download_request(self, request):
        """Issue *request* through a Twisted agent and return the Deferred.

        The Deferred fires with the downloaded response after latency
        recording, body collection and timeout handling have been chained.
        """
        timeout = request.meta.get('download_timeout') or self._connectTimeout
        agent = self._get_agent(request, timeout)

        # Request details.
        url = urldefrag(request.url)[0]  # fragment is client-side only; strip it
        method = to_bytes(request.method)
        headers = TxHeaders(request.headers)
        if isinstance(agent, self._TunnelingAgent):
            # The tunneling agent handles proxy auth itself during CONNECT;
            # leaking this header to the origin server would be wrong.
            headers.removeHeader(b'Proxy-Authorization')
        if request.body:
            bodyproducer = _RequestBodyProducer(request.body)
        elif method == b'POST':
            # Setting Content-Length: 0 even for POST requests is not a
            # MUST per HTTP RFCs, but it's common behavior, and some
            # servers require this, otherwise returning HTTP 411 Length required
            #
            # RFC 7230#section-3.3.2:
            # "a Content-Length header field is normally sent in a POST
            # request even when the value is 0 (indicating an empty payload body)."
            #
            # Twisted < 17 will not add "Content-Length: 0" by itself;
            # Twisted >= 17 fixes this;
            # Using a producer with an empty-string sends `0` as Content-Length
            # for all versions of Twisted.
            bodyproducer = _RequestBodyProducer(b'')
        else:
            bodyproducer = None
        start_time = time()
        d = agent.request(
            method, to_bytes(url, encoding='ascii'), headers, bodyproducer)
        # Set download latency.
        d.addCallback(self._cb_latency, request, start_time)
        # Response body is ready to be consumed.
        d.addCallback(self._cb_bodyready, request)
        d.addCallback(self._cb_bodydone, request, url)
        # Check download timeout: cancel the Deferred if it hasn't fired in time.
        self._timeout_cl = reactor.callLater(timeout, d.cancel)
        d.addBoth(self._cb_timeout, request, url, timeout)
        return d


class _ResponseReader(protocol.Protocol):
    """Twisted protocol that buffers the response body and fires *finished*.

    *finished* is a Deferred; it is fired with a
    ``(txresponse, body_bytes, flags)`` tuple (flags is ``None``,
    ``['partial']`` or ``['dataloss']``) or errbacked with the failure.
    """

    def __init__(self, finished, txresponse, request, maxsize, warnsize,
                 fail_on_dataloss):
        self._finished = finished
        self._txresponse = txresponse
        self._request = request
        self._bodybuf = BytesIO()           # accumulates the response body
        self._maxsize = maxsize             # hard limit: abort past this size
        self._warnsize = warnsize           # soft limit: warn once past this size
        self._fail_on_dataloss = fail_on_dataloss
        self._fail_on_dataloss_warned = False
        self._reached_warnsize = False
        self._bytes_received = 0

    def dataReceived(self, bodyBytes):
        # Read incoming data into the buffer.
        # This may be called several times after cancel was called with
        # buffered data.
        if self._finished.called:
            return
        self._bodybuf.write(bodyBytes)
        self._bytes_received += len(bodyBytes)

        if self._maxsize and self._bytes_received > self._maxsize:
            logger.error("Received (%(bytes)s) bytes larger than download "
                         "max size (%(maxsize)s).",
                         {'bytes': self._bytes_received,
                          'maxsize': self._maxsize})
            # Clear buffer earlier to avoid keeping data in memory for a long
            # time.
            self._bodybuf.truncate(0)
            self._finished.cancel()

        if self._warnsize and self._bytes_received > self._warnsize and not self._reached_warnsize:
            self._reached_warnsize = True  # warn only once per response
            logger.warning("Received more bytes than download "
                           "warn size (%(warnsize)s) in request %(request)s.",
                           {'warnsize': self._warnsize,
                            'request': self._request})

    def connectionLost(self, reason):
        # Called when the connection is closed, i.e. the response has
        # fully arrived (or failed). self._finished is a Deferred.
        if self._finished.called:
            return
        body = self._bodybuf.getvalue()
        if reason.check(ResponseDone):
            # Clean completion: fire the callback chain with the full body.
            self._finished.callback((self._txresponse, body, None))
            return

        if reason.check(PotentialDataLoss):
            # No length framing (e.g. connection-close delimited body);
            # deliver what we have, flagged as partial.
            self._finished.callback((self._txresponse, body, ['partial']))
            return

        if reason.check(ResponseFailed) and any(r.check(_DataLoss) for r in reason.value.reasons):
            if not self._fail_on_dataloss:
                self._finished.callback((self._txresponse, body, ['dataloss']))
                return
            elif not self._fail_on_dataloss_warned:
                # logger.warning: logger.warn is a deprecated alias.
                logger.warning("Got data loss in %s. If you want to process broken "
                               "responses set the setting DOWNLOAD_FAIL_ON_DATALOSS = False"
                               " -- This message won't be shown in further requests",
                               self._txresponse.request.absoluteURI.decode())
                self._fail_on_dataloss_warned = True

        # Anything else is a genuine failure: propagate it down the errback chain.
        self._finished.errback(reason)
scrapy中deferred的回调的更多相关文章
- scrapy中使用LinkExtractor提取链接
le = LinkExtractor(restrict_css='ul.pager li.next') links = le.extract_links(response) 使用LinkExtra ...
- 通过实例说明在scrapy中 yield的作用
源https://www.jianshu.com/p/7c1a084853d8 开始前的准备工作: 1.MySQL下载:点我2.python MySQL驱动下载:pymysql(pyMySql,直接用 ...
- python的scrapy框架的使用 和xpath的使用 && scrapy中request和response的函数参数 && parse()函数运行机制
这篇博客主要是讲一下scrapy框架的使用,对于糗事百科爬取数据并未去专门处理 最后爬取的数据保存为json格式 一.先说一下pyharm怎么去看一些函数在源码中的代码实现 按着ctrl然后点击函数就 ...
- Objective-C中的Block回调模式
在前面的博客中提到了Block的概念和使用方法,个人感觉Block最爽的用法莫过于在回调时用block.感觉比委托回调和目标方法回调用着要顺手,好不好用还得读者亲自用一下才知道.如果 读者之前用过SS ...
- Android中的接口回调技术
Android中的接口回调技术有很多应用的场景,最常见的:Activity(人机交互的端口)的UI界面中定义了Button,点击该Button时,执行某个逻辑. 下面参见上述执行的模型,讲述James ...
- 如何优雅的处理Nodejs中的异步回调
前言 Nodejs最大的亮点就在于事件驱动, 非阻塞I/O 模型,这使得Nodejs具有很强的并发处理能力,非常适合编写网络应用.在Nodejs中大部分的I/O操作几乎都是异步的,也就是我们处理I/O ...
- js中this和回调方法循环-我们到底能走多远系列(35)
我们到底能走多远系列(35) 扯淡: 13年最后一个月了,你们在13年初的计划实现了吗?还来得及吗? 请加油~ 主题: 最近一直在写js,遇到了几个问题,可能初入门的时候都会遇到吧,总结下. 例子: ...
- Scrapy中使用Django的Model访问数据库
Scrapy中使用Django的Model进行数据库访问 当已存在Django项目的时候,直接引入Django的Model来使用比较简单 # 使用以下语句添加Django项目的目录到path impo ...
- scrapy中的下载器中间件
scrapy中的下载器中间件 下载中间件 下载器中间件是介于Scrapy的request/response处理的钩子框架. 是用于全局修改Scrapy request和response的一个轻量.底层 ...
随机推荐
- Firefox不支持event解决方法
IE 中可以直接使用event 对象,而FF 中则不可以,解决方法之一如下: var theEvent = window.event || arguments.callee.caller.argume ...
- git 对比两个分支差异
比如我们有 2 个分支:master, dev,现在想查看这两个 branch 的区别,有以下几种方式: 1.查看 dev 有,而 master 中没有的: git log dev ^master 同 ...
- Android开发之Activity生命周期篇
一.Activity: 1.Activity:Activity是一个与用记交互的系统模块,几乎所有的Activity都是和用户进行交互的. 2.在Android中Activity主要是用来做控制的,它 ...
- react路由嵌套
所谓的嵌套路由就是在某些以及路由下面存在二级路由,这些二级路由除了公用一级路由导航模块外,还公用当前的二级路由的导航模块,也就是部分进行了切换,要实现嵌套路由,首先回顾之前的内容,实现基本的react ...
- defaultProps和propTypes
在上一篇文章中总结了父子组件的数据传递,下面先来简单的回顾一下之前的内容: 此时,子组件中div里面的数据依赖于父组件传递过来的数据,那么当父组件没有给子组件传递数据时,子组件div里面就没有了数据了 ...
- 解决 https urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] 错误
import ssl ssl._create_default_https_context = ssl._create_unverified_context
- MySQL学习----多版本并发mvcc
MySQL中的大多数事务性存储引擎实现的都不是简单的行级锁.基于提升并发性能的考虑,他们一般实现了多版本并发控制(mvcc).不仅是mysql,包括oracle,postgresql等其他数据库也实现 ...
- Html的本质及在web中的作用
概要 本文以一个Socket程序为例由浅及深地揭示了Html的本质问题,同时介绍了作为web开发者我们在开发网站时需要做的事情 Html的本质以及开发需要的工作 1.服务器-客户端模型 其实,对于所有 ...
- T-SQL 有参数存储过程的创建与执行
use StudentManager go if exists(select * from sysobjects where name='usp_ScoreQuery2') drop procedur ...
- BCGcontrolBar(八) Ribbon图标变换
点击前 点击后 CBCGPRibbonButton *pRibbonBtn=NULL; pRibbonBtn=DYNAMIC_DOWNCAST(CBCGPRibbonButton,m_pFrame-& ...