Exploring How Scrapy Works
def _next_request_from_scheduler(self, spider):
    # The engine pulls the next request from the scheduler
    slot = self.slot
    request = slot.scheduler.next_request()
    if not request:
        return
    d = self._download(request, spider)  # register a download; returns a Deferred
    d.addBoth(self._handle_downloader_output, request, spider)
    d.addErrback(lambda f: logger.info('Error while handling downloader output',
                                       exc_info=failure_to_exc_info(f),
                                       extra={'spider': spider}))
    d.addBoth(lambda _: slot.remove_request(request))  # remove the request from the engine's slot
    d.addErrback(lambda f: logger.info('Error while removing request from slot',
                                       exc_info=failure_to_exc_info(f),
                                       extra={'spider': spider}))
    d.addBoth(lambda _: slot.nextcall.schedule())  # schedule another pass of the engine loop
    d.addErrback(lambda f: logger.info('Error while scheduling new request',
                                       exc_info=failure_to_exc_info(f),
                                       extra={'spider': spider}))
    return d
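The addBoth/addErrback pattern above is plain Twisted: an addBoth handler fires whether the previous step succeeded or failed, while the addErrback right after it fires only if that handler itself raised. A minimal, self-contained sketch of the behavior (not Scrapy code):

from twisted.internet import defer

def handle(result):
    # like _handle_downloader_output: sees either a result or a Failure
    print('got:', result)
    return result

d = defer.Deferred()
d.addBoth(handle)
d.addErrback(lambda f: print('only reached if handle() itself raised'))
d.callback('response body')  # prints "got: response body"; the errback is skipped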
def _handle_downloader_output(self, response, request, spider):
    assert isinstance(response, (Request, Response, Failure)), response
    # downloader middleware can return requests (for example, redirects)
    if isinstance(response, Request):
        self.crawl(response, spider)  # put the new request on the slot's queue
        return
    # response is a Response or Failure
    d = self.scraper.enqueue_scrape(response, request, spider)  # Deferred for scraping this output
    d.addErrback(lambda f: logger.error('Error while enqueuing downloader output',
                                        exc_info=failure_to_exc_info(f),
                                        extra={'spider': spider}))
    return d
Each request's Deferred thus flows through _handle_downloader_output, which in turn returns yet another Deferred. In Twisted, when a callback returns a Deferred, the outer chain pauses until that inner Deferred fires — this is how the scrape step slots into the download chain.
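A minimal illustration of that rule (plain Twisted, not Scrapy code):

from twisted.internet import defer

inner = defer.Deferred()                          # stands in for enqueue_scrape's Deferred
outer = defer.succeed(None)                       # stands in for the download Deferred
outer.addCallback(lambda _: inner)                # callback returns a Deferred -> outer chain pauses
outer.addCallback(lambda r: print('resumed:', r))
inner.callback('scrape finished')                 # only now does the outer chain continue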
def enqueue_scrape(self, response, request, spider):
    slot = self.slot
    dfd = slot.add_response_request(response, request)  # append (response, request, deferred) to the scraper's queue

    def finish_scraping(_):
        slot.finish_response(response, request)
        self._check_if_closing(spider, slot)
        self._scrape_next(spider, slot)
        return _

    dfd.addBoth(finish_scraping)
    dfd.addErrback(
        lambda f: logger.error('Scraper bug processing %(request)s',
                               {'request': request},
                               exc_info=failure_to_exc_info(f),
                               extra={'spider': spider}))
    self._scrape_next(spider, slot)
    return dfd
def _scrape_next(self, spider, slot):
    while slot.queue:
        response, request, deferred = slot.next_response_request_deferred()
        self._scrape(response, request, spider).chainDeferred(deferred)
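chainDeferred forwards one Deferred's outcome into another; this is how the result of _scrape eventually reaches the Deferred that enqueue_scrape handed back earlier. A toy illustration:

from twisted.internet import defer

source = defer.Deferred()
target = defer.Deferred()
target.addCallback(lambda r: print('target got:', r))
source.chainDeferred(target)       # ~ source.addCallbacks(target.callback, target.errback)
source.callback('scraped result')  # prints "target got: scraped result"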
"""Handle the downloaded response or failure through the spider
callback/errback"""
assert isinstance(response, (Response, Failure)) dfd = self._scrape2(response, request, spider) # returns spiders processed output
dfd.addErrback(self.handle_spider_error, request, response, spider)
dfd.addCallback(self.handle_spider_output, request, response, spider)
return dfd def _scrape2(self, request_result, request, spider):
"""Handle the different cases of request's result been a Response or a
Failure"""
if not isinstance(request_result, Failure):#当结果不是失败对象
return self.spidermw.scrape_response(
self.call_spider, request_result, request, spider)#
else:
# FIXME: don't ignore errors in spider middleware
dfd = self.call_spider(request_result, request, spider)
return dfd.addErrback(
self._log_download_errors, request_result, request, spider) def call_spider(self, result, request, spider):
def call_spider(self, result, request, spider):
    result.request = request
    dfd = defer_result(result)  # wrap the Response/Failure in a Deferred
    dfd.addCallbacks(request.callback or spider.parse, request.errback)
    return dfd.addCallback(iterate_spider_output)  # addCallback returns the Deferred, so calls chain
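defer_result (from scrapy.utils.defer) normalizes whatever it is given into a Deferred. A simplified sketch — Scrapy's real version additionally defers firing to the next reactor iteration, so treat the details as approximate:

from twisted.internet import defer
from twisted.python import failure

def defer_result(result):
    if isinstance(result, defer.Deferred):
        return result                 # already a Deferred: pass through
    elif isinstance(result, failure.Failure):
        return defer.fail(result)     # a Failure starts down the errback chain
    else:
        return defer.succeed(result)  # anything else starts down the callback chain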
iterate_spider_output is defined in scrapy/utils/spider.py:

def iterate_spider_output(result):
    return arg_to_iter(result)  # from scrapy.utils.misc import arg_to_iter
def arg_to_iter(arg):
    """Convert an argument to an iterable. The argument can be a None, single
    value, or an iterable. Exception: if arg is a dict, [arg] will be returned
    """
    if arg is None:
        return []
    elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):
        return arg
    else:
        return [arg]
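In other words, whatever a spider callback returns — None, a single item, a dict, a list, or a generator — comes out as something iterable. For example:

>>> arg_to_iter(None)
[]
>>> arg_to_iter({'title': 'x'})  # a dict counts as one item, not an iterable of keys
[{'title': 'x'}]
>>> arg_to_iter([1, 2])
[1, 2]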
class SpiderMiddlewareManager(MiddlewareManager):

    component_name = 'spider middleware'

    @classmethod
    def _get_mwlist_from_settings(cls, settings):
        return build_component_list(settings.getwithbase('SPIDER_MIDDLEWARES'))

    def _add_middleware(self, mw):
        super(SpiderMiddlewareManager, self)._add_middleware(mw)
        if hasattr(mw, 'process_spider_input'):
            self.methods['process_spider_input'].append(mw.process_spider_input)
        if hasattr(mw, 'process_spider_output'):
            self.methods['process_spider_output'].insert(0, mw.process_spider_output)
        if hasattr(mw, 'process_spider_exception'):
            self.methods['process_spider_exception'].insert(0, mw.process_spider_exception)
        if hasattr(mw, 'process_start_requests'):
            self.methods['process_start_requests'].insert(0, mw.process_start_requests)
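Note the asymmetry between append and insert(0): input hooks run in declaration order (engine towards spider), while output/exception hooks run in reverse (spider back towards engine). A runnable sketch with invented middleware names to show the effect:

methods = {'process_spider_input': [], 'process_spider_output': []}
for name in ['MwA', 'MwB']:                   # declaration order from the settings
    methods['process_spider_input'].append(name)
    methods['process_spider_output'].insert(0, name)
print(methods['process_spider_input'])   # ['MwA', 'MwB']  engine -> spider
print(methods['process_spider_output'])  # ['MwB', 'MwA']  spider -> engine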
    def scrape_response(self, scrape_func, response, request, spider):
        fname = lambda f: '%s.%s' % (
            six.get_method_self(f).__class__.__name__,
            six.get_method_function(f).__name__)

        def process_spider_input(response):
            for method in self.methods['process_spider_input']:
                try:
                    result = method(response=response, spider=spider)
                    assert result is None, \
                        'Middleware %s must returns None or ' \
                        'raise an exception, got %s ' \
                        % (fname(method), type(result))
                except:
                    return scrape_func(Failure(), request, spider)
            return scrape_func(response, request, spider)

        def process_spider_exception(_failure):
            exception = _failure.value
            for method in self.methods['process_spider_exception']:
                result = method(response=response, exception=exception, spider=spider)
                assert result is None or _isiterable(result), \
                    'Middleware %s must returns None, or an iterable object, got %s ' % \
                    (fname(method), type(result))
                if result is not None:
                    return result
            return _failure

        def process_spider_output(result):
            for method in self.methods['process_spider_output']:
                result = method(response=response, result=result, spider=spider)
                assert _isiterable(result), \
                    'Middleware %s must returns an iterable object, got %s ' % \
                    (fname(method), type(result))
            return result

        dfd = mustbe_deferred(process_spider_input, response)
        dfd.addErrback(process_spider_exception)
        dfd.addCallback(process_spider_output)
        return dfd
    def process_start_requests(self, start_requests, spider):
        return self._process_chain('process_start_requests', start_requests, spider)
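The chain set up at the end of scrape_response has a useful property: process_spider_exception is attached as an errback before process_spider_output is attached as a callback, so a middleware that turns a failure into an iterable hands that iterable straight to the output hooks. In plain Twisted terms:

from twisted.internet import defer

d = defer.maybeDeferred(lambda: 1 / 0)      # stands in for mustbe_deferred(process_spider_input, ...)
d.addErrback(lambda f: ['recovered item'])  # process_spider_exception may replace a Failure with an iterable
d.addCallback(print)                        # process_spider_output then receives that iterable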
def handle_spider_output(self, result, request, response, spider):
    if not result:
        return defer_succeed(None)
    it = iter_errback(result, self.handle_spider_error, request, response, spider)
    dfd = parallel(it, self.concurrent_items,
                   self._process_spidermw_output, request, response, spider)
    return dfd

def _process_spidermw_output(self, output, request, response, spider):
    """Process each Request/Item (given in the output parameter) returned
    from the given spider
    """
    # dispatch spider output: requests go back to the scheduler, items to the pipeline
    if isinstance(output, Request):  # a Request: keep crawling
        self.crawler.engine.crawl(request=output, spider=spider)
    elif isinstance(output, (BaseItem, dict)):  # an item: hand it to the item processor
        self.slot.itemproc_size += 1
        dfd = self.itemproc.process_item(output, spider)
        dfd.addBoth(self._itemproc_finished, output, response, spider)
        return dfd
    elif output is None:
        pass
    else:
        typename = type(output).__name__
        logger.error('Spider must return Request, BaseItem, dict or None, '
                     'got %(typename)r in %(request)s',
                     {'request': request, 'typename': typename},
                     extra={'spider': spider})
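Seen from the spider author's side, this dispatch is why a callback may freely mix requests and items. A hypothetical callback (the URL paths are invented):

import scrapy

def parse(self, response):
    yield scrapy.Request(response.urljoin('/next'))  # routed back into engine.crawl()
    yield {'url': response.url}                      # routed into the item pipeline
    # yielding/returning None is tolerated and simply ignored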
The engine's crawl() and schedule() methods:
def crawl(self, request, spider):
    assert spider in self.open_spiders, \
        "Spider %r not opened when crawling: %s" % (spider.name, request)
    self.schedule(request, spider)
    self.slot.nextcall.schedule()

def schedule(self, request, spider):
    self.signals.send_catch_log(signal=signals.request_scheduled,
                                request=request, spider=spider)
    if not self.slot.scheduler.enqueue_request(request):
        self.signals.send_catch_log(signal=signals.request_dropped,
                                    request=request, spider=spider)
nextcall.schedule() ends up in reactor.callLater(delay, self), i.e. a reactor timer is what eventually fires the next pass of the engine loop.
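nextcall is a CallLaterOnce object (scrapy/utils/reactor.py); roughly, it collapses repeated schedule() calls into a single pending reactor timer. The sketch below is from memory, so details may differ slightly between Scrapy versions:

from twisted.internet import reactor

class CallLaterOnce(object):
    """Schedule a function to be called in the next reactor loop, but only
    if it hasn't already been scheduled since the last time it ran."""

    def __init__(self, func, *a, **kw):
        self._func = func
        self._a = a
        self._kw = kw
        self._call = None

    def schedule(self, delay=0):
        if self._call is None:  # coalesce: at most one pending call
            self._call = reactor.callLater(delay, self)

    def cancel(self):
        if self._call:
            self._call.cancel()

    def __call__(self):
        self._call = None       # allow the next schedule() to re-arm the timer
        return self._func(*self._a, **self._kw)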