def _next_request_from_scheduler(self, spider):  # the engine pulls the next request from the scheduler
    slot = self.slot
    request = slot.scheduler.next_request()
    if not request:
        return
    d = self._download(request, spider)  # register a download; returns a Deferred
    d.addBoth(self._handle_downloader_output, request, spider)
    d.addErrback(lambda f: logger.info('Error while handling downloader output',
                                       exc_info=failure_to_exc_info(f),
                                       extra={'spider': spider}))
    d.addBoth(lambda _: slot.remove_request(request))  # remove the request from the engine's slot
    d.addErrback(lambda f: logger.info('Error while removing request from slot',
                                       exc_info=failure_to_exc_info(f),
                                       extra={'spider': spider}))
    d.addBoth(lambda _: slot.nextcall.schedule())  # schedule the next engine pass
    d.addErrback(lambda f: logger.info('Error while scheduling new request',
                                       exc_info=failure_to_exc_info(f),
                                       extra={'spider': spider}))
    return d

def _handle_downloader_output(self, response, request, spider):
    assert isinstance(response, (Request, Response, Failure)), response
    # downloader middleware can return requests (for example, redirects)
    if isinstance(response, Request):
        self.crawl(response, spider)  # send the new request back through the engine's slot/scheduler
        return
    # response is a Response or Failure
    d = self.scraper.enqueue_scrape(response, request, spider)  # Deferred for the scraper output
    d.addErrback(lambda f: logger.error('Error while enqueuing downloader output',
                                        exc_info=failure_to_exc_info(f),
                                        extra={'spider': spider}))
    return d

After each request's Deferred has gone through _handle_downloader_output, another Deferred is returned, so the whole download/scrape flow stays asynchronous.
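The chain built in _next_request_from_scheduler is the standard Twisted pattern: addBoth callbacks run on success and failure alike, and each following addErrback only logs errors raised by the step just before it. A minimal, self-contained sketch of the same pattern (not Scrapy code; all names here are illustrative):

from twisted.internet import defer

def handle_output(result):
    # stands in for _handle_downloader_output
    print('handling downloader output: %r' % (result,))
    return result

def remove_request(_):
    # stands in for slot.remove_request(request)
    print('removing request from slot')

def schedule_next(_):
    # stands in for slot.nextcall.schedule()
    print('scheduling the next engine pass')

def log_error(failure, stage):
    # stands in for the logger.info(...) errbacks
    print('error in %s: %s' % (stage, failure.getErrorMessage()))

d = defer.Deferred()
d.addBoth(handle_output)
d.addErrback(log_error, 'handle_output')
d.addBoth(remove_request)
d.addErrback(log_error, 'remove_request')
d.addBoth(schedule_next)
d.addErrback(log_error, 'schedule')
d.callback('fake response')  # fire the chain with a dummy result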

def enqueue_scrape(self, response, request, spider):
    slot = self.slot
    dfd = slot.add_response_request(response, request)  # append (response, request, deferred) to the scraper queue
    def finish_scraping(_):
        slot.finish_response(response, request)
        self._check_if_closing(spider, slot)
        self._scrape_next(spider, slot)
        return _
    dfd.addBoth(finish_scraping)
    dfd.addErrback(
        lambda f: logger.error('Scraper bug processing %(request)s',
                               {'request': request},
                               exc_info=failure_to_exc_info(f),
                               extra={'spider': spider}))
    self._scrape_next(spider, slot)
    return dfd

def _scrape_next(self, spider, slot):
    while slot.queue:
        response, request, deferred = slot.next_response_request_deferred()
        self._scrape(response, request, spider).chainDeferred(deferred)

def _scrape(self, response, request, spider):
    """Handle the downloaded response or failure through the spider
    callback/errback"""
    assert isinstance(response, (Response, Failure))
    dfd = self._scrape2(response, request, spider)  # returns the spider's processed output
    dfd.addErrback(self.handle_spider_error, request, response, spider)
    dfd.addCallback(self.handle_spider_output, request, response, spider)
    return dfd

def _scrape2(self, request_result, request, spider):
    """Handle the different cases of request's result being a Response or a
    Failure"""
    if not isinstance(request_result, Failure):  # the result is not a Failure: run it through the spider middleware
        return self.spidermw.scrape_response(
            self.call_spider, request_result, request, spider)
    else:
        # FIXME: don't ignore errors in spider middleware
        dfd = self.call_spider(request_result, request, spider)
        return dfd.addErrback(
            self._log_download_errors, request_result, request, spider)

def call_spider(self, result, request, spider):
    result.request = request
    dfd = defer_result(result)
    dfd.addCallbacks(request.callback or spider.parse, request.errback)
    return dfd.addCallback(iterate_spider_output)  # addCallback returns the Deferred, so the chain can keep growing
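call_spider relies on defer_result (from scrapy.utils.defer) to wrap whatever it received, whether a Response, a Failure, or an already-built Deferred, into a Deferred before attaching the spider callbacks. A simplified sketch of that helper's behaviour (the real implementation in scrapy/utils/defer.py is slightly more involved, e.g. it delays firing until the next reactor iteration):

from twisted.internet import defer
from twisted.python import failure

def defer_result_sketch(result):
    # Simplified stand-in for scrapy.utils.defer.defer_result: make sure the
    # spider callback/errback can always be attached to a Deferred, no matter
    # whether the input is a plain value, a Failure, or already a Deferred.
    if isinstance(result, defer.Deferred):
        return result
    if isinstance(result, failure.Failure):
        return defer.fail(result)
    return defer.succeed(result)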
iterate_spider_output is defined in scrapy/utils/spider.py:

def iterate_spider_output(result):
    return arg_to_iter(result)  # from scrapy.utils.misc import arg_to_iter

def arg_to_iter(arg):
    """Convert an argument to an iterable. The argument can be None, a single
    value, or an iterable. Exception: if arg is a dict, [arg] will be returned.
    """
    if arg is None:
        return []
    elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):
        return arg
    else:
        return [arg]
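In other words, a spider callback may return a single item, a dict, a list or generator, or None, and the scraper always ends up iterating over a well-formed iterable. A quick illustration (run against an installed Scrapy, where arg_to_iter lives in scrapy.utils.misc):

from scrapy.utils.misc import arg_to_iter

print(list(arg_to_iter(None)))        # []            -> a callback returning None yields nothing
print(list(arg_to_iter({'a': 1})))    # [{'a': 1}]    -> a dict counts as a single item
print(list(arg_to_iter(42)))          # [42]          -> a single non-iterable value gets wrapped
print(list(arg_to_iter([1, 2, 3])))   # [1, 2, 3]     -> lists and generators pass through unchanged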
class SpiderMiddlewareManager(MiddlewareManager):

    component_name = 'spider middleware'

    @classmethod
    def _get_mwlist_from_settings(cls, settings):
        return build_component_list(settings.getwithbase('SPIDER_MIDDLEWARES'))

    def _add_middleware(self, mw):
        super(SpiderMiddlewareManager, self)._add_middleware(mw)
        # input hooks are appended (run in declared order); output/exception
        # hooks are inserted at the front (run in reverse order)
        if hasattr(mw, 'process_spider_input'):
            self.methods['process_spider_input'].append(mw.process_spider_input)
        if hasattr(mw, 'process_spider_output'):
            self.methods['process_spider_output'].insert(0, mw.process_spider_output)
        if hasattr(mw, 'process_spider_exception'):
            self.methods['process_spider_exception'].insert(0, mw.process_spider_exception)
        if hasattr(mw, 'process_start_requests'):
            self.methods['process_start_requests'].insert(0, mw.process_start_requests)

    def scrape_response(self, scrape_func, response, request, spider):
        fname = lambda f: '%s.%s' % (
            six.get_method_self(f).__class__.__name__,
            six.get_method_function(f).__name__)

        def process_spider_input(response):
            for method in self.methods['process_spider_input']:
                try:
                    result = method(response=response, spider=spider)
                    assert result is None, \
                        'Middleware %s must returns None or ' \
                        'raise an exception, got %s ' \
                        % (fname(method), type(result))
                except:
                    return scrape_func(Failure(), request, spider)
            return scrape_func(response, request, spider)

        def process_spider_exception(_failure):
            exception = _failure.value
            for method in self.methods['process_spider_exception']:
                result = method(response=response, exception=exception, spider=spider)
                assert result is None or _isiterable(result), \
                    'Middleware %s must returns None, or an iterable object, got %s ' % \
                    (fname(method), type(result))
                if result is not None:
                    return result
            return _failure

        def process_spider_output(result):
            for method in self.methods['process_spider_output']:
                result = method(response=response, result=result, spider=spider)
                assert _isiterable(result), \
                    'Middleware %s must returns an iterable object, got %s ' % \
                    (fname(method), type(result))
            return result

        dfd = mustbe_deferred(process_spider_input, response)
        dfd.addErrback(process_spider_exception)
        dfd.addCallback(process_spider_output)
        return dfd

    def process_start_requests(self, start_requests, spider):
        return self._process_chain('process_start_requests', start_requests, spider)
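A user-defined spider middleware only needs to implement the hooks it cares about; the hasattr checks above pick them up. A minimal, hypothetical example (the class and its logging are illustrative, not taken from the code above), enabled through the SPIDER_MIDDLEWARES setting:

class LoggingSpiderMiddleware(object):
    # Hypothetical middleware: logs responses entering the spider and passes
    # the spider's output through unchanged.

    def process_spider_input(self, response, spider):
        # must return None (or raise); runs before the spider callback
        spider.logger.debug('entering spider: %s' % response.url)
        return None

    def process_spider_output(self, response, result, spider):
        # must return an iterable of Requests/items; runs after the callback
        for request_or_item in result:
            yield request_or_item

    def process_spider_exception(self, response, exception, spider):
        # return None so other middlewares and errbacks can handle the error
        return None

# settings.py (hypothetical project path):
# SPIDER_MIDDLEWARES = {'myproject.middlewares.LoggingSpiderMiddleware': 543}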
def handle_spider_output(self, result, request, response, spider):
    if not result:
        return defer_succeed(None)
    it = iter_errback(result, self.handle_spider_error, request, response, spider)
    dfd = parallel(it, self.concurrent_items,
                   self._process_spidermw_output, request, response, spider)
    return dfd

def _process_spidermw_output(self, output, request, response, spider):  # feed generated requests back to the scheduler
    """Process each Request/Item (given in the output parameter) returned
    from the given spider
    """
    if isinstance(output, Request):  # the output is a Request: keep crawling
        self.crawler.engine.crawl(request=output, spider=spider)
    elif isinstance(output, (BaseItem, dict)):  # the output is an item: hand it to the item pipelines
        self.slot.itemproc_size += 1
        dfd = self.itemproc.process_item(output, spider)
        dfd.addBoth(self._itemproc_finished, output, response, spider)
        return dfd
    elif output is None:
        pass
    else:
        typename = type(output).__name__
        logger.error('Spider must return Request, BaseItem, dict or None, '
                     'got %(typename)r in %(request)s',
                     {'request': request, 'typename': typename},
                     extra={'spider': spider})
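self.itemproc is the item pipeline manager; process_item runs each enabled pipeline in turn. A minimal, hypothetical pipeline (the price check is made up; DropItem and the process_item signature are the standard Scrapy pipeline API):

from scrapy.exceptions import DropItem

class PricePipeline(object):
    # Hypothetical pipeline: drop items without a price, pass the rest on.

    def process_item(self, item, spider):
        # return the (possibly modified) item to pass it to the next pipeline,
        # or raise DropItem to discard it; returning a Deferred is also allowed
        if item.get('price') is None:
            raise DropItem('missing price in %r' % item)
        return item

# settings.py (hypothetical project path):
# ITEM_PIPELINES = {'myproject.pipelines.PricePipeline': 300}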

The engine's crawl method:

def crawl(self, request, spider):
    assert spider in self.open_spiders, \
        "Spider %r not opened when crawling: %s" % (spider.name, request)
    self.schedule(request, spider)
    self.slot.nextcall.schedule()

def schedule(self, request, spider):
    self.signals.send_catch_log(signal=signals.request_scheduled,
                                request=request, spider=spider)
    if not self.slot.scheduler.enqueue_request(request):
        self.signals.send_catch_log(signal=signals.request_dropped,
                                    request=request, spider=spider)
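The two send_catch_log calls emit the request_scheduled and request_dropped signals, so any extension can observe scheduling. A small, hypothetical extension using the standard signals.connect API:

from scrapy import signals

class RequestCounter(object):
    # Hypothetical extension: counts how many requests were scheduled/dropped.

    def __init__(self):
        self.scheduled = 0
        self.dropped = 0

    @classmethod
    def from_crawler(cls, crawler):
        ext = cls()
        crawler.signals.connect(ext.on_scheduled, signal=signals.request_scheduled)
        crawler.signals.connect(ext.on_dropped, signal=signals.request_dropped)
        return ext

    def on_scheduled(self, request, spider):
        self.scheduled += 1

    def on_dropped(self, request, spider):
        self.dropped += 1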

slot.nextcall.schedule() boils down to reactor.callLater(delay, self): a reactor timer triggers the Deferred's callback and so drives the next engine pass.
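For reference, slot.nextcall is an instance of CallLaterOnce (scrapy/utils/reactor.py); the sketch below is a simplified rendering of that helper and may differ from the exact source in detail:

from twisted.internet import reactor

class CallLaterOnce(object):
    """Schedule a function to be called in the next reactor loop, but only if
    it hasn't already been scheduled since the last time it ran."""

    def __init__(self, func, *a, **kw):
        self._func = func
        self._a = a
        self._kw = kw
        self._call = None

    def schedule(self, delay=0):
        # only keep one pending reactor.callLater at a time
        if self._call is None:
            self._call = reactor.callLater(delay, self)

    def cancel(self):
        if self._call:
            self._call.cancel()

    def __call__(self):
        self._call = None
        return self._func(*self._a, **self._kw)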
