# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals


class DownloadtestSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class DownloadtestDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


import random


# Randomize the User-Agent request header
class RandomUA():
    def __init__(self):
        self.user_agent = [
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.1 Safari/605.1.16',
            'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0',
            'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)'
        ]

    def process_request(self, request, spider):
        # Attach a randomly chosen User-Agent to every outgoing request
        request.headers['User-Agent'] = random.choice(self.user_agent)

    def process_response(self, request, response, spider):
        # Demonstration only: overwrite the status code to show that
        # process_response() can modify responses on their way back to
        # the spider; a real middleware would normally leave it alone
        response.status = 201
        return response


# Send requests through a randomly chosen proxy
class ProxyMiddleware:
    proxy_list = [
        "http://127.0.0.1:8080"
        # "http://183.91.33.41:80",
        # "http://111.29.3.190:80",
        # "http://124.156.108.71:82"
    ]

    def process_request(self, request, spider):
        # Pick a random proxy from the pool and attach it to the request
        ip = random.choice(self.proxy_list)
        request.meta['proxy'] = ip
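
None of these middlewares take effect until they are enabled in the project's settings.py. Below is a minimal sketch, assuming the project module is named downloadtest (consistent with the generated class names above); the priority numbers are illustrative. For downloader middlewares, lower numbers sit closer to the engine, so process_request() runs in ascending order and process_response() in descending order.

# settings.py (sketch)
SPIDER_MIDDLEWARES = {
    'downloadtest.middlewares.DownloadtestSpiderMiddleware': 543,
}

DOWNLOADER_MIDDLEWARES = {
    # Template downloader middleware generated by "scrapy startproject"
    'downloadtest.middlewares.DownloadtestDownloaderMiddleware': 543,
    # Custom middlewares defined above
    'downloadtest.middlewares.RandomUA': 544,
    'downloadtest.middlewares.ProxyMiddleware': 545,
}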

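To confirm that the User-Agent rotation works, a throwaway spider can fetch a header-echoing service a few times and log what the server actually received. The sketch below assumes https://httpbin.org/headers as the target (any echo endpoint works), and that ProxyMiddleware is either disabled or pointed at a reachable proxy.

# ua_check.py (sketch)
import scrapy


class UACheckSpider(scrapy.Spider):
    name = 'ua_check'
    # Duplicate start URLs are not filtered out (start requests are sent
    # with dont_filter=True), so each fetch should show a fresh random UA
    start_urls = ['https://httpbin.org/headers'] * 3

    def parse(self, response):
        # httpbin echoes the request headers in the response body,
        # including the User-Agent set by RandomUA
        self.logger.info(response.text)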