Install the SQLite development libraries; Scrapy needs Python's sqlite3 module, which is built against them:

yum install sqlite-devel

Python 3.5

Download the source tarball and build/install it yourself:

./configure

make

make install
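To confirm that the freshly built interpreter picked up SQLite support (the reason sqlite-devel was installed first), a small check like the one below can be run with the new python3; this is a verification sketch, not part of the original steps:

# check_sqlite.py - run with the newly built python3
import sqlite3

# Prints the version of the SQLite library the interpreter was linked against.
print(sqlite3.sqlite_version)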

Python 3.5 ships with pip; upgrade it to the latest version:

pip3 install --upgrade pip

Install the Python 3 MySQL module:

pip3 install pymysql
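To verify that pymysql is usable, a minimal connection test such as the following can be run; the host, credentials, and database name are placeholders, not values from this guide:

# mysql_check.py - PyMySQL smoke test (connection details are placeholders)
import pymysql

conn = pymysql.connect(host='127.0.0.1', user='root',
                       password='your_password', database='test',
                       charset='utf8mb4')
try:
    with conn.cursor() as cur:
        cur.execute('SELECT VERSION()')   # ask the server for its version
        print(cur.fetchone())
finally:
    conn.close()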

Install Twisted, the event-driven networking framework Scrapy is built on:

wget https://pypi.python.org/packages/6b/23/8dbe86fc83215015e221fbd861a545c6ec5c9e9cd7514af114d1f64084ab/Twisted-16.4.1.tar.bz2#md5=c6d09bdd681f538369659111f079c29d

Unpack it:

tar -jxvf Twisted-16.4.1.tar.bz2

Enter the directory:

cd Twisted-16.4.1

Install it:

python3 setup.py install
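As a quick sanity check (not part of the original steps), Twisted can be imported from Python 3 and its version printed:

# twisted_check.py - confirm Twisted is importable under Python 3
import twisted

print(twisted.__version__)   # should print 16.4.1 for this tarball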

Install Scrapy:

pip3 install scrapy

Install Redis:

yum install redis

Install scrapy-redis:

git clone https://github.com/rolando/scrapy-redis.git

cd scrapy-redis/

python3 setup.py install
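Once installed, scrapy-redis is enabled per project through Scrapy settings. The sketch below lists the options commonly shown in the scrapy-redis README; the Redis URL is a placeholder pointing at the server installed above:

# settings.py - minimal scrapy-redis configuration (REDIS_URL is a placeholder)

# Schedule requests through Redis and deduplicate them there,
# so several spider processes can share one queue.
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"

# Keep the request queue and dupe filter between runs.
SCHEDULER_PERSIST = True

# Optionally store scraped items in Redis as well.
ITEM_PIPELINES = {
    'scrapy_redis.pipelines.RedisPipeline': 300,
}

# Where the Redis server lives.
REDIS_URL = 'redis://localhost:6379'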

Because Python 2 and Python 3 handle strings differently (bytes vs. str), there is a bug; below is the temporary fix posted on GitHub:

# utils.py
import six


def bytes_to_str(s, encoding='utf-8'):
    """Returns a str if a bytes object is given."""
    if six.PY3 and isinstance(s, bytes):
        return s.decode(encoding)
    return s

  

# spiders.py
import six

from scrapy import signals
from scrapy.exceptions import DontCloseSpider
from scrapy.spiders import Spider, CrawlSpider

from . import connection
from .utils import bytes_to_str


# Default batch size matches default concurrent requests setting.
DEFAULT_START_URLS_BATCH_SIZE = 16
DEFAULT_START_URLS_KEY = '%(name)s:start_urls'


class RedisMixin(object):
    """Mixin class to implement reading urls from a redis queue."""
    # Per spider redis key, default to DEFAULT_START_URLS_KEY.
    redis_key = None
    # Fetch this amount of start urls when idle. Default to DEFAULT_START_URLS_BATCH_SIZE.
    redis_batch_size = None
    redis_encoding = 'utf-8'
    # Redis client instance.
    server = None

    def start_requests(self):
        """Returns a batch of start requests from redis."""
        return self.next_requests()

    def setup_redis(self, crawler=None):
        """Setup redis connection and idle signal.

        This should be called after the spider has set its crawler object.
        """
        if self.server is not None:
            return

        if crawler is None:
            # We allow optional crawler argument to keep backwards
            # compatibility.
            # XXX: Raise a deprecation warning.
            crawler = getattr(self, 'crawler', None)

        if crawler is None:
            raise ValueError("crawler is required")

        settings = crawler.settings

        if self.redis_key is None:
            self.redis_key = settings.get(
                'REDIS_START_URLS_KEY', DEFAULT_START_URLS_KEY,
            )

        self.redis_key = self.redis_key % {'name': self.name}

        if not self.redis_key.strip():
            raise ValueError("redis_key must not be empty")

        if self.redis_batch_size is None:
            self.redis_batch_size = settings.getint(
                'REDIS_START_URLS_BATCH_SIZE', DEFAULT_START_URLS_BATCH_SIZE,
            )

        try:
            self.redis_batch_size = int(self.redis_batch_size)
        except (TypeError, ValueError):
            raise ValueError("redis_batch_size must be an integer")

        self.logger.info("Reading start URLs from redis key '%(redis_key)s' "
                         "(batch size: %(redis_batch_size)s)", self.__dict__)

        self.server = connection.from_settings(crawler.settings)
        # The idle signal is called when the spider has no requests left,
        # that's when we will schedule new requests from redis queue
        crawler.signals.connect(self.spider_idle, signal=signals.spider_idle)

    def next_requests(self):
        """Returns a request to be scheduled or none."""
        use_set = self.settings.getbool('REDIS_START_URLS_AS_SET')
        fetch_one = self.server.spop if use_set else self.server.lpop
        # XXX: Do we need to use a timeout here?
        found = 0
        while found < self.redis_batch_size:
            data = fetch_one(self.redis_key)
            if not data:
                # Queue empty.
                break
            req = self.make_request_from_data(data)
            if req:
                yield req
                found += 1
            else:
                self.logger.debug("Request not made from data: %r", data)

        if found:
            self.logger.debug("Read %s requests from '%s'", found, self.redis_key)

    def make_request_from_data(self, data):
        # By default, data is an URL.
        if not isinstance(data, six.string_types):
            # XXX: Shall we log and continue?
            self.logger.error("Wrong type for data: %s" % type(data))
            url = bytes_to_str(data, self.redis_encoding)
        else:
            url = data
        # FIXME: This is a naive guard against using a wrong redis_key where
        # data are not string URLs.
        if '://' not in url:
            # XXX: Shall this be an exception?
            self.logger.error("Missing scheme in URL: '%s'", url)
        return self.make_requests_from_url(url)

    def schedule_next_requests(self):
        """Schedules a request if available"""
        for req in self.next_requests():
            self.crawler.engine.crawl(req, spider=self)

    def spider_idle(self):
        """Schedules a request if available, otherwise waits."""
        # XXX: Handle a sentinel to close the spider.
        self.schedule_next_requests()
        raise DontCloseSpider


class RedisSpider(RedisMixin, Spider):
    """Spider that reads urls from redis queue when idle."""

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        obj = super(RedisSpider, cls).from_crawler(crawler, *args, **kwargs)
        obj.setup_redis(crawler)
        return obj


class RedisCrawlSpider(RedisMixin, CrawlSpider):
    """Spider that reads urls from redis queue when idle."""

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        obj = super(RedisCrawlSpider, cls).from_crawler(crawler, *args, **kwargs)
        obj.setup_redis(crawler)
        return obj
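
With the fix in place, a spider that reads its start URLs from Redis looks roughly like the sketch below; the spider name, redis key, and URL are made-up examples, and the queue is seeded with the redis Python client (pulled in as a dependency of scrapy-redis):

# myspider.py - minimal RedisSpider sketch (names and key are examples only)
from scrapy_redis.spiders import RedisSpider


class MySpider(RedisSpider):
    name = 'myspider'
    redis_key = 'myspider:start_urls'   # Redis list the spider pops URLs from

    def parse(self, response):
        yield {'url': response.url,
               'title': response.css('title::text').extract_first()}


# Seed the queue from another process, for example:
#   import redis
#   redis.StrictRedis(host='localhost', port=6379).lpush(
#       'myspider:start_urls', 'http://example.com/')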

  
