Scrapy crawler
a. Configuration file
# settings.py
DEPTH_LIMIT = 1         # how many levels of "recursive" link following are allowed
ROBOTSTXT_OBEY = False  # the target site's robots.txt declares which URLs may be crawled; False means do not obey it
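These two settings can also be overridden for a single spider through the custom_settings class attribute. A minimal sketch, assuming an illustrative spider name and URL that are not part of the original project:

import scrapy

class DemoSpider(scrapy.Spider):
    # illustrative spider, shown only to demonstrate per-spider setting overrides
    name = 'demo'
    start_urls = ['http://example.com/']

    # these values override settings.py for this spider only
    custom_settings = {
        'DEPTH_LIMIT': 1,
        'ROBOTSTXT_OBEY': False,
    }

    def parse(self, response):
        self.logger.info('crawled %s', response.url)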
b. Selectors
.//                  # search among the descendants of the current node
./                   # direct children only
./div                # div tags among the children
./div[@id='i1']      # div children whose id is 'i1'
obj.extract()        # convert every object in the selector list to a string => list
obj.extract_first()  # like extract(), but return only the first element of the list
//div/text()         # get the text of a tag
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from scrapy.selector import Selector, HtmlXPathSelector
from scrapy.http import HtmlResponse
html = """<!DOCTYPE html>
<html>
<head lang="en">
<meta charset="UTF-8">
<title></title>
</head>
<body>
<ul>
<li class="item-"><a id='i1' href="link.html">first item</a></li>
<li class="item-0"><a id='i2' href="llink.html">first item</a></li>
<li class="item-1"><a href="llink2.html">second item<span>vv</span></a></li>
</ul>
<div><a href="llink2.html">second item</a></div>
</body>
</html>
"""
response = HtmlResponse(url='http://example.com', body=html, encoding='utf-8')
# hxs = HtmlXPathSelector(response)
# print(hxs)
# hxs = Selector(response=response).xpath('//a')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[2]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[@id]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[@id="i1"]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[@href="link.html"][@id="i1"]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[contains(@href, "link")]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[starts-with(@href, "link")]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[re:test(@id, "i\d+")]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[re:test(@id, "i\d+")]/text()').extract()
# print(hxs)
# hxs = Selector(response=response).xpath('//a[re:test(@id, "i\d+")]/@href').extract()
# print(hxs)
# hxs = Selector(response=response).xpath('/html/body/ul/li/a/@href').extract()
# print(hxs)
# hxs = Selector(response=response).xpath('//body/ul/li/a/@href').extract_first()
# print(hxs)

# ul_list = Selector(response=response).xpath('//body/ul/li')
# for item in ul_list:
#     v = item.xpath('./a/span')
#     # or
#     # v = item.xpath('a/span')
#     # or
#     # v = item.xpath('*/a/span')
#     print(v)
c. Structured processing
# settings.py
ITEM_PIPELINES = {
    'day96.pipelines.Day96Pipeline': 300,
}
DB = "....."
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html

from scrapy.exceptions import DropItem


class Day96Pipeline(object):

    def __init__(self, conn_str):
        self.conn_str = conn_str

    @classmethod
    def from_crawler(cls, crawler):
        """
        Called once at start-up to create the pipeline object.
        :param crawler:
        :return:
        """
        conn_str = crawler.settings.get('DB')
        return cls(conn_str)

    def open_spider(self, spider):
        """
        Called when the spider starts running.
        :param spider:
        :return:
        """
        self.conn = open(self.conn_str, 'a')

    def close_spider(self, spider):
        """
        Called when the spider is closed.
        :param spider:
        :return:
        """
        self.conn.close()

    def process_item(self, item, spider):
        """
        Called every time an item needs to be persisted.
        :param item:
        :param spider:
        :return:
        """
        # if spider.name == 'chouti'
        tpl = "%s\n%s\n\n" % (item['title'], item['href'])
        self.conn.write(tpl)
        # hand the item on to the next pipeline
        return item
        # to drop the item instead (nothing passed on), raise:
        # raise DropItem()
pipelines.py
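The pipeline above reads item['title'] and item['href'], so the project's items.py needs an item with those two fields. A minimal sketch; the class name Day96Item is an assumption, since the original post does not show this items.py:

# items.py -- minimal sketch matching the fields Day96Pipeline expects
import scrapy

class Day96Item(scrapy.Item):
    title = scrapy.Field()  # entry title
    href = scrapy.Field()   # entry link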
d. Common commands
scrapy startproject sp1
cd sp1
scrapy genspider baidu baidu.com  # create a spider
scrapy crawl baidu
scrapy crawl baidu --nolog
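As a side note, a spider can also be launched from a plain Python script instead of the scrapy crawl command. A minimal sketch using CrawlerProcess, run from inside the project directory so settings.py is picked up; baidu is the spider generated above:

# run.py -- sketch: start the spider without the scrapy CLI
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

process = CrawlerProcess(get_project_settings())  # loads the project's settings.py
process.crawl('baidu')   # spider name created by "scrapy genspider baidu baidu.com"
process.start()          # blocks until the crawl finishes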
e. Directory structure
sp1
  - scrapy.cfg          # initial project configuration file
  - sp1
      - spiders         # directory holding the spiders
      - items.py        # item definitions (structured data)
      - pipelines.py    # persistence (item pipelines)
      - middlewares.py  # middlewares
      - settings.py     # settings
Examples
# -*- coding: utf-8 -*-
import scrapy
import sys
import io
from scrapy.selector import Selector, HtmlXPathSelector


class ChoutiSpider(scrapy.Spider):
    name = 'chouti'
    allowed_domains = ['chouti.com']
    start_urls = ['http://dig.chouti.com/']

    def parse(self, response):
        hxs = Selector(response=response).xpath('//div[@id="content-list"]/div[@class="item"]')
        for obj in hxs:
            a = obj.xpath('.//a[@class="show-content color-chag"]/text()').extract_first()
            print(a.strip())
Get the Chouti news titles
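To hand the titles to the pipeline from section c instead of printing them, parse would yield items. A hedged sketch of that variant: the @href extraction from the same anchor is an assumption about the page markup, and a plain dict is used so the keys match what Day96Pipeline reads.

    def parse(self, response):
        hxs = Selector(response=response).xpath('//div[@id="content-list"]/div[@class="item"]')
        for obj in hxs:
            link = obj.xpath('.//a[@class="show-content color-chag"]')
            title = link.xpath('./text()').extract_first()
            href = link.xpath('./@href').extract_first()  # assumed to exist on the same anchor
            if title:
                # keys match item['title'] / item['href'] used in Day96Pipeline
                yield {'title': title.strip(), 'href': href}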
# -*- coding: utf-8 -*-
import scrapy
import sys
import io
from scrapy.selector import Selector, HtmlXPathSelector


class ChoutiSpider(scrapy.Spider):
    name = 'chouti'
    allowed_domains = ['chouti.com']
    start_urls = ['http://dig.chouti.com/']

    visited_urls = set()

    def parse(self, response):
        # get the URLs of all page numbers on the current page
        hxs = Selector(response=response).xpath('//div[@id="dig_lcpage"]//a/@href').extract()
        for url in hxs:
            md5_url = self.md5(url)
            if md5_url in self.visited_urls:
                print('already seen', url)
            else:
                self.visited_urls.add(md5_url)
                print(url)

    def md5(self, url):
        import hashlib
        obj = hashlib.md5()
        obj.update(bytes(url, encoding='utf-8'))
        return obj.hexdigest()
Get all page numbers on the current Chouti page
# -*- coding: utf-8 -*-
import scrapy
import sys
import io
from scrapy.http import Request
from scrapy.selector import Selector, HtmlXPathSelector


class ChoutiSpider(scrapy.Spider):
    name = 'chouti'
    allowed_domains = ['chouti.com']
    start_urls = ['http://dig.chouti.com/']

    visited_urls = set()

    def parse(self, response):
        # get the URLs of all page numbers on the current page
        hxs = Selector(response=response).xpath('//div[@id="dig_lcpage"]//a/@href').extract()
        for url in hxs:
            md5_url = self.md5(url)
            if md5_url in self.visited_urls:
                pass
            else:
                print(url)
                self.visited_urls.add(md5_url)
                url = "http://dig.chouti.com%s" % url
                # hand the newly discovered URL to the scheduler
                yield Request(url=url, callback=self.parse)

    def md5(self, url):
        import hashlib
        obj = hashlib.md5()
        obj.update(bytes(url, encoding='utf-8'))
        return obj.hexdigest()
Get all Chouti page numbers
a. Avoiding duplicate URLs
# settings.py
DUPEFILTER_CLASS = "day96.duplication.RepeatFilter"
class RepeatFilter(object):

    def __init__(self):
        self.visited_set = set()

    @classmethod
    def from_settings(cls, settings):
        print('...')
        return cls()

    def request_seen(self, request):
        if request.url in self.visited_set:
            return True
        self.visited_set.add(request.url)
        return False

    def open(self):  # can return deferred
        print('open')
        pass

    def close(self, reason):  # can return a deferred
        print('close')
        pass

    def log(self, request, spider):  # log that a request has been filtered
        # print('log....')
        pass
duplication.py
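Filtering on the raw URL string is fragile (query-parameter order, fragments, and so on). A sketch of a variant keyed on request fingerprints instead, using the request_fingerprint helper available in the Scrapy versions this post targets; the class name is illustrative:

# duplication.py -- sketch: same filter, keyed on request fingerprints instead of raw URLs
from scrapy.utils.request import request_fingerprint


class FingerprintRepeatFilter(object):
    def __init__(self):
        self.fingerprints = set()

    @classmethod
    def from_settings(cls, settings):
        return cls()

    def request_seen(self, request):
        fp = request_fingerprint(request)  # canonicalised hash of method, URL and body
        if fp in self.fingerprints:
            return True
        self.fingerprints.add(fp)
        return False

    def open(self):
        pass

    def close(self, reason):
        pass

    def log(self, request, spider):
        pass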
# -*- coding: utf-8 -*-
import scrapy
import sys
import io
from scrapy.http import Request
from scrapy.selector import Selector, HtmlXPathSelector
# Scrapy's built-in default dupefilter, kept here for reference
from scrapy.dupefilter import RFPDupeFilter


class ChoutiSpider(scrapy.Spider):
    name = 'chouti'
    allowed_domains = ['chouti.com']
    start_urls = ['http://dig.chouti.com/']

    def parse(self, response):
        print(response.url)
        # get the URLs of all page numbers on the current page
        hxs = Selector(response=response).xpath('//div[@id="dig_lcpage"]//a/@href').extract()
        for url in hxs:
            url = "http://dig.chouti.com%s" % url
            # hand the newly discovered URL to the scheduler
            yield Request(url=url, callback=self.parse)

    def md5(self, url):
        import hashlib
        obj = hashlib.md5()
        obj.update(bytes(url, encoding='utf-8'))
        return obj.hexdigest()
chouti.py
传智播客 (itcast.cn)
# Crawl the names of the teachers on 传智播客 (itcast.cn)
# scrapy startproject mySpider

# cat /Users/huaixiaozi/PycharmProjects/mySpider/mySpider/items.py
import scrapy


class MyspiderItem(scrapy.Item):
    name = scrapy.Field()
    title = scrapy.Field()
    info = scrapy.Field()


# cat /Users/huaixiaozi/PycharmProjects/mySpider/mySpider/spiders/itcastspider.py
import scrapy
from mySpider.items import MyspiderItem  # the item class defined above (the original imported ItcastItem, which items.py does not define)


# create a spider class
class ItcastSpider(scrapy.Spider):
    # spider name
    name = "itcast"
    # domains the spider may crawl (a domain name, not a full URL)
    allowed_domains = ["itcast.cn"]
    # starting URL
    start_urls = ["http://www.itcast.cn/channel/teacher.shtml#"]

    def parse(self, response):
        # use Scrapy's built-in XPath to get the root node of every teacher entry
        teacher_list = response.xpath('//div[@class="li_txt"]')

        teacherItem = []
        # iterate over the root nodes
        for each in teacher_list:
            item = MyspiderItem()
            name = each.xpath('./h3/text()').extract()
            title = each.xpath('./h4/text()').extract()
            info = each.xpath('./p/text()').extract()
            print("--------------", type(name))
            item['name'] = name[0]
            item['title'] = title[0]
            item['info'] = info[0]
            teacherItem.append(item)

        return teacherItem

# save to a JSON file
scrapy crawl itcast -o itcast.json
# save to a CSV file
scrapy crawl itcast -o itcast.csv
Crawl the teachers' names from 传智播客 (itcast.cn)
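If you prefer a pipeline over the -o flag, here is a minimal sketch of one that writes each teacher item as a JSON line; the class name and file name are illustrative, and it would still need to be registered in ITEM_PIPELINES:

# pipelines.py -- sketch: write every item as one JSON line
import json

class JsonLinesPipeline(object):
    def open_spider(self, spider):
        self.f = open('itcast_teachers.jsonl', 'w', encoding='utf-8')  # illustrative file name

    def process_item(self, item, spider):
        self.f.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
        return item

    def close_spider(self, spider):
        self.f.close()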