Scraping all Zhihu user info with Scrapy
# -*- coding: utf-8 -*-
# Scrapy spider that crawls the info of all Zhihu users
# 1: Set ROBOTSTXT_OBEY to False so the robots.txt rules are not enforced
# 2: Add the request headers needed for crawling: user-agent and authorization
# 3: Decide on the crawling target, i.e. which user fields we want to collect
# 4: Crawling strategy
# Overall idea: start from a well-followed seed user and fetch their followees and followers lists; parse those lists to get each user's detail address and build one URL per user;
# from each user URL, parse the detailed user info. Every user in turn exposes followees and followers lists, so the requests keep looping.
# Step by step:
# 4-1: Pick the seed user, request their page and page through all of their followees and followers lists
# 4-2: List step: parse the followees/followers lists, take each user's url_token, build the user URL and run the user step 4-3
# 4-3: User step: parse the user URL; this yields 1. the user's detailed info and 2. the user's own followees/followers lists, which feed back into list step 4-2
# 4-4: Store each item into MongoDB as it is scraped, with de-duplication.
import json
import scrapy
from zhihu2.items import Zhihu2Item


class ZhihuuserSpider(scrapy.Spider):
    name = 'zhihuuser'
    allowed_domains = ['www.zhihu.com']
    start_urls = ['http://www.zhihu.com/']
    start_user = 'excited-vczh'
    # (1) Request template for a user's followees list
    # start_user is the seed user, followees_include is the include query parameter, limit is the page size (default 20), offset is the paging parameter (0 for the first page)
    followees_url = 'https://www.zhihu.com/api/v4/members/{user}/followees?include={include}&offset={offset}&limit={limit}'
    followees_include = 'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics'
    # (2) Request template for a user's followers list
    # start_user is the seed user, followers_include is the include query parameter, limit is the page size (default 20), offset is the paging parameter (0 for the first page)
    followers_url = 'https://www.zhihu.com/api/v4/members/{user}/followers?include={include}&offset={offset}&limit={limit}'
    followers_include = 'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics'
    # (3) Request template for a user's detailed info
    user_url = 'https://www.zhihu.com/api/v4/members/{user}?include={include}'
    user_include = 'allow_message,is_followed,is_following,is_org,is_blocking,employments,answer_count,follower_count,articles_count,gender,badge[?(type=best_answerer)].topics'
    def start_requests(self):
        # Example list URL and user URL, used to verify that these endpoints can be crawled at all
        # Example followees-list URL
        # A 401 response means the request was not authenticated; Zhihu requires the user's identity to be verified, so the authorization header must be set in settings
        # url = 'https://www.zhihu.com/api/v4/members/excited-vczh/followees?include=data%5B*%5D.answer_count%2Carticles_count%2Cgender%2Cfollower_count%2Cis_followed%2Cis_following%2Cbadge%5B%3F(type%3Dbest_answerer)%5D.topics&offset=60&limit=20'
        # Example user-detail URL
        # url = 'https://www.zhihu.com/api/v4/members/lanfengxing?include=allow_message%2Cis_followed%2Cis_following%2Cis_org%2Cis_blocking%2Cemployments%2Canswer_count%2Cfollower_count%2Carticles_count%2Cgender%2Cbadge%5B%3F(type%3Dbest_answerer)%5D.topics'
        # yield scrapy.Request(url, callback=self.parse)
        # Build the request for the seed user's followees list, filling the template with str.format
        yield scrapy.Request(url=self.followees_url.format(user=self.start_user, include=self.followees_include, offset=0, limit=20), callback=self.parse_followees)
        # Build the request for the seed user's followers list, again via str.format
        yield scrapy.Request(url=self.followers_url.format(user=self.start_user, include=self.followers_include, offset=0, limit=20), callback=self.parse_followers)
        # Build the request for the seed user's detailed info
        yield scrapy.Request(url=self.user_url.format(user=self.start_user, include=self.user_include), callback=self.parse_user)
    # Parse the followees list
    def parse_followees(self, response):
        results = json.loads(response.text)
        if 'data' in results.keys():
            for result in results.get('data'):
                # Take each followee's url_token and build the request for their detailed info
                yield scrapy.Request(url=self.user_url.format(user=result.get('url_token'), include=self.user_include), callback=self.parse_user)
        # Build the request for the next page
        if 'paging' in results.keys() and results.get('paging').get('is_end') == False:
            next_page = results.get('paging').get('next')
            yield scrapy.Request(url=next_page, callback=self.parse_followees)
    # Parse the followers list
    def parse_followers(self, response):
        results = json.loads(response.text)
        if 'data' in results.keys():
            for result in results.get('data'):
                # Take each follower's url_token and build the request for their detailed info
                yield scrapy.Request(url=self.user_url.format(user=result.get('url_token'), include=self.user_include), callback=self.parse_user)
        # Build the request for the next page
        if 'paging' in results.keys() and results.get('paging').get('is_end') == False:
            next_page = results.get('paging').get('next')
            yield scrapy.Request(url=next_page, callback=self.parse_followers)
    # Parse the detailed user info. Since the goal is the user details, decide here which fields to keep and define them accordingly in items.py
    def parse_user(self, response):
        item = Zhihu2Item()
        # The response body is JSON, so parse it first
        results = json.loads(response.text)
        # item.fields holds all field names defined on the item
        for field in item.fields:
            # If a field name also appears in the response, copy its value into the item
            if field in results.keys():
                item[field] = results.get(field)
        yield item
        # Request this user's followees list
        yield scrapy.Request(url=self.followees_url.format(user=results.get('url_token'), include=self.followees_include, offset=0, limit=20), callback=self.parse_followees)
        # Request this user's followers list
        yield scrapy.Request(url=self.followers_url.format(user=results.get('url_token'), include=self.followers_include, offset=0, limit=20), callback=self.parse_followers)
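Before running the whole project it can be worth checking, outside Scrapy, that the API endpoints above still answer when the planned headers are sent. The sketch below is an assumption rather than part of the project: it uses the requests library together with the User-Agent and oauth token configured later in settings.py, both of which may have expired by the time you try this. Without the authorization header Zhihu returns 401, as noted in start_requests.

import requests

headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
    'authorization': 'oauth c3cef7c66a1843f8b3a9e6a1e3160e20',
}
# A trimmed-down user-detail request for the seed user, asking for just two fields to keep the URL short
url = 'https://www.zhihu.com/api/v4/members/excited-vczh?include=answer_count,follower_count'
resp = requests.get(url, headers=headers)
print(resp.status_code)          # 200 when the token is accepted, 401 otherwise
print(resp.json().get('name'))   # quick sanity check on the JSON payload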
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
# Fields of the user info we want to collect
class Zhihu2Item(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    # name
    name = scrapy.Field()
    # gender
    gender = scrapy.Field()
    # employments
    employments = scrapy.Field()
    # badge (e.g. best-answerer topics)
    badge = scrapy.Field()
    # one-line bio
    headline = scrapy.Field()
    # number of followers
    follower_count = scrapy.Field()
    # number of answers
    answer_count = scrapy.Field()
    # number of articles written
    articles_count = scrapy.Field()
    # avatar
    avatar_url = scrapy.Field()
    avatar_url_template = scrapy.Field()
    # id
    id = scrapy.Field()
    # account type
    type = scrapy.Field()
    # profile URL
    url = scrapy.Field()
    # home page slug, the unique identifier
    url_token = scrapy.Field()
    # user type
    user_type = scrapy.Field()
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
# Item pipelines post-process the scraped items; this class stores them in MongoDB
class MongoPipeline(object):
    # The constructor needs mongo_uri and mongo_db, both supplied by the class method below
    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    # Class method that reads mongo_uri and mongo_db from the crawler settings
    @classmethod
    def from_crawler(cls, crawler):
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DB')
        )

    # Open the MongoDB client and database handle when the spider starts
    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    # Main storage step, returning the item (or DropItem). update with upsert=True handles de-duplication:
    # an existing url_token is updated in place, a new one is inserted
    def process_item(self, item, spider):
        name = item.__class__.__name__
        self.db[name].update({'url_token': item['url_token']}, {'$set': dict(item)}, True)
        return item

    # Close the MongoDB connection when the spider finishes
    def close_spider(self, spider):
        self.client.close()
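One caveat about the update call above: Collection.update exists in the pymongo 2.x/3.x releases this code was written against, but it was removed in pymongo 4. On a newer driver an equivalent upsert, sketched here under the assumption that only the driver version changes, would use update_one:

    def process_item(self, item, spider):
        name = item.__class__.__name__
        # Upsert on url_token: insert the user if unseen, otherwise overwrite the stored fields
        self.db[name].update_one({'url_token': item['url_token']}, {'$set': dict(item)}, upsert=True)
        return item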
# -*- coding: utf-8 -*-
# Scrapy settings for zhihu2 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'zhihu2'
SPIDER_MODULES = ['zhihu2.spiders']
NEWSPIDER_MODULE = 'zhihu2.spiders'
MONGO_URI = 'localhost'
MONGO_DB = 'zhihu2'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'zhihu2 (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
    'authorization': 'oauth c3cef7c66a1843f8b3a9e6a1e3160e20',
}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'zhihu2.middlewares.Zhihu2SpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'zhihu2.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'zhihu2.pipelines.MongoPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
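With these settings in place, the crawler is started from the project root with scrapy crawl zhihuuser. A small sketch for inspecting the stored results afterwards, assuming MongoDB is running locally (so MONGO_URI and MONGO_DB above resolve) and a reasonably recent pymongo is installed, could look like this:

import pymongo

client = pymongo.MongoClient('localhost')
db = client['zhihu2']
collection = db['Zhihu2Item']  # the pipeline names the collection after the item class

print(collection.count_documents({}))                      # total distinct users stored so far
print(collection.find_one({'url_token': 'excited-vczh'}))  # the seed user's stored record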