Crawling 1药网 (111.com.cn) with pyspider (repost)
1. Crawling the products
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2019-02-02 08:59:40
# Project: oneDrug

from pyspider.libs.base_handler import *
from pymongo import MongoClient
import re


class Handler(BaseHandler):
    crawl_config = {
    }

    def __init__(self):
        self.client = MongoClient('mongodb://localhost:27017')
        self.drug = self.client.drug

    def insert_goods(self, data):
        # upsert one product document keyed by goods_id
        collection = self.drug['goods']
        collection.update({'goods_id': data['goods_id']}, data, True)

    def insert_comments(self, data):
        collection = self.drug['comments']
        collection.insert_one(data)

    @every(minutes=24 * 60)
    def on_start(self):
        self.crawl('https://www.111.com.cn/categories/', callback=self.categories_page,
                   validate_cert=False, fetch_type='js')

    @config(age=10 * 24 * 60 * 60)
    def categories_page(self, response):
        for each in response.doc('.allsort em > a').items():
            self.crawl(each.attr.href, callback=self.cagetory_list_page,
                       validate_cert=False, fetch_type='js')

    @config(priority=1)
    def cagetory_list_page(self, response):
        for each in response.doc('#itemSearchList a[target="_blank"][class="product_pic pro_img"]').items():
            self.crawl(each.attr.href, callback=self.detail_page, validate_cert=False, fetch_type='js')
        # follow the pagination link to the next listing page
        next = response.doc('#search_table > div.turnPageBottom > a.page_next').attr.href
        self.crawl(next, callback=self.cagetory_list_page, validate_cert=False, fetch_type='js')

    @config(priority=2)
    def detail_page(self, response):
        goods_id = response.doc('#gallery_view > ul > li.item_number').text()
        cagetory_one = response.doc('body > div.wrap.clearfix > div > span:nth-child(3) > a').text()
        cagetory_two = response.doc('body > div.wrap.clearfix > div > span:nth-child(5) > a').text()
        cagetory_three = response.doc('body > div.wrap.clearfix > div > span:nth-child(7) > a').text()
        merchants = response.doc('div.middle_property > span:nth-child(1)').text()
        goods_name = response.doc('div.middle_property > h1').text()
        goods_desc = response.doc('div.middle_property > span.red.giftRed').text()
        goods_price = response.doc(
            'div.middle_property > div.shangpin_info > dl:nth-child(2) > dd > span.good_price').text()
        total_comments = response.doc('#fristReviewCount > span > a').text()
        brand = response.doc(
            '#tabCon > div:nth-child(1) > div.goods_intro > table > tbody > tr:nth-child(2) > td:nth-child(2)').text()
        spec = response.doc(
            '#tabCon > div:nth-child(1) > div.goods_intro > table > tbody > tr:nth-child(2) > td:nth-child(4)').text()
        weight = response.doc(
            '#tabCon > div:nth-child(1) > div.goods_intro > table > tbody > tr:nth-child(3) > td:nth-child(2)').text()
        manufacturers = response.doc(
            '#tabCon > div:nth-child(1) > div.goods_intro > table > tbody > tr:nth-child(3) > td:nth-child(4)').text()
        approval_number = response.doc(
            '#tabCon > div:nth-child(1) > div.goods_intro > table > tbody > tr:nth-child(4) > td:nth-child(2)').text()
        drug_type = response.doc(
            '#tabCon > div:nth-child(1) > div.goods_intro > table > tbody > tr:nth-child(4) > td:nth-child(4)').text()

        # the instruction sheet is laid out as th/td pairs in rows 3-21 of the detail table
        instructions = {}
        if response.doc('#prodDetailCotentDiv > table > tbody > tr:nth-child(1) > th').text():
            for i in range(3, 22):
                instructions_key = response.doc(
                    '#prodDetailCotentDiv > table > tbody > tr:nth-child({}) > th'.format(i)).text().split(" ")[0]
                instructions_value = response.doc(
                    '#prodDetailCotentDiv > table > tbody > tr:nth-child({}) > td'.format(i)).text()
                instructions[instructions_key] = instructions_value

        # re-read the comment counters from the comments tab (overrides the count above)
        total_comments = response.doc('#itemComments > span').text()
        good_comments = response.doc('#productExperience > div > ul > li:nth-child(2) > a > span').text()
        mid_comments = response.doc('#productExperience > div > ul > li:nth-child(3) > a > span').text()
        bad_comments = response.doc('#productExperience > div > ul > li:nth-child(4) > a > span').text()

        # the second number in the detail-page URL is used as the product's url_id
        url_id = re.findall(r'\d+', response.url)[1]

        goods_data = {
            'url_id': url_id,
            'goods_id': goods_id,
            'goods_name': goods_name,
            'goods_desc': goods_desc,
            'goods_price': goods_price,
            'merchants': merchants,
            'cagetory': {
                'one': cagetory_one,
                'two': cagetory_two,
                'three': cagetory_three
            },
            'drug_detail': {
                'brand': brand,
                'spec': spec,
                'weight': weight,
                'manufacturers': manufacturers,
                'approval_number': approval_number,
                'drug_type': drug_type
            },
            'instructions': instructions,
            'comments': {
                'total_comments': total_comments,
                'good_comments': good_comments,
                'mid_comments': mid_comments,
                'bad_comments': bad_comments
            }
        }
        self.insert_goods(goods_data)
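Note that the comment scraper in part 2 selects products with {'commspider': False}, while the handler above never writes that field. A minimal one-off initialization sketch (my assumption, not part of the original scripts), run against the same local MongoDB and drug.goods collection, could mark every product that has not been crawled for comments yet:

from pymongo import MongoClient

client = MongoClient('mongodb://localhost:27017')
goods = client.drug['goods']
# flag every product without a commspider field as "comments not crawled yet",
# so the part-2 query find({'commspider': False}) can pick it up
goods.update_many({'commspider': {'$exists': False}}, {'$set': {'commspider': False}})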
2. Crawling the comments
from pymongo import MongoClient
import requests
from bs4 import BeautifulSoup
import re
import socket


class Drug:
    def __init__(self):
        self.client = MongoClient('mongodb://localhost:27017')
        self.drug = self.client.drug
        self.collection = self.drug['goods']
        self.comm_collection = self.drug['comments']

    def dbmodify(self):
        # strip the label prefix from goods_id and the currency sign from goods_price
        for data in self.collection.find({}, {"goods_id": 1, "goods_price": 1}):
            try:
                _id = data['_id']
                id = data['goods_id'].split(":")[1]
                price = data['goods_price'].split("¥")[1]
                self.collection.update({'_id': _id}, {'$set': {'goods_id': id, 'goods_price': price}})
                print(_id, id, price)
            except IndexError:
                pass

    def getBaseArgument(self, goods_id):
        # fetch the first review page and return the total page count
        base_url = 'https://www.111.com.cn/interfaces/review/list/html.action'
        data = {
            'goodsId': goods_id,
            'pageIndex': 1,
            'score': '1&_19020301'
        }
        try:
            self.collection.update_one({'url_id': goods_id}, {'$set': {'commspider': True}})
            requests.packages.urllib3.disable_warnings()
            requests.adapters.DEFAULT_RETRIES = 5
            # disable keep-alive so connections are not reused
            s = requests.session()
            s.keep_alive = False
            r = s.get(base_url, params=data, timeout=5, verify=False)
            r.close()
            soup = BeautifulSoup(r.text, 'html.parser')
            if soup.find_all("div", class_="view_no_result"):
                return "No Comments!"
            else:
                total_page_text = soup.find_all(text=re.compile(r'共\d+页'))[0]
                pattern = re.compile(r'\d+')
                total_page = pattern.findall(total_page_text)
                return total_page[0]
        except requests.exceptions.RequestException as e:
            print(e)

    def getCommlist(self, goods_id, total_page):
        # walk every review page and store each comment row in MongoDB
        base_url = 'https://www.111.com.cn/interfaces/review/list/html.action'
        try:
            for i in range(1, int(total_page)):
                data = {
                    'goodsId': goods_id,
                    'pageIndex': i,
                    'score': '1&_19020301'
                }
                try:
                    requests.packages.urllib3.disable_warnings()
                    requests.adapters.DEFAULT_RETRIES = 15
                    # disable keep-alive so connections are not reused
                    s = requests.session()
                    s.keep_alive = False
                    r = s.get(base_url, params=data, timeout=5, verify=False)
                    r.close()
                    soup = BeautifulSoup(r.text, 'html.parser')
                    for tr in soup.find_all("tr"):
                        comments = {}
                        try:
                            comments['goodsId'] = goods_id
                            comments['content'] = tr.find('p').text.strip()
                            comments['date'] = tr.find('p', attrs={'class': 'eval_date'}).text.strip()
                            self.comm_collection.insert_one(comments)
                        except:
                            print(goods_id + " has a problem!\n")
                            print(comments)
                except requests.exceptions.RequestException as e:
                    print(e)
        except ValueError:
            return "No Comments! Try next!"

    def getComments(self):
        i = 0
        goods_list = []
        # collect the url_id of every product whose comments have not been crawled yet
        for data in self.collection.find({'commspider': False}, {"url_id"}):
            id = data['url_id']
            goods_list.append(id)
        length = len(goods_list)
        print("{} products in total".format(length))
        for good in goods_list:
            total_page = self.getBaseArgument(good)
            comments = self.getCommlist(good, total_page)
            i = i + 1
            print("{} products in total\ncurrent item: {}\nproduct id: {}\n".format(length, i, good))
            print(comments)


test = Drug().getComments()
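The dbmodify helper is defined but never invoked in the original script. If you want the cleanup pass to run before the comment crawl, the module-level call at the end could be replaced by something like the following sketch (the count_documents check at the end is just a sanity check I've added, assuming the same local MongoDB):

drug = Drug()
drug.dbmodify()      # optional: normalize goods_id / goods_price first
drug.getComments()   # then walk the uncrawled products and fetch their reviews

# quick sanity check: how many comment documents were stored
print(drug.comm_collection.count_documents({}))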