Using a process pool to scrape Lagou:

# coding = utf-8
import json
import pymongo
import pandas as pd
import requests
from lxml import etree
import time
from multiprocessing import Pool

# Set up MongoDB
client = pymongo.MongoClient('localhost')
db = client['lagou']
# Job title to search for
POSITION_NAME = '数据挖掘'
# Total number of pages to crawl (the original value was lost; 100 is an assumed placeholder)
PAGE_SUM = 100
# Number of positions returned per page (the original value was lost; 15 is an assumed placeholder)
PAGE_SIZE = 15
# Name of the MongoDB collection to write to
DATA_NAME = "DataMiningPosition"

base_url = 'https://m.lagou.com/search.json?city=%E5%85%A8%E5%9B%BD&positionName={positionName}' \
           '&pageNo={pageNo}&pageSize={pageSize}'


def page_index(pageno):
    headers = {
        "Accept": "application/json",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9",
        # Skip cookies whenever possible; this endpoint happens to return data without one
# "Cookie": "user_trace_token=20181119151914-03711263-38a2-4d81-bd81-5f480d930039; _ga=GA1.2.605262108.1542611954; _gid=GA1.2.249787972.1542611954; LGSID=20181119151916-6c3da9fa-ebcb-11e8-8958-5254005c3644; PRE_UTM=; PRE_HOST=www.baidu.com; PRE_SITE=https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3DOnHWjpEfiW4_pVm7hX8NYOFm0iJ7bz1ZJJlaKPPnmMzLE-6ypKNo0f19ABO5bjW4%26wd%3D%26eqid%3D8f61629100016e18000000065bf263e7; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2Fgongsi%2F147.html; LGUID=20181119151916-6c3dabf3-ebcb-11e8-8958-5254005c3644; index_location_city=%E5%85%A8%E5%9B%BD; JSESSIONID=ABAAABAAAGCABCC2D851CA25D1CFCD2B28DCDD6E00A2C7E; _ga=GA1.3.605262108.1542611954; X_HTTP_TOKEN=a0cc1a4beb8a41f57f144bc0bfd77bd7; sajssdk_2015_cross_new_user=1; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%221672adb3834203-08b3706084b44a-3961430f-1327104-1672adb3835428%22%2C%22%24device_id%22%3A%221672adb3834203-08b3706084b44a-3961430f-1327104-1672adb3835428%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_referrer_host%22%3A%22%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%7D%7D; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1542611954,1542612053,1542612277,1542612493; _gat=1; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1542613115; LGRID=20181119153837-20bafb1a-ebce-11e8-8958-5254005c3644",
"Host": "m.lagou.com",
"Proxy-Connection": "keep-alive",
"Referer": "http://m.lagou.com/search.html",
"X-Requested-With": "XMLHttpRequest",
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/45.0.2454.85 Safari/537.36 115Browser/6.0.3',
}
url = base_url.format(positionName=POSITION_NAME, pageNo=pageno, pageSize=PAGE_SIZE)
response = requests.get(url, headers=headers)
html = response.text
content = json.loads(html)
print(content)
if content.get("content"):
return content
else:
time.sleep()
return page_index(pageno) def parse_page_index(content): for i in range():


def parse_page_index(content):
    for i in range(PAGE_SIZE):
        try:
            item = content['content']['data']['page']['result'][i]
            # print(item)
            yield {
                'positionId': item.get('positionId'),
                'positionName': item.get('positionName'),
                'city': item.get('city'),
                'createTime': item.get('createTime'),
                'salary': item.get('salary'),
                'companyId': item.get('companyId'),
                'companyFullName': item.get('companyFullName')
            }
        except IndexError as e:
            print('The page may not contain that many records', e)


def save_to_mongo(data):
    if db[DATA_NAME].update({'positionId': data['positionId']}, {'$set': data}, True):
        print('Saved to Mongo', data['positionId'])
    else:
        print('Saved to Mongo Failed', data['positionId'])
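

# Optional sketch, not part of the original script: Collection.update() is deprecated in
# pymongo 3.x and removed in 4.x. On a current pymongo the same upsert can be written with
# update_one(); the name save_to_mongo_v2 is my own.
def save_to_mongo_v2(data):
    result = db[DATA_NAME].update_one({'positionId': data['positionId']},
                                      {'$set': data},
                                      upsert=True)
    if result.acknowledged:
        print('Saved to Mongo', data['positionId'])
    else:
        print('Saved to Mongo Failed', data['positionId'])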
# url = "http://m.lagou.com/jobs/4593934.html"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36",
"Accept": "text / html, application / xhtml + xml, application / xml;q = 0.9, image / webp, image / apng, * / *;q = 0.8",
"Accept - Encoding": "gzip, deflate",
"Accept - Language": "zh - CN, zh;q = 0.9",
"Cache - Control": "max - age = 0",
"Connection": "eep - alive",
# "Cookie": "_ga=GA1.2.474762156.1528795210; _gid=GA1.2.574638607.1528795210; user_trace_token=20180612172010-cdf76dc1-6e21-11e8-9af0-525400f775ce; LGUID=20180612172010-cdf772c0-6e21-11e8-9af0-525400f775ce; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1528795210,1528795215,1528795223; index_location_city=%E5%85%A8%E5%9B%BD; X_HTTP_TOKEN=f3ed266ddeee802fb7d402e4f6d4f4a3; JSESSIONID=ABAAABAAAFDABFG9F9C52FA9D8CAE24F139A0131C45E918; _ga=GA1.3.474762156.1528795210; _gat=1; LGSID=20180612184248-597a7795-6e2d-11e8-9479-5254005c3644; PRE_UTM=; PRE_HOST=; PRE_SITE=http%3A%2F%2Fm.lagou.com%2Fsearch.html; PRE_LAND=http%3A%2F%2Fm.lagou.com%2Fjobs%2F4079910.html; LGRID=20180612184505-ab051d02-6e2d-11e8-9479-5254005c3644; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1528800306" }
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            print("Request succeeded")
        text = response.content.decode()
        # print(text)
        html = etree.HTML(text)
        workyear = html.xpath('//span[@class="item workyear"]/span/text()')
        if workyear:
            workyear = workyear[0]
        else:
            time.sleep(3)  # assumed pause, as above
            return parse_detail(url)
        positiondesc = html.xpath('//div[@class="positiondesc"]//p/text()')
        # print(workyear, positiondesc)
        return workyear, positiondesc
    except Exception as e:
        print(e)
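

# Optional sketch, not part of the original script: a tiny illustration of why parse_detail()
# takes workyear[0] -- lxml's xpath() always returns a list of matches, even for a single node.
# The HTML snippet below is made up for demonstration only.
def _xpath_demo():
    sample = '<div><span class="item workyear"><span>3-5年</span></span>' \
             '<div class="positiondesc"><p>职位描述...</p></div></div>'
    doc = etree.HTML(sample)
    years = doc.xpath('//span[@class="item workyear"]/span/text()')  # -> ['3-5年']
    desc = doc.xpath('//div[@class="positiondesc"]//p/text()')       # -> ['职位描述...']
    print(years[0], desc)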


# Save the scraped data to MongoDB
def to_mongo(page_sum):
    # Lagou displays at most 334 pages
    for page in range(page_sum):
        html = page_index(page)
        items = parse_page_index(html)
        # print(items)
        for item in items:
            print(item)
            save_to_mongo(item)


# Save the scraped data to MongoDB using a process pool
def to_mongo_pool(page):
    # Lagou displays at most 334 pages
    content = page_index(page)
    items = parse_page_index(content)
    # print(items)
    for item in items:
        print(item)
        save_to_mongo(item)


# Parse the scraped records so the data can be converted to a DataFrame
def parse_items(page_sum):
    for page in range(page_sum):
        html = page_index(page)
        items = parse_page_index(html)
        for item in items:
            positionId = item["positionId"]
            detail_url = "http://m.lagou.com/jobs/{}.html".format(positionId)
            workyear, positiondesc = parse_detail(detail_url)
            print(positionId, positiondesc)
            yield [
                item["positionId"],
                item["positionName"],
                item["city"],
                item["createTime"],
                item["salary"],
                item["companyId"],
                item["companyFullName"],
                workyear,
                positiondesc
            ]


# Save the data in CSV format
def to_csv(page_sum):
    item_lists = []
    # print(parse_items())
    for item in parse_items(page_sum):
        item_lists.append(item)
    # print(item_lists)
    data = pd.DataFrame(item_lists,
                        columns=["positionId", "positionName", "city", "createTime", "salary", "companyId",
                                 "companyFullName", "workyear", "positiondesc"])
    data.to_csv("python_positon.csv")


if __name__ == '__main__':
    # to_csv
    # to_mongo()
    # Saving to the MongoDB database is recommended
    start_time = time.time()
    pool = Pool()  # Pool()'s argument is the number of worker processes; it defaults to the
                   # number of CPU cores. If you set it, keep it at or below the core count.
    # map()'s first argument is the function object itself: no parentheses, no arguments.
    # The second argument is a list; each element of the list is passed to that function.
    pool.map(to_mongo_pool, [i for i in range(PAGE_SUM)])
    # close() only stops the pool from accepting new tasks
    pool.close()
    # join() blocks: the main process waits for the child processes to finish before continuing
    pool.join()
    end_time = time.time()
    print("Total time: %.2f seconds" % (end_time - start_time))
