1. Define the fields to scrape in items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class GosuncnItem(scrapy.Item):
    """
    Declare the spider's fields.
    """
    # define the fields for your item here like:
    # name = scrapy.Field()
    platform = scrapy.Field()
    position = scrapy.Field()
    num = scrapy.Field()
    time = scrapy.Field()
    url = scrapy.Field()
    content = scrapy.Field()
    responsible = scrapy.Field()
    page = scrapy.Field()
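
A Scrapy item behaves like a dict, but only keys declared as Field objects can be assigned; anything else raises a KeyError, which is exactly the mismatch error mentioned in step 3. A minimal sketch, runnable from a Python shell inside the project (the "salary" key is deliberately undeclared, purely for illustration):

from gosuncn.items import GosuncnItem

item = GosuncnItem()
item["position"] = "Test Engineer"  # declared field: accepted
print(dict(item))                   # {'position': 'Test Engineer'}

try:
    item["salary"] = "negotiable"   # not declared in GosuncnItem
except KeyError as err:
    print(err)                      # 'GosuncnItem does not support field: salary'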

2. Configure the settings in settings.py

# -*- coding: utf-8 -*-

# Scrapy settings for gosuncn project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'gosuncn'

SPIDER_MODULES = ['gosuncn.spiders']
NEWSPIDER_MODULE = 'gosuncn.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'gosuncn (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'gosuncn.middlewares.GosuncnSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'gosuncn.middlewares.GosuncnDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'gosuncn.pipelines.GosuncnPipeline': 300,
}

# Only log warnings and above, and write the log to a file
LOG_LEVEL = "WARNING"
LOG_FILE = "./log.log"

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
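
With the settings in place, the spider is normally started from the project root with "scrapy crawl gaoxinxing". If you would rather launch it from a script (handy for debugging in an IDE), here is a minimal sketch using Scrapy's CrawlerProcess API, assuming a file such as run.py (a hypothetical name) placed next to scrapy.cfg:

# run.py (hypothetical file name): script entry point for the crawl
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

if __name__ == "__main__":
    # Loads the settings.py above (LOG_LEVEL, ITEM_PIPELINES, ...)
    process = CrawlerProcess(get_project_settings())
    process.crawl("gaoxinxing")
    process.start()  # blocks until the crawl finishes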

3. Scrape the group's job postings in the spider. Note: the fields declared in items.py must match the fields scraped in this file, otherwise a KeyError is raised.

# -*- coding: utf-8 -*-
import scrapy
import logging

from gosuncn.items import GosuncnItem

# Module-level logger, so log lines are tagged with this spider's module name
logger = logging.getLogger(__name__)


class GaoxinxingSpider(scrapy.Spider):
    name = 'gaoxinxing'
    allowed_domains = ['gosuncn.zhiye.com']
    start_urls = ['http://gosuncn.zhiye.com/Social']
    next_page_num = 1

    def parse(self, response):
        # Skip the header row of the jobs table
        tr_list = response.xpath("//table[@class='jobsTable']/tr")[1:]
        for tr in tr_list:
            item = GosuncnItem()
            item["position"] = tr.xpath(".//td[1]/a/text()").extract_first()
            item["url"] = "http://gosuncn.zhiye.com" + tr.xpath(".//td[1]/a/@href").extract_first()
            item["platform"] = tr.xpath(".//td[3]/text()").extract_first()
            item["num"] = tr.xpath(".//td[4]/text()").extract_first()
            item["time"] = tr.xpath(".//td[6]/text()").extract_first()
            item["page"] = self.next_page_num
            #logger.warning(item)  # log the item
            #yield item

            ################ crawl the detail page ######################
            yield scrapy.Request(
                item["url"],
                callback=self.url_parse,  # no parentheses, otherwise the method would be called here
                meta={"item": item}       # pass the item along to url_parse()
            )

        ################ follow the pagination ######################
        next_page_url = response.xpath("//div[@class='pager2']//a[@class='next']/@href").extract_first()
        print(next_page_url)
        self.next_page_num = self.next_page_num + 1
        if self.next_page_num < 5:
            next_url = "http://gosuncn.zhiye.com/social/?PageIndex=" + str(self.next_page_num)
            yield scrapy.Request(
                next_url,
                callback=self.parse  # no parentheses, otherwise the method would be called here
            )

    def url_parse(self, response):
        """
        Parse the job detail page.
        :param response:
        :return:
        """
        item = response.meta["item"]
        item["content"] = response.xpath("//div[@class='xiangqingcontain']/ul[@class='xiangqinglist clearfix']/li[6]/text()").extract()
        #item["responsible"] = response.xpath("//div[@class='xiangqingcontain']/div[@class='xiangqingtext']/p[2]/text()").extract()
        logger.warning(item)  # log the item
        yield item
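
One weakness worth noting: parse() extracts next_page_url from the pager but never uses it, rebuilding the next URL from a class-level counter instead; since Scrapy handles requests concurrently, that counter also makes item["page"] only approximately right. A minimal sketch of an alternative (not the original code, and assuming the pager markup used above) that would replace the counter-based block at the end of parse() and follow the site's own "next" link until none is left:

# Follow the pager's own "next" link; stops naturally on the last page.
next_page_url = response.xpath("//div[@class='pager2']//a[@class='next']/@href").extract_first()
if next_page_url is not None:
    # response.urljoin() resolves a relative href against the current page URL
    yield scrapy.Request(response.urljoin(next_page_url), callback=self.parse)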

4. Process the items passed along in pipelines.py

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import re

from gosuncn.items import GosuncnItem


class GosuncnPipeline(object):
    def process_item(self, item, spider):
        if isinstance(item, GosuncnItem):
            item["content"] = self.process_content(item["content"])
            print(item)
        return item

    def process_content(self, content):
        # Strip "\r\n" sequences from each string, then drop the empty strings
        content = [re.sub(r"\r\n|' '", "", i) for i in content]
        content = [i for i in content if len(i) > 0]
        return content

# class GosuncnPipeline1(object):
#     def process_item(self, item, spider):
#         if isinstance(item, GosuncnItem):
#             print(item)
#         return item
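
The pipeline above only cleans and prints the items. If you also want to persist them, a second pipeline can be chained after it. A minimal sketch (the JsonLinesPipeline name and the jobs.jl path are assumptions, not part of the original project) using Scrapy's standard open_spider/close_spider hooks:

import json

class JsonLinesPipeline(object):
    # Hypothetical pipeline: append each item to jobs.jl, one JSON object per line

    def open_spider(self, spider):
        # called once when the spider starts
        self.file = open("jobs.jl", "a", encoding="utf-8")

    def close_spider(self, spider):
        # called once when the spider finishes
        self.file.close()

    def process_item(self, item, spider):
        # dict(item) works because scrapy.Item behaves like a mapping
        self.file.write(json.dumps(dict(item), ensure_ascii=False) + "\n")
        return item

To activate it, register it in ITEM_PIPELINES with a priority above 300, e.g. 'gosuncn.pipelines.JsonLinesPipeline': 400, so it runs after GosuncnPipeline.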
