1.tencentSpider.py

# -*- coding: utf-8 -*-
import scrapy
from Tencent.items import TencentItem
# spider class
class TencentspiderSpider(scrapy.Spider):
    name = 'tencentSpider'  # spider name
    allowed_domains = ['tencent.com']  # domains the spider is allowed to crawl
    # the listing is paged through a start offset appended to the URL
    offset = 0
    url = 'https://hr.tencent.com/position.php?&start='
    start_urls = [url + str(offset)]  # first URL the spider requests

    def parse(self, response):
        # each job posting is a table row with class 'odd' or 'even'
        rows = response.xpath("//tr[@class='odd']|//tr[@class='even']")
        for each in rows:
            # create a fresh item per row; a single item created outside the
            # loop would be overwritten on every iteration
            item = TencentItem()
            item['zhiwei'] = each.xpath(".//td[@class='l square']/a/text()").extract()[0]
            item['lianjie'] = each.xpath(".//td[@class='l square']/a/@href").extract()[0]
            # the remaining cells are addressed relative to the current row;
            # absolute //tr[...] paths would match the first row every time
            item['leibie'] = each.xpath("./td[2]/text()").extract()[0]
            item['renshu'] = each.xpath("./td[3]/text()").extract()[0]
            item['didian'] = each.xpath("./td[4]/text()").extract()[0]
            item['shijian'] = each.xpath("./td[5]/text()").extract()[0]
            print(item)
            yield item
        if self.offset < 2840:
            # after finishing a page, advance the offset by 10, build the next
            # page's URL, and let self.parse handle that response as well
            self.offset += 10
            yield scrapy.Request(self.url + str(self.offset), callback=self.parse)
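If a row lacks one of the cells, extract()[0] raises IndexError and the whole parse callback aborts. A minimal defensive sketch of the loop body, assuming the same fields, swaps in Scrapy's extract_first() with a default value:

        # defensive variant of the loop body: extract_first() returns a
        # default instead of raising IndexError when a cell is missing
        for each in rows:
            item = TencentItem()
            item['zhiwei'] = each.xpath(".//td[@class='l square']/a/text()").extract_first(default='')
            item['lianjie'] = each.xpath(".//td[@class='l square']/a/@href").extract_first(default='')
            item['leibie'] = each.xpath("./td[2]/text()").extract_first(default='')
            item['renshu'] = each.xpath("./td[3]/text()").extract_first(default='')
            item['didian'] = each.xpath("./td[4]/text()").extract_first(default='')
            item['shijian'] = each.xpath("./td[5]/text()").extract_first(default='')
            yield item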

2.items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class TencentItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    zhiwei = scrapy.Field()   # job title
    lianjie = scrapy.Field()  # link
    leibie = scrapy.Field()   # category
    renshu = scrapy.Field()   # headcount
    didian = scrapy.Field()   # location
    shijian = scrapy.Field()  # publish date
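An Item behaves like a dict restricted to the declared fields, which catches typos early. A quick sketch with made-up values:

    item = TencentItem(zhiwei='后台开发工程师', didian='深圳')
    item['renshu'] = '2'
    # item['salary'] = '...' would raise KeyError: undeclared field
    print(dict(item))  # {'zhiwei': '后台开发工程师', 'didian': '深圳', 'renshu': '2'}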

3.main.py

from scrapy import cmdline
# run the spider from inside the IDE; equivalent to `scrapy crawl tencentSpider` in a shell
cmdline.execute("scrapy crawl tencentSpider".split())
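An equivalent that does not go through the command-line wrapper, assuming the spider lives in Tencent/spiders/tencentSpider.py as the section heading suggests, runs the spider via CrawlerProcess:

    from scrapy.crawler import CrawlerProcess
    from scrapy.utils.project import get_project_settings

    from Tencent.spiders.tencentSpider import TencentspiderSpider

    # load settings.py, schedule the spider, and block until the crawl finishes
    process = CrawlerProcess(get_project_settings())
    process.crawl(TencentspiderSpider)
    process.start()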

4.middlewares.py

# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals


class TencentSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class TencentDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
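Both classes above are the untouched project template, so every hook is a pass-through. For a sense of what a working downloader middleware looks like, here is a minimal sketch that rotates the User-Agent per request; the class name and USER_AGENTS list are illustrative, and it only takes effect once registered in DOWNLOADER_MIDDLEWARES in settings.py:

    import random

    # illustrative pool of desktop browser User-Agent strings
    USER_AGENTS = [
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Safari/605.1.15',
    ]

    class RandomUserAgentMiddleware(object):
        def process_request(self, request, spider):
            # overwrite the User-Agent header before the request is downloaded
            request.headers['User-Agent'] = random.choice(USER_AGENTS)
            return None  # let the request continue through the chain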

5.pipelines.py

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

from openpyxl import Workbook


class TencentPipeline(object):
    # the workbook and worksheet are created once, when the class is defined
    wb = Workbook()
    ws = wb.active
    # header row
    ws.append(['职位', '链接', '类型', '人数', '地点', '时间'])

    def process_item(self, item, spider):
        # append one row per scraped item, then save the workbook
        line = [item['zhiwei'], item['lianjie'], item['leibie'],
                item['renshu'], item['didian'], item['shijian']]
        self.ws.append(line)
        self.wb.save('tencentSpider.xlsx')
        return item
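Saving inside process_item rewrites the whole .xlsx file once per item. A sketch of the more idiomatic shape (hypothetical class name, same fields) builds the sheet in open_spider and writes the file once in close_spider:

    from openpyxl import Workbook

    class TencentXlsxPipeline(object):
        def open_spider(self, spider):
            # set up the workbook when the crawl starts
            self.wb = Workbook()
            self.ws = self.wb.active
            self.ws.append(['职位', '链接', '类型', '人数', '地点', '时间'])

        def process_item(self, item, spider):
            self.ws.append([item['zhiwei'], item['lianjie'], item['leibie'],
                            item['renshu'], item['didian'], item['shijian']])
            return item

        def close_spider(self, spider):
            # write the file once, after the crawl finishes
            self.wb.save('tencentSpider.xlsx')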

6.settings.py

# -*- coding: utf-8 -*-

# Scrapy settings for Tencent project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'Tencent'

SPIDER_MODULES = ['Tencent.spiders']
NEWSPIDER_MODULE = 'Tencent.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'

# Obey robots.txt rules
#ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'Tencent.middlewares.TencentSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'Tencent.middlewares.TencentDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'Tencent.pipelines.TencentPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
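For quick experiments it is also possible to skip the Excel pipeline entirely and use Scrapy's built-in feed exports. A variant of main.py (output file name is arbitrary) dumps all items to JSON:

    from scrapy import cmdline

    # -o routes items through Scrapy's feed exports; the format is inferred
    # from the file extension (json, csv, xml, jsonlines, ...)
    cmdline.execute("scrapy crawl tencentSpider -o tencent.json".split())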
