Python Scrapy Series (Part 3)
1. Create the project (cmd)
scrapy startproject xxxx
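Running this command generates the standard Scrapy project skeleton; for a project named youyuan it looks like this (layout as in the Scrapy docs, with comments added for orientation):

youyuan/
    scrapy.cfg            # deploy configuration file
    youyuan/              # the project's Python module
        __init__.py
        items.py          # item definitions (step 2 below)
        middlewares.py    # spider and downloader middlewares
        pipelines.py      # item pipelines
        settings.py       # project settings (step 3 below)
        spiders/          # your spiders go here (step 4 below)
            __init__.py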
2. Write the items file
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

from scrapy import Field, Item


class YouyuanItem(Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    # username
    username = Field()
    # age
    age = Field()
    # avatar image URL
    herder_url = Field()
    # album image URLs
    image_url = Field()
    # self-introduction
    content = Field()
    # hometown
    place_from = Field()
    # education
    education = Field()
    # hobbies
    hobby = Field()
    # profile page URL
    source_url = Field()
    # data source
    sourcec = Field()
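A scrapy Item behaves like a dict whose keys are restricted to the declared fields; a quick sketch of how one is populated (the values here are made up for illustration):

from youyuan.items import YouyuanItem

item = YouyuanItem()
item['username'] = 'example_user'   # dict-style assignment
item['age'] = '25岁'
print(item['username'])             # dict-style access
print(dict(item))                   # plain dict, handy for JSON export

Assigning to a key that was never declared as a Field() raises KeyError, which catches field-name typos early.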
3. Write the settings file
# -*- coding: utf-8 -*-

# Scrapy settings for youyuan project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'youyuan'

SPIDER_MODULES = ['youyuan.spiders']
NEWSPIDER_MODULE = 'youyuan.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'youyuan (+http://www.yourdomain.com)'

# Obey robots.txt rules
#ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'youyuan.middlewares.YouyuanSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'youyuan.middlewares.YouyuanDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'youyuan.pipelines.YouyuanPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
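ITEM_PIPELINES above enables youyuan.pipelines.YouyuanPipeline, whose code this post does not include. A minimal sketch of what such a pipeline could look like, writing each scraped item as one JSON line (the output filename and the implementation are assumptions, not the original code):

# youyuan/pipelines.py  (hypothetical sketch)
import json

class YouyuanPipeline(object):

    def open_spider(self, spider):
        # open the output file once, when the spider starts
        self.file = open('youyuan.json', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        # ensure_ascii=False keeps Chinese text readable in the output
        self.file.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
        return item

    def close_spider(self, spider):
        self.file.close()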
4. Create a custom spider in the spiders directory
scrapy genspider -t crawl yy xxxx.com
(the spider below is a CrawlSpider, so passing -t crawl generates the matching template; without it, genspider produces a plain Spider skeleton)
Then edit the generated spider file:
# -*- coding: utf-8 -*-
import re

import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from youyuan.items import YouyuanItem


class YySpider(CrawlSpider):
    name = 'yy'
    allowed_domains = ['xxxx.com']
    start_urls = ['http://www.xxxx.com/find/beijing/mm18-25/advance-0-0-0-0-0-0-0/p1/']

    # rule matching the link of every list page
    page_links = LinkExtractor(allow=(r"xxxx.com/find/beijing/mm18-25/advance-0-0-0-0-0-0-0/p\d+/"))
    # rule matching each profile page
    profile_links = LinkExtractor(allow=(r"xxxx.com/\d+-profile/"))

    rules = (
        # pagination links have no callback, so they are only followed
        Rule(page_links),
        # every profile page is parsed by parse_item
        Rule(profile_links, callback='parse_item'),
    )

    def parse_item(self, response):
        item = YouyuanItem()
        # username
        item['username'] = self.get_username(response)
        # age
        item['age'] = self.get_age(response)
        # avatar image URL
        item['herder_url'] = self.get_herder_url(response)
        # album images
        item['image_url'] = self.get_image_url(response)
        # self-introduction
        item['content'] = self.get_content(response)
        # hometown
        item['place_from'] = self.get_place_from(response)
        # education
        item['education'] = self.get_education(response)
        # hobbies
        item['hobby'] = self.get_hobby(response)
        # profile page URL
        item['source_url'] = response.url
        # data source
        item['sourcec'] = "youyuan"
        yield item

    def get_username(self, response):
        username = response.xpath("//dl[@class='personal_cen']//div[@class='main']/strong/text()").extract()
        if len(username):
            username = username[0]
        else:
            username = "NULL"
        return username.strip()

    def get_age(self, response):
        age = response.xpath("//dl[@class='personal_cen']//dd/p/text()").extract()
        if len(age):
            age = re.findall(r"\d+岁", age[0])[0]
        else:
            age = "NULL"
        return age.strip()

    def get_herder_url(self, response):
        herder_url = response.xpath("//dl[@class='personal_cen']//dt/img/@src").extract()
        if len(herder_url):
            herder_url = herder_url[0]
        else:
            herder_url = "NULL"
        return herder_url.strip()

    def get_image_url(self, response):
        # keep the whole list of album image URLs
        image_url = response.xpath("//div[@class='ph_show']/ul/li/a/img/@src").extract()
        if not image_url:
            image_url = "NULL"
        return image_url

    def get_content(self, response):
        content = response.xpath("//div[@class='pre_data']/ul/li/p/text()").extract()
        if len(content):
            content = content[0]
        else:
            content = "NULL"
        return content.strip()

    def get_place_from(self, response):
        place_from = response.xpath("//div[@class='pre_data']/ul/li[2]//ol[1]/li[1]/span/text()").extract()
        if len(place_from):
            place_from = place_from[0]
        else:
            place_from = "NULL"
        return place_from.strip()

    def get_education(self, response):
        education = response.xpath("//div[@class='pre_data']/ul/li[3]//ol[2]/li[2]/span/text()").extract()
        if len(education):
            education = education[0]
        else:
            education = "NULL"
        return education.strip()

    def get_hobby(self, response):
        hobby = response.xpath("//dl[@class='personal_cen']//ol/li/text()").extract()
        if len(hobby):
            hobby = ",".join(hobby).replace(" ", "")
        else:
            hobby = "NULL"
        return hobby.strip()
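The XPath expressions above are tied to the target site's markup, so they are the first thing to re-check when fields come back as NULL. The interactive Scrapy shell is a convenient way to verify a selector against a live page (the URL below is a placeholder for a real profile page):

scrapy shell "http://www.xxxx.com/1234567-profile/"
>>> response.xpath("//dl[@class='personal_cen']//div[@class='main']/strong/text()").extract()
# a non-empty list means the selector still matches the page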
5. Run the spider
scrapy crawl yy
(the argument is the spider's name attribute, not the filename)
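If you just want the scraped data in a file without writing a custom pipeline, Scrapy's built-in feed exports can save the items directly:

scrapy crawl yy -o items.json

The -o option creates (or appends to) the output file and infers the format from the extension; .csv, .xml and .jsonlines work the same way.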
OVER!