1. Create the project (cmd)

scrapy startproject xxxx
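
Running the command (this post's project is named youyuan) generates the standard Scrapy skeleton; roughly what you should see with a recent Scrapy release:

youyuan/
    scrapy.cfg            # deploy configuration
    youyuan/              # the project's Python module
        __init__.py
        items.py          # item definitions (step 2)
        middlewares.py    # spider/downloader middlewares
        pipelines.py      # item pipelines
        settings.py       # project settings (step 3)
        spiders/          # spider code goes here (step 4)
            __init__.py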

2. Write the items file

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

from scrapy import Field, Item


class YouyuanItem(Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    # username
    username = Field()
    # age
    age = Field()
    # avatar image URL
    herder_url = Field()
    # album images
    image_url = Field()
    # personal monologue
    content = Field()
    # place of origin
    place_from = Field()
    # education
    education = Field()
    # hobbies
    hobby = Field()
    # personal profile page
    source_url = Field()
    # data source
    sourcec = Field()
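
An Item behaves like a dict whose keys are restricted to the declared fields, which catches typos early. A quick sanity check in a Python shell (assuming the project root is on sys.path):

>>> from youyuan.items import YouyuanItem
>>> item = YouyuanItem()
>>> item['username'] = 'demo'
>>> dict(item)
{'username': 'demo'}
>>> item['nickname'] = 'demo'    # undeclared field
KeyError: 'YouyuanItem does not support field: nickname'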

3. Write the settings file

# -*- coding: utf-8 -*-

# Scrapy settings for youyuan project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'youyuan'

SPIDER_MODULES = ['youyuan.spiders']
NEWSPIDER_MODULE = 'youyuan.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'youyuan (+http://www.yourdomain.com)'

# Obey robots.txt rules
#ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'youyuan.middlewares.YouyuanSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'youyuan.middlewares.YouyuanDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'youyuan.pipelines.YouyuanPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
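
ITEM_PIPELINES above points at youyuan.pipelines.YouyuanPipeline, which this post never shows. A minimal sketch of what pipelines.py could look like, assuming Python 3 and that the goal is simply to append each item to a JSON Lines file (the file name youyuan.jsonl is my placeholder, not from the original):

# -*- coding: utf-8 -*-
import json


class YouyuanPipeline(object):
    def open_spider(self, spider):
        # called once when the spider starts
        self.file = open('youyuan.jsonl', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        # serialize each item as one JSON object per line
        self.file.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
        return item

    def close_spider(self, spider):
        # called once when the spider finishes
        self.file.close()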

4. Create a custom spider file in the spiders directory

scrapy genspider demo 'www.xxxx.com'

Since the spider below subclasses CrawlSpider rather than the basic Spider, generating from the crawl template (scrapy genspider -t crawl demo www.xxxx.com) gives a closer starting point, though either works.

Write the spider file:

# -*- coding: utf-8 -*-
import re

import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from youyuan.items import YouyuanItem


class YySpider(CrawlSpider):
    name = 'yy'
    allowed_domains = ['xxxx.com']
    start_urls = ['http://www.xxxx.com/find/beijing/mm18-25/advance-0-0-0-0-0-0-0/p1/']

    # rule matching the link of each listing page
    page_links = LinkExtractor(allow=(r"xxxx.com/find/beijing/mm18-25/advance-0-0-0-0-0-0-0/p\d+/"))
    # rule matching each profile page
    profile_links = LinkExtractor(allow=(r"xxxx.com/\d+-profile/"))

    rules = (
        # listing pages: no callback, just keep following links
        Rule(page_links),
        # profile pages: parse with parse_item
        Rule(profile_links, callback='parse_item'),
    )

    def parse_item(self, response):
        item = YouyuanItem()
        # username
        item['username'] = self.get_username(response)
        # age
        item['age'] = self.get_age(response)
        # avatar image URL
        item['herder_url'] = self.get_herder_url(response)
        # album images
        item['image_url'] = self.get_image_url(response)
        # personal monologue
        item['content'] = self.get_content(response)
        # place of origin
        item['place_from'] = self.get_place_from(response)
        # education
        item['education'] = self.get_education(response)
        # hobbies
        item['hobby'] = self.get_hobby(response)
        # personal profile page
        item['source_url'] = response.url
        # data source
        item['sourcec'] = "youyuan"
        yield item

    def get_username(self, response):
        username = response.xpath("//dl[@class='personal_cen']//div[@class='main']/strong/text()").extract()
        if len(username):
            username = username[0]
        else:
            username = "NULL"
        return username.strip()

    def get_age(self, response):
        age = response.xpath("//dl[@class='personal_cen']//dd/p/text()").extract()
        if len(age):
            age = re.findall(u"\d+岁", age[0])[0]
        else:
            age = "NULL"
        return age.strip()

    def get_herder_url(self, response):
        herder_url = response.xpath("//dl[@class='personal_cen']//dt/img/@src").extract()
        if len(herder_url):
            herder_url = herder_url[0]
        else:
            herder_url = "NULL"
        return herder_url.strip()

    def get_image_url(self, response):
        image_url = response.xpath("//div[@class='ph_show']/ul/li/a/img/@src").extract()
        if not len(image_url):
            image_url = "NULL"
        return image_url

    def get_content(self, response):
        content = response.xpath("//div[@class='pre_data']/ul/li/p/text()").extract()
        if len(content):
            content = content[0]
        else:
            content = "NULL"
        return content.strip()

    def get_place_from(self, response):
        place_from = response.xpath("//div[@class='pre_data']/ul/li[2]//ol[1]/li[1]/span/text()").extract()
        if len(place_from):
            place_from = place_from[0]
        else:
            place_from = "NULL"
        return place_from.strip()

    def get_education(self, response):
        education = response.xpath("//div[@class='pre_data']/ul/li[3]//ol[2]/li[2]/span/text()").extract()
        if len(education):
            education = education[0]
        else:
            education = "NULL"
        return education.strip()

    def get_hobby(self, response):
        hobby = response.xpath("//dl[@class='personal_cen']//ol/li/text()").extract()
        if len(hobby):
            hobby = ",".join(hobby).replace(" ", "")
        else:
            hobby = "NULL"
        return hobby.strip()
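
The XPath expressions above are the fragile part of this spider. Before launching the full crawl, it is worth verifying them interactively with scrapy shell (the profile URL below is a made-up placeholder matching the profile_links pattern):

scrapy shell 'http://www.xxxx.com/10000000-profile/'
>>> response.xpath("//dl[@class='personal_cen']//div[@class='main']/strong/text()").extract()
>>> response.xpath("//div[@class='ph_show']/ul/li/a/img/@src").extract()

Any expression that returns an empty list here will make the corresponding getter fall back to "NULL" during the crawl.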

5. Run

scrapy crawl yy

Note that the argument to scrapy crawl is the spider's name attribute ('yy' in the spider above), not the project name.
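
If you only need the scraped data in a file, Scrapy's built-in feed export can also write it straight from the command line, bypassing any custom pipeline (output file name is arbitrary):

scrapy crawl yy -o yy.json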

OVER!
