1. Create the project (in cmd)

scrapy startproject xxxx
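
For reference, startproject lays out a skeleton like this (shown with youyuan, the project name used in the rest of this post; the exact files vary slightly across Scrapy versions):

youyuan/
    scrapy.cfg            # deploy configuration
    youyuan/              # the project's Python package
        __init__.py
        items.py          # item definitions (step 2)
        middlewares.py    # spider and downloader middlewares
        pipelines.py      # item pipelines
        settings.py       # project settings (step 3)
        spiders/          # spider modules go here (step 4)
            __init__.py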

2. Write the items file

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

from scrapy import Field, Item


class YouyuanItem(Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    # username
    username = Field()
    # age
    age = Field()
    # avatar image URL
    herder_url = Field()
    # album image URLs
    image_url = Field()
    # self-introduction
    content = Field()
    # hometown
    place_from = Field()
    # education
    education = Field()
    # hobbies
    hobby = Field()
    # profile page URL
    source_url = Field()
    # data source
    sourcec = Field()
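
Item instances behave like dicts with a fixed key set: assigning to a field that was not declared raises a KeyError, which catches typos early. A quick sanity check (the values are purely illustrative):

>>> from youyuan.items import YouyuanItem
>>> item = YouyuanItem()
>>> item['username'] = 'demo_user'
>>> dict(item)
{'username': 'demo_user'}
>>> item['nickname'] = 'x'
Traceback (most recent call last):
  ...
KeyError: 'YouyuanItem does not support field: nickname'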

3. Write the settings file

# -*- coding: utf-8 -*-

# Scrapy settings for youyuan project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'youyuan'

SPIDER_MODULES = ['youyuan.spiders']
NEWSPIDER_MODULE = 'youyuan.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'youyuan (+http://www.yourdomain.com)'

# Obey robots.txt rules
#ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'youyuan.middlewares.YouyuanSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'youyuan.middlewares.YouyuanDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'youyuan.pipelines.YouyuanPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
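
ITEM_PIPELINES above points at youyuan.pipelines.YouyuanPipeline, which this post never shows. A minimal sketch (Python 3; the items.json file name is my assumption, not from the original) that dumps each item as one line of JSON:

# pipelines.py
import json


class YouyuanPipeline(object):

    def open_spider(self, spider):
        # open the output file once, when the spider starts
        self.file = open('items.json', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        # write each scraped item as one JSON line
        self.file.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
        return item

    def close_spider(self, spider):
        self.file.close()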

4. Go into the spiders directory and create a custom spider file

scrapy genspider demo 'www.xxxx.com'
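
A bare genspider produces a basic scrapy.Spider stub. Since the spider below subclasses CrawlSpider, generating from the crawl template gives a closer starting point:

scrapy genspider -t crawl demo www.xxxx.com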

Then fill in the generated file:

# -*- coding: utf-8 -*-
import re

import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from youyuan.items import YouyuanItem


class YySpider(CrawlSpider):
    name = 'yy'
    allowed_domains = ['xxxx.com']
    start_urls = ['http://www.xxxx.com/find/beijing/mm18-25/advance-0-0-0-0-0-0-0/p1/']

    # link extractor matching every listing page
    page_links = LinkExtractor(allow=(r"xxxx.com/find/beijing/mm18-25/advance-0-0-0-0-0-0-0/p\d+/"))
    # link extractor matching every profile page
    profile_links = LinkExtractor(allow=(r"xxxx.com/\d+-profile/"))

    rules = (
        # follow pagination links; no callback, they are only crawled for more links
        Rule(page_links),
        # parse every profile page with parse_item
        Rule(profile_links, callback='parse_item'),
    )

    def parse_item(self, response):
        item = YouyuanItem()
        # username
        item['username'] = self.get_username(response)
        # age
        item['age'] = self.get_age(response)
        # avatar image URL
        item['herder_url'] = self.get_herder_url(response)
        # album image URLs
        item['image_url'] = self.get_image_url(response)
        # self-introduction
        item['content'] = self.get_content(response)
        # hometown
        item['place_from'] = self.get_place_from(response)
        # education
        item['education'] = self.get_education(response)
        # hobbies
        item['hobby'] = self.get_hobby(response)
        # profile page URL
        item['source_url'] = response.url
        # data source
        item['sourcec'] = "youyuan"
        yield item

    def get_username(self, response):
        username = response.xpath("//dl[@class='personal_cen']//div[@class='main']/strong/text()").extract()
        if len(username):
            username = username[0]
        else:
            username = "NULL"
        return username.strip()

    def get_age(self, response):
        age = response.xpath("//dl[@class='personal_cen']//dd/p/text()").extract()
        if len(age):
            age = re.findall(r"\d+岁", age[0])[0]
        else:
            age = "NULL"
        return age.strip()

    def get_herder_url(self, response):
        herder_url = response.xpath("//dl[@class='personal_cen']//dt/img/@src").extract()
        if len(herder_url):
            herder_url = herder_url[0]
        else:
            herder_url = "NULL"
        return herder_url.strip()

    def get_image_url(self, response):
        # keep the full list of album image URLs
        image_url = response.xpath("//div[@class='ph_show']/ul/li/a/img/@src").extract()
        if not image_url:
            image_url = "NULL"
        return image_url

    def get_content(self, response):
        content = response.xpath("//div[@class='pre_data']/ul/li/p/text()").extract()
        if len(content):
            content = content[0]
        else:
            content = "NULL"
        return content.strip()

    def get_place_from(self, response):
        place_from = response.xpath("//div[@class='pre_data']/ul/li[2]//ol[1]/li[1]/span/text()").extract()
        if len(place_from):
            place_from = place_from[0]
        else:
            place_from = "NULL"
        return place_from.strip()

    def get_education(self, response):
        education = response.xpath("//div[@class='pre_data']/ul/li[3]//ol[2]/li[2]/span/text()").extract()
        if len(education):
            education = education[0]
        else:
            education = "NULL"
        return education.strip()

    def get_hobby(self, response):
        hobby = response.xpath("//dl[@class='personal_cen']//ol/li/text()").extract()
        if len(hobby):
            hobby = ",".join(hobby).replace(" ", "")
        else:
            hobby = "NULL"
        return hobby.strip()
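
Every get_* helper above repeats the same extract-or-default boilerplate. On recent Scrapy versions, SelectorList.get() collapses it into a single call; for example, get_education could be written as:

    def get_education(self, response):
        # .get() returns the first match, or the default when nothing matches
        education = response.xpath(
            "//div[@class='pre_data']/ul/li[3]//ol[2]/li[2]/span/text()"
        ).get(default="NULL")
        return education.strip()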

5. Run the spider

scrapy crawl yy
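
The argument to crawl is the spider's name attribute (yy here), not the project name. To save the scraped items without writing a pipeline, the built-in feed exporters can dump them straight from the command line, choosing the format by file extension (JSON, JSON lines, CSV, or XML):

scrapy crawl yy -o items.json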

OVER!
