ershoufang.py

# -*- coding: utf-8 -*-
import scrapy


class ErshoufangSpider(scrapy.Spider):
    name = 'ershoufang'
    allowed_domains = ['maitian.com']
    start_urls = ['http://maitian.com/']

    def parse(self, response):
        pass
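
This spider is only the empty scaffold that scrapy genspider ershoufang maitian.com generates; its parse() is never filled in. A minimal sketch of how it could be completed, using a placeholder XPath borrowed from the rental spider below, which would have to be verified against the real second-hand listing pages before use:

    # Hypothetical completion of the stub above; the selector is a
    # placeholder copied from the zufang spider and must be checked
    # against the actual ershoufang listing markup before relying on it.
    def parse(self, response):
        for listing in response.xpath('//div[@class="list_title"]'):
            title = listing.xpath('./h1/a/text()').extract_first()
            if title:
                yield {'title': title.strip()}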

zufang_spider.py

import scrapy
from maitian.items import MaitianItem


class MaitianSpider(scrapy.Spider):
    name = "zufang"
    start_urls = ['http://bj.maitian.cn/zfall/PG1']

    def parse(self, response):
        for zufang_item in response.xpath('//div[@class="list_title"]'):
            yield {
                'title': zufang_item.xpath('./h1/a/text()').extract_first().strip(),
                'price': zufang_item.xpath('./div[@class="the_price"]/ol/strong/span/text()').extract_first().strip(),
                'area': zufang_item.xpath('./p/span/text()').extract_first().replace('㎡', '').strip(),
                'district': zufang_item.xpath('./p//text()').re(r'昌平|朝阳|东城|大兴|丰台|海淀|石景山|顺义|通州|西城')[0],
            }

        # Follow the "next page" link until there is none left.
        next_page_url = response.xpath(
            '//div[@id="paging"]/a[@class="down_page"]/@href').extract_first()
        if next_page_url is not None:
            yield scrapy.Request(response.urljoin(next_page_url))
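
The XPath expressions are the fragile part of this spider, so it helps to check them outside a full crawl. Below is a minimal, self-contained sketch that runs the same expressions against a hand-written HTML fragment; the fragment only mirrors the structure the spider assumes on bj.maitian.cn, so the real page may differ:

    # Standalone check of the spider's XPath logic against sample markup.
    # The fragment is an assumption modeled on the selectors above, not
    # the real page source.
    from scrapy import Selector

    sample = '''
    <div class="list_title">
      <h1><a> 整租 · 两居室 </a></h1>
      <div class="the_price"><ol><strong><span> 5000元/月 </span></strong></ol></div>
      <p><span> 89㎡ </span>海淀区 北太平庄</p>
    </div>
    '''

    sel = Selector(text=sample)
    for node in sel.xpath('//div[@class="list_title"]'):
        print(node.xpath('./h1/a/text()').extract_first().strip())
        print(node.xpath('./div[@class="the_price"]/ol/strong/span/text()').extract_first().strip())
        print(node.xpath('./p/span/text()').extract_first().replace('㎡', '').strip())
        print(node.xpath('./p//text()').re(r'昌平|朝阳|东城|大兴|丰台|海淀|石景山|顺义|通州|西城')[0])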

items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class MaitianItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = scrapy.Field()
    price = scrapy.Field()
    area = scrapy.Field()
    district = scrapy.Field()
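
Note that the zufang spider above yields plain dicts, so MaitianItem is imported but never actually used. A quick sketch of what the Item class buys you if you do use it: it behaves like a dict with a fixed schema, and assigning an undeclared field raises KeyError. This assumes the maitian package is on the import path:

    from maitian.items import MaitianItem

    item = MaitianItem(title='整租 · 两居室', price='5000', area='89', district='海淀')
    print(dict(item))

    try:
        item['floor'] = '12'  # 'floor' is not declared on MaitianItem
    except KeyError as exc:
        print('undeclared field rejected:', exc)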

middlewares.py

# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals


class MaitianSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        #
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        #
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        #
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        #
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class MaitianDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        #
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        #
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        #
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
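
The two classes above are the untouched boilerplate that scrapy startproject generates; nothing in this project enables them. As an illustration of what a downloader middleware is actually for, here is a small sketch (not part of the original project) that rotates the User-Agent header per request; it would be enabled through DOWNLOADER_MIDDLEWARES in settings.py:

    # Illustrative only: a hypothetical middleware that picks a random
    # User-Agent for each outgoing request.
    import random

    USER_AGENTS = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64)',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6)',
    ]

    class RandomUserAgentMiddleware(object):
        def process_request(self, request, spider):
            request.headers['User-Agent'] = random.choice(USER_AGENTS)
            return None  # continue normal processing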

pipelines.py

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

import pymongo
from scrapy.utils.project import get_project_settings


class MaitianPipeline(object):
    def __init__(self):
        # scrapy.conf is deprecated; read the MongoDB settings through
        # the project settings API instead.
        settings = get_project_settings()
        host = settings['MONGODB_HOST']
        port = settings['MONGODB_PORT']
        db_name = settings['MONGODB_DBNAME']
        client = pymongo.MongoClient(host=host, port=port)
        db = client[db_name]
        self.post = db[settings['MONGODB_DOCNAME']]

    def process_item(self, item, spider):
        zufang = dict(item)
        # insert() was removed in pymongo 4; insert_one() is the
        # supported equivalent.
        self.post.insert_one(zufang)
        return item
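
Once a crawl has run, the inserted documents can be checked directly with pymongo. A short sketch, assuming the connection values from settings.py below (127.0.0.1:27017, database 'maitian', collection 'zufang'):

    import pymongo

    client = pymongo.MongoClient(host='127.0.0.1', port=27017)
    collection = client['maitian']['zufang']
    print('documents:', collection.count_documents({}))
    for doc in collection.find().limit(3):
        print(doc['title'], doc['price'], doc['area'], doc['district'])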

settings.py

# -*- coding: utf-8 -*-

# Scrapy settings for maitian project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'maitian'

SPIDER_MODULES = ['maitian.spiders']
NEWSPIDER_MODULE = 'maitian.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'maitian (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'maitian.middlewares.MaitianSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'maitian.middlewares.MaitianDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
#    'maitian.pipelines.DuplicatesPipeline': 301,  # would need a DuplicatesPipeline class, which pipelines.py above does not define
#}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

ITEM_PIPELINES = {
    'maitian.pipelines.MaitianPipeline': 300,
}

# MongoDB connection settings read by MaitianPipeline
MONGODB_HOST = '127.0.0.1'
MONGODB_PORT = 27017
MONGODB_DBNAME = 'maitian'
MONGODB_DOCNAME = 'zufang'
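
With everything wired up, the crawl is started with scrapy crawl zufang from the project root. If you prefer launching it from a script, this sketch does the same thing through Scrapy's Python API:

    # run.py — start the zufang spider without the scrapy CLI; this is
    # equivalent to running "scrapy crawl zufang" in the project directory.
    from scrapy.crawler import CrawlerProcess
    from scrapy.utils.project import get_project_settings

    process = CrawlerProcess(get_project_settings())
    process.crawl('zufang')
    process.start()  # blocks until the crawl finishes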
