```python
# coding:utf8
# author: Jery
# datetime: 2019/5/1 5:16
# software: PyCharm
# function: scrape used-car listings from guazi.com
import csv
import re

import requests
from lxml import etree

start_url = 'https://www.guazi.com/www/buy/o1c-1'
# Every request carries a browser User-Agent plus a Cookie copied from my
# browser session; the Cookie is session-specific and will expire, so
# replace it with your own.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36',
    'Cookie': 'uuid=6032a689-d79a-4060-c8b0-f57d8db4e245; antipas=16I7A500578955101K231sG39E; clueSourceCode=10103000312%2300; user_city_id=49; ganji_uuid=3434204287155953305008; sessionid=405f3fb6-fb90-409d-c7ed-32874a157920; lg=1; cainfo=%7B%22ca_s%22%3A%22pz_baidu%22%2C%22ca_n%22%3A%22tbmkbturl%22%2C%22ca_medium%22%3A%22-%22%2C%22ca_term%22%3A%22-%22%2C%22ca_content%22%3A%22%22%2C%22ca_campaign%22%3A%22%22%2C%22ca_kw%22%3A%22-%22%2C%22keyword%22%3A%22-%22%2C%22ca_keywordid%22%3A%22-%22%2C%22scode%22%3A%2210103000312%22%2C%22ca_transid%22%3A%22%22%2C%22platform%22%3A%221%22%2C%22version%22%3A1%2C%22ca_i%22%3A%22-%22%2C%22ca_b%22%3A%22-%22%2C%22ca_a%22%3A%22-%22%2C%22display_finance_flag%22%3A%22-%22%2C%22client_ab%22%3A%22-%22%2C%22guid%22%3A%226032a689-d79a-4060-c8b0-f57d8db4e245%22%2C%22sessionid%22%3A%22405f3fb6-fb90-409d-c7ed-32874a157920%22%7D; cityDomain=mianyang; _gl_tracker=%7B%22ca_source%22%3A%22-%22%2C%22ca_name%22%3A%22-%22%2C%22ca_kw%22%3A%22-%22%2C%22ca_id%22%3A%22-%22%2C%22ca_s%22%3A%22self%22%2C%22ca_n%22%3A%22-%22%2C%22ca_i%22%3A%22-%22%2C%22sid%22%3A20570070983%7D; preTime=%7B%22last%22%3A1556660763%2C%22this%22%3A1556659891%2C%22pre%22%3A1556659891%7D'
}
```
The first helper collects every detail-page URL on one listing page, together with the current page number and the href of the next page:

```python
def get_detail_urls(url):
    """Return (detail_urls, current_page_number, next_page_href) for one listing page."""
    response = requests.get(url, headers=headers)
    text = response.content.decode('utf-8')
    html = etree.HTML(text)
    # Current page number: the highlighted <li class="link-on"> in the pager.
    index = html.xpath('//ul[@class="pageLink clearfix"]/li[@class="link-on"]/a/span/text()')
    index = index[0] if index else ''
    # The last pager link is taken to be the "next page" link.
    page_links = html.xpath('//ul[@class="pageLink clearfix"]/li/a/@href')
    next_url = page_links[-1] if page_links else None
    # Each <li> in the car list links to a detail page via a relative href.
    ul = html.xpath('//ul[@class="carlist clearfix js-top"]')[0]
    urls = []
    for li in ul.xpath('./li'):
        detail_url = li.xpath('./a/@href')
        if detail_url:
            urls.append('https://www.guazi.com' + detail_url[0])
    return urls, index, next_url
```
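For a quick sanity check, the helper can be exercised on the first listing page (the output depends on the live site and on the Cookie still being valid):

```python
urls, page_no, next_href = get_detail_urls(start_url)
print(page_no, len(urls), next_href)  # e.g. '1', 40, '/www/buy/o2c-1'
print(urls[:3])                       # first few detail-page URLs
```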
Each detail page is then parsed into a dict: city, title, registration date and place, mileage, engine displacement, gearbox, and price. Some listings omit the registration place, so the spec list can have four or five entries:

```python
def get_info(url):
    """Scrape one detail page into a dict of car attributes."""
    response = requests.get(url, headers=headers)
    text = response.content.decode('utf-8')
    html = etree.HTML(text)
    infos_dict = {}
    # City: keep only the Chinese characters, e.g. "绵阳".
    city = html.xpath('//p[@class="city-curr"]/text()')[0]
    infos_dict['city'] = re.search(r'[\u4e00-\u9fa5]+', city).group(0)
    title = html.xpath('//div[@class="product-textbox"]/h2/text()')[0]
    infos_dict['title'] = title.replace('\r\n', '').strip()
    infos = html.xpath('//div[@class="product-textbox"]/ul/li/span/text()')
    infos_dict['cardtime'] = infos[0]
    infos_dict['kms'] = infos[1]
    if len(infos) == 4:  # registration place missing on this listing
        infos_dict['cardplace'] = ''
        infos_dict['displacement'] = infos[2]
        infos_dict['speedbox'] = infos[3]
    else:
        infos_dict['cardplace'] = infos[2]
        infos_dict['displacement'] = infos[3]
        infos_dict['speedbox'] = infos[4]
    price = html.xpath('//div[@class="product-textbox"]/div/span[@class="pricestype"]/text()')[0]
    # Keep just the number, e.g. "9.98" out of "¥9.98万".
    infos_dict['price'] = re.search(r'\d+\.?\d+', price).group(0)
    return infos_dict
```
Finally, `main()` writes the CSV header once, then loops: scrape every detail page of the current listing page, append the rows, and follow the next-page link until there is none:

```python
def main():
    global start_url
    csv_path = r"C:\Users\Jery\Desktop\guazi.csv"
    # Write the header row once (utf-8-sig so Excel detects the encoding).
    with open(csv_path, 'w', newline='', encoding='utf-8-sig') as f:
        csv.writer(f, dialect='excel').writerow(
            ['City', 'Model', 'Registration date', 'Registration place',
             'Mileage', 'Displacement', 'Gearbox', 'Price'])
    while True:
        urls, index, next_url = get_detail_urls(start_url)
        print("Current page: {} *****************".format(index))
        # Append this page's rows.
        with open(csv_path, 'a', newline='', encoding='utf-8-sig') as f:
            csvwriter = csv.writer(f, dialect='excel')
            for url in urls:
                print("Scraping: {}".format(url))
                infos = get_info(url)
                print(infos)
                csvwriter.writerow(
                    [infos['city'], infos['title'], infos['cardtime'],
                     infos['cardplace'], infos['kms'], infos['displacement'],
                     infos['speedbox'], infos['price']])
        if next_url:
            start_url = 'https://www.guazi.com' + next_url
        else:
            break


if __name__ == '__main__':
    main()
```
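One practical caveat: the script fires requests back-to-back and aborts on the first network hiccup. A minimal hardening sketch that `get_detail_urls` and `get_info` could call in place of `requests.get` (the `fetch` helper and the timeout/delay values are my additions, not part of the original script):

```python
import time

def fetch(url, retries=3, delay=2):
    """GET a page with a timeout, simple retries, and a polite pause."""
    for attempt in range(retries):
        try:
            response = requests.get(url, headers=headers, timeout=10)
            response.raise_for_status()  # surface 4xx/5xx as exceptions
            time.sleep(delay)            # space out requests
            return response.content.decode('utf-8')
        except requests.RequestException:
            if attempt == retries - 1:
                raise                           # out of retries
            time.sleep(delay * (attempt + 1))   # back off before retrying
```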

Data analysis of the scraped listings will follow in a later post.
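As a starting point for that analysis, the CSV can be loaded back like this (a sketch assuming pandas is installed; the column names match the header row written by `main()` above):

```python
import pandas as pd

df = pd.read_csv(r"C:\Users\Jery\Desktop\guazi.csv", encoding='utf-8-sig')
# 'Price' was scraped as a bare number in units of 万 (10,000 CNY).
df['Price'] = pd.to_numeric(df['Price'], errors='coerce')
print(df['Price'].describe())
print(df.groupby('City')['Price'].mean().sort_values(ascending=False).head(10))
```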
