# coding:utf8
# author:Jery
# datetime:2019/5/1 5:16
# software:PyCharm
# function: scrape used-car listings from Guazi (瓜子二手车)
import requests
from lxml import etree
import re
import csv

start_url = 'https://www.guazi.com/www/buy/o1c-1'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36',
    'Cookie': 'uuid=6032a689-d79a-4060-c8b0-f57d8db4e245; antipas=16I7A500578955101K231sG39E; clueSourceCode=10103000312%2300; user_city_id=49; ganji_uuid=3434204287155953305008; sessionid=405f3fb6-fb90-409d-c7ed-32874a157920; lg=1; cainfo=%7B%22ca_s%22%3A%22pz_baidu%22%2C%22ca_n%22%3A%22tbmkbturl%22%2C%22ca_medium%22%3A%22-%22%2C%22ca_term%22%3A%22-%22%2C%22ca_content%22%3A%22%22%2C%22ca_campaign%22%3A%22%22%2C%22ca_kw%22%3A%22-%22%2C%22keyword%22%3A%22-%22%2C%22ca_keywordid%22%3A%22-%22%2C%22scode%22%3A%2210103000312%22%2C%22ca_transid%22%3A%22%22%2C%22platform%22%3A%221%22%2C%22version%22%3A1%2C%22ca_i%22%3A%22-%22%2C%22ca_b%22%3A%22-%22%2C%22ca_a%22%3A%22-%22%2C%22display_finance_flag%22%3A%22-%22%2C%22client_ab%22%3A%22-%22%2C%22guid%22%3A%226032a689-d79a-4060-c8b0-f57d8db4e245%22%2C%22sessionid%22%3A%22405f3fb6-fb90-409d-c7ed-32874a157920%22%7D; cityDomain=mianyang; _gl_tracker=%7B%22ca_source%22%3A%22-%22%2C%22ca_name%22%3A%22-%22%2C%22ca_kw%22%3A%22-%22%2C%22ca_id%22%3A%22-%22%2C%22ca_s%22%3A%22self%22%2C%22ca_n%22%3A%22-%22%2C%22ca_i%22%3A%22-%22%2C%22sid%22%3A20570070983%7D; preTime=%7B%22last%22%3A1556660763%2C%22this%22%3A1556659891%2C%22pre%22%3A1556659891%7D'
}


# Fetch one listing page; return its detail-page URLs, the current page number, and the next-page link
def get_detail_urls(url):
    response = requests.get(url, headers=headers)
    text = response.content.decode('utf-8')
    html = etree.HTML(text)
    # Current page number shown in the pager (fall back to '?' if the node is missing)
    index = (html.xpath('//ul[@class="pageLink clearfix"]/li[@class="link-on"]/a/span/text()') or ['?'])[0]
    # The last pager link is taken as the next page; stop when no pager links remain
    next_links = html.xpath('//ul[@class="pageLink clearfix"]/li/a/@href')
    next_url = next_links[-1] if next_links else ''
    ul = html.xpath('//ul[@class="carlist clearfix js-top"]')[0]
    lis = ul.xpath('./li')
    urls = []
    for li in lis:
        detail_url = li.xpath('./a/@href')
        detail_url = 'https://www.guazi.com' + detail_url[0]
        urls.append(detail_url)
    return urls, index, next_url


# Scrape a single detail page and collect the car's attributes
def get_info(url):
    response = requests.get(url, headers=headers)
    text = response.content.decode('utf-8')
    html = etree.HTML(text)
    infos_dict = {}
    city = html.xpath('//p[@class="city-curr"]/text()')[0]
    # Keep only the Chinese characters of the city name
    city = re.search(r'[\u4e00-\u9fa5]+', city).group(0)
    infos_dict['city'] = city
    title = html.xpath('//div[@class="product-textbox"]/h2/text()')[0]
    # Strip the embedded line break and surrounding whitespace from the title
    infos_dict['title'] = title.replace('\r\n', '').strip()
    infos = html.xpath('//div[@class="product-textbox"]/ul/li/span/text()')
    infos_dict['cardtime'] = infos[0]
    infos_dict['kms'] = infos[1]
    # Some listings omit the registration place and expose only four fields
    if len(infos) == 4:
        infos_dict['cardplace'] = ''
        infos_dict['displacement'] = infos[2]
        infos_dict['speedbox'] = infos[3]
    else:
        infos_dict['cardplace'] = infos[2]
        infos_dict['displacement'] = infos[3]
        infos_dict['speedbox'] = infos[4]
    price = html.xpath('//div[@class="product-textbox"]/div/span[@class="pricestype"]/text()')[0]
    # Escape the dot so decimal prices such as 8.5 are captured intact
    infos_dict['price'] = re.search(r'\d+\.?\d+', price).group(0)
    return infos_dict


def main():
    global start_url
with open(r"C:\Users\Jery\Desktop\guazi.csv", 'w', newline='') as f:
csvwriter_head = csv.writer(f, dialect='excel')
csvwriter_head.writerow(['城市', '车型', '上牌时间', '上牌地', '表显里程', '排量', '变速箱', '价格'])
while True:
global start_url
urls, index, next_url = get_detail_urls(start_url)
print("当前页码:{}*****************".format(index))
# 写表头
with open(r'C:\Users\Jery\Desktop\guazi.csv', 'a') as f:
for url in urls:
print("正在爬取:{}".format(url))
infos = get_info(url)
print(infos)
csvwriter = csv.writer(f, dialect='excel')
csvwriter.writerow(
[infos['city'], infos['title'], infos['cardtime'], infos['cardplace'], infos['kms'],
infos['displacement'],
infos['speedbox'],
infos['price']])
if next_url:
start_url = 'https://www.guazi.com' + next_url if __name__ == '__main__':
main()
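
In practice the hard-coded Cookie above is tied to one browser session, so requests tend to start failing mid-run once it expires or the site's anti-crawler check kicks in. A minimal sketch of a more forgiving fetch helper (the name fetch, the retry count, and the delay are illustrative choices, not part of the script above):

import time

import requests


def fetch(url, headers, retries=3, delay=2.0):
    # Try the request a few times, pausing between attempts to stay polite
    for attempt in range(retries):
        try:
            response = requests.get(url, headers=headers, timeout=10)
            response.raise_for_status()  # Treat 4xx/5xx responses as failures
            return response.content.decode('utf-8')
        except requests.RequestException as e:
            print("Attempt {} failed for {}: {}".format(attempt + 1, url, e))
            time.sleep(delay)
    raise RuntimeError("Giving up on {}".format(url))

get_detail_urls and get_info could then call fetch(url, headers) in place of the bare requests.get.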

Data analysis of the scraped results will follow in a later post.
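
As a first step toward that analysis, a minimal sketch of loading the CSV with pandas, assuming pandas is installed and using the path and column names written by main() above:

import pandas as pd

# Load the scraped listings (path and header row come from the script above)
df = pd.read_csv(r"C:\Users\Jery\Desktop\guazi.csv", encoding='utf-8')

# get_info stores prices as bare numbers such as 8.5 (units of 10,000 CNY)
df['Price'] = pd.to_numeric(df['Price'], errors='coerce')

# A first look: listing counts and median price per city
print(df.groupby('City')['Price'].agg(['count', 'median']).sort_values('count', ascending=False))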
