Scraping YY Rating (YY评级) Information
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : 爬取YY评级基本信息.py
# @Author: lattesea
# @Date : 2019/10/7
# @Desc :
import requests
import json
import csv
from fake_useragent import UserAgent
import time
import random


class YYpingjiSpider(object):
    def __init__(self):
        # Paged search endpoint, plus the two issuer-detail endpoints
        # (IssuerType=1001 for industrial issuers, 1002 for city-investment issuers)
        self.url = 'https://api.ratingdog.cn/v1/search?limit=10&offset={}&type=3&qtext=&filter=%7B%7D&_=1570391570681'
        self.url2 = 'https://api.ratingdog.cn/v1/GetIssuerInfo?IssuerID={}&IssuerType=1001'
        self.url3 = 'https://api.ratingdog.cn/v1/GetIssuerInfo?IssuerID={}&IssuerType=1002'

    def get_headers(self):
        ua = UserAgent()
        headers = {
            "Accept": "application/json, text/plain, */*",
            "Origin": "https://www.ratingdog.cn",
            "Referer": "https://www.ratingdog.cn/",
            "Sec-Fetch-Mode": "cors",
            "User-Agent": ua.random
        }
        return headers

    def parse_IssuerID_IssuerType(self, url):
        # One page of search results: collect (IssuerID, IssuerType) pairs
        IssuerID_list = []
        html_json = requests.get(url=url, headers=self.get_headers()).text
        html_py = json.loads(html_json)
        for i in html_py['rows']:
            IssuerID_list.append((i['IssuerID'], i['IssuerType']))
        print(IssuerID_list)
        return IssuerID_list

    def parse_basic_message_1002(self, IssuerID):
        # Detail record for a city-investment (城投) issuer.
        # 'rows' is a single object here, so no loop is needed
        # (the original iterated over its keys and reassigned the same values)
        url = self.url3.format(IssuerID)
        html_json = requests.get(url=url, headers=self.get_headers()).text
        rows = json.loads(html_json)['rows']
        ct = rows['CtExtendInfo']
        basic_message = {
            'IssuerName': rows['IssuerName'],
            'CorporateRating': rows['CorporateRating'],
            'RatingAgency': rows['RatingAgency'],
            'Holder': rows['Holder'],
            'Industry': rows['Industry'],
            'Nature': rows['Nature'],
            'YYRating': rows['YYRating'],
            'IssuerType': rows['IssuerType'],
            'CreditAnalysis': rows['CreditAnalysis'],
            'PlatformImportance': ct['PlatformImportance'],
            'PrincipalBusiness': ct['PrincipalBusiness'],
            'GDP': ct['GDP'],
            'Revenue': ct['Revenue'],
            'YYRatio': ct['YYRatio'],
            'IssuerCity': ct['IssuerCity'],
            'ADLevel': ct['ADLevel'],
        }
        print(basic_message)
        return basic_message

    def parse_basic_message_1001(self, IssuerID):
        # Detail record for an industrial (产业) issuer
        url = self.url2.format(IssuerID)
        html_json = requests.get(url=url, headers=self.get_headers()).text
        rows = json.loads(html_json)['rows']
        cy = rows['CyExtendInfo']
        basic_message = {
            'IssuerName': rows['IssuerName'],
            'CorporateRating': rows['CorporateRating'],
            'RatingAgency': rows['RatingAgency'],
            'Holder': rows['Holder'],
            'Industry': rows['Industry'],
            'Nature': rows['Nature'],
            'YYRating': rows['YYRating'],
            'IssuerType': rows['IssuerType'],
            'CreditAnalysis': rows['CreditAnalysis'],
            'YYIndustry': cy['YYIndustry'],
            'YYIndustryId': cy['YYIndustryId'],
            'IndustrylStatus': cy['IndustrylStatus'],
            'ShareholderBackground': cy['ShareholderBackground'],
            'OperatingStatus': cy['OperatingStatus'],
            'FinancialStatus': cy['FinancialStatus'],
            'Focus': cy['Focus'],
        }
        print(basic_message)
        return basic_message

    def save_csv_1001(self, result):
        keyword_list1 = ['IssuerName', 'CorporateRating', 'RatingAgency', 'Holder', 'Industry', 'Nature', 'YYRating',
                         'IssuerType', 'CreditAnalysis', 'YYIndustry', 'YYIndustryId', 'IndustrylStatus',
                         'ShareholderBackground', 'OperatingStatus', 'FinancialStatus', 'Focus']
        # Append one row per issuer; no header row is ever written
        # (see the sketch after the listing for one way to add it)
        with open('1001.csv', 'a', newline='', encoding='utf-8') as f:
            writer = csv.DictWriter(f, keyword_list1)
            writer.writerow(result)

    def save_csv_1002(self, result):
        # Note: the original listed 'PrincipalBusiness' twice; the duplicate is dropped here
        keyword_list2 = ['IssuerName', 'CorporateRating', 'RatingAgency', 'Holder', 'Industry', 'Nature', 'YYRating',
                         'IssuerType', 'CreditAnalysis', 'PlatformImportance', 'PrincipalBusiness',
                         'GDP', 'Revenue', 'YYRatio', 'IssuerCity', 'ADLevel']
        with open('1002.csv', 'a', newline='', encoding='utf-8') as f:
            writer = csv.DictWriter(f, keyword_list2)
            writer.writerow(result)

    def run(self):
        # The search URL requests limit=10 per page, so the offset steps by 10;
        # the original stepped by 20, which would skip every other page
        for i in range(0, 4631, 10):
            url = self.url.format(i)
            IssuerID_IssuerType = self.parse_IssuerID_IssuerType(url)
            for IssuerID, IssuerType in IssuerID_IssuerType:
                if IssuerType == '产业':
                    result = self.parse_basic_message_1001(IssuerID)
                    self.save_csv_1001(result)
                elif IssuerType == '城投':
                    result = self.parse_basic_message_1002(IssuerID)
                    self.save_csv_1002(result)
                # Pause between issuers; the site bans accounts that hit it too fast
                time.sleep(random.uniform(1, 4))


if __name__ == '__main__':
    spider = YYpingjiSpider()
    spider.run()
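One gap worth noting: both CSV files are opened in append mode and never receive a header row, so the columns are anonymous when the output is opened later. A minimal sketch of one way to handle this, assuming it is acceptable to write the header only when the file is first created (the helper write_row and the os.path.exists check are my additions, not part of the original script):

import csv
import os

def write_row(path, fieldnames, row):
    # Write the header exactly once, when the file is first created,
    # then append one data row per call
    is_new = not os.path.exists(path)
    with open(path, 'a', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames)
        if is_new:
            writer.writeheader()
        writer.writerow(row)

save_csv_1001 and save_csv_1002 could then both delegate to this helper with their respective field lists.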
The main pitfall with this site: if the request frequency is too high, your account gets banned.
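If a ban or throttle does hit mid-crawl, the script as written dies on the first bad response. Below is a hedged sketch of a retry-with-backoff wrapper around requests.get; the function name fetch_json, the retry count, and the assumption that throttling surfaces as HTTP 429/5xx are illustrative guesses, not confirmed behavior of the ratingdog.cn API:

import time
import random
import requests

def fetch_json(url, headers, retries=3):
    # Back off exponentially (with jitter) when the server signals
    # throttling (429) or a transient error (5xx); fail fast otherwise
    for attempt in range(retries):
        resp = requests.get(url, headers=headers, timeout=10)
        if resp.status_code == 200:
            return resp.json()
        if resp.status_code in (429, 500, 502, 503):
            time.sleep(2 ** attempt + random.uniform(0, 1))
        else:
            resp.raise_for_status()
    raise RuntimeError('gave up on %s after %d attempts' % (url, retries))

The parse methods could call fetch_json(url, self.get_headers()) instead of requests.get directly; even with retries, keeping the per-issuer random sleep is the safer default here.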