Python web crawler: scraping bank-related news from 中证网 (cs.com.cn, China Securities Journal)

The final script batch-crawls the site's search results for a bank name across 2014-2021 and saves each article summary to its own text file; the earlier iterations that led to it are kept below as an optimization history.

Final version: 07_中证网(Plus -Pro).py
# coding=utf-8
import requests
from bs4 import BeautifulSoup
import io
import sys
import os

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')  # switch stdout to GB18030 so Chinese prints correctly

for qq in range(8):
    # query = input("【中证网】请输入你想搜索的内容:")
    query = '苏州银行'
    # years to crawl, and the number of search-result pages for each year
    year = [2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021]
    pages = [2, 1, 1, 1, 11, 1, 19, 7]
    year = year[qq]
    pages = pages[qq]
    if not os.path.isdir(f'D:/桌面/爬虫-银行/中国证券网/{query}'):  # create the query folder if it is missing
        os.mkdir(f'D:/桌面/爬虫-银行/中国证券网/{query}')
    m = 0
    for p in range(1, pages + 1):
        url = f'http://search.cs.com.cn/search?page={p}&channelid=215308&searchword={query}&keyword={query}&token=12.1462412070719.47&perpage=10&outlinepage=5&&andsen=&total=&orsen=&exclude=&searchscope=&timescope=&timescopecolumn=&orderby=&timeline=={year}'
        dic = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}
        resp = requests.get(url, headers=dic)
        resp.encoding = 'utf-8'
        print(f'\n>>>--------------------第{p}页---------------------<<<\n')
        page = BeautifulSoup(resp.text, "html.parser")  # parse with the built-in HTML parser
        alist = page.find_all("table")
        datalist = []
        for ii in alist:
            # the article summary sits in a <td> with this exact inline style
            ss = ii.find('td', style='font-size: 12px;line-height: 24px;color: #333333;margin-top: 4px;')
            if ss is not None:
                datalist.append(ss.get_text())
        if not os.path.isdir(f'D:/桌面/爬虫-银行/中国证券网/{query}/{year}'):  # create the year folder if it is missing
            os.mkdir(f'D:/桌面/爬虫-银行/中国证券网/{query}/{year}')
        for ii in range(len(datalist)):
            fp = open(f'D:/桌面/爬虫-银行/中国证券网/{query}/{year}/({year}){ii + m + 1}.txt', 'w+', encoding='utf-8')
            fp.write(datalist[ii] + '\n')  # text only
            print(datalist[ii])
            print(f'\n> > >{year}年,第{p}页,第{ii + 1}篇,成功! < < <')
            fp.close()
        m = m + len(datalist)  # advance the file index by exactly the number of articles saved on this page
    print('----------------------------')
    print(f'------\n{year}年,爬取完毕----')
    print('----------------------------')
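For comparison, here is a compact sketch of how the final script's loop could be hardened: `os.makedirs(..., exist_ok=True)` replaces the manual folder checks, `zip` pairs each year with its page count, requests get a timeout, and files are opened with `with` so they close even on errors. The endpoint and the `<td>` style selector are copied from the script above; the trimmed query string (dropping the token and the empty parameters) is an assumption that the endpoint tolerates, so treat this as a sketch, not a drop-in replacement.

# Sketch: a tidier version of the final script's main loop (same endpoint and selector as above).
import os
import requests
from bs4 import BeautifulSoup

BASE = 'D:/桌面/爬虫-银行/中国证券网'  # output root, same as the script above
HEADERS = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                         "(KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}
TD_STYLE = 'font-size: 12px;line-height: 24px;color: #333333;margin-top: 4px;'

def crawl(query, year, pages):
    out_dir = f'{BASE}/{query}/{year}'
    os.makedirs(out_dir, exist_ok=True)  # creates the query and year folders in one call
    n = 0                                # running file index for this year
    for p in range(1, pages + 1):
        # assumption: the endpoint accepts this reduced parameter set
        url = (f'http://search.cs.com.cn/search?page={p}&channelid=215308'
               f'&searchword={query}&keyword={query}&perpage=10&timeline=={year}')
        resp = requests.get(url, headers=HEADERS, timeout=10)
        resp.encoding = 'utf-8'
        soup = BeautifulSoup(resp.text, 'html.parser')
        for td in soup.find_all('td', style=TD_STYLE):  # same style-based selector as above
            n += 1
            with open(f'{out_dir}/({year}){n}.txt', 'w', encoding='utf-8') as fp:
                fp.write(td.get_text() + '\n')
        print(f'{year}年 第{p}页:已保存 {n} 篇')

for year, pages in zip([2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021],
                       [2, 1, 1, 1, 11, 1, 19, 7]):
    crawl('苏州银行', year, pages)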
Optimization history: 01_中证网.py
# coding=utf-8
import requests
from bs4 import BeautifulSoup
import io
import sys

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')  # switch stdout to GB18030 so Chinese prints correctly

query = input("【中证网】请输入你想搜索的内容:")
pages = int(input("要爬取的页数(不小于1):"))
if pages < 1:
    exit()
url = f'http://search.cs.com.cn/search?channelid=215308&perpage=&templet=&token=12.1462412070719.47&searchword={query}'
dic = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 "
                  "Safari/537.36 SLBrowser/7.0.0.6241 SLBChan/30"}
resp = requests.get(url, headers=dic)
resp.encoding = 'utf-8'
page = BeautifulSoup(resp.text, "html.parser")  # parse with the built-in HTML parser
alist = page.find("table").find_all("a")
weblist = []
for a in alist:
    if a.get('href')[:5] == "https":  # keep only absolute https article links
        weblist.append(a.get('href'))
# ---------------- each article on the first page ----------------
m = 0
for ii in range(len(weblist)):
    url_a = weblist[ii]
    dic_a = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 "
                      "Safari/537.36 SLBrowser/7.0.0.6241 SLBChan/30"}
    resp_a = requests.get(url_a, headers=dic_a)
    resp_a.encoding = 'gbk'  # article pages are GBK-encoded
    page_a = BeautifulSoup(resp_a.text, "html.parser")
    page_b = page_a.find('section').find_all('p')  # article body paragraphs
    fp = open(f'D:/桌面/爬虫-银行/中国证券网/中国银行/{ii+1}.txt', 'w+', encoding='utf-8')
    txt_list = []
    for txt_a in page_b:
        txt_list.append(txt_a.text)
    # ---------------- write the text ----------------
    for i in range(len(txt_list)):
        fp.write(txt_list[i] + '\n')  # text only
    fp.close()
    print(f'>>{ii+1}成功!')
    m = ii + 1
# ---------------- remaining pages ----------------
if pages > 1:
    for p in range(pages):
        url_s = f"http://search.cs.com.cn/search?page={p+1}&channelid=215308&searchword={query}"
        resp = requests.get(url, headers=dic)  # bug: this requests `url` (page 1) instead of `url_s`
        resp.encoding = 'utf-8'
        page = BeautifulSoup(resp.text, "html.parser")
        alist = page.find("table").find_all("a")
        weblist = []
        for a in alist:
            if a.get('href')[:5] == "https":
                weblist.append(a.get('href'))
        # ---------------- each article on this page ----------------
        for ii in range(len(weblist)):
            url_a = weblist[ii]
            dic_a = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 "
                              "Safari/537.36 SLBrowser/7.0.0.6241 SLBChan/30"}
            resp_a = requests.get(url_a, headers=dic_a)
            resp_a.encoding = 'gbk'
            page_a = BeautifulSoup(resp_a.text, "html.parser")
            page_b = page_a.find('section').find_all('p')
            fp = open(f'D:/桌面/爬虫-银行/中国证券网/中国银行/{ii + 1 + m}.txt', 'w+', encoding='utf-8')
            txt_list = []
            for txt_a in page_b:
                txt_list.append(txt_a.text)
            for i in range(len(txt_list)):
                fp.write(txt_list[i] + '\n')  # text only
            print(f'>>{ii + 1 + m}成功!')
            m = m + ii + 1  # bug: updating m inside the loop makes the file numbering jump
            fp.close()
print('---------------\n>>>爬取完毕<<<')
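One bug worth flagging in this version: the multi-page branch builds the paginated URL `url_s` but then requests `url` again, so every iteration re-downloads page 1 and saves the same ten articles repeatedly. The one-line fix (which version 03 effectively adopts by interpolating the page number into `url` directly) is:

url_s = f"http://search.cs.com.cn/search?page={p+1}&channelid=215308&searchword={query}"
resp = requests.get(url_s, headers=dic)  # request the paginated URL that was just built, not the page-1 `url`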
Optimization history: 02_中证网.py
# coding=utf-8
import requests
from bs4 import BeautifulSoup
import io
import sys

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')  # switch stdout to GB18030 so Chinese prints correctly

query = input("【中证网】请输入你想搜索的内容:")
pages = int(input("要爬取的页数(不小于1):"))
if pages < 1:
    exit()
url = f'http://search.cs.com.cn/search?page=1&channelid=215308&searchword={query}'
dic = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 "
                  "Safari/537.36 SLBrowser/7.0.0.6241 SLBChan/30"}
resp = requests.get(url, headers=dic)
resp.encoding = 'utf-8'
page = BeautifulSoup(resp.text, "html.parser")  # parse with the built-in HTML parser
alist = page.find("table").find_all("a")
weblist = []
for a in alist:
    if a.get('href')[:5] == "https":  # keep only absolute https article links
        weblist.append(a.get('href'))
# ---------------- each article on the first page ----------------
m = 0
for ii in range(len(weblist)):
    url_a = weblist[ii]
    dic_a = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 "
                      "Safari/537.36 SLBrowser/7.0.0.6241 SLBChan/30"}
    resp_a = requests.get(url_a, headers=dic_a)
    resp_a.encoding = 'gbk'  # article pages are GBK-encoded
    page_a = BeautifulSoup(resp_a.text, "html.parser")
    page_b = page_a.find('section').find_all('p')  # article body paragraphs
    fp = open(f'D:/桌面/爬虫-银行/中国证券网/中国银行/0/(2021){ii+1}.txt', 'w+', encoding='utf-8')
    txt_list = []
    for txt_a in page_b:
        txt_list.append(txt_a.text)
    # ---------------- write the text ----------------
    for i in range(len(txt_list)):
        fp.write(txt_list[i] + '\n')  # text only
    fp.close()
    print(f'>>{ii+1}成功!')
    m = ii + 1
# ---------------- remaining pages ----------------
if pages > 1:
    for p in range(pages):
        url_s = f"http://search.cs.com.cn/search?page={p+1}&channelid=215308&searchword={query}"
        resp = requests.get(url, headers=dic)  # bug (still present from 01): requests `url`, not `url_s`, so page 1 is fetched again
        resp.encoding = 'utf-8'
        page = BeautifulSoup(resp.text, "html.parser")
        alist = page.find("table").find_all("a")
        weblist = []
        for a in alist:
            if a.get('href')[:5] == "https":
                weblist.append(a.get('href'))
        # ---------------- each article on this page ----------------
        for ii in range(len(weblist)):
            url_a = weblist[ii]
            dic_a = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 "
                              "Safari/537.36 SLBrowser/7.0.0.6241 SLBChan/30"}
            resp_a = requests.get(url_a, headers=dic_a)
            resp_a.encoding = 'gbk'
            page_a = BeautifulSoup(resp_a.text, "html.parser")
            page_b = page_a.find('section').find_all('p')
            fp = open(f'D:/桌面/爬虫-银行/中国证券网/中国银行/0/(2021){ii + 1 + m}.txt', 'w+', encoding='utf-8')
            txt_list = []
            for txt_a in page_b:
                txt_list.append(txt_a.text)
            for i in range(len(txt_list)):
                fp.write(txt_list[i] + '\n')  # text only
            print(f'>>{ii + 1 + m}成功!')
            m = m + ii + 1  # bug: updating m inside the loop makes the file numbering jump
            fp.close()
print('---------------\n>>>爬取完毕<<<')
Optimization history: 03_中证网.py
# coding=utf-8
import requests
from bs4 import BeautifulSoup
import io
import sys

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')  # switch stdout to GB18030 so Chinese prints correctly

query = input("【中证网】请输入你想搜索的内容:")
pages = int(input("要爬取的页数(不小于1):"))
if pages < 1:
    exit()
m = 0
for p in range(1, pages + 1):
    # the page number is now interpolated into the search URL, fixing the pagination bug of 01/02
    url = f'http://search.cs.com.cn/search?page={p}&channelid=215308&searchword={query}&perpage=10&outlinepage=5&&andsen=&total=&orsen=&exclude=&searchscope=&timescope=&timescopecolumn=&orderby=&timeline==2021'
    dic = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}
    resp = requests.get(url, headers=dic)
    resp.encoding = 'utf-8'
    print(f'\n>>>--------------------第{p}页---------------------<<<\n')
    page = BeautifulSoup(resp.text, "html.parser")
    alist = page.find("table").find_all('a')
    weblist = []
    for a in alist:
        if a.get('href')[:5] == "https":
            weblist.append(a.get('href'))
    # ---------------- each article on this page ----------------
    for ii in range(len(weblist)):
        url_a = weblist[ii]
        dic_a = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}
        resp_a = requests.get(url_a, headers=dic_a)
        resp_a.encoding = 'gbk'
        page_a = BeautifulSoup(resp_a.text, "html.parser")
        page_b = page_a.find('section').find_all('p')
        fp = open(f'D:/桌面/爬虫-银行/中国证券网/中国银行/2021/(2021){ii+m+1}.txt', 'w+', encoding='utf-8')
        txt_list = []
        for txt_a in page_b:
            txt_list.append(txt_a.text)
        print(f'\n-++++++++++++++++++第{ii+1}篇文章++++++++++++++++-\n', txt_list, len(txt_list))
        for i in range(len(txt_list)):
            fp.write(txt_list[i] + '\n')  # text only
        print(f'\n> > >{ii+1}成功! < < <')
        fp.close()
    m = m + len(weblist) + 1  # note: the extra +1 leaves a gap in the file numbering after each page
print('---------------\n>>>爬取完毕<<<')
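A bookkeeping quirk introduced here and carried through versions 04-06: `m` advances by `len(weblist) + 1`, so the saved file numbers skip one index after every page. If contiguous numbering matters, drop the extra `+ 1`, as the final script at the top does:

m = m + len(weblist)  # advance by exactly the number of articles saved on this page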
Optimization history: 04_中证网(网址筛选问题).py — the "URL filtering problem" version
# coding=utf-8
import requests
from bs4 import BeautifulSoup
import io
import sys

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')  # switch stdout to GB18030 so Chinese prints correctly

query = input("【中证网】请输入你想搜索的内容:")
pages = int(input("要爬取的页数(不小于1):"))
if pages < 1:
    exit()
m = 0
for p in range(1, pages + 1):
    # bug: interpolates {pages} instead of {p}, so every iteration requests the same page
    url = f'http://search.cs.com.cn/search?page={pages}&channelid=215308&searchword={query}&keyword={query}&token=12.1462412070719.47&perpage=10&outlinepage=5&&andsen=&total=&orsen=&exclude=&searchscope=&timescope=&timescopecolumn=&orderby=&timeline==2020'
    dic = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}
    resp = requests.get(url, headers=dic)
    resp.encoding = 'utf-8'
    print(f'\n>>>--------------------第{p}页---------------------<<<\n')
    page = BeautifulSoup(resp.text, "html.parser")
    alist = page.find("table").find_all('a')
    print('alist:', alist)
    weblist = []
    for a in alist:
        if a.get('href')[4:] == "http":  # bug: the slice should be [:4]; [4:] never equals "http", so weblist stays empty
            weblist.append(a.get('href'))
    print('weblist==', weblist)
    # ---------------- each article on this page ----------------
    for ii in range(len(weblist)):
        url_a = weblist[ii]
        dic_a = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}
        resp_a = requests.get(url_a, headers=dic_a)
        resp_a.encoding = 'gbk'
        page_a = BeautifulSoup(resp_a.text, "html.parser")
        page_b = page_a.find('section').find_all('p')
        fp = open(f'D:/桌面/爬虫-银行/中国证券网/中国银行/2020/(2020){ii+m+1}.txt', 'w+', encoding='utf-8')
        txt_list = []
        for txt_a in page_b:
            txt_list.append(txt_a.text)
        print(f'\n-++++++++++++++++++第{ii+1}篇文章++++++++++++++++-\n', txt_list, len(txt_list))
        for i in range(len(txt_list)):
            fp.write(txt_list[i] + '\n')  # text only
        print(f'\n> > >{ii+1}成功! < < <')
        fp.close()
    m = m + len(weblist) + 1
print('---------------\n>>>爬取完毕<<<')
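The "URL filtering problem" in this version's filename is the slice direction in the href check: `a.get('href')[4:]` keeps everything from index 4 onward, which is never just "http", so `weblist` stays empty and nothing is saved. Version 05 flips it to `[:4]`; a slightly safer variant also tolerates `<a>` tags with no href attribute at all:

href = a.get('href')
if href and href.startswith('http'):  # prefix test; also skips tags where href is None
    weblist.append(href)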
Optimization history: 05_中证网.py
# coding=utf-8
import requests
from bs4 import BeautifulSoup
import io
import sys

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')  # switch stdout to GB18030 so Chinese prints correctly

query = input("【中证网】请输入你想搜索的内容:")
year = int(input('要爬取的年份:'))
pages = int(input("要爬取的页数(不小于1):"))
if pages < 1:
    exit()
m = 0
for p in range(1, pages + 1):
    url = f'http://search.cs.com.cn/search?page={p}&channelid=215308&searchword={query}&keyword={query}&token=12.1462412070719.47&perpage=10&outlinepage=5&&andsen=&total=&orsen=&exclude=&searchscope=&timescope=&timescopecolumn=&orderby=&timeline=={year}'
    dic = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}
    resp = requests.get(url, headers=dic)
    resp.encoding = 'utf-8'
    print(f'\n>>>--------------------第{p}页---------------------<<<\n')
    page = BeautifulSoup(resp.text, "html.parser")
    alist = page.find("table").find('tr').find_all('a')  # narrow the link search to the first result row
    weblist = []
    for a in alist:
        if a.get('href')[:4] == "http":  # slice fixed relative to 04: [:4] tests the prefix
            weblist.append(a.get('href'))
    print('weblist==', weblist)
    # ---------------- each article on this page ----------------
    for ii in range(len(weblist)):
        url_a = weblist[ii]
        dic_a = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}
        resp_a = requests.get(url_a, headers=dic_a)
        resp_a.encoding = 'gbk'
        page_a = BeautifulSoup(resp_a.text, "html.parser")
        page_b = page_a.find_all('p')  # grab every <p> on the page, not just the article <section>
        fp = open(f'D:/桌面/爬虫-银行/中国证券网/中国银行/{year}/({year}){ii + m + 1}.txt', 'w+', encoding='utf-8')
        txt_list = []
        for txt_a in page_b:
            txt_list.append(txt_a.text)
        print(f'\n-++++++++++++++++++第{ii + 1}篇文章++++++++++++++++-\n', txt_list, len(txt_list))
        for i in range(len(txt_list)):
            fp.write(txt_list[i] + '\n')  # text only
        print(f'\n> > >{ii + 1}成功! < < <')
        fp.close()
    m = m + len(weblist) + 1  # note: the extra +1 leaves a gap in the file numbering after each page
print('---------------\n>>>爬取完毕<<<')
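A guess at why this version dropped the `<section>` lookup: on article pages without that wrapper, `find('section')` returns None and the chained `.find_all('p')` raises AttributeError. Grabbing every `<p>` avoids the crash but pulls in navigation and footer text; a middle ground keeps the narrower scope with a fallback:

container = page_a.find('section') or page_a  # fall back to the whole page when there is no <section>
page_b = container.find_all('p')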
Optimization history: 06_中证网(Plus).py
# coding=utf-8
import requests
from bs4 import BeautifulSoup
import io
import sys
import os

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')  # switch stdout to GB18030 so Chinese prints correctly

# query = input("【中证网】请输入你想搜索的内容:")
query = '交通银行'
year = int(input('要爬取的年份:'))
pages = int(input("要爬取的页数(不小于1):"))
if pages < 1:
    exit()
m = 0
for p in range(1, pages + 1):
    url = f'http://search.cs.com.cn/search?page={p}&channelid=215308&searchword={query}&keyword={query}&token=12.1462412070719.47&perpage=10&outlinepage=5&&andsen=&total=&orsen=&exclude=&searchscope=&timescope=&timescopecolumn=&orderby=&timeline=={year}'
    dic = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}
    resp = requests.get(url, headers=dic)
    resp.encoding = 'utf-8'
    print(f'\n>>>--------------------第{p}页---------------------<<<\n')
    page = BeautifulSoup(resp.text, "html.parser")
    # new approach: read the summary text straight from the result table instead of visiting each article
    alist = page.find_all("table")
    datalist = []
    for ii in alist:
        ss = ii.find('td', style='font-size: 12px;line-height: 24px;color: #333333;margin-top: 4px;')
        if ss is not None:
            datalist.append(ss.get_text())
    if not os.path.isdir(f'D:/桌面/爬虫-银行/中国证券网/{query}/{year}'):  # create the year folder if it is missing
        os.mkdir(f'D:/桌面/爬虫-银行/中国证券网/{query}/{year}')  # note: assumes the {query} folder exists; the final version adds that check
    for ii in range(len(datalist)):
        fp = open(f'D:/桌面/爬虫-银行/中国证券网/{query}/{year}/({year}){ii + m + 1}.txt', 'w+', encoding='utf-8')
        fp.write(datalist[ii] + '\n')  # text only
        print(datalist[ii])
        print(f'\n> > >第{p}页,第{ii + 1}篇,成功! < < <')
        fp.close()
    m = m + len(datalist) + 1  # note: the extra +1 leaves a gap in the file numbering after each page
print('----------------------------')
print(f'------\n{year}年,爬取完毕----')
print('----------------------------')
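A final note on the extraction strategy used in 06 and the final version: matching the summary cell by its exact inline style string is brittle, since a one-character change to the site's CSS makes `find` return nothing. A more tolerant variant (a sketch, under the assumption that the font-size and line-height values stay stable) matches on substrings of the style attribute instead:

# match any <td> whose inline style contains the two stable property values
def looks_like_summary(tag):
    style = tag.get('style') or ''
    return tag.name == 'td' and 'font-size: 12px' in style and 'line-height: 24px' in style

for table in page.find_all('table'):
    ss = table.find(looks_like_summary)
    if ss is not None:
        datalist.append(ss.get_text())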