Python 爬取所有51VOA网站的Learn a words文本及mp3音频

#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Python 爬取所有51VOA网站的Learn a words文本及mp3音频
import os
import sys
import time
import urllib as req
from threading import Thread
import urllib2
import urllib
from threading import Thread
import xml
import re
class MyWorkThread(Thread, urllib.FancyURLopener):
    """
    Multi-thread downloading class: one instance downloads one byte-range
    of a remote file into its own part file.

    Inherits Thread for concurrency and urllib.FancyURLopener so the
    instance itself can open the URL with a custom Range header.
    run() is a virtual method of Thread.
    """
    def __init__(self, threadname, url, filename, ranges = 0):
        # threadname -- label used in progress messages
        # url        -- remote file to download
        # filename   -- local part file holding this thread's byte range
        # ranges     -- (start, end) byte offsets this thread is responsible for
        Thread.__init__(self, name = threadname)
        urllib.FancyURLopener.__init__(self)
        self.name = threadname
        self.url = url
        self.filename = filename
        self.ranges = ranges
        self.downloaded = 0
    def run(self):
        """
        virtual function in Thread: download self.ranges of self.url into
        self.filename, resuming from any bytes already on disk.
        """
        try:
            # Resume support: count bytes already written to the part file.
            self.downloaded = os.path.getsize(self.filename)
        except OSError:
            # Part file does not exist yet -- start from scratch.
            self.downloaded = 0
        #rebuild start point
        self.startpoint = self.ranges[0] + self.downloaded
        #if this part is completed
        if self.startpoint >= self.ranges[1]:
            print 'Part %s has been downloaded over.' % self.filename
            return
        self.oneTimeSize = 8 * 1024 #8K bytes / time
        print 'task %s will download from %d to %d' %(self.name, self.startpoint, self.ranges[1])
        # Ask the server for only the remaining slice of this range.
        self.addheader('Range', 'bytes=%d-%d' %(self.startpoint, self.ranges[1]))
        self.urlhandle = self.open(self.url)
        data = self.urlhandle.read(self.oneTimeSize)
        while data:
            # Re-open in append mode per chunk; a crash loses at most one chunk.
            filehandle = open(self.filename, 'ab+')
            filehandle.write(data)
            filehandle.close()
            self.downloaded += len(data)
            data = self.urlhandle.read(self.oneTimeSize)
def GetUrlFileSize(url):
    """
    Return the remote file size of *url* in bytes, read from the
    'Content-Length' response header (0 if no such header is present).
    """
    handle = urllib.urlopen(url)
    size = 0
    for line in handle.info().headers:
        # Header lines look like 'Content-Length: 12345'; take the part
        # after the colon. If several match, the last one wins.
        if 'Length' in line:
            size = int(line.split(':')[-1].strip())
    return size
def SpliteBlocks(totalsize, blocknumber):
    """
    Split *totalsize* bytes into *blocknumber* contiguous (start, end)
    byte ranges, inclusive at both ends.

    The first blocknumber-1 ranges are equal-sized; the final range runs
    to the last byte and absorbs any rounding remainder.
    """
    # Floor division keeps the result an int on both Python 2 and 3
    # (plain '/' yields a float on Python 3 and breaks byte offsets).
    blocksize = totalsize // blocknumber
    ranges = []
    for i in range(0, blocknumber - 1):
        ranges.append((i * blocksize, i * blocksize + blocksize - 1))
    # Last block takes everything up to totalsize - 1.
    ranges.append((blocksize * (blocknumber - 1), totalsize - 1))
    return ranges
def isLive(tasks):
    """
    Return True while at least one worker thread in *tasks* is still
    running, False once all have finished (or the list is empty).
    """
    for task in tasks:
        # is_alive() is the supported spelling (available since Python 2.6);
        # the original camelCase isAlive() was removed in Python 3.9.
        if task.is_alive():
            return True
    return False
def downLoadFile(url, output, blocks = 6):
    """
    Download *url* into local file *output* using *blocks* parallel
    range-request threads.

    Each thread writes its byte range to its own temp file
    ('tmpfile_<i>'); once every thread finishes, the temp files are
    concatenated into *output* (name sanitized by formatFileName) and
    removed. Progress is printed roughly once a second.
    """
    sys.stdout.write('Begin to download from %s\n' % url)
    sys.stdout.flush()
    size = GetUrlFileSize(url)
    ranges = SpliteBlocks(size, blocks)
    threadname = ["thread_%d" % i for i in range(0, blocks)]
    filename = ["tmpfile_%d" % i for i in range(0, blocks)]
    tasks = []
    for i in range(0, blocks):
        task = MyWorkThread(threadname[i], url, filename[i], ranges[i])
        # Daemon threads so an interrupted main process does not hang.
        task.setDaemon(True)
        task.start()
        tasks.append(task)
    time.sleep(2)
    # Poll until all parts finish, printing aggregate progress.
    while isLive(tasks):
        downloaded = sum([task.downloaded for task in tasks])
        process = downloaded / float(size) * 100
        show = u'\rFilesize: %d Downloaded:%d Completed: %.2f%%' % (size, downloaded, process)
        sys.stdout.write(show)
        # BUG FIX: the original wrote 'sys.stdout.flush' without parentheses,
        # referencing the method instead of calling it.
        sys.stdout.flush()
        time.sleep(1)
    # Stitch the per-thread part files together into the final output file.
    output = formatFileName(output)
    filehandle = open(output, 'wb+')
    for i in filename:
        f = open(i, 'rb')
        filehandle.write(f.read())
        f.close()
        os.remove(i)
    filehandle.close()
    sys.stdout.write("Completed!\n")
    sys.stdout.flush()
def formatFileName(filename):
    """
    Replace characters that are illegal in Windows file names with spaces
    in the basename of *filename*; the directory part is kept as-is.
    Non-string input yields the literal string 'None'.
    """
    if not isinstance(filename, str):
        return 'None'
    head, base = os.path.split(filename)
    if base == '':
        # Nothing to sanitize (path ends in a separator or is empty).
        return filename
    for illegal in ('\\', '/', ':', '*', '?', '"', '<', '>', '|'):
        base = base.replace(illegal, ' ')
    return os.path.join(head, base)

def remove_tags(raw_html):
    """Return *raw_html* with every <...> HTML tag stripped out."""
    # Non-greedy match so '<b>x</b>' loses only the tags, not the text.
    return re.sub('<.*?>', '', raw_html)
def saveword(url,name):
    """
    Scrape one 'Learn A Word' article page: append the lesson text to
    LearningWord.txt and download the matching mp3 as '<name>.mp3'.

    url  -- article page URL on 51voa.com
    name -- word title used for the text entry and the mp3 file name
    """
    res=req.urlopen(url)
    data=res.readlines()
    res.close()
    # Line markers: the mp3 link lives on the line containing id="mp3";
    # the lesson text lives inside <div id="content">.
    startag=r'id="mp3"'
    endtag=r'</div>'
    # NOTE(review): scan starts at line 80 and stops 10 lines from the end
    # -- presumably skips boilerplate header/footer; verify against the
    # actual page layout.
    k=80
    data2=''
    data3=''
    data4=''
    while k<len(data)-10:
        if(data[k].find(startag)!=-1):
            data2=data[k]
        if(data[k].find('<div id="content">')!=-1):
            data3=data[k]
            # NOTE(review): nesting reconstructed from an unindented paste --
            # data4 is taken as the <p> line immediately after the content
            # div; confirm against the original script.
            if(data[k+1].find('<p>')!=-1):
                data4=data[k+1]
                data4=remove_tags(data4)
        k=k+1
    # The mp3 URL sits between 'http' and the ' title="' attribute.
    mp3url=data2[data2.find('http'):data2.find(''' title="''')-1]
    if(data3.find(endtag)!=-1):
        # Lesson text fits on one line: slice up to the closing </div>.
        sent = data3[data3.find('今天我们要学'):data3.find(endtag)]
    else:
        # Lesson text continues on the following <p> line (data4).
        sent = data3[data3.find('今天我们要学'):].strip('\n').strip('\r')+data4.strip('\n')
    f=open('LearningWord.txt','a+')
    sent=remove_tags(sent)
    f.write(name+'\n'+sent.strip('\r')+'\n')
    f.close()
    # Only download audio if an mp3 link was actually found on the page.
    if(data2.find(startag)!=-1):
        downLoadFile(mp3url,str(formatFileName(name.replace(':', ' ')))+'.mp3', blocks = 4)
def savepage(url):
    """
    Scrape one 'Learn A Word' index page: extract every word link from
    its <ul><li> list and hand each (url, word) pair to saveword().
    """
    res=req.urlopen(url)
    data=res.read()
    res.close()
    startag='''<ul><li>'''
    endtag='''</li></ul>'''
    data=str(data)
    # Slice out the list body between the <ul><li> and </li></ul> markers.
    # NOTE(review): the +12 offset skips past the 8-char marker plus 4 more
    # characters -- presumably whitespace/markup; verify against the page.
    data2=data[data.find(startag)+12:data.find(endtag)]
    linestart='href'
    meddle = '''" target'''
    lineend = '</a>'
    urls=[]
    words = []
    i=data2.find(linestart)
    while(i!=-1):
        k = data2.find(meddle)
        j = data2.find(lineend)
        # i+6 skips 'href="'; slice up to the closing quote before ' target'.
        url = 'http://www.51voa.com/'+data2[i+6:k]
        urls = urls+[url]
        # NOTE(review): k+16 assumes a fixed '" target=...>' run before the
        # word text -- confirm against the actual anchor markup.
        word = data2[k+16:j]
        print i,k,j, word,url
        words = words + [word]
        # Advance past this anchor and look for the next link.
        data2=data2[j+3:]
        saveword(url,word)
        i=data2.find(linestart)
        # break  # (debug) uncomment to stop after the first word
# Download all words
# Truncate the output text file so each run starts from a clean file;
# saveword() appends to it afterwards.
f = open('LearningWord.txt', 'w')
f.close()
# Crawl index pages Learn_A_Word_53.html through Learn_A_Word_54.html.
for page in range(53, 55):
    savepage('http://www.51voa.com/Learn_A_Word_' + str(page) + '.html')

# Download a single specified word instead:
#url = "http://www.51voa.com/Voa_English_Learning/Learn_A_Word_21951.html"
#name ='9:pop up'
#saveword(url,name)

下载单词文本示例:(全部单词文本下载地址:http://pan.baidu.com/s/1o8pmojS)

2650 endorse
今天我们要学的词是 endorse. Endorse 作为动词,有支持的意思。Senator Ted Cruz endorsed Donald Trump, but later said the decision was “agonizing.” 美国联邦参议员克鲁兹支持川普,但是后来又表示,他做出这一决定十分痛苦。The New York Times endorsed Hillary Clinton for president in a Saturday editorial, and dismissed Donald Trump as “the worst nominee put forward by a major party in modern American history.” 纽约时报在星期六的社论中支持希拉里.克林顿当总统,并批评说,川普是“美国现代史上主要政党推举出的最差劲的候选人”。好的,我们今天学习的词是 endorse, endorse, endorse...
2649 deportation
今天我们要学的词是 deportation. Deportation 名词,驱逐出境,递解出境。The Obama administration said it would fully resume deportations of undocumented Haitian immigrants. 奥巴马政府表示,将全面恢复对无证海地移民的遣返工作。China and Canada have reached a new border agreement that would speed up the deportation of Chinese nationals inadmissible in Canada. 中国和加拿大达成新的边境协议,加快遣返那些本不该被允许进入加拿大的中国公民。好的,我们今天学习的词是 deportation, deportation, deportation...
2648 voluntarily
今天我们要学的词是 voluntarily. Voluntarily 副词,自愿地。The International Organization for Migrants says that more people are voluntarily returning to their home countries. 国际移民组织说,越来越多的人开始自愿返回自己的祖国。A high-tech diagnostic company voluntarily withdrew its Zika virus blood test from FDA approval. 一家高科技诊断公司自愿撤回递交美国食品药物管理局的寨卡病毒血液检测批准申请。好的,我们今天学习的词是 voluntarily, voluntarily, voluntarily...
2647 guerrilla
今天我们要学的词是 guerrilla. Guerrilla 形容词,游击队的。The Columbian government signed a peace agreement on Monday with the Revolutionary Armed Forces of Columbia (FARC), a national guerrilla movement. 哥伦比亚政府星期一跟全国游击队运动“哥伦比亚革命武装力量”签署了和平协议。The agreement needs to be approved by an Oct. 2 referendum before roughly 7,000 guerrilla fighters start their transition to civilian life. 这项协议还需经过10月2号全民公决批准,大约七千名游击队员才会开始向平民生活过渡。好的,我们今天学习的词是 guerrilla, guerrilla, guerrilla...
2646 curfew
今天我们要学的词是 curfew. Curfew 名词,宵禁。The city of Charlotte in North Carolina has lifted its midnight curfew, but the state of emergency remains in effect. 北卡罗来纳州夏洛特市取消了午夜宵禁,但是紧急状态依旧生效。Authorities in an Austrian city imposed a curfew on young immigrants following a series of sexual attacks at a local beer and wine festival. 奥地利一个城市的有关当局对未成年移民实施宵禁,此前当地一个啤酒葡萄酒节期间发生了一系列性侵事件。 好的,我们今天学习的词是 curfew, curfew, curfew...
2645 estimate
今天我们要学的词是 estimate. Estimate 动词,估计。A recent study estimates that the Indonesian forest fires that created a smoky haze last year may have caused more than 100,000 premature deaths. 一项最新研究估计,去年印尼山火引发的雾霾可能造成了10万人过早死亡。A new survey estimates that Americans own 265 million guns, but half of these guns are in the hands of only 3% of Americans. 最新调查估计,美国人拥有枪支总数2.65亿支,但其中半数都集中在3%的人手中。好的,我们今天学习的词是 estimate, estimate, estimate...
2644 mercy killing
今天我们要学的词是 mercy killing. Mercy killing 名词,安乐死。A terminally ill 17-year-old has become the first minor to be euthanized in Belgium since the age restrictions on such mercy killings were lifted in 2014. 比利时一个17岁绝症男孩安乐死,他是比利时2014年取消对安乐死年龄限制以来第一个安乐死的未成年人。The United Arab Emirates passed a new law banning all mercy killings. 阿联酋通过新法律,禁止安乐死。好的,我们今天学习的词是 mercy killing, mercy killing, mercy killing...

  

Python 爬取所有51VOA网站的Learn a words文本及mp3音频的更多相关文章

  1. [Python]爬取 游民星空网站 每周精选壁纸(1080高清壁纸) 网络爬虫

    一.检查 首先进入该网站的https://www.gamersky.com/robots.txt页面 给出提示: 弹出错误页面 注: 网络爬虫:自动或人工识别robots.txt,再进行内容爬取 约束 ...

  2. python爬取中国天气网站数据并对其进行数据可视化

    网址:http://www.weather.com.cn/textFC/hb.shtml 解析:BeautifulSoup4 爬取所有城市的最低天气   对爬取的数据进行可视化处理 按温度对城市进行排 ...

  3. python爬取网站数据

    开学前接了一个任务,内容是从网上爬取特定属性的数据.正好之前学了python,练练手. 编码问题 因为涉及到中文,所以必然地涉及到了编码的问题,这一次借这个机会算是彻底搞清楚了. 问题要从文字的编码讲 ...

  4. Python开发爬虫之BeautifulSoup解析网页篇:爬取安居客网站上北京二手房数据

    目标:爬取安居客网站上前10页北京二手房的数据,包括二手房源的名称.价格.几室几厅.大小.建造年份.联系人.地址.标签等. 网址为:https://beijing.anjuke.com/sale/ B ...

  5. 利用Python爬取电影网站

    #!/usr/bin/env python #coding = utf-8 ''' 本爬虫是用来爬取6V电影网站上的电影资源的一个小脚本程序,爬取到的电影链接会通过网页的形式显示出来 ''' impo ...

  6. python爬取网站数据保存使用的方法

    这篇文章主要介绍了使用Python从网上爬取特定属性数据保存的方法,其中解决了编码问题和如何使用正则匹配数据的方法,详情看下文     编码问题因为涉及到中文,所以必然地涉及到了编码的问题,这一次借这 ...

  7. python爬取某个网站的图片并保存到本地

    python爬取某个网站的图片并保存到本地 #coding:utf- import urllib import re import sys reload(sys) sys.setdefaultenco ...

  8. Python轻松爬取Rosimm写真网站全部图片

    RosimmImage 爬取Rosimm写真网站图片 有图有真相 def main_start(url): """ 爬虫入口,主要爬取操作 ""&qu ...

  9. 使用python爬取MedSci上的期刊信息

    使用python爬取medsci上的期刊信息,通过设定条件,然后获取相应的期刊的的影响因子排名,期刊名称,英文全称和影响因子.主要过程如下: 首先,通过分析网站http://www.medsci.cn ...

随机推荐

  1. HttpClient 与 HtmlParser 简介 转载

    转载地址:https://www.ibm.com/developerworks/cn/opensource/os-cn-crawler/ 本小结简单的介绍一下 HttpClinet 和 HtmlPar ...

  2. sql server常用语法点

    if exists(select name from sysobjects where name = 'stuInfo')drop table stuInfogocreate table stuInf ...

  3. css基础样式四

    上次我们讲到了相对定位: 这次我们了解下绝对定位; 绝对定位: #box_relative { position: absolute; left: 30px; top: 20px; } 绝对定位会脱离 ...

  4. Qt on Android 蓝牙开发

    版权声明:本文为MULTIBEANS ORG研发跟随文章,未经MLT ORG允许不得转载. 最近做项目,需要开发安卓应用,实现串口的收发,目测CH340G在安卓手机上非常麻烦,而且驱动都是Java版本 ...

  5. angular源码阅读的起点,setupModuleLoader方法

    angular源码其实结构非常清晰,划分的有条有理的,大概就是这样子: (function(window,document,jquery,undefined){ //一些工具函数 //EXPR 编译器 ...

  6. linux批量查找文件内容

    find ./ -name "*.php" | xargs grep '要查找的内容' 如果需要查找的内容包含特殊符号,比如$等等,grep要加参数 find ./ -name & ...

  7. Js零散知识点笔记

    1.将类数组对象转换为数组 ES5: var arrLike = document.querySelectorAll('span'); var arr = Array.prototype.slice. ...

  8. Python if 和 for 的多种写法

    a, b, c = 1, 2, 3 [对比Cpp里:c = a >b? a:b]这个写法,Python只能常规的空行,缩进吗? 人生苦短,我用python,下面介绍几种if的方便的方法. 1.常 ...

  9. 浏览器中的Javascript的简单对话框

    简单对话框是指对话框不去做设计,而直接使用默认的,如alert.confirm.prompt: <html> <head> <meta http-equiv=" ...

  10. test homework ~ coverage about method printPrimes

    /******************************************************* * Finds and prints n prime integers * Jeff ...