decorator.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Logging setup for the biaoqing (emoji image) downloader.

Configures a file logger whose format expects ``imageurl`` and
``imagename`` fields supplied through the ``extra`` dict of each
log call, and exposes the ``downloader_logger`` decorator.
"""
import logging
import os
from functools import wraps

# Handler string format; %(imageurl)s and %(imagename)s are filled in
# from the `extra` mapping passed to logging calls.
FORMAT = '%(asctime)-15s %(filename)s %(message)s %(imageurl)s %(imagename)s'
logging.basicConfig(format=FORMAT, level=logging.INFO,
                    filename="biaoqingDownloader.log",
                    datefmt="[%Y-%m-%d %H:%M:%S]")

# Default `extra` payload for downloader log records (kept for
# backward compatibility with callers that import it).
my_log_extra = {"imageurl": "", "imagename": ""}
def downloader_logger(func) :
@wraps(func)
def wrapper(*args, **kwargs) :
func(*args,**kwargs)
try:
image_name=args[0]
image_url=args[1]
except KeyError as e:
raise e
my_log_extra["imagename"]=image_name
my_log_extra["imageurl"]=image_url
logging.info("biaoqingbaoDownloader downloaded image:",extra=my_log_extra)
return wrapper if __name__ == '__main__':
## test this logger
@downloader_logger
def foo(filename,imageurl) :
print('logging test') foo('test.png','www.baidu.com')
#if no error appears, clear the log file
with open('./biaoqingDownloader.log','w') as f :
f.truncate()

downloader.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Link : https://github.com/coderchaser
"""Batch-download emoji images from www.doutula.com via its search API."""
import os
import time
import argparse
import requests
import random
import json
import threading

from decorator import downloader_logger

# code 0 represents https://www.doutula.com/apidoc
API_URL_DICT = {0: "https://www.doutula.com/api/search?keyword={keyword}&mime={image_type}&page={page}"}
# A User-Agent is chosen at random from these for each API request.
USER_AGENTS = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0',
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0',
               'Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0',
               ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.5 (KHTML, like Gecko) '
                'Chrome/19.0.1084.46 Safari/536.5'),
               ('Mozilla/5.0 (Windows; Windows NT 6.1) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.46 '
                'Safari/536.5'),
               )


class Downloader(object):
    """Fetch image URLs from the doutula search API and save the images.

    Typical use: ``Downloader(20, 'cat', 0, './tmp/cat', True).run()``.
    """

    def __init__(self, number, keyword, image_type, filepath, verbose, api_code=0):
        """Store the download configuration.

        :param number: how many images to download
        :param keyword: search keyword for the images
        :param image_type: 0: all kinds, 1: GIF, 2: static images
        :param filepath: directory where the images are stored
        :param verbose: print per-image progress info?
        :param api_code: which site's API to use (currently only 0)
        """
        # TODO: use a queue instead of a list to support multi-threaded downloads.
        self.__image_url_list = []      # image urls gathered from the API
        self.__number = number
        self.__api_code = api_code
        self.__keyword = keyword
        self.__image_type = image_type
        self.__filepath = filepath
        self.__verbose = verbose

    def __get_image_url(self):
        """Page through the search API (max 50 pages) collecting image urls.

        Stops early once enough urls are gathered or the API reports no
        more results.  Network/parse errors on one page are printed and
        the next page is tried.
        """
        for page in range(1, 51):
            api_url = API_URL_DICT[self.__api_code].format(
                keyword=self.__keyword, image_type=self.__image_type, page=page)
            rq = None  # ensure the name exists for the finally clause
            try:
                rq = requests.get(api_url,
                                  headers={'User-Agent': random.choice(USER_AGENTS)},
                                  timeout=5)
                response_dict = rq.json()
                self.__image_url_list.extend(
                    entry['image_url'] for entry in response_dict['data']['list'])
                if len(self.__image_url_list) >= self.__number:
                    break
                if response_dict['data']['more'] != 1:
                    break
            # RequestException covers ConnectionError and Timeout;
            # ValueError/KeyError cover malformed JSON responses.
            except (requests.RequestException, ValueError, KeyError) as e:
                print(e)
            finally:
                # Original closed `rq` unconditionally, raising NameError
                # when requests.get itself failed.
                if rq is not None:
                    rq.close()

    def __download(self):
        """Download up to ``self.__number`` images into ``self.__filepath``."""
        self.__get_image_url()
        print('Now downloading images from https://www.doutula.com ...')
        if not os.path.exists(self.__filepath):
            os.makedirs(self.__filepath)
        # The API may return fewer urls than requested; clamp to avoid
        # the IndexError the original raised in that case.
        count = min(self.__number, len(self.__image_url_list))
        for i in range(count):
            image_url = self.__image_url_list[i]
            if self.__verbose:
                print("Download images: {}".format(image_url))
            extension = '.' + image_url.split('.')[-1]
            try:
                filename = os.path.join(self.__filepath,
                                        '{0}{1}'.format(self.__keyword, i) + extension)
                download_rq = requests.get(image_url, timeout=10)
                with open(filename, 'wb') as f:
                    f.write(download_rq.content)
            except Exception as e:
                # Best-effort: report and continue with the next image.
                print(e)
            time.sleep(1)  # be polite to the server
        print("Images about {} have been downloaded.".format(self.__keyword))

    def run(self):
        """Public entry point: fetch the url list and download the images."""
        self.__download()
def get_parser():
    """Build the argparse parser for the command-line interface.

    :returns: an ``argparse.ArgumentParser`` with KEYWORD positionals and
        the -t/-n/-c/-d/-v options.
    """
    parser = argparse.ArgumentParser(
        description="download interesting emoj images from www.doutula.com via command line")
    parser.add_argument('keywords', metavar='KEYWORD', type=str, nargs='*',
                        help='the keywords to be searched')
    parser.add_argument('-t', '--type', type=int, default=0, choices=range(0, 3),
                        help='choose image type to be downloaded. 0 represents all, 1 represents GIF , 2 represents static image')
    parser.add_argument('-n', '--num', type=int, default=50,
                        help='number of images to be downloaded')
    parser.add_argument('-c', '--clear', action='store_true',
                        help='enable clear the log file')
    parser.add_argument('-d', '--dir', type=str,
                        help='where to store the images, default is ./tmp/keyword/')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='enable show the whole downloading info')
    return parser
def download(**kwargs):
    """Run one Downloader per keyword.

    Expects the keys produced by ``get_parser()``: keywords, dir, num,
    type, verbose.  The target directory is ``<dir>/<keyword>`` or
    ``./tmp/<keyword>`` when -d was not given; the Downloader itself
    creates the directory.
    """
    for keyword in kwargs['keywords']:
        if kwargs['dir']:
            dirpath = kwargs['dir'] + "/" + keyword
        else:
            dirpath = './tmp/' + keyword
        print('Making dir:', dirpath)
        downloader = Downloader(kwargs['num'], keyword, kwargs['type'],
                                dirpath, kwargs['verbose'])
        downloader.run()
def command_line_runner():
    """Parse CLI arguments, optionally clear the log, then dispatch download().

    With no keywords given, prints the help text and returns without
    downloading anything.
    """
    parser = get_parser()
    kwargs = vars(parser.parse_args())
    if kwargs['clear']:
        # Truncate rather than delete so the log file keeps existing.
        with open('./biaoqingDownloader.log', 'w') as f:
            f.truncate()
    if not kwargs['keywords']:
        # No keywords assigned: return with help info.
        parser.print_help()
        return
    download(**kwargs)
if __name__ == '__main__':
    ###
    # test this downloader
    ###
    # downloader = Downloader(20, '金馆长', 0, './tmp', False)
    # downloader.run()
    command_line_runner()

代码已上传至 Github,链接见文首 @Link 注释。
使用方法:python downloader.py -h
使用方法:python downloader.py -h

利用斗图啦网站API批量下载表情图片的更多相关文章

  1. 利用wget 抓取 网站网页 包括css背景图片

    利用wget 抓取 网站网页 包括css背景图片 wget是一款非常优秀的http/ftp下载工具,它功能强大,而且几乎所有的unix系统上都有.不过用它来dump比较现代的网站会有一个问题:不支持c ...

  2. python多线程批量下载远程图片

    python多线程使用场景:多线程采集, 以及性能测试等 . 数据库驱动类-简单封装下 mysqlDriver.py #!/usr/bin/python3 #-*- coding: utf-8 -*- ...

  3. python图片爬虫 - 批量下载unsplash图片

    前言 unslpash绝对是找图的绝佳场所, 但是进网站等待图片加载真的令人捉急, 仿佛是一场拼RP的战争 然后就开始思考用爬虫帮我批量下载, 等下载完再挑选, 操作了一下不算很麻烦, 顺便也给大家提 ...

  4. C++代码利用pthread线程池与curl批量下载地图瓦片数据

    项目需求编写的程序,稳定性有待进一步测试. 适用场景:在网络地图上,比如天地图与谷歌地图,用户用鼠标在地图上拉一个矩形框,希望下载该矩形框内某一层级的瓦片数据,并将所有瓦片拼接成一个完整的,包含地理坐 ...

  5. java实现批量下载百度图片搜索到的图片

    就是写的个小程序,用于记录一下,方便后续查看,首先感谢下面这个博客,从这篇文章衍生的吧,大家可以学习下: http://www.cnblogs.com/lichenwei/p/4610298.html ...

  6. 用 Python 批量下载百度图片

    ​ 为了做一个图像分类的小项目,需要制作自己的数据集.要想制作数据集,就得从网上下载大量的图片,再统一处理. 这时,一张张的保存下载,就显得很繁琐.那么,有没有一种方法可以把搜索到的图片直接下载到本地 ...

  7. 利用免费二维码API自动生成网址图片二维码

    调用第三方接口生成二维码 官方地址:http://goqr.me/api/ 示例 https://api.qrserver.com/v1/create-qr-code/?size=180x180&am ...

  8. 批量下载网站图片的Python实用小工具

    定位 本文适合于熟悉Python编程且对互联网高清图片饶有兴趣的筒鞋.读完本文后,将学会如何使用Python库批量并发地抓取网页和下载图片资源.只要懂得如何安装Python库以及运行Python程序, ...

  9. python爬虫我是斗图之王

    python爬虫我是斗图之王 本文会以斗图啦网站为例,爬取所有表情包. 阅读之前需要对线程池.连接池.正则表达式稍作了解. 分析网站 页面url分析 打开斗图啦网站,简单翻阅之后发现最新表情每页包含的 ...

随机推荐

  1. MonoSymbolFileException in CheckLineNumberTable

    Mono.CompilerServices.SymbolWriter.MonoSymbolFileException: Exception of type 'Mono.CompilerServices ...

  2. mysql 插入一个字段 id自增并设置为主键

    案例 ALTER TABLE customer MODIFY COLUMN custID int NOT NULL AUTO_INCREMENT 开启: ALTER TABLE 表名 MODIFY C ...

  3. org.springframework.dao.DuplicateKeyException: 问题

    转自:https://blog.51cto.com/chengxuyuan/1786938 org.springframework.dao.DuplicateKeyException: a diffe ...

  4. 使用ranger替代资源浏览器

    使用方法参考,这个是比较高效的: http://www.mikewootc.com/wiki/linux/usage/ranger_file_manager.html

  5. TCP、UDP、HTTP、HTTPS之间的区别

    网络由下往上分为: 物理层--- 数据链路层--- 网络层 -- IP协议 传输层 -- TCP协议 会话层 -- 表示层和应用层 -- HTTP协议 1.TCP/IP连接 TCP传输控制协议,是一种 ...

  6. Linux命令——getconf

    转自:灵活使用getconf命令来获取系统信息 简介 getconf本身是个ELF可执行文件,用于获取系统信息 用法 getconf -a可以获取全部系统信息 对于这个命令,记住几个常用的信息获取方法 ...

  7. JSON空值处理与 StringUtils工具类使用

    JSON 动态查询时,需要的条件本应是null,前端传入的是" " //null转换为"" private static ValueFilter filter ...

  8. netty websocket

    1 全局保存websocket的通道  NettyConfig.java public class NettyConfig { public static ChannelGroup group = n ...

  9. CentOS7怎样安装Nginx1.12.2

    通过nginx官网的源码安装 yum -y install gcc* openssl* pcre* zlib* 安装相关依赖 这一步很重要 不然会报乱七八糟的错误 cd /usr/local进入/us ...

  10. java 懒汉式、饿汉式单例模式 不含多线程的情况

    //饿汉式 提前生成了单例对象 class Singleton{ private static final Singleton instance=new Singleton(); private Si ...