# -*- coding: utf-8 -*-
from datetime import datetime
import pymongo
import scrapy
from wooyun.items import WooyunItem
from scrapy.conf import settings


class WooyunSpider(scrapy.Spider):
    name = "wooyun"  # spider name; run it with: scrapy crawl wooyun
    allowed_domains = ["wooyun.org"]
    start_urls = [
        'http://wooyun.org/bugs/new_public/'
    ]  # the Spider base class iterates this list (for url in self.start_urls: yield Request(url, dont_filter=True)), so this is where the crawl starts

    def __init__(self, page_max=settings['PAGE_MAX_DEFAULT'],
                 local_store=settings['LOCAL_STORE_DEFAULT'],
                 update=settings['UPDATE_DEFAULT'], *args, **kwargs):
        self.page_max = int(page_max)
        self.local_store = 'true' == local_store.lower()
        self.update = 'true' == update.lower()
        self.connection_string = "mongodb://%s:%d" % (settings['MONGODB_SERVER'], settings['MONGODB_PORT'])
        self.client = pymongo.MongoClient(self.connection_string)
        self.db = self.client[settings['MONGODB_DB']]
        self.collection = self.db[settings['MONGODB_COLLECTION']]

    def closed(self, reason):
        self.client.close()

    def parse(self, response):  # called with the first response of the crawl
        total_pages = response.xpath("//p[@class='page']/text()").re('\d+')[1]
        if self.page_max == 0:
            end_page = int(total_pages)
        else:
            end_page = self.page_max
        for n in range(1, end_page + 1):
            page = "/bugs/new_public/page/%d" % n  # one page of WooYun's public vulnerability list
            url = response.urljoin(page)
            yield scrapy.Request(url, self.parse_list)  # parse the vulnerability list on that page

    def parse_list(self, response):  # extract the report links from one list page
        links = response.xpath('//tbody/tr/td/a/@href').extract()
        for url in links:
            wooyun_id = url.split('/')[2]
            if self.update == True or self.__search_mongodb(wooyun_id) == False:
                url = response.urljoin(url)
                yield scrapy.Request(url, self.parse_detail)

    def parse_detail(self, response):  # extract the content of a single vulnerability report
        item = WooyunItem()
        item['wooyun_id'] = response.xpath('//*[@id="bugDetail"]/div[5]/h3[1]/a/@href').extract()[0].split('/')[2]
        item['title'] = response.xpath('//title/text()').extract()[0].split("|")[0]
        item['bug_type'] = response.xpath("//h3[@class='wybug_type']/text()").extract()[0].split(u':')[1].strip()
        #item['bug_type'] = response.xpath('//*[@id="bugDetail"]/div[5]/h3[7]/text()').extract()[0].split(u':')[1].strip()
        # Some reports have no plain-text author, for example:
        # http://wooyun.org/bugs/wooyun-2010-01010
        # Parsing the author raises an exception there, so guard it.
        try:
            #item['author'] = response.xpath("//h3[@class='wybug_author']/a/text()").extract()[0]
            item['author'] = response.xpath('//*[@id="bugDetail"]/div[5]/h3[4]/a/text()').extract()[0]
        except:
            item['author'] = '<Parse Error>'
        # response.body is a str, so decode it to utf-8;
        # otherwise saving to MongoDB may run into trouble.
        item['html'] = response.body.decode('utf-8', 'ignore')
        #dt = response.xpath("//h3[@class='wybug_date']/text()").re("[\d+]{4}-[\d+]{2}-[\d+]{2}")[0].split('-')
        dt = response.xpath('//*[@id="bugDetail"]/div[5]/h3[5]/text()').re("[\d+]{4}-[\d+]{2}-[\d+]{2}")[0].split('-')
        item['datetime'] = datetime(int(dt[0]), int(dt[1]), int(dt[2]))
        #dt = response.xpath("//h3[@class='wybug_open_date']/text()").re("[\d+]{4}-[\d+]{2}-[\d+]{2}")[0].split('-')
        dt = response.xpath('//*[@id="bugDetail"]/div[5]/h3[6]/text()').re("[\d+]{4}-[\d+]{2}-[\d+]{2}")[0].split('-')
        item['datetime_open'] = datetime(int(dt[0]), int(dt[1]), int(dt[2]))
        # image urls to download
        item['image_urls'] = []
        if self.local_store:
            # WooYun serves images under two kinds of URL: absolute ones starting with
            # http://static.wooyun.org/wooyun/upload/ and relative /upload/... paths.
            # For the latter, prepend http://www.wooyun.org to form a complete URL;
            # pipelines.py reverses this when it stores the files locally.
            image_urls = response.xpath("//img[contains(@src, '/upload/')]/@src").extract()
            for u in image_urls:
                if self.__check_ingnored_image(u):
                    continue
                if u.startswith('/'):
                    u = 'http://www.wooyun.org' + u
                item['image_urls'].append(u)
        return item  # emit one item

    def __check_ingnored_image(self, image_url):
        for ignored_url in settings['IMAGE_DOWLOAD_IGNORED']:
            if ignored_url in image_url:
                return True
        return False

    def __search_mongodb(self, wooyun_id):
        wooyun_id_exsist = True if self.collection.find({'wooyun_id': wooyun_id}).count() > 0 else False
        return wooyun_id_exsist
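
All of the spider's configuration comes from the project settings, and the keys it expects can be read off the code above. Below is a minimal sketch of the matching settings.py entries; the keys are the ones the spider actually references, while the concrete values are only illustrative assumptions, not the project's real defaults.

# settings.py sketch: keys taken from the spider above, values assumed for illustration.
PAGE_MAX_DEFAULT = '0'           # '0' means crawl every page of the public list
LOCAL_STORE_DEFAULT = 'false'    # 'true' downloads the images referenced by each report
UPDATE_DEFAULT = 'false'         # 'true' re-crawls reports already stored in MongoDB

MONGODB_SERVER = 'localhost'
MONGODB_PORT = 27017             # must be an int, since the spider formats it with %d
MONGODB_DB = 'wooyun'
MONGODB_COLLECTION = 'bugs'

IMAGE_DOWLOAD_IGNORED = []       # URL substrings to skip; note the spider spells this key as DOWLOAD

Because these defaults feed keyword arguments of __init__, they can be overridden per run with spider arguments, e.g. scrapy crawl wooyun -a page_max=5 -a local_store=true.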

scrapy.Request(url, self.parse_detail): the Request object carries a callback. Requests are placed in the scheduler's queue, and the engine pulls the next request from the scheduler.
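
A minimal, self-contained sketch of that round trip (the spider name and URL here are placeholders, not part of the wooyun project):

import scrapy

class CallbackDemoSpider(scrapy.Spider):
    # Hypothetical spider that only demonstrates the Request/callback cycle.
    name = 'callback_demo'
    start_urls = ['http://example.com/']

    def parse(self, response):
        # The engine hands this Request to the scheduler; once the download
        # finishes, the engine invokes parse_detail with the new response.
        yield scrapy.Request(response.url, callback=self.parse_detail, dont_filter=True)

    def parse_detail(self, response):
        self.logger.info('callback invoked for %s', response.url)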

The function that scrapy's crawl command calls:

def run(self, args, opts):
    if len(args) < 1:
        raise UsageError()
    elif len(args) > 1:
        raise UsageError("running 'scrapy crawl' with more than one spider is no longer supported")
    spname = args[0]

    self.crawler_process.crawl(spname, **opts.spargs)  # crawler_process is the CrawlerProcess instance attached in cmdline.execute() below
    self.crawler_process.start()
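
Here opts.spargs is the dictionary built from the command line's -a name=value options, so spider arguments arrive at WooyunSpider.__init__ as string keyword arguments. A rough sketch of that mapping, with example values:

# `scrapy crawl wooyun -a page_max=5 -a local_store=true -a update=false`
# reaches run() as roughly:
spname = 'wooyun'
spargs = {'page_max': '5', 'local_store': 'true', 'update': 'false'}  # every -a value is a string
# run() then forwards them unchanged via crawl(spname, **spargs), which is why
# the spider calls int(page_max) and compares local_store.lower() against 'true'.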

C:\Python27\Lib\site-packages\scrapy\cmdline.py

def execute(argv=None, settings=None):
    ......
    cmd.settings = settings
    cmd.add_options(parser)
    opts, args = parser.parse_args(args=argv[1:])
    _run_print_help(parser, cmd.process_options, args, opts)
    cmd.crawler_process = CrawlerProcess(settings)  # attach the crawler process to the command object
    _run_print_help(parser, _run_command, cmd, args, opts)
    sys.exit(cmd.exitcode)
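
execute() can also be invoked directly, which is handy when stepping through this code path in a debugger; a small sketch, equivalent to typing the command in a shell inside the project directory:

from scrapy.cmdline import execute

# argv mirrors what the shell would pass; argv[0] is skipped by parse_args(args=argv[1:]).
# Note that execute() ends with sys.exit(), so nothing after this call runs.
execute(['scrapy', 'crawl', 'wooyun'])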

CrawlerProcess(settings) [C:\Python27\Lib\site-packages\scrapy\crawler.py]

class CrawlerProcess(CrawlerRunner):
    """
    A class to run multiple scrapy crawlers in a process simultaneously.

    This class extends :class:`~scrapy.crawler.CrawlerRunner` by adding support
    for starting a Twisted `reactor`_ and handling shutdown signals, like the
    keyboard interrupt command Ctrl-C. It also configures top-level logging.

    This utility should be a better fit than
    :class:`~scrapy.crawler.CrawlerRunner` if you aren't running another
    Twisted `reactor`_ within your application.

    The CrawlerProcess object must be instantiated with a
    :class:`~scrapy.settings.Settings` object.

    This class shouldn't be needed (since Scrapy is responsible of using it
    accordingly) unless writing scripts that manually handle the crawling
    process. See :ref:`run-from-script` for an example.
    """

    def __init__(self, settings=None):
        super(CrawlerProcess, self).__init__(settings)
        install_shutdown_handlers(self._signal_shutdown)
        configure_logging(self.settings)
        log_scrapy_info(self.settings)

    def _signal_shutdown(self, signum, _):
        install_shutdown_handlers(self._signal_kill)
        signame = signal_names[signum]
        logger.info("Received %(signame)s, shutting down gracefully. Send again to force ",
                    {'signame': signame})
        reactor.callFromThread(self._graceful_stop_reactor)

    def _signal_kill(self, signum, _):
        install_shutdown_handlers(signal.SIG_IGN)
        signame = signal_names[signum]
        logger.info('Received %(signame)s twice, forcing unclean shutdown',
                    {'signame': signame})
        reactor.callFromThread(self._stop_reactor)

    def start(self, stop_after_crawl=True):
        """
        This method starts a Twisted `reactor`_, adjusts its pool size to
        :setting:`REACTOR_THREADPOOL_MAXSIZE`, and installs a DNS cache based
        on :setting:`DNSCACHE_ENABLED` and :setting:`DNSCACHE_SIZE`.

        If `stop_after_crawl` is True, the reactor will be stopped after all
        crawlers have finished, using :meth:`join`.

        :param boolean stop_after_crawl: stop or not the reactor when all
            crawlers have finished
        """
        if stop_after_crawl:
            d = self.join()
            # Don't start the reactor if the deferreds are already fired
            if d.called:
                return
            d.addBoth(self._stop_reactor)

        reactor.installResolver(self._get_dns_resolver())
        tp = reactor.getThreadPool()
        tp.adjustPoolsize(maxthreads=self.settings.getint('REACTOR_THREADPOOL_MAXSIZE'))
        reactor.addSystemEventTrigger('before', 'shutdown', self.stop)
        reactor.run(installSignalHandlers=False)  # blocking call

    def _get_dns_resolver(self):
        if self.settings.getbool('DNSCACHE_ENABLED'):
            cache_size = self.settings.getint('DNSCACHE_SIZE')
        else:
            cache_size = 0
        return CachingThreadedResolver(
            reactor=reactor,
            cache_size=cache_size,
            timeout=self.settings.getfloat('DNS_TIMEOUT')
        )

    def _graceful_stop_reactor(self):
        d = self.stop()
        d.addBoth(self._stop_reactor)
        return d

    def _stop_reactor(self, _=None):
        try:
            reactor.stop()
        except RuntimeError:  # raised if already stopped or in shutdown stage
            pass
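
The :ref:`run-from-script` note in the docstring refers to driving CrawlerProcess yourself instead of going through the crawl command. A minimal sketch for this project, assuming it is run from the Scrapy project root so that wooyun/settings.py is picked up:

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

# get_project_settings() loads wooyun/settings.py, so the MongoDB and
# PAGE_MAX_DEFAULT keys the spider relies on are available as usual.
process = CrawlerProcess(get_project_settings())
process.crawl('wooyun', page_max='1')   # same keyword arguments as `-a page_max=1`
process.start()                         # starts the Twisted reactor and blocks until the crawl finishes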
