aiohttp_spider_def:

import asyncio
import re
import aiohttp
import aiomysql
from pyquery import PyQuery
from lxml import etree

start_url = 'http://news.baidu.com/'
waitting_urs = []  # queue of URLs waiting to be crawled (NOTE(review): "waiting_urls" misspelled)
seen_uels = set()  # URLs already fetched (NOTE(review): "seen_urls" misspelled)
stoppint = False  # stop flag for the consumer loop (NOTE(review): "stopping" misspelled)
sem = asyncio.Semaphore(10)  # caps concurrent fetches at 10
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko"}

async def fetch(url, session):
    """Fetch *url* and return the parsed lxml HTML tree, or None on failure.

    Concurrency is bounded by the module-level semaphore ``sem``; every
    request carries the module-level ``headers`` and a 1-second total timeout.
    """
    async with sem:
        try:
            # aiohttp.ClientTimeout is the supported way to pass a timeout;
            # a bare int for ``timeout=`` is deprecated.
            async with session.get(url, headers=headers,
                                   timeout=aiohttp.ClientTimeout(total=1)) as resp:
                print('url status:{}'.format(resp.status))
                return etree.HTML(await resp.read())
        except Exception as e:
            print('错误为:{}  url:{}'.format(e, url))
            return None  # explicit: callers must handle a failed fetch

def extract_urls(html):
    """Append unseen Baidu http(s) links found in *html* to ``waitting_urs``.

    *html* may be None (a failed fetch); that case is a no-op.  The original
    bare ``except: pass`` existed only to swallow the AttributeError from a
    None page — an explicit guard is used instead so real bugs surface.
    """
    if html is None:
        return
    for url in html.xpath('//a/@href'):
        # Keep only absolute http(s) links we have not crawled that stay
        # on a baidu domain.
        if url and url.startswith("http") and url not in seen_uels and 'baidu' in url:
            waitting_urs.append(url)

async def init_urls(url, session):
    """Seed the crawl: fetch *url*, mark it seen, and queue its outlinks."""
    page = await fetch(url, session)
    seen_uels.add(url)
    extract_urls(page)

async def article_handler(url, session, pool):
    """Fetch an article page, queue its outlinks, and exercise the MySQL pool.

    The DB section is demo code (insert twice, select, update, delete); it is
    kept, but every statement now uses driver-side parameter binding instead
    of str.format — the original interpolated the page title straight into
    SQL (injection / quoting bugs on any title containing a quote).
    """
    html = await fetch(url, session)
    seen_uels.add(url)
    extract_urls(html)
    if html is None:
        return  # fetch failed; nothing to store
    titles = html.xpath('//title/text()')
    if not titles:
        return  # page has no <title>; original swallowed this via bare except
    title = titles[0].strip()
    print('title:{}'.format(title))
    async with pool.acquire() as conn:
        async with conn.cursor() as cursor:
            try:
                # 插入 (insert) — parameterized, executed twice as in the demo
                await cursor.execute(
                    'insert into async_test_async(title) values(%s)', (title,))
                await cursor.execute(
                    'insert into async_test_async(title) values(%s)', (title,))

                # 查询数据 (select)
                await cursor.execute("select * from async_test_async")
                data = await cursor.fetchall()
                print("data:", data)

                # 更新数据 (update)
                await cursor.execute(
                    'update async_test_async set title=%s where id=%s',
                    ('update', 10168))

                # 删除数据 (delete)
                await cursor.execute(
                    'delete from async_test_async where id=%s', (10174,))
            except Exception as e:
                # Best-effort demo writes: log instead of silently passing.
                print('db error: {}  url:{}'.format(e, url))

async def consumer(pool):
    """Drain ``waitting_urs`` forever, scheduling an article handler per URL.

    Fixes two crashes in the original: it referenced ``url`` before any
    assignment when the queue was short, and called ``pop()`` on a possibly
    empty list (IndexError). Now it simply waits for the queue to refill.
    """
    async with aiohttp.ClientSession() as session:
        while not stoppint:
            if not waitting_urs:
                # Queue empty: yield to the producers instead of popping.
                await asyncio.sleep(0.5)
                continue
            url = waitting_urs.pop()
            print('start get url:{}'.format(url))
            if re.findall(r'baidu', url) and url not in seen_uels:
                print('waitting_urs:{}'.format(waitting_urs[0: 3]))
                asyncio.ensure_future(article_handler(url, session, pool))
                await asyncio.sleep(0.1)  # throttle task creation

async def main(loop):
    """Bootstrap: open the MySQL pool, seed the queue, start the consumer."""
    pool = await aiomysql.create_pool(host='127.0.0.1', port=3306, user='root',
                                      password='root', db='cfda', loop=loop,
                                      charset='utf8', autocommit=True)

    # Seed the URL queue from the start page; this session is only for the
    # seed fetch — consumer() opens its own long-lived session.
    async with aiohttp.ClientSession() as seed_session:
        seed_page = await fetch(start_url, seed_session)
        seen_uels.add(start_url)
        extract_urls(seed_page)

    # Schedule the consumer; it keeps running under loop.run_forever().
    asyncio.ensure_future(consumer(pool))

if __name__ == "__main__":
    # run_until_complete only performs the seed crawl; the consumer task
    # scheduled inside main() needs run_forever() to keep executing.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main(loop))
    loop.run_forever() 

aiohttp_spider_class:

import asyncio
import re
import aiohttp
import aiomysql
from pyquery import PyQuery
from lxml import etree

start_url = 'http://news.baidu.com/'
waitting_urs = []  # queue of URLs waiting to be crawled (NOTE(review): "waiting_urls" misspelled)
seen_uels = set()  # URLs already fetched (NOTE(review): "seen_urls" misspelled)
stoppint = False  # stop flag for the consumer loop (NOTE(review): "stopping" misspelled)
sem = asyncio.Semaphore(10)  # caps concurrent fetches at 10
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko"}

class async_text(object):
    """Class-based variant of the crawler; shares the module-level queue/state.

    The original passed ``self`` explicitly on every call
    (``self.fetch(self, url, session)``), which only avoided a TypeError
    because ``self`` was always the class object coming from the
    ``@classmethod`` entry point.  ``main`` now builds a real instance and
    all internal calls are normal bound-method calls.
    """

    async def fetch(self, url, session):
        """Fetch *url* under the shared semaphore; return lxml tree or None."""
        print("self:", self)
        async with sem:
            try:
                # aiohttp.ClientTimeout is the supported timeout form.
                async with session.get(url, headers=headers,
                                       timeout=aiohttp.ClientTimeout(total=1)) as resp:
                    print('url status:{}'.format(resp.status))
                    return etree.HTML(await resp.read())
            except Exception as e:
                print('错误为:{}  url:{}'.format(e, url))
                return None

    def extract_urls(self, html):
        """Append unseen Baidu http(s) links from *html* to ``waitting_urs``."""
        if html is None:
            return  # failed fetch; original hid this behind a bare except
        for url in html.xpath('//a/@href'):
            if url and url.startswith("http") and url not in seen_uels and 'baidu' in url:
                waitting_urs.append(url)

    async def init_urls(self, url, session):
        """Seed the crawl: fetch *url*, mark it seen, queue its outlinks."""
        html = await self.fetch(url, session)
        seen_uels.add(url)
        self.extract_urls(html)

    async def article_handler(self, url, session, pool):
        """Fetch an article, queue its outlinks, insert its title into MySQL."""
        html = await self.fetch(url, session)
        seen_uels.add(url)
        self.extract_urls(html)
        if html is None:
            return
        titles = html.xpath('//title/text()')
        if not titles:
            return
        title = titles[0].strip()
        print('title:{}'.format(title))
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                try:
                    # 插入 (insert) — parameterized instead of str.format,
                    # so titles containing quotes cannot break/inject SQL.
                    await cur.execute(
                        'insert into async_test_async(title) values(%s)', (title,))
                except Exception as e:
                    print('db error: {}  url:{}'.format(e, url))

    async def consumer(self, pool):
        """Drain ``waitting_urs`` forever, scheduling article handlers.

        Fixes the original's undefined-``url`` reference and the
        ``pop()``-on-empty-list IndexError by waiting when the queue is empty.
        """
        async with aiohttp.ClientSession() as session:
            while not stoppint:
                if not waitting_urs:
                    await asyncio.sleep(0.5)
                    continue
                url = waitting_urs.pop()
                print('start get url:{}'.format(url))
                if re.findall(r'baidu', url) and url not in seen_uels:
                    print('waitting_urs:{}'.format(waitting_urs[0: 3]))
                    asyncio.ensure_future(self.article_handler(url, session, pool))
                    await asyncio.sleep(0.1)

    @classmethod
    async def main(cls, loop):
        """Entry point: create the DB pool, seed the crawl, start the consumer."""
        pool = await aiomysql.create_pool(host='127.0.0.1', port=3306, user='root',
                                          password='root', db='cfda',
                                          loop=loop,
                                          charset='utf8', autocommit=True)
        spider = cls()  # real instance so bound-method calls work normally
        async with aiohttp.ClientSession() as session:
            html = await spider.fetch(start_url, session)
            seen_uels.add(start_url)
            spider.extract_urls(html)

        asyncio.ensure_future(spider.consumer(pool))

if __name__ == "__main__":
    # run_until_complete performs only the seed crawl; run_forever() keeps
    # the loop alive for the consumer task scheduled inside main().
    loop = asyncio.get_event_loop()
    loop.run_until_complete(async_text.main(loop))
    loop.run_forever()

  

aiohttp_spider的更多相关文章

随机推荐

  1. June 03rd, 2019. Week 23rd, Monday

    There is no shame in hard work. 努力从来不丢人. Stop complaining about the current work arrangements, just ...

  2. TensorFlow从1到2(十五)(完结)在浏览器做机器学习

    TensorFlow的Javascript版 TensorFlow一直努力扩展自己的基础平台环境,除了熟悉的Python,当前的TensorFlow还实现了支持Javascript/C++/Java/ ...

  3. layUI学习第四日:layUI布局系列一

    1.栅格布局规则 1.1 layui-row定义行,如:<div class="layui-row"></div> 1.2 layui-col-md*这样的 ...

  4. 剑指Offer-31.整数中1出现的次数(从1到n整数中1出现的次数)(C++/Java)

    题目: 求出1~13的整数中1出现的次数,并算出100~1300的整数中1出现的次数?为此他特别数了一下1~13中包含1的数字有1.10.11.12.13因此共出现6次,但是对于后面问题他就没辙了.A ...

  5. <Stack> (高频)394 ( 高频)224

    394. Decode String 四种情况: 1. 数字,把之前有的数字乘以10再加本数字 2. ' [ ', 入口, 把之前的数字压入栈中并num归零. 3. ' ] ' ,出口,归零.用dfs ...

  6. 【计算机网络】UDP基础知识总结

    1. UDP概念相关 [!NOTE] UDP(User Datagram Protocol),又叫用户数据报协议. UDP是一个无连接的.不可靠.基于数据报的传输协议.UDP只是报文(报文可以理解为一 ...

  7. eclipse彻底去除validation(彻底解决编辑js文件的卡顿问题)

    Eclipse中默认的JS编辑器非常慢,尤其在拷贝粘贴代码时,CPU总是占用很高甚至到100%,也就导致了编辑起来很卡. 这是因为Eclipse中带的Validate功能导致的,这个鸡肋的功能简直让人 ...

  8. 数据库——SQL-SERVER练习(2)连接与子查询

    一.实验准备 1.复制实验要求文件及“CREATE-TABLES.SQL”文件, 粘贴到本地机桌面. 2.启动SQL-SERVER服务. 3. 运行查询分析器, 点击菜单<文件>/< ...

  9. 2018-8-10-win10-uwp-关联文件

    原文:2018-8-10-win10-uwp-关联文件 title author date CreateTime categories win10 uwp 关联文件 lindexi 2018-08-1 ...

  10. EF-入门操作

    EntityFramework Core 理解 DbContext :数据库 DbSet: 数据库表 Model : 数据行 IQueryable<Model> 查询结果集合 Lamada ...