Concurrent Crawler

Code Implementation

/crawler/main.go
package main

import (
    "learn/crawler/engine"
    "learn/crawler/scheduler"
    "learn/crawler/zhenai/parser"
)

func main() {
    e := engine.ConcurrentEngine{
        Scheduler:   &scheduler.QueuedScheduler{},
        WorkerCount: 20,
    }
    e.Run(engine.Request{
        Url:       "http://www.zhenai.com/zhenghun",
        ParseFunc: parser.ParseCityList,
    })

    // To test a single city (Shanghai) only:
    //e.Run(engine.Request{
    //    Url:       "http://www.zhenai.com/zhenghun/shanghai",
    //    ParseFunc: parser.ParseCity,
    //})
}
/crawler/engine/simple.go
package engine

import (
    "learn/crawler/fetcher"
    "log"
)

type SimpleEngine struct{}

func (e SimpleEngine) Run(seeds ...Request) {
    var requests []Request
    for _, r := range seeds {
        requests = append(requests, r)
    }
    // Process the queue breadth-first: each request may yield
    // both new requests and parsed items.
    for len(requests) > 0 {
        r := requests[0]
        requests = requests[1:]

        parseResult, err := worker(r)
        if err != nil {
            continue
        }
        requests = append(requests, parseResult.Requests...)
        for _, item := range parseResult.Items {
            log.Printf("Got item %v", item)
        }
    }
}

func worker(r Request) (ParseResult, error) {
    log.Printf("Fetching %s", r.Url)
    body, err := fetcher.Fetch(r.Url)
    if err != nil {
        log.Printf("Fetcher: error fetching url %s: %v", r.Url, err)
        return ParseResult{}, err
    }
    return r.ParseFunc(body), nil
}
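For comparison with the concurrent engine used in main.go above, the single-task engine can be swapped in with the same Request — a minimal sketch:

    e := engine.SimpleEngine{}
    e.Run(engine.Request{
        Url:       "http://www.zhenai.com/zhenghun",
        ParseFunc: parser.ParseCityList,
    })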
/crawler/engine/concurrent.go
package engine

import (
    "log"
)

type ConcurrentEngine struct {
    Scheduler   Scheduler
    WorkerCount int
}

type Scheduler interface {
    ReadyNotifier
    Submit(Request)
    WorkerChan() chan Request
    Run()
}

type ReadyNotifier interface {
    WorkerReady(chan Request)
}

func (e *ConcurrentEngine) Run(seeds ...Request) {
    out := make(chan ParseResult)
    e.Scheduler.Run()

    for i := 0; i < e.WorkerCount; i++ {
        createWork(e.Scheduler.WorkerChan(), out, e.Scheduler)
    }

    for _, r := range seeds {
        e.Scheduler.Submit(r)
    }

    itemCount := 0
    // The engine loop runs forever: it prints parsed items and feeds
    // newly discovered requests back into the scheduler.
    for {
        result := <-out
        for _, item := range result.Items {
            log.Printf("Got item #%d: %v", itemCount, item)
            itemCount++
        }
        for _, request := range result.Requests {
            e.Scheduler.Submit(request)
        }
    }
}

func createWork(in chan Request, out chan ParseResult, ready ReadyNotifier) {
    go func() {
        for {
            // Tell the scheduler this worker is ready for its next request.
            ready.WorkerReady(in)
            request := <-in
            result, err := worker(request)
            if err != nil {
                continue
            }
            out <- result
        }
    }()
}
/crawler/engine/typers.go
package engine

type Request struct {
    Url       string
    ParseFunc func([]byte) ParseResult
}

type ParseResult struct {
    Requests []Request
    Items    []interface{}
}

// NilParser is for pages that should be fetched but yield no
// further requests or items.
func NilParser([]byte) ParseResult {
    return ParseResult{}
}
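A Request ties a URL to the parser for that kind of page; a leaf page that should not be mined for further links can use NilParser. A small sketch (the URL here is purely illustrative):

    r := engine.Request{
        Url:       "http://album.zhenai.com/u/123456",
        ParseFunc: engine.NilParser,
    }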
/crawler/fetcher/fetcher.go
package fetcher

import (
    "bufio"
    "fmt"
    "golang.org/x/net/html/charset"
    "golang.org/x/text/encoding"
    "golang.org/x/text/encoding/unicode"
    "golang.org/x/text/transform"
    "io/ioutil"
    "log"
    "net/http"
    "time"
)

// Shared rate limiter: at most one fetch every 100ms across all workers.
var rateLimiter = time.Tick(100 * time.Millisecond)

func Fetch(url string) ([]byte, error) {
    <-rateLimiter
    client := &http.Client{}
    req, err := http.NewRequest("GET", url, nil)
    if err != nil {
        return nil, err
    }
    req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36")
    resp, err := client.Do(req)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("wrong status code: %d", resp.StatusCode)
    }
    bodyReader := bufio.NewReader(resp.Body)
    e := determineEncoding(bodyReader)
    // Decode the page into UTF-8 using the detected encoding.
    utf8Reader := transform.NewReader(bodyReader, e.NewDecoder())
    return ioutil.ReadAll(utf8Reader)
}

func determineEncoding(r *bufio.Reader) encoding.Encoding {
    // Peek (not Read) so the bytes stay in the buffer for the real read.
    bytes, err := r.Peek(1024)
    if err != nil {
        log.Printf("Fetcher error: %v", err)
        return unicode.UTF8
    }
    e, _, _ := charset.DetermineEncoding(bytes, "")
    return e
}
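The rate limiter deserves a closer look: because rateLimiter is a package-level time.Tick channel shared by every goroutine that calls Fetch, the whole crawler is capped at roughly ten requests per second no matter how many workers run. A standalone sketch of the idea:

    package main

    import (
        "fmt"
        "time"
    )

    var rateLimiter = time.Tick(100 * time.Millisecond)

    func main() {
        start := time.Now()
        for i := 0; i < 5; i++ {
            <-rateLimiter // blocks until the next 100ms tick
            fmt.Printf("request %d at %v\n", i, time.Since(start).Round(time.Millisecond))
        }
    }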
/crawler/zhenai/parser/citylist.go
package parser

import (
    "learn/crawler/engine"
    "regexp"
)

const cityListRe = `<a href="(http://www.zhenai.com/zhenghun/[0-9a-z]+)" [^>]*>([^<]+)</a>`

func ParseCityList(contents []byte) engine.ParseResult {
    re := regexp.MustCompile(cityListRe)
    matches := re.FindAllSubmatch(contents, -1)
    result := engine.ParseResult{}
    for _, m := range matches {
        // m[1] is the city URL, m[2] is the city name.
        result.Items = append(result.Items, "City: "+string(m[2]))
        result.Requests = append(result.Requests, engine.Request{
            Url:       string(m[1]),
            ParseFunc: ParseCity,
        })
    }
    return result
}
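Regex-based parsers like this are easy to unit-test against a saved page. A hypothetical test, assuming a copy of the city list page has been saved as testdata/citylist.html (the file and its path are assumptions, not part of the project above):

    package parser

    import (
        "io/ioutil"
        "testing"
    )

    func TestParseCityList(t *testing.T) {
        contents, err := ioutil.ReadFile("testdata/citylist.html")
        if err != nil {
            t.Fatal(err)
        }
        result := ParseCityList(contents)
        if len(result.Requests) == 0 {
            t.Error("expected at least one city request, got none")
        }
        // Every request produced here should carry a parser.
        for _, r := range result.Requests {
            if r.ParseFunc == nil {
                t.Errorf("request %s has no ParseFunc", r.Url)
            }
        }
    }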
/crawler/zhenai/parser/city.go
package parser

import (
    "learn/crawler/engine"
    "regexp"
)

var (
    profileRe = regexp.MustCompile(`<a href="(http://album.zhenai.com/u/[0-9]+)" [^>]*>([^<]+)</a>`)
    cityUrlRe = regexp.MustCompile(`href="(http://www.zhenai.com/zhenghun/[^"]+)"`)
)

func ParseCity(contents []byte) engine.ParseResult {
    matches := profileRe.FindAllSubmatch(contents, -1)
    result := engine.ParseResult{}
    for _, m := range matches {
        // Copy the name into a local variable so each closure captures
        // its own value rather than the shared loop variable.
        name := string(m[2])
        result.Items = append(result.Items, "User "+name)
        result.Requests = append(result.Requests, engine.Request{
            Url: string(m[1]),
            ParseFunc: func(c []byte) engine.ParseResult {
                return ParseProfile(c, "name:"+name)
            },
        })
    }
    // Also follow links to other city pages (e.g. pagination).
    matches = cityUrlRe.FindAllSubmatch(contents, -1)
    for _, m := range matches {
        result.Requests = append(result.Requests, engine.Request{
            Url:       string(m[1]),
            ParseFunc: ParseCity,
        })
    }
    return result
}
/crawler/zhenai/parser/profile.go
package parser

import (
    "learn/crawler/engine"
    "learn/crawler/model"
    "regexp"
)

const all = `<div class="m-btn purple" data-v-8b1eac0c>([^<]+)</div>`

func ParseProfile(contents []byte, name string) engine.ParseResult {
    profile := model.Profile{}
    profile.User = append(profile.User, name)
    re := regexp.MustCompile(all)
    match := re.FindAllSubmatch(contents, -1)
    for _, m := range match {
        profile.User = append(profile.User, string(m[1]))
    }
    result := engine.ParseResult{
        Items: []interface{}{profile},
    }
    return result
}
/crawler/model/profile.go
package model

type Profile struct {
    User []string
}
/crawler/scheduler/queued.go
package scheduler

import "learn/crawler/engine"

// QueuedScheduler keeps its own queues of pending requests and idle
// workers, matching them up inside a single goroutine.
type QueuedScheduler struct {
    requestChan chan engine.Request
    workChan    chan chan engine.Request
}

// WorkerChan returns a fresh channel per worker, so the scheduler can
// choose which worker receives the next request.
func (s *QueuedScheduler) WorkerChan() chan engine.Request {
    return make(chan engine.Request)
}

func (s *QueuedScheduler) Submit(r engine.Request) {
    s.requestChan <- r
}

func (s *QueuedScheduler) WorkerReady(w chan engine.Request) {
    s.workChan <- w
}

func (s *QueuedScheduler) Run() {
    s.workChan = make(chan chan engine.Request)
    s.requestChan = make(chan engine.Request)
    go func() {
        var requestQ []engine.Request
        var workerQ []chan engine.Request
        for {
            var activeRequest engine.Request
            var activeWorker chan engine.Request
            if len(requestQ) > 0 && len(workerQ) > 0 {
                activeRequest = requestQ[0]
                activeWorker = workerQ[0]
            }
            // When either queue is empty, activeWorker stays nil, so the
            // send case below can never fire and the select only enqueues.
            select {
            case r := <-s.requestChan:
                requestQ = append(requestQ, r)
            case w := <-s.workChan:
                workerQ = append(workerQ, w)
            case activeWorker <- activeRequest:
                workerQ = workerQ[1:]
                requestQ = requestQ[1:]
            }
        }
    }()
}
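The trick that makes this loop work: a send on a nil channel blocks forever, so while either queue is empty the `activeWorker <- activeRequest` case is effectively disabled. A standalone sketch of that behavior:

    package main

    import "fmt"

    func main() {
        var nilChan chan int // zero value of a channel: nil
        select {
        case nilChan <- 1:
            fmt.Println("never happens: sends on a nil channel block forever")
        default:
            fmt.Println("nil-channel send case is never ready")
        }
    }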
/crawler/scheduler/simple.go
package scheduler

import "learn/crawler/engine"

// SimpleScheduler pushes every request into one shared channel that
// all workers read from.
type SimpleScheduler struct {
    workerChan chan engine.Request
}

func (s *SimpleScheduler) WorkerChan() chan engine.Request {
    return s.workerChan
}

func (s *SimpleScheduler) WorkerReady(chan engine.Request) {}

func (s *SimpleScheduler) Run() {
    s.workerChan = make(chan engine.Request)
}

func (s *SimpleScheduler) Submit(r engine.Request) {
    // Send in a new goroutine so Submit never blocks the engine loop.
    go func() { s.workerChan <- r }()
}
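The `go func() { ... }()` in Submit is essential: the engine calls Submit from the same goroutine that drains the `out` channel, so a direct send on the unbuffered workerChan would deadlock whenever no worker is ready to receive. A minimal sketch of the pattern, using only the standard library:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        ch := make(chan int) // unbuffered, like workerChan

        // Submit three items without blocking the caller: each send
        // happens in its own goroutine.
        for i := 0; i < 3; i++ {
            i := i
            go func() { ch <- i }()
        }

        // A single slow "worker" drains the channel at its own pace.
        for j := 0; j < 3; j++ {
            fmt.Println("got", <-ch)
            time.Sleep(10 * time.Millisecond)
        }
    }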

Full Project

https://gitee.com/FenYiYuan/golang-cpdcrawler.git

