I recently started learning about web crawling and wanted to practise by extracting some data from a website. At first I did get the correct html back from this site, but later, no matter what I tried, I could no longer get the correct html. I would appreciate any help~

My initial code was:

import urllib.request
from bs4 import BeautifulSoup

url = "http://www.karger.com/Collections/Hospital"
data = urllib.request.urlopen(url).read()
data = data.decode('gb2312')        # decode the raw bytes (I assumed gb2312)
data = BeautifulSoup(data, "html.parser")
print(data)
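
(Side note: I am not sure www.karger.com is really served as gb2312. A rough sketch of what I could do instead, reading the charset the server declares and falling back to utf-8 when none is given, would look like this — just an idea, not verified against this site:)

import urllib.request
from bs4 import BeautifulSoup

url = "http://www.karger.com/Collections/Hospital"
resp = urllib.request.urlopen(url)
raw = resp.read()
# use the charset declared in the response headers; utf-8 is my fallback assumption
charset = resp.headers.get_content_charset() or 'utf-8'
soup = BeautifulSoup(raw.decode(charset, errors='replace'), 'html.parser')
print(soup.prettify())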

Later I changed it to this:

import urllib.request

url = "http://www.karger.com/Collections/Hospital"

# pretend to be Firefox by sending the headers a real browser would send
headers = [('Host', 'www.karger.com'),
           ('Connection', 'keep-alive'),
           ('Cache-Control', 'max-age=0'),
           ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
           ('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0'),
           ('Accept-Encoding', 'gzip, deflate'),
           ('Accept-Language', 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3'),
           ('If-None-Match', '90101f995236651aa74454922de2ad74'),
           ('Referer', 'http://www.karger.com/Collections/Hospital'),
           ('If-Modified-Since', 'Thu, 01 Jan 1970 00:00:00 GMT')]

opener = urllib.request.build_opener()
opener.addheaders = headers
data = opener.open(url).read()
print(data)
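
(Another thing I noticed about this second version: because I send 'Accept-Encoding: gzip, deflate' myself, urllib will not decompress the response for me. If the server did compress the body, I would have to decompress it by hand, roughly like this — a sketch only:)

import gzip

resp = opener.open(url)       # opener and url as defined above
raw = resp.read()
# urllib returns the body exactly as received; if it is gzip-compressed,
# decompress it before trying to decode or parse it
if resp.headers.get('Content-Encoding') == 'gzip':
    raw = gzip.decompress(raw)
print(raw[:500])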

The output from both versions is the following:

b'<html>\r\n<head>\r\n<META NAME="robots" CONTENT="noindex,nofollow">\r\n<script>\r\n(function() {  function getSessionCookies() {   cookieArray = new Array();   var cName = /^\\s?incap_ses_/;   var c = document.cookie.split(";");   for (var i = 0; i < c.length; i++) {    key = c[i].substr(0, c[i].indexOf("="));    value = c[i].substr(c[i].indexOf("=") + 1, c[i].length);    if (cName.test(key)) {     cookieArray[cookieArray.length] = value    }   }   return cookieArray  }  function setIncapCookie(vArray) {   try {    cookies = getSessionCookies();    digests = new Array(cookies.length);    for (var i = 0; i < cookies.length; i++) {     digests[i] = simpleDigest((vArray) + cookies[i])    }    res = vArray + ",digest=" + (digests.join())   } catch (e) {    res = vArray + ",digest=" + (encodeURIComponent(e.toString()))   }   createCookie("___utmvc", res, 20)  }  function simpleDigest(mystr) {   var res = 0;   for (var i = 0; i < mystr.length; i++) {    res += mystr.charCodeAt(i)   }   return res  }  function createCookie(name, value, seconds) {   if (seconds) {    var date = new Date();    date.setTime(date.getTime() + (seconds * 1000));    var expires = "; expires=" + date.toGMTString()   } else {    var expires = ""   }   document.cookie = name + "=" + value + expires + "; path=/"  }  function test(o) {   var res = "";   var vArray = new Array();   for (var j = 0; j < o.length; j++) {    var test = o[j][0]    switch (o[j][1]) {    case "exists_boolean":     try { \t if(typeof(eval(test)) != "undefined"){ \t\tvArray[vArray.length] = encodeURIComponent(test + "=true") \t } \t else{ \t\tvArray[vArray.length] = encodeURIComponent(test + "=false") \t }     } catch (e) {      vArray[vArray.length] = encodeURIComponent(test + "=false")     }     break;    case "exists":     try {      vArray[vArray.length] = encodeURIComponent(test + "=" + typeof(eval(test)))     } catch (e) {      vArray[vArray.length] = encodeURIComponent(test + "=" + e)     }     break;    case "value":     try {      vArray[vArray.length] = encodeURIComponent(test + "=" + eval(test).toString())     } catch (e) {      vArray[vArray.length] = encodeURIComponent(test + "=" + e)     }     break;     case "plugins":     try{         p=navigator.plugins         pres=""         for (a in p){pres+=(p[a][\'description\']+" ").substring(0,20)}         vArray[vArray.length] = encodeURIComponent("plugins=" + pres);         }     catch(e){         vArray[vArray.length] = encodeURIComponent("plugins=" +e);         } \tbreak;      case "plugin":     try {      a = navigator.plugins;      for (i in a) {       f = a[i]["filename"].split(".");       if (f.length == 2) {        vArray[vArray.length] = encodeURIComponent("plugin=" + f[1]);        break       }      }     } catch (e) {      vArray[vArray.length] = encodeURIComponent("plugin=" + e)     }     break    }   }   vArray = vArray.join();   return vArray  }  var o = [   ["navigator", "exists_boolean"],   ["navigator.vendor", "value"],   ["opera", "exists_boolean"],   ["ActiveXObject", "exists_boolean"],   ["navigator.appName", "value"],   ["platform", "plugin"],   ["webkitURL", "exists_boolean"],   ["navigator.plugins.length==0", "value"],   ["_phantom", "exists_boolean"] ];  try {   setIncapCookie(test(o));   document.createElement("img").src = "/_Incapsula_Resource?SWKMTFSR=1&e=" + Math.random()  } catch (e) {   img = document.createElement("img");   img.src = "/_Incapsula_Resource?SWKMTFSR=1&e=" + e  } })();\r\n</script>\r\n<script>\r\n(function() { \r\nvar z="";var 
b="7472797B766172207868723B76617220743D6E6577204461746528292E67657454696D6528293B766172207374617475733D227374617274223B7661722074696D696E673D6E65772041727261792833293B77696E646F772E6F6E756E6C6F61643D66756E6374696F6E28297B74696D696E675B325D3D22723A222B286E6577204461746528292E67657454696D6528292D74293B646F63756D656E742E637265617465456C656D656E742822696D6722292E7372633D222F5F496E63617073756C615F5265736F757263653F4553324C555243543D363726743D373826643D222B656E636F6465555249436F6D706F6E656E74287374617475732B222028222B74696D696E672E6A6F696E28292B222922297D3B69662877696E646F772E584D4C4874747052657175657374297B7868723D6E657720584D4C48747470526571756573747D656C73657B7868723D6E657720416374697665584F626A65637428224D6963726F736F66742E584D4C4854545022297D7868722E6F6E726561647973746174656368616E67653D66756E6374696F6E28297B737769746368287868722E72656164795374617465297B6361736520303A7374617475733D6E6577204461746528292E67657454696D6528292D742B223A2072657175657374206E6F7420696E697469616C697A656420223B627265616B3B6361736520313A7374617475733D6E6577204461746528292E67657454696D6528292D742B223A2073657276657220636F6E6E656374696F6E2065737461626C6973686564223B627265616B3B6361736520323A7374617475733D6E6577204461746528292E67657454696D6528292D742B223A2072657175657374207265636569766564223B627265616B3B6361736520333A7374617475733D6E6577204461746528292E67657454696D6528292D742B223A2070726F63657373696E672072657175657374223B627265616B3B6361736520343A7374617475733D22636F6D706C657465223B74696D696E675B315D3D22633A222B286E6577204461746528292E67657454696D6528292D74293B6966287868722E7374617475733D3D323030297B706172656E742E6C6F636174696F6E2E72656C6F616428297D627265616B7D7D3B74696D696E675B305D3D22733A222B286E6577204461746528292E67657454696D6528292D74293B7868722E6F70656E2822474554222C222F5F496E63617073756C615F5265736F757263653F535748414E45444C3D313134343931363832343539363439303734382C31343832303638373230383234383031333036362C31343339363333303438303432363939313530392C3735353431222C66616C7365293B7868722E73656E64286E756C6C297D63617463682863297B7374617475732B3D6E6577204461746528292E67657454696D6528292D742B2220696E6361705F6578633A20222B633B646F63756D656E742E637265617465456C656D656E742822696D6722292E7372633D222F5F496E63617073756C615F5265736F757263653F4553324C555243543D363726743D373826643D222B656E636F6465555249436F6D706F6E656E74287374617475732B222028222B74696D696E672E6A6F696E28292B222922297D3B";for (var i=0;i<b.length;i+=2){z=z+parseInt(b.substring(i, i+2), 16)+",";}z = z.substring(0,z.length-1); eval(eval(\'String.fromCharCode(\'+z+\')\'));})();\r\n</script></head>\r\n<body>\r\n<iframe style="display:none;visibility:hidden;" src="//content.incapsula.com/jsTest.html" id="gaIframe"></iframe>\r\n</body></html>'

I hope someone can help me find the problem. Thank you all~
