"""
urllib.request.urlopen(url,data,timeout)
"""
# from urllib.request import urlopen
# import urllib.parse
# from urllib.error import URLError
# import socket
# url = "http://httpbin.org/post"
# data = bytes(urllib.parse.urlencode({'name': 'dc'}), encoding="utf-8")
# try:
#     response = urlopen(url=url, data=data, timeout=5)
# except URLError as e:
#     # e.reason is a socket.timeout instance when the request timed out
#     if isinstance(e.reason, socket.timeout):
#         print("TIME OUT")
# else:
#     # else binds to the try: response only exists when urlopen succeeded
#     print(response.read().decode("utf-8"))
"""
urllib.request.Request(url,data,headers,method)
"""
# from urllib.request import Request,urlopen
# import urllib.parse
# url = "http://httpbin.org/post"
# data = bytes(urllib.parse.urlencode({'name': 'dc'}), encoding="utf-8")
# headers = {
# 'User-Agent': 'Mozilla/4.0(compatible;Msie5.5;Windows NT)'
# }
# req = Request(url=url, data=data, headers=headers, method="POST")
# reponse = urlopen(req)
# print(reponse.read().decode("utf-8"))
"""
Handler 验证 代理 Cookies build_opener
"""
"""
验证
"""
# from urllib.request import HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, build_opener
# import urllib.error
# url = "https://www.zhihu.com/signup?next=%2F"
# name = "17380646919"
# value = "dc201637"
# p = HTTPPasswordMgrWithDefaultRealm()
# p.add_password(None, url, name, value)
# handler = HTTPBasicAuthHandler(p)
# opener = build_opener(handler)
# try:
#     response = opener.open(url)
# except urllib.error.URLError as e:
#     print(e.reason)
# else:
#     print(response.read().decode("utf-8"))
"""
代理
"""
# from urllib.error import URLError
# from urllib.request import ProxyHandler, build_opener
# url = "http://www.baidu.com"
# proxy_handler = ProxyHandler({
#     'http': 'http://127.0.0.1:9743',
#     'https': 'https://127.0.0.1:9743'
# })
# opener = build_opener(proxy_handler)
# try:
#     response = opener.open(url)
#     print(response.read().decode("utf-8"))
# except URLError as e:
#     print(e.reason)
"""
cookies
"""
"""
打印cookies的值
"""
# from urllib.request import HTTPCookieProcessor,build_opener
# import http.cookiejar
# url = "http://www.baidu.com"
# cookie = http.cookiejar.CookieJar()
# handler = HTTPCookieProcessor(cookie)
# opener = build_opener(handler)
# reponse = opener.open(url)
# for item in cookie:
#     print(item.name + "=" + item.value)
"""
保存cookies的值
"""
# from urllib.request import HTTPCookieProcessor,build_opener
# import http.cookiejar
# url = "http://www.baidu.com"
# filename = "cookies.txt"
# # cookie = http.cookiejar.MozillaCookieJar(filename)
# cookie = http.cookiejar.LWPCookieJar(filename)
# handler = HTTPCookieProcessor(cookie)
# opener = build_opener(handler)
# reponse = opener.open(url)
# cookie.save(ignore_discard=True, ignore_expires=True)
"""
读取cookies的值并应用
"""
# from urllib.request import HTTPCookieProcessor,build_opener
# import http.cookiejar
# url = "http://www.baidu.com"
# cookie = http.cookiejar.LWPCookieJar()
# cookie.load('cookies.txt',ignore_discard=True,ignore_expires=True)
# handler = HTTPCookieProcessor(cookie)
# opener = build_opener(handler)
# reponse = opener.open(url)
# print(reponse.read().decode("utf-8"))
"""
异常处理 URLError、HTTPError
"""
# from urllib import error, request
# try:
#     response = request.urlopen("http://cuiqingcai.com/index.htm")
# except error.HTTPError as e:
#     # HTTPError is a subclass of URLError, so it must be caught first
#     print(e.reason, e.code, e.headers, sep='\n')
# except error.URLError as e:
#     print(e.reason)
# else:
#     print('no worry')
"""
reason属性返回一个对象
"""
# import urllib.request
# import socket
# from urllib.error import HTTPError, URLError
# try:
#     response = urllib.request.urlopen("https://www.baidu.com", timeout=0.1)
# except URLError as e:
#     print(type(e.reason))
#     # e.reason may be an object (e.g. socket.timeout) rather than a string
#     if isinstance(e.reason, socket.timeout):
#         print("TIMEOUT")
"""
解析链接
"""

  

25urllib.py的更多相关文章

  1. python调用py中rar的路径问题。

    1.python调用py,在py中的os.getcwd()获取的不是py的路径,可以通过os.path.split(os.path.realpath(__file__))[0]来获取py的路径. 2. ...

  2. Python导入其他文件中的.py文件 即模块

    import sys sys.path.append("路径") import .py文件

  3. import renumber.py in pymol

    cp renumber.py /usr/local/lib/python2.7/dist-packages/pymol import renumber or run /path/to/renumber ...

  4. python gettitle.py

    #!/usr/bin/env python # coding=utf-8 import threading import requests import Queue import sys import ...

  5. 解决 odoo.py: error: option --addons-path: The addons-path 'local-addons/' does not seem to a be a valid Addons Directory!

    情况说明 odoo源文件路径-/odoo-dev/odoo/: 我的模块插件路径 ~/odoo-dev/local-addons/my-module 在my-module中创建了__init__.py ...

  6. caffe机器学习自带图片分类器classify.py实现输出预测结果的概率及caffe的web_demo例子运行实例

    caffe机器学习环境搭建及python接口编译参见我的上一篇博客:机器学习caffe环境搭建--redhat7.1和caffe的python接口编译 1.运行caffe图片分类器python接口 还 ...

  7. 【转】Windows下使用libsvm中的grid.py和easy.py进行参数调优

    libsvm中有进行参数调优的工具grid.py和easy.py可以使用,这些工具可以帮助我们选择更好的参数,减少自己参数选优带来的烦扰. 所需工具:libsvm.gnuplot 本机环境:Windo ...

  8. MySqlNDB使用自带的ndb_setup.py安装集群

    在用Mysql做集群时,使用Mysql的NDB版本更易于集群的扩展,稳定和数据的实时性. 我们可以使用Mysql自带的工具进行集群安装与管理:ndb_setup.py.位于Mysql的安装目录bin下 ...

  9. 将做好的py文件打包成模块,供别人安装调用

    现在要将写完的3个py文件,打包. 步骤: 1.新建一个文件夹setup(名字随便取),在setup文件夹下,再新建一个文件夹financeapi. 2.将上面4个py文件拷贝至financeapi文 ...

随机推荐

  1. Java nio socket与as3 socket(粘包解码)连接的应用实例

    对Java nio socket与as3 socket连接的简单应用 <ignore_js_op>Java nio socket与as3 socket连接的应用实例.rar (9.61 K ...

  2. 【洛谷P3390】矩阵快速幂

    矩阵快速幂 题目描述 矩阵乘法: A[n*m]*B[m*k]=C[n*k]; C[i][j]=sum(A[i][1~n]+B[1~n][j]) 为了便于赋值和定义,我们定义一个结构体储存矩阵: str ...

  3. FTP服务安装及使用

    准备工作:一台服务器.我这里使用的是阿里云的ECS. 环境使用的是:windows 2008 r2 用途:FTP是用来进行文件传输的,我们可以把这个目录在IIS上配置成发布的网站,我们在本地只用把我们 ...

  4. 前端之HTML和CSS

    html概述及html文档基本结构 html概述 HTML是 HyperText Mark-up Language 的首字母简写,意思是超文本标记语言,超文本指的是超链接,标记指的是标签,是一种用来制 ...

  5. React路由-进阶篇

    路由进阶 1.多级路由,和之前的思想一样,在子路由里面继续写Route,继续挂载组件,就可以实现多级路由 比如这样:class Food extends Component{ render() { r ...

  6. ethereum(以太坊)(十)--函数修饰符

    pragma solidity ^0.4.0; contract modifierTest{ uint public v1; uint constant v2 =10; //uint constant ...

  7. Python容器--list, tuple, dict, set

    ## Python 中有四种用于存放数据的序列--list, tuple, dict, set ## list 列表 - 可以存放任意类型数据的有序序列 - 列表可以由零个或多个元素组成,元素之间用逗 ...

  8. emplace_back

    c++11 的 list deque 和 vector 增加了emplace_back函数,相对于push_back函数,它减少了一次类的构造,因此效率更高,推荐使用. #include <li ...

  9. 笔记-scrapy-Request/Response

    笔记-scrapy-Request/Response 1.     简介 Scrapy使用Request和Response来爬取网站. 2.     request class scrapy.http ...

  10. 14 Django的用户认证组件

    用户认证 auth模块 from django.contrib import auth django.contrib.auth中提供了许多方法,这里主要介绍其中的三个: 1.1 .authentica ...