
Commit d23ea37

urllib and urllib2

Change-Id: I86cf915afa565735874e9a96b945ef672b8a6398
1 parent 8c3b310 commit d23ea37

14 files changed: +712 -0 lines changed

README.md

Lines changed: 4 additions & 0 deletions
@@ -295,6 +295,10 @@
 
 ## [abc](content/abc.md)
 
+## [urllib](content/urllib.md)
+
+## [urllib2](content/urllib2.md)
+
 ## [tools](content/tools.md)
 
 ## [Other_thing](content/other_thing.md)

code/urllib2_basic_auth.py

Lines changed: 31 additions & 0 deletions
# -*- coding: utf-8 -*-

import urllib2


# first try: no credentials, so the request is rejected with HTTP 401
try:
    resp = urllib2.urlopen("http://httpbin.org/basic-auth/admin/password")
    print resp.read()
except Exception as e:
    print "error", e


# with basic auth
basic_auth = urllib2.HTTPBasicAuthHandler()
basic_auth.add_password(
    realm="Fake Realm",                                   # authentication realm
    uri="http://httpbin.org/basic-auth/admin/password",   # protected resource URI
    user='admin',                                         # username
    passwd='password'                                     # password
)

opener = urllib2.build_opener(basic_auth)
urllib2.install_opener(opener)


# second try: the installed opener answers the 401 challenge with the credentials
try:
    resp = urllib2.urlopen("http://httpbin.org/basic-auth/admin/password")
    print resp.read()
except Exception as e:
    print "error", e

code/urllib2_cookies.py

Lines changed: 62 additions & 0 deletions
# -*- coding: utf-8 -*-

import cookielib
import urllib2


def creat_cookie(name, value, **kwargs):
    # build a cookielib.Cookie with sensible defaults for every required field
    result = {
        'version': 0,
        'name': name,
        'value': value,
        'port': None,
        'domain': '',
        'path': '/',
        'secure': False,
        'expires': None,
        'discard': True,
        'comment': None,
        'comment_url': None,
        'rest': {'HttpOnly': None},
        'rfc2109': False,
    }
    result.update(kwargs)
    result['port_specified'] = bool(result['port'])
    result['domain_specified'] = bool(result['domain'])
    result['domain_initial_dot'] = result['domain'].startswith('.')
    result['path_specified'] = bool(result['path'])

    return cookielib.Cookie(**result)


def header():
    # send a cookie by setting the Cookie header on the request directly
    cookie_jar = cookielib.CookieJar()
    cookie_handler = urllib2.HTTPCookieProcessor(cookie_jar)
    opener = urllib2.build_opener(cookie_handler)

    request = urllib2.Request("http://httpbin.org/cookies")
    request.add_header("Cookie", "name=windard")

    resp = opener.open(request)
    print resp.read()

    for cookie in cookie_jar:
        print cookie.name, ":", cookie.value


def main():
    # preload cookies into the jar, then let the opener attach them
    cookie_jar = cookielib.CookieJar()
    cookie_handler = urllib2.HTTPCookieProcessor(cookie_jar)
    opener = urllib2.build_opener(cookie_handler)
    cookie_jar.set_cookie(creat_cookie("name", "Windard"))
    cookie_jar.set_cookie(creat_cookie("location", "Shanghai"))

    resp = opener.open("http://httpbin.org/cookies")
    print resp.read()

    for cookie in cookie_jar:
        print cookie.name, ":", cookie.value


if __name__ == '__main__':
    header()
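
cookielib can also persist cookies between runs. A minimal sketch, assuming a writable cookies.txt path (the filename is illustrative) and httpbin's /cookies/set endpoint:

# -*- coding: utf-8 -*-
# Sketch: persisting cookies to disk with cookielib.MozillaCookieJar.

import cookielib
import urllib2

cookie_jar = cookielib.MozillaCookieJar("cookies.txt")  # illustrative filename
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie_jar))

resp = opener.open("http://httpbin.org/cookies/set?name=windard")
print resp.read()

# write the jar to disk; ignore_discard keeps session cookies as well
cookie_jar.save(ignore_discard=True)

# a later run can restore the cookies before making requests
cookie_jar.load("cookies.txt", ignore_discard=True)
for cookie in cookie_jar:
    print cookie.name, ":", cookie.value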

code/urllib2_get.py

Lines changed: 18 additions & 0 deletions
# -*- coding: utf-8 -*-

import urllib2


# send the request
resp = urllib2.urlopen("http://httpbin.org/get")

# the return value is a file-like object; read the body with read()
print resp.read()

print "HTTP code", resp.code
print "HTTP msg", resp.msg
print "HTTP Status Code:", resp.getcode()
print "HTTP Request Url:", resp.geturl()
print "HTTP Response Headers:"
print resp.info()
print "HTTP Content-Length:", resp.info().get("Content-Length")

code/urllib2_headers.py

Lines changed: 22 additions & 0 deletions
# -*- coding: utf-8 -*-

import urllib
import urllib2


headers = {
    "From": "China",
    "Year": "2020",
}

data = {
    "name": "windard",
    "country": "china",
}

request = urllib2.Request("http://httpbin.org/post", headers=headers)
request.add_data(urllib.urlencode(data))
request.add_header("To", "USA")

resp = urllib2.urlopen(request)
print resp.read()
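
Attaching a body with add_data is what turns the request into a POST: urllib2 picks the method based on whether the request carries data. A minimal self-contained sketch of that behaviour:

# -*- coding: utf-8 -*-
# Sketch: Request.get_method() reports GET until data is attached, then POST.

import urllib
import urllib2

request = urllib2.Request("http://httpbin.org/post")
print request.get_method()    # GET

request.add_data(urllib.urlencode({"name": "windard"}))
print request.get_method()    # POST
print request.get_data()      # name=windard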

code/urllib2_proxy.py

Lines changed: 27 additions & 0 deletions
# -*- coding: utf-8 -*-

import urllib
import urllib2


def urllib_proxy():
    resp = urllib.urlopen("http://httpbin.org/ip", proxies={
        "http": "http://117.69.152.162:8691"
    })
    print resp.read()


def urllib2_proxy():
    proxy_handler = urllib2.ProxyHandler({
        "http": "http://117.69.152.162:8691"
    })
    opener = urllib2.build_opener(proxy_handler)
    urllib2.install_opener(opener)

    resp = urllib2.urlopen("http://httpbin.org/ip")
    print resp.read()


if __name__ == '__main__':
    urllib_proxy()
    urllib2_proxy()
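
install_opener swaps the global opener used by urllib2.urlopen, so every later request goes through the proxy. When only one call should be proxied, the opener can be used directly; a minimal sketch, reusing the same (possibly no longer reachable) proxy address:

# -*- coding: utf-8 -*-
# Sketch: proxy a single request without installing a global opener.

import urllib2

proxy_handler = urllib2.ProxyHandler({"http": "http://117.69.152.162:8691"})
opener = urllib2.build_opener(proxy_handler)

# only this call uses the proxy; urllib2.urlopen elsewhere is unaffected
resp = opener.open("http://httpbin.org/ip")
print resp.read()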

code/urllib_data.py

Lines changed: 16 additions & 0 deletions
# -*- coding: utf-8 -*-

import urllib

data = {
    "name": "windard",
    "country": "china",
}

data_string = urllib.urlencode(data)

# send the request; passing a data string makes urllib issue a POST
resp = urllib.urlopen("http://httpbin.org/post", data_string)

# the return value is a file-like object; read the body with read()
print resp.read()
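
urlencode also accepts a list of key/value pairs, and with doseq=True a sequence value expands into one key=value pair per element; a minimal sketch:

# -*- coding: utf-8 -*-
# Sketch: urllib.urlencode with repeated keys and sequence values.

import urllib

pairs = [("name", "windard"), ("tag", "python"), ("tag", "urllib")]
print urllib.urlencode(pairs)               # name=windard&tag=python&tag=urllib

data = {"tag": ["python", "urllib"]}
print urllib.urlencode(data, doseq=True)    # tag=python&tag=urllib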

code/urllib_download.py

Lines changed: 24 additions & 0 deletions
# -*- coding: utf-8 -*-

import urllib

url = "http://techslides.com/demos/sample-videos/small.mp4"
filename = url.split('/')[-1]

download_name, headers = urllib.urlretrieve(url, filename)

print "filename: ", download_name
print "headers : "
print headers

urllib.urlcleanup()


# download again by streaming the response in 1 KB chunks
resp = urllib.urlopen(url)
with open(filename, "wb") as f:  # binary mode, since the payload is an mp4
    chunk = resp.read(1024)
    while chunk:
        f.write(chunk)
        chunk = resp.read(1024)

print "filename: ", filename

code/urllib_get.py

Lines changed: 15 additions & 0 deletions
# -*- coding: utf-8 -*-

import urllib

# send the request
resp = urllib.urlopen("http://httpbin.org/get")

# the return value is a file-like object; read the body with read()
print resp.read()

print "HTTP Status Code:", resp.getcode()
print "HTTP Request Url:", resp.geturl()
print "HTTP Response Headers:"
print resp.info()
print "HTTP Content-Length:", resp.info().get("Content-Length")

code/urllib_param.py

Lines changed: 17 additions & 0 deletions
# -*- coding: utf-8 -*-

import urllib


param = {
    "name": "windard",
    "country": "china",
}

query_string = urllib.urlencode(param)

# send the request with the encoded query string appended to the URL
resp = urllib.urlopen("http://httpbin.org/get" + "?" + query_string)

# the return value is a file-like object; read the body with read()
print resp.read()
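
When a single value needs escaping rather than a whole dict, urllib.quote and urllib.quote_plus do the percent-encoding directly (quote_plus also turns spaces into +, matching what urlencode produces); a minimal sketch:

# -*- coding: utf-8 -*-
# Sketch: escaping individual values with urllib.quote / urllib.quote_plus.

import urllib

value = "hello world/china"

print urllib.quote(value)              # hello%20world/china  ('/' is safe by default)
print urllib.quote(value, safe="")     # hello%20world%2Fchina
print urllib.quote_plus(value)         # hello+world%2Fchina
print urllib.unquote_plus("hello+world%2Fchina")   # hello world/china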
