urllib是Python自帶的標(biāo)準(zhǔn)庫(kù),無需安裝,直接可以用。
提供了如下功能:
- 網(wǎng)頁(yè)請(qǐng)求
- 響應(yīng)獲取
- 代理和cookie設(shè)置
- 異常處理
- URL解析
爬蟲所需要的功能,基本上在urllib中都能找到,學(xué)習(xí)這個(gè)標(biāo)準(zhǔn)庫(kù),可以更加深入的理解后面更加便利的requests庫(kù)。
urllib庫(kù)
urlopen 語(yǔ)法
urllib.request.urlopen(url,data=None,[timeout,]*,cafile=None,capath=None,cadefault=False,context=None)
#url:訪問的網(wǎng)址
#data: 以bytes形式提交的請(qǐng)求體數(shù)據(jù)(如form data);傳入data後請(qǐng)求方式自動(dòng)變?yōu)镻OST。header需通過Request對(duì)象設(shè)置,不能放在data中
用法
# request: GET — fetch a page and print the body decoded as UTF-8
import urllib.request

resp = urllib.request.urlopen('http://www.baidu.com')
body = resp.read()
print(body.decode('utf-8'))
# request: POST
# HTTP testing service: http://httpbin.org/
import urllib.parse
import urllib.request

# Passing a bytes body via data= turns the request into a POST.
encoded = urllib.parse.urlencode({'word': 'hello'})
data = bytes(encoded, encoding='utf8')
response = urllib.request.urlopen('http://httpbin.org/post', data=data)
print(response.read())
# Timeout: give up if the server does not answer within 1 second.
import urllib.request

response = urllib.request.urlopen('http://httpbin.org/get', timeout=1)
print(response.read())
import socket
import urllib.request
import urllib.error

# A timed-out request raises URLError whose .reason is a socket.timeout
# instance; inspect the reason to distinguish timeouts from other failures.
# (Fixed: the try/except bodies were not indented in the original.)
try:
    response = urllib.request.urlopen('http://httpbin.org/get', timeout=0.1)
except urllib.error.URLError as e:
    if isinstance(e.reason, socket.timeout):
        print('TIME OUT')
響應(yīng)
# Response type: urlopen returns an http.client.HTTPResponse object.
import urllib.request  # fixed: "urllib.open" is not a real module

# fixed: the URL had a stray third slash ("https:///")
response = urllib.request.urlopen('https://www.python.org')
print(type(response))
# Status code and response headers
import urllib.request

response = urllib.request.urlopen('https://www.python.org')
print(response.status)               # HTTP status code, e.g. 200
print(response.getheaders())         # all headers as (name, value) pairs
print(response.getheader('Server'))  # one header looked up by name
Request
聲明一個(gè)request對(duì)象,該對(duì)象可以包括header等信息,然后用urlopen打開。
# Simple example: wrap the URL in a Request object, then open it.
import urllib.request

# fixed: the class is Request, not Requests
request = urllib.request.Request('https://python.org')
response = urllib.request.urlopen(request)
print(response.read().decode('utf-8'))
# Add headers by passing a dict to Request.
from urllib import request, parse

url = 'http://httpbin.org/post'
headers = {
    # fixed: the two entries were missing a separating comma (SyntaxError)
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
    'Host': 'httpbin.org'
}
# Build the POST form body; urlopen requires it as bytes.
form = {  # renamed from "dict" to avoid shadowing the builtin
    'name': 'Germey'
}
data = bytes(parse.urlencode(form), encoding='utf8')
req = request.Request(url=url, data=data, headers=headers, method='POST')
response = request.urlopen(req)
# fixed: decode() must be applied to read()'s result, inside print()
print(response.read().decode('utf-8'))
# Alternatively, add headers after constructing the Request.
from urllib import request, parse

url = 'http://httpbin.org/post'
form = {  # renamed from "dict" to avoid shadowing the builtin
    'name': 'Germey'
}
# fixed: "data" was never defined in this snippet — encode the form here
data = bytes(parse.urlencode(form), encoding='utf8')
req = request.Request(url=url, data=data, method='POST')
# fixed: method is add_header (typo "add_hader") and it takes two
# positional arguments, not a "key: value" pair
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36')
response = request.urlopen(req)
print(response.read().decode('utf-8'))
Handler:處理更加復(fù)雜的頁(yè)面
官方說明
代理
import urllib.request

# Route all requests through a local proxy.
proxy_handler = urllib.request.ProxyHandler({
    # fixed: missing comma between entries, and the https address used a
    # dot ("127.0.0.1.9743") where the port separator ":" belongs
    'http': 'http://127.0.0.1:9743',
    'https': 'https://127.0.0.1:9743'
})
# fixed: function is build_opener (typo "build_openner")
opener = urllib.request.build_opener(proxy_handler)
response = opener.open('http://www.baidu.com')
print(response.read())
Cookie:客戶端用于記錄用戶身份,維持登錄信息
import http.cookiejar, urllib.request

# Capture cookies set by the server into an in-memory CookieJar.
cookie = http.cookiejar.CookieJar()
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open("http://www.baidu.com")
# fixed: the loop body was not indented in the original
for item in cookie:
    print(item.name + "=" + item.value)
# Save cookies to a text file.
import http.cookiejar, urllib.request

filename = "cookie.txt"
# Several on-disk formats exist; pick ONE of the following jar types
# (the second assignment below overrides the first — they are shown
# together only to illustrate the options).
## Option 1: Mozilla/Netscape cookies.txt format
cookie = http.cookiejar.MozillaCookieJar(filename)
## Option 2: libwww-perl (LWP) format
cookie = http.cookiejar.LWPCookieJar(filename)
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open("http://www.baidu.com")
# fixed: without an explicit save() call the file is never written
cookie.save(ignore_discard=True, ignore_expires=True)
# Load cookies back with the jar type matching the saved format.
import http.cookiejar, urllib.request

cookie = http.cookiejar.LWPCookieJar()
cookie.load('cookie.txt', ignore_discard=True, ignore_expires=True)
# Attach the restored cookies to an opener so they travel with requests.
processor = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(processor)
response = opener.open("http://www.baidu.com")
異常處理
捕獲異常,保證程序穩(wěn)定運(yùn)行
# Visit a page that does not exist and handle the failure gracefully.
from urllib import request, error

# fixed: the try/except bodies were not indented in the original
try:
    response = request.urlopen('http://cuiqingcai.com/index.htm')
except error.URLError as e:
    print(e.reason)
# Catch the more specific HTTPError subclass before the generic URLError.
from urllib import request, error  # fixed: "imort" -> "import"

# fixed: try/except/else bodies were not indented in the original
try:
    response = request.urlopen('http://cuiqingcai.com/index.htm')
except error.HTTPError as e:
    # HTTPError carries the status code and response headers as well.
    print(e.reason, e.code, e.headers, sep='\n')
except error.URLError as e:
    print(e.reason)
else:
    print('Request Successfully')  # fixed: mismatched quote characters
# Inspect e.reason to tell a timeout apart from other URL errors.
import socket
import urllib.request
import urllib.error

# fixed: the try/except bodies were not indented in the original
try:
    response = urllib.request.urlopen('http://httpbin.org/get', timeout=0.1)
except urllib.error.URLError as e:
    if isinstance(e.reason, socket.timeout):
        print('TIME OUT')
URL解析
主要是一個(gè)工具模塊,可用于為爬蟲提供URL。
urlparse:拆分URL
urllib.parse.urlparse(urlstring, scheme='', allow_fragments=True)
# scheme: 協(xié)議類型
# allow_fragments: 是否解析'#'之後的fragment部分(設(shè)為False則將其併入前面的部分)
舉個(gè)例子
# fixed: urlparse lives in the urllib.parse submodule, not in urllib itself
from urllib.parse import urlparse

# Split a URL into its six components (scheme, netloc, path, params,
# query, fragment).
result = urlparse("https://edu.hellobi.com/course/157/play/lesson/2580")
print(result)
##ParseResult(scheme='https', netloc='edu.hellobi.com', path='/course/157/play/lesson/2580', params='', query='', fragment='')
urlunparse:拼接URL,為urlparse的反向操作
from urllib.parse import urlunparse

# The six components in order: scheme, netloc, path, params, query, fragment.
data = ['http', 'www.baidu.com', 'index.html', 'user', 'a=6', 'comment']
# Reassemble them into a single URL string — the inverse of urlparse.
print(urlunparse(data))
urljoin:拼接兩個(gè)URL
以第二個(gè)參數(shù)為主,第二個(gè)URL缺失的部分(協(xié)議、域名等)用第一個(gè)URL補(bǔ)全:
from urllib.parse import urljoin
print(urljoin('http://www.baidu.com', 'FAQ.html'))
## http://www.baidu.com/FAQ.html
urlencode:字典對(duì)象轉(zhuǎn)換成GET請(qǐng)求對(duì)象
from urllib.parse import urlencode

# Serialize a dict of query parameters into "k1=v1&k2=v2" form and
# append it to the base URL to build a GET request URL.
params = {'name': 'germey', 'age': 22}
base_url = 'http://www.baidu.com?'
url = base_url + urlencode(params)
print(url)
最后還有一個(gè)robotparse,解析網(wǎng)站允許爬取的部分。