解析庫-LXML
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple lxml
使用解析規(guī)則:XPath
| 表達(dá)式 | 描述 |
|---|---|
| nodename | 選取此節(jié)點的所有子節(jié)點 |
| / | 從當(dāng)前節(jié)點,選取直接子節(jié)點 |
| // | 從當(dāng)前節(jié)點,選取所有子孫節(jié)點 |
| . | 選取當(dāng)前節(jié)點 |
| .. | 選取當(dāng)前節(jié)點的父節(jié)點 |
| @ | 選取屬性 |
構(gòu)建實例
從文本構(gòu)建
def load_text():
    """Build an element tree from an in-memory HTML string and print it.

    Demonstrates etree.HTML(): the parser auto-repairs the fragment
    (adds html/body wrappers, closes the unclosed fifth <li>).
    """
    text = '''
<div>
<ul>
<li class="item-0"><a href="link1.html">first item</a></li>
<li class="item-1"><a href="link2.html">second item</a></li>
<li class="item-inactive"><a href="link3.html">third item</a></li>
<li class="item-1"><a href="link4.html">fourth item</a></li>
<li class="item-0"><a href="link5.html">fifth item</a>
</ul>
</div>
'''
    html = etree.HTML(text)
    result = etree.tostring(html)
    # tostring() returns bytes; decode before printing.
    print(result.decode("utf-8"))
從文件構(gòu)建
def load_text2():
    """Parse ./test.html from disk with an HTML parser and print the tree."""
    # HTMLParser() is needed because parse() defaults to strict XML parsing.
    html = etree.parse("./test.html", etree.HTMLParser())
    result = etree.tostring(html)
    # tostring() returns bytes; decode before printing.
    print(result.decode("utf-8"))
注意
etree.tostring()返回的是bytes類型,需要調(diào)用decode方法將其轉(zhuǎn)換成str類型
經(jīng)過處理后的html代碼,會被自動修復(fù),添加缺少的標(biāo)簽
選中所有節(jié)點
要選中某個類型的所有節(jié)點,以 // 開頭就可以了
# Select every element node ("//" was mangled to "http://" by the renderer).
html = etree.parse("./test.html", etree.HTMLParser())
results = html.xpath("//*")
print(results)
# Select all li nodes.
results = html.xpath("//li")
print(results)
print(results[0])
選中所有子節(jié)點
# Select direct children: a elements immediately under each li.
html = etree.parse("./test.html", etree.HTMLParser())
result = html.xpath("//li/a")
print(result)
選中所有子孫節(jié)點
# Select all descendants: a elements anywhere under each li.
result = html.xpath("//li//a")
print(result)
選取父親節(jié)點
def select_parent_nodes():
    """Demonstrate selecting parents with '..' and the parent:: axis."""
    html = etree.parse("./test.html", etree.HTMLParser())
    # Direct parent of the a element whose href is 'link4.html'.
    result = html.xpath("//a[@href='link4.html']/..")
    print(result)
    # The class attribute of that parent.
    result = html.xpath("//a[@href='link4.html']/../@class")
    print(result)
    # Same selection using the explicit parent:: axis.
    result = html.xpath("//a[@href='link4.html']/parent::*/@class")
    print(result)
test.html
<div>
<ul>
<li class="item-0"><a href="link1.html">first item</a></li>
<li class="item-1"><a href="link2.html">second item</a></li>
<li class="item-inactive"><a href="link3.html">third item</a></li>
<li class="item-1"><a href="link4.html">fourth item</a></li>
<li class="item-0"><a href="link5.html">fifth item</a>
</ul>
</div>
按屬性選擇
def select_node_by_attrs():
    """Select li nodes whose class attribute is exactly 'item-0'."""
    html = etree.parse("./test.html", etree.HTMLParser())
    result = html.xpath("//li[@class='item-0']")
    print(result)
獲取節(jié)點文本
def get_text():
    """Contrast /text() (node's own text) with //text() (descendants too)."""
    html = etree.parse("./test.html", etree.HTMLParser())
    print(etree.tostring(html).decode("utf-8"))
    # /text() returns only the li's own text content.
    result = html.xpath("//li[@class='item-0']/text()")
    print(result)  # ['\r\n\t']
    # //text() also collects text from every descendant.
    result = html.xpath("//li[@class='item-0']//text()")
    print(result)  # ['first item', 'fifth item', '\r\n\t']
獲取屬性的值
def get_attr_content():
    """Extract attribute values with @: the href of every a under a li."""
    html = etree.parse("./test.html", etree.HTMLParser())
    result = html.xpath("//li/a/@href")
    print(result)
多值屬性匹配
使用contains函數(shù)
def get_multi_attr_content():
    """Match a multi-valued class attribute with contains()."""
    text = '''
<li class="li li-first"><a href="link1.html">first item</a></li>
'''
    html = etree.HTML(text)
    # Exact match fails: the attribute value is "li li-first", not "li".
    result = html.xpath("//li[@class='li']/a/text()")
    print(result)  # []
    # contains() matches any class list that includes 'li'.
    result = html.xpath("//li[contains(@class, 'li')]/a/text()")
    print(result)  # ['first item']
多屬性匹配
def get_multi_attr_match():
    """Combine several attribute predicates with the XPath 'and' operator."""
    text = '''
<li class="li li-first" name="item"><a href="link1.html">first item</a></li>
'''
    html = etree.HTML(text)
    result = html.xpath("//li[contains(@class, 'li') and @name='item']/a/text()")
    print(result)  # ['first item']
and 是xpath中的運(yùn)算符
| 運(yùn)算符 | 描述 | 實例 |
|---|---|---|
| or | 或 | age=19 or age=20 |
| and | 與 | age>19 and age<21 |
| mod | 取余 | 5 mod 2 |
| \| | 計算兩個節(jié)點集 | //book \| //cd 返回所有擁有book和cd元素的節(jié)點集 |
| + | 加法 | 6+4 |
| - | 減法 | 6-4 |
| * | 乘法 | 6*4 |
| div | 除法 | 6 div 3 |
| = | 等于 | age=19 |
| != | 不等于 | age!=19 |
| < | 小于 | age<19 |
| <= | 小于或等于 | age<=19 |
| > | 大于 | age>19 |
| >= | 大于或等于 | age>=19 |
按順序選取節(jié)點
def select_node_by_order():
    """Select nodes positionally with [n], last() and position()."""
    html = etree.parse("./test.html", etree.HTMLParser())
    # First li (XPath positions are 1-based).
    result = html.xpath("//li[1]/a/text()")
    print(result)
    # Last li.
    result = html.xpath("//li[last()]/a/text()")
    print(result)
    # First two li nodes.
    result = html.xpath("//li[position()<3]/a/text()")
    print(result)
    # Third li counting from the end.
    result = html.xpath("//li[last()-2]/a/text()")
    print(result)

image.png
各種查詢
def select_node_by_axies():
    """Demonstrate XPath axes: ancestor, attribute, child, descendant, following."""
    text = '''
<div>
<ul>
<li class="item-0"><a href="link1.html"><span>first item</span></a></li>
<li class="item-1"><a href="link2.html">second item</a></li>
<li class="item-inactive"><a href="link3.html">third item</a></li>
<li class="item-1"><a href="link4.html">fourth item</a></li>
<li class="item-0"><a href="link5.html">fifth item</a>
</ul>
</div>
'''
    html = etree.HTML(text)
    # All ancestors of the first li.
    result = html.xpath("//li[1]/ancestor::*")
    print(result)
    # Prints [<Element html at ...>, <Element body at ...>, <Element div at ...>, <Element ul at ...>]
    # Only ancestors that are div elements.
    result = html.xpath("//li[1]/ancestor::div")
    print(result)
    # All attribute values of the first li.
    result = html.xpath("//li[1]/attribute::*")
    print(result)
    # Direct children matching a predicate.
    result = html.xpath("//li[1]/child::a[@href='link1.html']")
    print(result)
    # Every descendant element.
    result = html.xpath("//li[1]/descendant::*")
    print(result)
    # Only span descendants.
    result = html.xpath("//li[1]/descendant::span")
    print(result)
    # Everything after this node in document order (later siblings and their subtrees).
    result = html.xpath("//li[1]/following::*")
    print(result)
    # Just the first of those following nodes.
    result = html.xpath("//li[1]/following::*[1]")
    print(result)
    # Only the following siblings themselves.
    result = html.xpath("//li[1]/following-sibling::*")
    print(result)

image.png
爬取百度貼吧圖片
校花吧
獲取網(wǎng)頁源碼
BaiduTieBa.py
import requests
class BaiduTieBa:
    """Download listing pages of a Baidu Tieba forum and save them as HTML files."""

    def __init__(self, name, pn):
        """name: forum keyword; pn: number of pages to fetch (50 posts per page)."""
        self.name = name
        self.url = 'http://tieba.baidu.com/f?kw={}&ie=utf-8&pn='.format(name)
        # Deliberately old IE user agent — per the doc's closing note, the page
        # served to legacy IE is the one this tutorial scrapes.
        self.headers = {
            'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)'
        }
        # Tieba paginates by post offset: pn=0, 50, 100, ...
        self.url_list = [self.url + str(n * 50) for n in range(pn)]
        print(self.url_list)

    def get_data(self, url):
        """Fetch one listing page and return its raw bytes."""
        r = requests.get(url, headers=self.headers)
        return r.content

    def save_data(self, data, num):
        """Write raw page bytes to <name>_<num>.html."""
        file_name = self.name + "_" + str(num) + ".html"
        with open(file_name, 'wb') as f:
            f.write(data)

    def run(self):
        """Download and save every page in url_list."""
        # enumerate() replaces the original list.index(url) lookup, which was
        # O(n) per iteration and wrong if the list ever contained duplicates.
        for num, url in enumerate(self.url_list):
            data = self.get_data(url)
            self.save_data(data, num)
import sys

if __name__ == '__main__':
    # Usage: python BaiduTieBa.py <forum-name> <page-count>
    name = sys.argv[1]
    pn = int(sys.argv[2])
    baidu = BaiduTieBa(name, pn)
    baidu.run()
輸入命令行
python BaiduTieBa.py 校花 5

image.png
get_photo.py
from lxml import etree
import requests
class DownloadPhoto:
    """Extract image URLs from saved Tieba listing pages and download them."""

    def __init__(self):
        pass

    def down_load_img(self, url):
        """Download one image into ./photo/, keeping its remote file name."""
        r = requests.get(url)
        # Everything after the last '/' is the remote file name.
        index = url.rfind("/")
        file_name = url[index + 1:]
        save_name = './photo/' + file_name
        print("下載圖片" + save_name)
        with open(save_name, 'wb') as f:
            f.write(r.content)

    def parse_photo_url(self, page):
        """Parse a saved listing page and download every thumbnail image."""
        html = etree.parse(page, etree.HTMLParser())
        # The image URL is stored in the img tag's bpic attribute on
        # thumbnail links ("//" was mangled to "http://" in the original).
        nodes = html.xpath("//a[contains(@class,'thumbnail')]/img/@bpic")
        for node in nodes:
            self.down_load_img(node)
if __name__ == '__main__':
    down = DownloadPhoto()
    # Pages previously saved by BaiduTieBa.py. The first four names were
    # mojibake-garbled in the original text; restored from the intact fifth.
    file_name = ["校花_0.html", "校花_1.html", "校花_2.html", "校花_3.html", "校花_4.html"]
    for url in file_name:
        down.parse_photo_url(url)
run此方法

image.png

image.png
注意使用老版本的請求頭,可以在ie中調(diào)整版本獲取