爬取策略
在爬蟲系統(tǒng)中,待抓取URL隊(duì)列是很重要的一部分。待抓取URL隊(duì)列中的URL以什么樣的順序排列也是一個(gè)很重要的問題,因?yàn)檫@涉及到先抓取哪個(gè)頁面,后抓取哪個(gè)頁面。而決定這些URL排列順序的方法,叫做抓取策略。下面重點(diǎn)介紹幾種常見的抓取策略:
深度優(yōu)先遍歷策略
深度優(yōu)先遍歷策略是指網(wǎng)絡(luò)爬蟲會(huì)從起始頁開始,一個(gè)鏈接一個(gè)鏈接跟蹤下去,處理完這條線路之后再轉(zhuǎn)入下一個(gè)起始頁,繼續(xù)跟蹤鏈接。
import re
import requests
# Browser-like User-Agent: some sites refuse requests that identify
# themselves as a plain script.
header = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36"}
# Captures absolute http(s) targets of <a href="..."> tags.
# NOTE(review): the greedy `.*` parts can over-match on long lines -- confirm.
hrefre = "<a.*href=\"(https?://.*?)\".*>"
def getPage(url, timeout=10):
    """Fetch *url* and return its HTML text.

    :param url: page URL to download
    :param timeout: seconds before the request is aborted; the original
        passed no timeout, so a dead server would hang the crawler forever
    :return: decoded HTML source of the response
    """
    # `header` is the module-level browser-like User-Agent header.
    response = requests.get(url, headers=header, timeout=timeout)
    return response.text
def getUrl(url):
    """Download *url* and return every absolute http(s) link found on it.

    :param url: page to scan
    :return: list of href targets extracted with the module-level regex
    """
    page_source = getPage(url)
    return re.findall(hrefre, page_source)
def deepSpider(url, depth):
    """Depth-first crawl starting at *url*.

    :param url: page to visit (must already have an entry in depthDict)
    :param depth: maximum level to visit; deeper links are ignored
    :return: None
    """
    # Check the limit BEFORE printing: the original printed pages that
    # were already one level past `depth` and only then bailed out.
    if depthDict[url] > depth:
        return
    print("\t\t\t" * depthDict[url], "爬取了第%d級(jí)頁面:%s" % (depthDict[url], url))
    for child in getUrl(url):
        if child not in depthDict:  # depthDict doubles as the visited set
            depthDict[child] = depthDict[url] + 1  # one level deeper
            deepSpider(child, depth)
if __name__ == '__main__':
    # Seed page is level 1; depthDict maps url -> crawl level and
    # doubles as the de-duplication set.
    startUrl = "https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&tn=baidu&wd=島國郵箱"
    depthDict = {startUrl: 1}
    # Follow links at most four levels deep.
    deepSpider(startUrl, 4)
寬度優(yōu)先遍歷策略
寬度優(yōu)先遍歷策略的基本思路是,將新下載網(wǎng)頁中發(fā)現(xiàn)的鏈接直接插入待抓取URL隊(duì)列的末尾。也就是指網(wǎng)絡(luò)爬蟲會(huì)先抓取起始網(wǎng)頁中鏈接的所有網(wǎng)頁,然后再選擇其中的一個(gè)鏈接網(wǎng)頁,繼續(xù)抓取在此網(wǎng)頁中鏈接的所有網(wǎng)頁。還是以上面的圖為例:遍歷路徑:A-B-C-D-E-F-G-H-I
import re
import requests
# Browser-like User-Agent: some sites refuse requests that identify
# themselves as a plain script.
header = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36"}
# Captures absolute http(s) targets of <a href="..."> tags.
hrefre = "<a.*href=\"(https?://.*?)\".*>"
def getUrl(url):
    """Return all absolute http(s) links found on *url*.

    :param url: page to scan
    :return: list of href values
    """
    html = getPage(url)
    # Typical anchor shape: <a data-click="{}" fasdf>...</a>
    urlre = "<a.*href=\"(https?://.*?)\".*>"
    return re.findall(urlre, html)
def getPage(url):
    """Download *url* and return the response body as text.

    :param url: page URL
    :return: HTML source
    """
    response = requests.get(url, headers=header)
    return response.text
def vastSpider(depth):
    """Breadth-first crawl driven by the module-level urlList queue.

    :param depth: maximum level to expand; pages AT this level are
        printed but their links are not followed
    :return: None
    """
    while urlList:
        current = urlList.pop(0)  # FIFO: oldest queued URL first
        level = depthDict[current]
        print("\t\t\t" * level, "抓取了第%d級(jí)頁面:%s" % (level, current))
        if level < depth:
            for link in getUrl(current):
                if link not in depthDict:  # depthDict doubles as the visited set
                    depthDict[link] = level + 1
                    urlList.append(link)
if __name__ == '__main__':
    # depthDict maps url -> crawl level and is also the de-dup set;
    # urlList is the FIFO queue of pages waiting to be fetched.
    starUrl = "https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&tn=baidu&wd=島國郵箱"
    depthDict = {starUrl: 1}
    urlList = [starUrl]
    vastSpider(4)
頁面解析和數(shù)據(jù)提取
一般來講對(duì)我們而言,需要抓取的是某個(gè)網(wǎng)站或者某個(gè)應(yīng)用的內(nèi)容,提取有用的價(jià)值。內(nèi)容一般分為兩部分,非結(jié)構(gòu)化的數(shù)據(jù) 和 結(jié)構(gòu)化的數(shù)據(jù)。
- 非結(jié)構(gòu)化數(shù)據(jù):先有數(shù)據(jù),再有結(jié)構(gòu)。
- 結(jié)構(gòu)化數(shù)據(jù):先有結(jié)構(gòu)、再有數(shù)據(jù)
不同類型的數(shù)據(jù),我們需要采用不同的方式來處理。
- 非結(jié)構(gòu)化的數(shù)據(jù)處理
HTML
- 結(jié)構(gòu)化的數(shù)據(jù)處理
JSON
XML
Beautiful Soup 4.2.0 文檔
https://www.crummy.com/software/BeautifulSoup/bs4/doc/index.zh.html
示例:爬取前程無憂招聘崗位數(shù)量
from bs4 import BeautifulSoup
import requests
def download(url):
    """Fetch a 51job search page and print its job-count element.

    Demonstrates three equivalent extraction approaches on the same
    document: find_all, CSS select, and (commented out) a raw regex.

    :param url: 51job search-result URL (page is gbk encoded)
    """
    headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0);"}
    response = requests.get(url, headers=headers)
    html = response.content.decode('gbk')
    soup = BeautifulSoup(html, 'lxml')

    # Approach 1: find_all by tag + class.
    matches = soup.find_all('div', class_='rt')
    print(matches[0].text)

    # Approach 2: CSS selector; .string yields the sole text child.
    count_text = soup.select('.rt')[0].string
    print(count_text.strip())  # drop surrounding whitespace

    # Approach 3: plain regex on the raw HTML (kept for reference).
    # jobnum_re = '<div class="rt">(.*?)</div>'
    # jobnum_comp = re.compile(jobnum_re, re.S)
    # jobnums = jobnum_comp.findall(html)
    # print(jobnums[0])


download(url="https://search.51job.com/list/000000,000000,0000,00,9,99,python,2,1.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99°reefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare=")
示例:爬取股票基金
import urllib
from urllib import request
from bs4 import BeautifulSoup
stockList = []  # not used in this snippet; kept for interface compatibility


def download(url):
    """Fetch a stockstar fund table and print every row.

    :param url: stockstar fund-list page (gb2312 encoded)
    """
    headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0);"}
    # Use a distinct local name: the original bound `request`, shadowing
    # the imported urllib.request module within this function.
    req = urllib.request.Request(url, headers=headers)  # request with browser UA
    data = urllib.request.urlopen(req).read()  # raw response bytes
    soup = BeautifulSoup(data, "html5lib", from_encoding="gb2312")
    mytable = soup.select("#datalist")
    for line in mytable[0].find_all("tr"):
        print(line.get_text())  # the whole row's text
        print(line.select("td:nth-of-type(3)")[0].text)  # third column only
if __name__ == '__main__':
    # Entry point: crawl a single fund-list page.
    download("http://quote.stockstar.com/fund/stock_3_1_2.html")
練習(xí):爬取騰訊崗位說明
import urllib
from urllib import request
from bs4 import BeautifulSoup
def download(url):
    """Print the bullet-list items from a Tencent job-detail page.

    Shows two equivalent extraction styles (find_all and CSS select);
    both print the texts of the same <li> elements.

    :param url: position_detail.php URL
    """
    headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0);"}
    # Distinct name: the original bound `request`, shadowing the
    # imported urllib.request module.
    req = urllib.request.Request(url, headers=headers)
    page = urllib.request.urlopen(req).read()
    soup = BeautifulSoup(page, "html5lib")
    # (A leftover debug `print(soup)` that dumped the entire parsed
    # document was removed here.)

    # Style 1: find_all by tag + class.
    for bullet_list in soup.find_all("ul", class_="squareli"):
        for item in bullet_list.find_all("li"):
            print(item.string)

    # Style 2: CSS attribute selector, selecting the same elements.
    for bullet_list in soup.select('ul[class="squareli"]'):
        for item in bullet_list.select("li"):
            print(item.get_text())


download("https://hr.tencent.com/position_detail.php?id=43940&keywords=%E7%88%AC%E8%99%AB&tid=0&lid=0")
練習(xí):獲取騰訊崗位列表
import urllib
from urllib import request
from bs4 import BeautifulSoup
def download(url):
    """Print detail link and cell texts for each job row in a Tencent listing.

    :param url: position.php listing URL
    """
    headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0);"}
    # Distinct name: the original bound `request`, shadowing the
    # imported urllib.request module.
    req = urllib.request.Request(url, headers=headers)
    page = urllib.request.urlopen(req).read()
    soup = BeautifulSoup(page, "lxml")
    tables = soup.find_all("table", class_="tablelist")
    # Data rows carry class "even" or "odd"; header/footer rows have neither.
    for row in tables[0].find_all("tr", class_=["even", "odd"]):
        print(row.find_all("td")[0].a["href"])  # detail-page link in first cell
        # The original reused the name `data` for both the table result
        # and this loop variable, rebinding it mid-loop; use a fresh name.
        for cell in row.find_all("td"):
            print(cell.string)


download("https://hr.tencent.com/position.php?keywords=python&lid=0&tid=0#a")
存入數(shù)據(jù)庫
import pymysql
# 存入數(shù)據(jù)庫
# 存入數(shù)據(jù)庫
def save_job(tencent_job_list):
    """Persist crawled Tencent jobs into the `job` table.

    :param tencent_job_list: iterable of dicts with keys
        "name", "address", "type", "num"
    """
    # Connect to the local MySQL instance.
    db = pymysql.connect(host="127.0.0.1", port=3306, user='root',
                         password="root", database='tencent1', charset='utf8')
    try:
        cursor = db.cursor()
        # Parameterized query: the original interpolated values with
        # `"%s" % ...` string formatting, which is an SQL-injection hole
        # and breaks on quotes/percent signs in the scraped data.
        sql = 'insert into job(name, address, type, num) VALUES(%s, %s, %s, %s)'
        for job in tencent_job_list:
            cursor.execute(sql, (job["name"], job["address"], job["type"], job["num"]))
        db.commit()  # one commit for the whole batch
        cursor.close()
    finally:
        # Close the connection even if an insert raises.
        db.close()