
網易云音樂歌單首頁

歌單第二頁鏈接

歌單第三頁鏈接
可以看出只有最后的數字在變化,所以進行url的構造,然後開始爬取
import csv
from lxml import etree
import requests
from multiprocessing.dummy import Pool
import time
# HTTP headers mimicking a desktop Firefox browser.  Referer/Host are
# required by music.163.com or the playlist pages are not served.
headers = {
    'Referer': 'http://music.163.com/',
    'Host': 'music.163.com',
    'User-Agent': 'Mozilla/5.0 (X11: Linux *86_64; rv:38.0) Gecko/20100101 Firefox/38.0 Iceweasel/38.3.0',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
}
# Site root, prepended to the relative playlist hrefs scraped from each page.
wangyi = 'http://music.163.com'
# Listing-page URL template; only the trailing "offset" value changes per page.
base_url = 'http://music.163.com/discover/playlist/?order=hot&cat=%E5%85%A8%E9%83%A8&limit=35&offset={}'
urlList = []  # holds every constructed listing-page URL
def totalPage(total=1300, step=35):
    """Populate the module-level ``urlList`` with one listing URL per page.

    The site paginates ``step`` (35) playlists per page via the ``offset``
    query parameter, so offsets run 0, 35, 70, ... up to (but excluding)
    ``total``.  Defaults reproduce the original hard-coded crawl range.

    total: exclusive upper bound on the offset.
    step:  playlists per page (offset increment).
    """
    urlList.extend(base_url.format(offset) for offset in range(0, total, step))
def getData(url):
    """Fetch one playlist-listing page and write each playlist's name,
    absolute URL and play count as a row via the shared csv ``write``r.

    url: a fully constructed listing-page URL from ``urlList``.
    """
    r = requests.get(url, headers=headers)
    html = etree.HTML(r.text)
    # BUG FIX: the XPath strings began with "http://" — a copy/paste
    # mangling of the leading "//" — so they could never match a node.
    names = html.xpath("//p[@class='dec']/a/text()")
    hrefs = html.xpath("//p[@class='dec']/a/@href")
    play_counts = html.xpath("//span[@class='nb']/text()")
    # zip stops at the shortest list, so a missing play-count can no
    # longer raise IndexError as the index-based loop could.
    for name, href, count in zip(names, hrefs, play_counts):
        write.writerow([name, wangyi + href, count])
    time.sleep(3)  # throttle requests so the IP does not get banned
# --- script entry: crawl every listing page with a 4-worker pool ---------
f = open('Allmusicform.csv', 'a+', newline='', encoding='utf-8')
write = csv.writer(f)  # shared by every getData worker
p = Pool(4)  # multiprocessing.dummy.Pool is a THREAD pool — fine for I/O
try:
    totalPage()            # build urlList
    p.map(getData, urlList)  # fetch all pages concurrently
finally:
    p.close()
    p.join()   # original never joined: wait for workers before closing output
    f.close()  # original leaked the file handle; flush and release it

分別爬取了歌單名稱、url、播放次數