# 【Python爬蟲(chóng)】第十七次作業(yè) (Python crawler — assignment 17)

import requests
import os
from lxml import etree
from multiprocessing import Pool
def get_menu_urls(url):
    """Collect every paginated category-list URL from the ygdy8 home page.

    Scrapes the first navigation menu, skips the '經(jīng)典影片' (classics)
    entry, reads each category's total page count from its list page,
    and expands the category into one URL per page.

    :param url: site root URL, e.g. 'http://www.ygdy8.com'
    :return: list of dicts, each with keys 'menu' (category name) and
             'page' (URL of one list page in that category)
    """
    resp = requests.get(url)
    # The site serves a legacy Chinese encoding; requests guesses wrong
    # without this, producing mojibake.
    resp.encoding = 'gb2312'
    selector = etree.HTML(resp.text)
    links = selector.xpath('//div[@class="contain"][1]/ul/li[position()<10]/a')
    url_list = []
    for link in links:
        texts = link.xpath('text()')
        hrefs = link.xpath('@href')
        # Guard clause replaces the original `if ...: pass / else:` shape.
        # Also check hrefs — the original only checked texts and would
        # raise IndexError on an anchor with text but no href.
        if not texts or not hrefs or texts[0] == '經(jīng)典影片':
            continue
        menu_url = url + hrefs[0]
        print(texts[0], menu_url)
        resp2 = requests.get(menu_url)
        resp2.encoding = 'gb2312'
        selector2 = etree.HTML(resp2.text)
        # Pagination banner reads like '共123頁(yè)/...'; strip down to the count.
        page_total = selector2.xpath(
            '//div[@class="co_content8"]/div[@class="x"]//text()'
        )[1].split('/')[0].replace('共', '').replace('頁(yè)', '')
        print(page_total)
        # First pager link is like 'list_23_2.html'; dropping '2.html'
        # leaves the reusable prefix 'list_23_'.
        list_id = selector2.xpath(
            '//div[@class="co_content8"]/div[@class="x"]//a/@href'
        )[0].replace('2.html', '')
        print(list_id)
        for page_no in range(1, int(page_total) + 1):
            page_url = menu_url.replace('index', list_id + str(page_no))
            url_list.append({'menu': texts[0], 'page': page_url})
    return url_list
def get_source(item):
    """Download one category list page and save every movie's links.

    For each movie linked on the list page, fetch its detail page and
    write the list-page URL, movie title, and download link(s) into
    '<script dir>/<category>/<title>.txt'.

    :param item: dict with 'page' (list-page URL) and 'menu' (category
                 name) as produced by get_menu_urls
    """
    page_url = item['page']
    menu_name = item['menu']
    # Results are stored next to this script, one directory per category.
    parent_dir = os.path.dirname(os.path.abspath(__file__))
    menu_dir = os.path.join(parent_dir, menu_name)
    print(menu_dir)
    # exist_ok avoids the mkdir race when several pool workers hit the
    # same new category at once. The original's os.chdir() is dropped:
    # file paths below are absolute, and chdir mutates worker-wide state.
    os.makedirs(menu_dir, exist_ok=True)
    resp = requests.get(page_url)
    resp.encoding = 'gb2312'
    selector = etree.HTML(resp.text)
    detail_hrefs = selector.xpath(
        '//div[@class="co_content8"]//a[@class="ulink"]/@href')
    for href in detail_hrefs:
        # BUG FIX: in the original, everything below sat OUTSIDE this
        # loop, so only the LAST movie on each page was ever fetched and
        # saved (and an empty page raised NameError on movie_url).
        movie_url = 'http://www.ygdy8.com' + href
        resp2 = requests.get(movie_url)
        resp2.encoding = 'gb2312'
        selector2 = etree.HTML(resp2.text)
        titles = selector2.xpath('//div[@class="title_all"]/h1/font/text()')
        if not titles:
            continue
        movie_name = titles[0]
        # Title looks like '2018年...《Name》...'; '/' is illegal in
        # filenames, so swap it out. Fall back to the raw title when the
        # 《》 brackets are absent (original raised IndexError there).
        if '《' in movie_name and '》' in movie_name:
            short_name = movie_name.split('《')[1].split('》')[0].replace('/', '-')
        else:
            short_name = movie_name.replace('/', '-')
        # BUG FIX: original built short_name + 'txt' (missing the dot).
        file_name = os.path.join(menu_dir, short_name + '.txt')
        movie_sources = selector2.xpath(
            '//td[@style="WORD-WRAP: break-word"]/a/@href')
        # Skip movies without download links instead of leaving an
        # empty file behind.
        if not movie_sources:
            continue
        with open(file_name, 'w', encoding='utf-8') as file:
            for movie_source in movie_sources:
                print(page_url, movie_url, movie_name, movie_source)
                file.write(page_url + '\n')
                file.write(movie_name + '\n')
                file.write(movie_source + '\n')
if __name__ == '__main__':
    root_url = 'http://www.ygdy8.com'
    url_list = get_menu_urls(root_url)
    # Pool as a context manager terminates and joins the workers on
    # exit — the original bare Pool(4) was never closed or joined.
    with Pool(4) as pool:
        pool.map(get_source, url_list)
最后編輯于
著作權(quán)歸作者所有,轉(zhuǎn)載或內(nèi)容合作請(qǐng)聯(lián)系作者
【社區(qū)內(nèi)容提示】社區(qū)部分內(nèi)容疑似由AI輔助生成,瀏覽時(shí)請(qǐng)結(jié)合常識(shí)與多方信息審慎甄別。
平臺(tái)聲明:文章內(nèi)容(如有圖片或視頻亦包括在內(nèi))由作者上傳并發(fā)布,文章內(nèi)容僅代表作者本人觀點(diǎn),簡(jiǎn)書(shū)系信息發(fā)布平臺(tái),僅提供信息存儲(chǔ)服務(wù)。

相關(guān)閱讀更多精彩內(nèi)容

友情鏈接更多精彩內(nèi)容