day04-selenium語(yǔ)法

1. selenium語(yǔ)法

"""__author__= 雍新有"""
import time

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait

# # 打開(kāi)瀏覽器
# # 默認(rèn)Chrome()中需要傳入chromedriver.exe,只需配置chromedriver環(huán)境變量就行了
# browser = webdriver.Chrome()
# # get(url): 表示打開(kāi)某個(gè)地址
# browser.get('https://www.baidu.com')
# # 關(guān)閉瀏覽器
# browser.close()


# 1.獲取豆瓣電影信息
# browser = webdriver.Chrome()
# browser.get('https://movie.douban.com/explore#!type=movie&tag=%E7%83%AD%E9%97%A8&sort=recommend&page_limit=20&page_start=0')
# # 獲取‘加載更多按鈕’, 實(shí)現(xiàn)點(diǎn)擊事件
# # 模擬向下拉進(jìn)度條
# js1 = 'window.scrollTo(0, 1000)'
# # browser.execute_script(js1)
# # 獲取加載按鈕
# # 按鈕的xpath代碼  -- //*[@id="content"]/div/div[1]/div/div[4]/a
# more_button = browser.find_element_by_xpath('//*[@id="content"]/div/div[1]/div/div[4]/a')
# # 實(shí)現(xiàn)點(diǎn)擊
# more_button.click()
# # 由于點(diǎn)擊事件剛執(zhí)行,頁(yè)面還沒(méi)有被ajax獲取到的數(shù)據(jù)所渲染,所以要睡眠幾秒
# time.sleep(3)
#
# # page_source獲取源碼,經(jīng)過(guò)ajax渲染后的源碼
# a1 = browser.page_source
# # a1為源碼,解析可采用正則、lxml.etree、beactifulsoup4
# print(a1)
# browser.close()

# 2. 京東 - 查詢商品
# browser = webdriver.Chrome()
# browser.get('https://www.jd.com')
# # 輸入框
# input = browser.find_element_by_xpath('//*[@id="key"]')
# print(input)
# input = browser.find_element_by_id('key')
# print(input)
# # 選擇器,copy里面有
# input = browser.find_element_by_css_selector('#key')
#
# # 通用寫(xiě)法
# # input = browser.find_element(By.XPATH, '//*[@id="key"]')
# # input = browser.find_element(By.ID, 'ID')
# # input = browser.find_element(By.CSS_SELECTOR, '#key')
#
# # 輸入內(nèi)容
# input.send_keys('Mac')

# 3. 前進(jìn)后退
# 依次在一個(gè)窗口打開(kāi)下面3個(gè)網(wǎng)站
# browser = webdriver.Chrome()
# browser.get('https://www.baidu.com')
# browser.get('https://www.jd.com')
# browser.get('http://blog.vincent-whf.top')
#
# # 回退
# browser.back()
# # 前進(jìn)
# browser.forward()


# 4.獲取京東,輸入內(nèi)容,點(diǎn)擊搜索
# 等待(隱式等待、顯式等待)
# browser = webdriver.Chrome()
# browser.get('https://www.jd.com')
# # 不睡眠,可能網(wǎng)頁(yè)加載不出來(lái),后面的xpath就找不到元素
# # time.sleep(3)
#
# # 隱式等待,如果獲取元素找不到,則等待規(guī)定時(shí)長(zhǎng)(默認(rèn)為0),等待時(shí)長(zhǎng)自定義
# browser.implicitly_wait(10)
#
# input = browser.find_element(By.XPATH, '//*[@id="key"]')
# # 將輸入框的內(nèi)容清空
# input.clear()
# # 輸入信息
# input.send_keys('零食')
# # 獲取搜索點(diǎn)擊按鈕
# button = browser.find_element(By.XPATH, '//*[@id="search"]/div/div[2]/button')
# # 搜索點(diǎn)擊
# button.click()
# browser.implicitly_wait(10)
#
# # 獲取頁(yè)碼
# total = browser.find_element(By.XPATH, '//*[@id="J_bottomPage"]/span[2]/input')

# 顯式等待
# Explicit-wait demo: open JD.com, search for a keyword, read the page count.
browser = webdriver.Chrome()
browser.get('https://www.jd.com')

# Wait object: polls each condition below for up to 10 seconds.
wait = WebDriverWait(browser, 10)
# presence_of_element_located: block until the element exists in the DOM.
input = wait.until(
    EC.presence_of_element_located((By.XPATH, '//*[@id="key"]'))
)

# Clear any prefilled text, then type the search keyword.
input.clear()
input.send_keys('水果')

# element_to_be_clickable: wait until the search button can be clicked.
button = wait.until(
    EC.element_to_be_clickable((By.XPATH, '//*[@id="search"]/div/div[2]/button'))
)
button.click()

# Read the total page count at the bottom of the result list.
# BUG FIX: the original XPath ended in '/text()', which selects a text node.
# Selenium element locators must select elements, so that raises
# InvalidSelectorException. Locate the <em> element and read .text instead
# (printing the bare WebElement only showed its repr anyway).
total = wait.until(
    EC.presence_of_element_located((By.XPATH, '//*[@id="J_bottomPage"]/span[2]/em[1]'))
)
print(total.text)

2. 京東selenium爬蟲(chóng)

"""__author__= 雍新有"""
import re
import time

import pymongo
from lxml import etree
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait


chrome_options = webdriver.ChromeOptions()
# Disable image loading to speed up page rendering while scraping.
prefs = {"profile.managed_default_content_settings.images": 2}
chrome_options.add_experimental_option("prefs", prefs)

# Launch the browser.
# FIX: the 'chrome_options=' keyword is deprecated (and removed in
# Selenium 4); 'options=' is the supported spelling since Selenium 3.8.
browser = webdriver.Chrome(options=chrome_options)
# Explicit wait: polls conditions passed to wait.until() for up to 20 seconds.
wait = WebDriverWait(browser, 20)


def search():
    """Open JD.com, search for '水果', fully scroll page 1 so lazy-loaded
    items render, scrape and persist page 1, and return the total page
    count as text."""
    browser.get('https://www.jd.com')

    # Type the keyword into the search box once it appears in the DOM.
    search_box = wait.until(
        EC.presence_of_element_located((By.XPATH, '//*[@id="key"]'))
    )
    search_box.clear()
    search_box.send_keys('水果')

    # Click the search button once it becomes clickable.
    search_button = wait.until(
        EC.element_to_be_clickable((By.XPATH, '//*[@id="search"]/div/div[2]/button'))
    )
    search_button.click()

    # Scroll down in 16 increments; each pause lets the AJAX content load.
    for step in range(1, 17):
        browser.execute_script(
            f'window.scrollTo(0, {step} * document.body.scrollHeight / 16)'
        )
        time.sleep(5)

    # Total page count shown at the bottom of the result list.
    total = wait.until(
        EC.presence_of_element_located((By.XPATH, '//*[@id="J_bottomPage"]/span[2]/em[1]'))
    )

    # Parse and persist page 1 before returning.
    save_mongo(parse_html(browser.page_source), 1)

    return total.text


def next_page(page):
    """Jump to result page *page* via the page-number input box at the
    bottom of the list and return the rendered page source.

    Alternative navigation (clicking the "next page" link) also works;
    this version types the page number and clicks the confirm button.
    """
    # Scroll down in 32 increments so lazy-loaded items on the current
    # page render before we navigate away.
    for step in range(1, 33):
        browser.execute_script(
            f'window.scrollTo(0, {step} * document.body.scrollHeight / 32)'
        )
        time.sleep(5)

    # Page-number input box. XPath kept verbatim from the recorded
    # selector; the extra spaces are legal XPath whitespace.
    page_box = wait.until(
        EC.presence_of_element_located((By.XPATH, '// *[ @ id = "J_bottomPage"] / span[2] / input'))
    )
    page_box.clear()
    print(f'---------第{page}頁(yè)----')
    page_box.send_keys(str(page))

    # Confirm button next to the input triggers the page jump.
    go_button = wait.until(
        EC.element_to_be_clickable((By.XPATH, '//*[@id="J_bottomPage"]/span[2]/a'))
    )
    go_button.click()

    # The page reloads after the click; give the AJAX render time to
    # finish before grabbing the source.
    time.sleep(10)
    return browser.page_source


def parse_html(html):
    """Parse a JD search-result page and extract the goods list.

    Args:
        html: page source (str) of a rendered search-result page.

    Returns:
        list[dict]: one dict per product with keys 'img', 'goods_name'
        and 'goods_price'.

    BUG FIX: lxml's xpath() always returns a list, so the original code
    stored one-element lists (or empty lists) as the field values. Each
    field is now the first match as a string, or None when absent.
    """
    tree = etree.HTML(html)
    goods_list = tree.xpath('//*[@id="J_goodsList"]/ul/li')
    result = []
    for item in goods_list:
        # `item` is one <li> product card.
        img = item.xpath('./div/div[1]/a/img/@src')
        name = item.xpath('./div/div[3]/a/@title')
        price = item.xpath('./div/div[2]/strong/i/text()')
        result.append({
            'img': img[0] if img else None,
            'goods_name': name[0] if name else None,
            'goods_price': price[0] if price else None,
        })
    return result


def save_mongo(data, page):
    """Insert the scraped items for one result page into MongoDB
    (database 'spider', collection 'jd1' on localhost:27017).

    Args:
        data: list of item dicts produced by parse_html().
        page: 1-based page number, used only in the log messages.
    """
    client = pymongo.MongoClient(host='127.0.0.1', port=27017)
    print(len(data))
    try:
        collection = client['spider']['jd1']
        for item in data:
            print(item)
            collection.insert_one(item)
        # BUG FIX: the success message was printed BEFORE the inserts, so
        # it appeared even when an insert later raised. Print it only
        # after all inserts succeed.
        print(f'第{page}頁(yè)數(shù)據(jù)插入成功')
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        print(f'第{page}頁(yè)數(shù)據(jù)插入失敗')
    finally:
        # BUG FIX: the client was never closed, leaking a connection per call.
        client.close()


if __name__ == '__main__':
    # search(): open JD, run the query, scrape/persist page 1, and return
    # the total page count as text.
    total = search()
    # BUG FIX: use a raw string for the regex — '\d' in a plain string is
    # an invalid escape sequence (SyntaxWarning on modern Python).
    all_page = int(re.search(r'(\d+)', total).group())
    # Demo scrapes only pages 2-3; raise the upper bound (at most
    # all_page + 1) to scrape more pages.
    for page in range(2, 4):
        html = next_page(page)
        result = parse_html(html)
        save_mongo(result, page)

©著作權(quán)歸作者所有,轉(zhuǎn)載或內(nèi)容合作請(qǐng)聯(lián)系作者
【社區(qū)內(nèi)容提示】社區(qū)部分內(nèi)容疑似由AI輔助生成,瀏覽時(shí)請(qǐng)結(jié)合常識(shí)與多方信息審慎甄別。
平臺(tái)聲明:文章內(nèi)容(如有圖片或視頻亦包括在內(nèi))由作者上傳并發(fā)布,文章內(nèi)容僅代表作者本人觀點(diǎn),簡(jiǎn)書(shū)系信息發(fā)布平臺(tái),僅提供信息存儲(chǔ)服務(wù)。

相關(guān)閱讀更多精彩內(nèi)容

友情鏈接更多精彩內(nèi)容