import random
import time

import requests
from bs4 import BeautifulSoup
content_all= []
link_all= []
def get_content(url):
    """Scrape one listing detail page and return its fields as a dict.

    Parameters:
        url: absolute URL of a listing detail page.

    Returns a dict with keys: 'title', 'address', 'brice' (int price),
    'house_img', 'master_img', 'master_name', 'master_gender' ('女'/'男').
    Raises IndexError if a selector matches nothing (page layout changed).
    """
    web_data = requests.get(url)
    soup = BeautifulSoup(web_data.text, 'lxml')
    title = soup.select(' div.pho_info > h4 > em')[0].get_text()
    address = soup.select('div.pho_info > p')[0].get('title').strip()
    brice = soup.select('#pricePart > div.day_l > span')[0].get_text()
    house_img = soup.select('#curBigImage')[0].get('src')
    master_img = soup.select('#floatRightBox > div.js_box.clearfix > div.member_pic > a > img')[0].get('src')
    master_name = soup.select('#floatRightBox > div.js_box.clearfix > div.w_240 > h6 > a')[0].get_text()
    master_gender = soup.select('#floatRightBox > div.js_box.clearfix > div.w_240 > h6 > span')[0].get('class')[0]
    data = {
        'title': title,
        'address': address,
        'brice': int(brice),  # assumes the price text is pure digits — TODO confirm
        'house_img': house_img,
        'master_img': master_img,
        'master_name': master_name,
        'master_gender': master_gender,
    }
    # The site encodes gender as a CSS class on the icon span; translate it.
    if data['master_gender'] == 'member_girl_ico':
        data['master_gender'] = '女'  # female
    else:
        data['master_gender'] = '男'  # male
    return data
def get_single_web(url):
    """Fetch one search-result page and return its detail-page links.

    Parameters:
        url: URL of a search-result (listing index) page.

    Returns a list of href strings, one per result thumbnail anchor.
    """
    web_data = requests.get(url)
    soup = BeautifulSoup(web_data.text, 'lxml')
    # Each result thumbnail is an <a class="resule_img_a"> ("resule" is the
    # site's own class-name typo, not ours).
    return [anchor.get('href') for anchor in soup.select('a.resule_img_a')]
# Build the list of search-result page URLs (currently just page 1).
url_all = [r'http://xa.*******.com/search-duanzufang-p{}-0/'.format(str(i)) for i in range(1, 2)]
count = 0

# Pass 1: collect every detail-page link from each search-result page.
for page_url in url_all:
    link_all += get_single_web(page_url)  # 將所有的鏈接存放到一個(gè)列表中 -> append this page's links
    time.sleep(random.randrange(1, 3))  # polite random delay, 1-2 s

# Pass 2: scrape each detail page exactly once.
for link in link_all:
    count += 1
    data = get_content(link)  # fetch once (the original fetched each page twice)
    # Print link, running count and scraped content for progress feedback.
    print('link is {} num is {} \n {}'.format(link, count, data))
    content_all.append(data)  # keep every detail page's dict
    time.sleep(random.randrange(1, 3))  # random wait of 1-2 seconds

# Sort all scraped listings by price ('brice' key) and show the result.
sort_content_all = sorted(content_all, key=lambda x: x['brice'])
print(sort_content_all)
# 思路:
# 1、對(duì)所有頁數(shù)進(jìn)行統(tǒng)計(jì)
# 2、對(duì)每一頁的詳情頁列表統(tǒng)計(jì)
# 3、通過字典結(jié)構(gòu)化保存
# 4、定位詳情頁抓取的內(nèi)容
# 知識(shí)小結(jié):
# lambda x: x[key]  函數(shù)運(yùn)用
# format()  格式化輸出