Crawler code -- 我愛我家 (community transaction records)

Posting the code first; a detailed walkthrough will follow tomorrow.

Scrapy structure
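
The files below follow the standard layout that scrapy startproject generates. A rough sketch of the project tree (the project name wawj comes from the imports below; the spider file name is an assumption):

    wawj/
        scrapy.cfg
        wawj/
            items.py          # WawjItem
            pipelines.py      # LianjiaPipeline
            settings.py
            spiders/
                wawjspider.py # WawjspiderSpider, name = 'w2spider'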

Item definition

import scrapy

class WawjItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    blockurl = scrapy.Field()
    test = scrapy.Field()
    blockname = scrapy.Field()
    houseinfo = scrapy.Field()
    housearea = scrapy.Field()
    data = scrapy.Field()
    totalprice = scrapy.Field()
    unitprice = scrapy.Field()

Spider

# -*- coding: utf-8 -*-
import scrapy
from wawj.items import WawjItem
from scrapy.http import Request

class WawjspiderSpider(scrapy.Spider):
    name = 'w2spider'
    start_urls = ['http://hz.5i5j.com/community/gongshu/p1',
                     'http://hz.5i5j.com/community/xiacheng/p1',
                     'http://hz.5i5j.com/community/shangcheng/p1',
                     'http://hz.5i5j.com/community/binjiang/p1',
                     'http://hz.5i5j.com/community/yuhang/p1',
                     'http://hz.5i5j.com/community/xiaoshan/p1',
                     'http://hz.5i5j.com/community/xihu/p1',
                     'http://hz.5i5j.com/community/jianggan/p1',
                     'http://hz.5i5j.com/community/fuyang/p1',
                     'http://hz.5i5j.com/community/p2/',
                     'http://hz.5i5j.com/community/p3/',
                     'http://hz.5i5j.com/community/p4/',
                     'http://hz.5i5j.com/community/p5/',
                     'http://hz.5i5j.com/community/p6/',
                     'http://hz.5i5j.com/community/p7/']
#    start_urls = ['http://hz.5i5j.com/community/p7/']
    
    base_url = 'http://hz.5i5j.com'
    base_exchang_url = '/exchange/getdeals?communityId='

    def parse(self, response):
        # Total number of communities in this district, shown at the top of the listing page
        maxnum = int(response.xpath("//div[@class='list-comm-l']/h3[@class='list-comm-sort']/font[@class='font-houseNum']/text()").extract()[0])
        # 12 communities per listing page, so round up to get the page count
        if maxnum % 12 == 0:
            maxpage = maxnum/12
        else:
            maxpage = maxnum/12 + 1
        for page in range(1,maxpage+1):
            url = response.url + 'n' + str(page)
            yield Request(url,callback=self.get_blockid)
            
    def get_blockid(self,response):
        # Each entry in the list body is one community; grab the link in its title
        block_list = response.xpath("//ul[@class='list-body']/li/div[@class='list-info-comm']/h2/a")
        block_num = []
        for block in block_list:
            item = WawjItem()
            block_name = block.xpath("./text()").extract()[0]
            item['blockname'] = block_name
            block_url = block.xpath("./@href").extract()[0]
            # The community id is taken from the detail URL path
            id = block_url.split('/')[2]
            block_url = self.base_url + block_url
            item['blockurl'] = block_url
            block_num.append(block_url)
            yield Request(block_url,callback=self.get_maxpage_block,dont_filter=True,meta={'item':item,'id':id,'block_num':block_num})

    def get_maxpage_block(self,response):
        item = response.meta['item']
        id = response.meta['id']
        # Last page number of the community's deal-record pager; no pager means a single page
        maxpage_block_lst = response.xpath("//ul[@class='deal-page']/a[last()-1]/li/text()").extract()
        if len(maxpage_block_lst) != 0:
            maxpage_block = int(maxpage_block_lst[0])
        else:
            maxpage_block = 1
        for page in range(1,maxpage_block+1):
            # The deal records are served by an AJAX endpoint, so the request mimics an XHR call
            url = self.base_url + self.base_exchang_url + id + '&page=' + str(page)
            yield Request(url,dont_filter=True,headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36','X-Requested-With':'XMLHttpRequest'},callback=self.get_info,meta={'item':item})
    

    def get_info(self,response):
        print 'Sell url: ',response.url
        item = response.meta['item']
        # Each ul.watch-record-text2 block is one recorded transaction
        node_list = response.xpath("//ul[@class='watch-record-text2']")
        if len(node_list) == 0:
            # No recorded deals for this community; still yield a placeholder item
            item['houseinfo'] = 'None'
            item['housearea'] = 'None'
            item['data'] = 'None'
            item['totalprice'] = 'None'
            item['unitprice'] = 'None'
            yield item
        else:
            for node in node_list:
                item['houseinfo'] = node.xpath("./li[1]/p[2]/b/text()").extract()[0]
                item['housearea'] = node.xpath("./li[2]/text()").extract()[0]
                item['data'] = node.xpath("./li[3]/text()").extract()[0]
                item['totalprice'] = node.xpath("./li[4]/text()").extract()[0]
                item['unitprice'] = node.xpath("./li[5]/text()").extract()[0]
                yield item
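
In short, the callback chain works as follows: parse() reads the total community count for each district listing and requests every result page; get_blockid() extracts each community's name, detail URL and numeric id; get_maxpage_block() pages through that community's deal records via the /exchange/getdeals AJAX endpoint (hence the X-Requested-With header); and get_info() yields one item per recorded transaction.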

Pipeline definition

import json
import pandas


class LianjiaPipeline(object):
    def __init__(self):
        # Output file; each item is written as one JSON object per line
        self.f = open('c:\\test\\ceshi.json','w')

    def process_item(self, item, spider):
        content = json.dumps(dict(item),ensure_ascii=False)+'\n'
        self.f.write(content.encode('utf-8'))
        return item

    def close_spider(self,spider):
        self.f.close()
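
For the pipeline to actually receive items, it has to be enabled in the project settings. A minimal sketch, assuming the class above lives in wawj/pipelines.py (the module path and the priority value are assumptions, not taken from the original post):

    # wawj/settings.py
    ITEM_PIPELINES = {
        'wawj.pipelines.LianjiaPipeline': 300,
    }

With that in place, the crawl can be started with scrapy crawl w2spider, and each item is appended as one JSON line to c:\test\ceshi.json.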