爬取瓜子二手車代碼


settings.py 中

ROBOTSTXT_OBEY = False

guazispider.py

import json
from ..items import CarItem
import scrapy
from fake_headers import Headers

# Generator of fake Chrome-on-Windows browser headers; a fresh header set
# is produced for every request via header.generate() in start_requests.
header = Headers(
    browser='chrome',
    os='win',
    headers=True
)


class guaziSpider(scrapy.Spider):
    """Crawl guazi.com's mobile JSON listing API and yield one CarItem per car.

    NOTE(review): guazi obfuscates digits with rotating custom web fonts; the
    keys of the five font*_num_map dicts below were originally the private-use
    Unicode glyph characters of those fonts.  In this copy every key appears
    as an empty string (the glyphs were lost in transcription), which makes
    each dict literal collapse to a single entry and the decoding
    non-functional as-is — the original glyph characters must be restored
    before this spider can decode prices correctly.
    """

    name = 'guazi'
    allowed_domains = ['guazi.com']
    # Paginated listing endpoint; ``{}`` is filled with the page number in
    # start_requests.  City/filter parameters are hard-coded.
    url_format = 'https://mapi.guazi.com/car-source/carList/pcList?page={}&pageSize=12&city_filter=12&city=12&guazi_city=12&tag_types=18&versionId=0.0.0.0&osv=Unknown&platfromSource=wap'

    # Glyph -> digit table for font variant 0 (glyph keys lost; see class docstring).
    font0_num_map = {
        "": "0",
        "": "1",
        "": "2",
        "": "3",
        "": "4",
        "": "5",
        "": "6",
        "": "7",
        "": "8",
        "": "9"
    }

    # Glyph -> digit table for font variant 1 (glyph keys lost).
    font1_num_map = {
        "": "0",
        "": "1",
        "": "2",
        "": "3",
        "": "4",
        "": "5",
        "": "6",
        "": "7",
        "": "8",
        "": "9"
    }

    # Glyph -> digit table for font variant 2 (glyph keys lost).
    font2_num_map = {
        "": "0",
        "": "1",
        "": "2",
        "": "3",
        "": "4",
        "": "5",
        "": "6",
        "": "7",
        "": "8",
        "": "9"
    }

    # Glyph -> digit table for font variant 3 (glyph keys lost).
    font3_num_map = {
        '': '0',
        '': '1',
        '': '2',
        '': '3',
        '': '4',
        '': '5',
        '': '6',
        '': '7',
        '': '8',
        '': '9'
    }

    # Glyph -> digit table for font variant 4 (glyph keys lost).
    font4_num_map = {
        "": "0",
        "": "1",
        "": "2",
        "": "3",
        "": "4",
        "": "5",
        "": "6",
        "": "7",
        "": "8",
        "": "9"
    }

    # Originally only one table was applied (commented-out version below), but
    # it is not known which of the five encoding rules a given page uses, so
    # all five are tried; the correct one is picked by the sanity check
    # "down payment < price" and "down payment / price is roughly 30%".
    # def decode_num(self, text: str):
    #     result = text
    #     for key in self.font1_num_map:
    #         result = result.replace(key, self.font1_num_map.get(key))
    #     return result

    # All five decoding tables, tried in order by decode_num_with_font_list.
    font_num_map_list = [font0_num_map, font1_num_map, font2_num_map, font3_num_map, font4_num_map]

    # Decode ``text`` with each of the five tables and collect the candidates.
    def decode_num_with_font_list(self, text):
        """Return the five candidate decodings of *text*, one per font table."""
        value_list = []
        for l in self.font_num_map_list:
            value_list.append(self.decode_num(text, l))
        return value_list

    def decode_num(self, text: str, l):
        """Replace every glyph key of table *l* found in *text* with its digit."""
        result = text
        for key in l:
            result = result.replace(key, l.get(key))
        return result

    # Use down payment vs. price plausibility to pick the correct table index.
    def predict_best_index(self, price_list, first_pay_list):
        """Return the index of the font table whose decoding looks plausible.

        A candidate is rejected when its down payment is >= the full price, or
        when the truncated ratio first_pay*10/price exceeds 3 (i.e. the down
        payment is over roughly 30-40% of the price).  The first surviving
        index wins; falls back to 0 when every candidate is rejected.
        """
        list1 = [0, 1, 2, 3, 4]
        for i in range(5):
            # Values look like "12.3萬"; take the numeric part before the unit.
            price = float(price_list[i].split("萬")[0])
            first_pay = 0
            if first_pay_list[i] != '':
                first_pay = float(first_pay_list[i].split("萬")[0])
            percent = int(first_pay * 10 / price)
            if first_pay >= price or percent > 3:
                list1.remove(i)
        if len(list1) != 0:
            return list1[0]
        return 0

    def start_requests(self):
        """Issue requests for listing pages 1-19 with randomized fake headers."""
        # Page count is hard-coded; widen the range to crawl more pages.
        for i in range(1, 20):
            url = self.url_format.format(i)
            yield scrapy.Request(url=url, headers=header.generate(), callback=self.parse)

    def parse(self, response):
        """Parse one JSON listing page and yield a CarItem per car entry."""
        jsonObj = json.loads(response.text)
        data = jsonObj.get('data', None)
        if data is not None:
            postList = data.get('postList', None)
            if postList is not None:
                for car in postList:
                    title = car['title']
                    # Some cars have no buyOutPrice, so .get with a default is required.
                    buyOutPrice = car.get('buyOutPrice', '0萬')

                    # These three fields are font-obfuscated and must be decoded.
                    price = car['price']
                    first_pay = car['first_pay']
                    road_haul = car['road_haul']
                    decode_price_list = self.decode_num_with_font_list(price)
                    decode_first_pay_list = self.decode_num_with_font_list(first_pay)
                    decode_road_haul_list = self.decode_num_with_font_list(road_haul)
                    print(title, price, first_pay, road_haul, buyOutPrice)
                    print(title, decode_price_list, decode_first_pay_list, decode_road_haul_list, buyOutPrice)
                    index = self.predict_best_index(decode_price_list, decode_first_pay_list)
                    print("預測后的最佳價格為:", decode_price_list[index], decode_first_pay_list[index],
                          decode_road_haul_list[index])

                    # Copy the chosen decodings into the item.
                    car = CarItem()
                    car['title'] = title
                    car['buyOutPrice'] = buyOutPrice
                    car['price'] = decode_price_list[index]
                    car['first_pay'] = decode_first_pay_list[index]
                    car['road_haul'] = decode_road_haul_list[index]

                    yield car

items.py

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class SpiderdemoItem(scrapy.Item):
    """Unused default item stub created by ``scrapy startproject``.

    Declare fields here as ``name = scrapy.Field()`` if this item is
    ever put to use.
    """


class CarItem(scrapy.Item):
    """One scraped used-car listing from guazi.com."""

    # Listing title, taken verbatim from the API response.
    title = scrapy.Field()
    # Sale price, decoded from the obfuscated font glyphs.
    price = scrapy.Field()
    # Down payment, decoded likewise.
    first_pay = scrapy.Field()
    # Mileage, decoded likewise.
    road_haul = scrapy.Field()
    # Buy-out price; the spider supplies a default when the API omits it.
    buyOutPrice = scrapy.Field()

pipelines.py

class SpiderdemoPipeline:
    """No-op item pipeline: passes every item through unchanged.

    Hook point for adding cleaning, validation, or persistence later.
    """

    def process_item(self, item, spider):
        """Return *item* untouched."""
        return item

init.py

from scrapy import cmdline

# Run without saving:
# cmdline.execute("scrapy crawl guazi".split())

# Run the spider and save results; -O overwrites cars.csv on every run.
cmdline.execute("scrapy crawl guazi -O cars.csv".split())



免責聲明!

本站轉載的文章為個人學習借鑒使用,本站對版權不負任何法律責任。如果侵犯了您的隱私權益,請聯系本站郵箱yoyou2525@163.com刪除。



 
粵ICP備18138465號   © 2018-2025 CODEPRJ.COM