一直聽說淘寶的反爬很厲害:只爬取數十條數據時不會有感覺,但當數量破百之後就很容易被識別出來。
自己試了一下,依舊存在這個問題,現在提供源碼供大家一起學習。
"""Scrape the Fliggy (Taobao) hotel list for Guangzhou and store results in MongoDB.

NOTE(review): Taobao's anti-bot defences reportedly trigger once you fetch
more than ~100 records. The cookie-refresh retry below is best-effort; the
hard-coded cookie values have mostly been removed and must be refreshed
manually from a real browser session.
"""
import requests
from lxml import etree
import time
import pymongo

# Base list URL; the original page has no page number — pagination is done by
# adding `currentPage` to the query params. This was discovered by watching the
# XHR requests while paging through the site, e.g.:
# 'https://hotel.fliggy.com/ajax/hotelList.htm?pageSize=20&currentPage=7&totalItem=12644&...'
# 'https://hotel.fliggy.com/hotel_list3.htm?pageSize=20&currentPage=6'
url = 'https://hotel.fliggy.com/hotel_list3.htm?'

# MongoDB connection: database "home", collection "guangzhou1".
mon_client = pymongo.MongoClient()
mon_db = mon_client.home
feizhu_col = mon_db.guangzhou1

# Request headers are invariant across pages, so they are built once.
# Chrome-style pseudo-headers (':authority', ':method', ...) raise
# "ValueError: Invalid header name b':authority'" in requests, so the
# leading colon is dropped from each of them.
headers = {
    'authority': 'hotel.fliggy.com',
    'method': 'GET',
    'path': '/hotel_list3.htm?cityName=%B9%E3%D6%DD&city=440100&keywords=&checkIn=2019-05-01&checkOut=2019-05-02&ttid=seo.000000583',
    'scheme': 'https',
    'referer': 'https://hotel.fliggy.com/hotel_list3.htm?_input_charset=utf-8&_output_charset=utf-8&searchBy=&market=0&previousChannel=&cityName=%E5%B9%BF%E5%B7%9E&city=440100&_fmd.h._0.r=&checkIn=2019-05-01&checkOut=2019-05-02&keywords=&ttid=seo.000000583',
    'cache-control': 'max-age=0',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
}

# Hoisted out of the page loop so that cookie values refreshed from the
# server's responses persist across pages (the original rebuilt this dict
# every iteration, discarding each refresh).
cookies = {
    # Most cookie values were removed; refresh them yourself.
    'chanelStatExpire': '2019-04-11 09:44:29',  # cookie expiry time
    # Values that appear after refreshing the cookie
    'cookieCheck': '27487',
    'v': '0',
    'last': 'true',
}

record_id = 0  # running document counter (renamed from `id`, which shadows the builtin)

for page in range(1, 5):
    params = {
        'cityName': '%B9%E3%D6%DD',  # percent-encoded (GBK) city name
        'city': '440100',
        'keywords': '',
        'checkIn': '2019-05-01',
        'checkOut': '2019-05-02',
        'ttid': 'seo.000000583',
        # Each page carries 20 entries; an explicit pageSize turned out to be unnecessary.
        'currentPage': str(page),  # page number, deduced by comparing several page loads
        'priceRange': 'R2',  # price tier 2: 100-300 CNY
        'poiNameFilter': '%E4%BD%93%E8%82%B2%E8%A5%BF%E8%B7%AF',  # Tiyu West Road
        'searchPoiName': '%E4%BD%93%E8%82%B2%E8%A5%BF%E8%B7%AF',
    }

    response = requests.get(url, headers=headers, params=params, cookies=cookies)
    # If the body contains "親,小二正忙,滑動一下馬上回來" the request was
    # flagged as a bot. Pull any refreshed cookie values out of the response
    # and retry once with the updated jar.
    fresh = requests.utils.dict_from_cookiejar(response.cookies)
    print(fresh)  # not always populated; typical shape: {'cookieCheck': '15798', 'v': '0'}
    cookies.update(fresh)
    response = requests.get(url, headers=headers, params=params, cookies=cookies)

    html = etree.HTML(response.text)
    hotel_name = html.xpath('//*[@id="J_List"]/div/div/div[4]/div/h5/a/text()')
    coment_url = html.xpath('//*[@id="J_List"]/div/div/div[3]/a/@href')
    hotel_score = html.xpath('//*[@id="J_List"]/div/div/div[3]/a/p[1]/span[1]/text()')
    hotel_price = html.xpath('//*[@id="J_List"]/div/div/div[2]/div[1]/p/span[1]/text()')
    print(hotel_name)

    # Distinct loop names: the original reused `i` here, shadowing the page counter.
    for name, score, price, link in zip(hotel_name, hotel_score, hotel_price, coment_url):
        record_id += 1
        data = {
            'id': str(record_id),
            'name': name,
            'score': score,
            'price': price,
            'url': link,
        }
        # Collection.insert() was deprecated and removed from pymongo;
        # insert_one() is the supported equivalent for a single document.
        feizhu_col.insert_one(data)

    time.sleep(2)  # throttle between pages to reduce the chance of being blocked

mon_client.close()  # close the MongoDB connection