myspider.py
import scrapy
from selenium import webdriver

'''
Workflow for using Selenium inside Scrapy:
1. Create a browser object in the spider's constructor (as an attribute of the spider).
2. Override the spider's closed(self, spider) method and quit the browser there.
3. In the downloader middleware's process_response, get the browser object via the spider argument.
4. In process_response, run the browser-automation code (to obtain the page source that includes the dynamically loaded data).
5. Instantiate a new response object and wrap the HTML returned by page_source in it.
6. Return that new response object.
'''

class TestSpider(scrapy.Spider):
    name = 'qiubai'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['http://war.163.com/']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # One browser instance shared by the whole spider
        self.bro = webdriver.Chrome(executable_path=r'D:\Day\爬蟲存儲\chromedriver.exe')

    def parse(self, response):
        # The response body has already been replaced by the middleware,
        # so the dynamically loaded news entries are available here.
        div_list = response.xpath('//div[@class="data_row news_article clearfix "]')
        for div in div_list:
            title = div.xpath('.//div[@class="news_title"]/h3/a/text()').extract_first()
            print(title)

    def closed(self, spider):
        print('bro has been closed')
        self.bro.quit()
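For the middleware below to ever see these responses, it has to be enabled in the project's settings.py. A minimal sketch; the module path wangyipro.middlewares is an assumption made to match the middleware class name below, so adjust it to your own project name:

settings.py

# Enable the custom downloader middleware so that its process_response
# can swap in the Selenium-rendered page for each downloaded response.
DOWNLOADER_MIDDLEWARES = {
    'wangyipro.middlewares.WangyiproDownloaderMiddleware': 543,
}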
middlewares.py
from scrapy import signals
from scrapy.http import HtmlResponse
from time import sleep

class WangyiproDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        print('About to return a new response object!!!')
        # Fetch the dynamically loaded data through the spider's browser.
        bro = spider.bro
        bro.get(url=request.url)
        sleep(3)  # give the browser time to load the dynamic content
        # page_source now includes the dynamically loaded news data
        page_text = bro.page_source
        return HtmlResponse(url=bro.current_url, body=page_text, encoding='utf-8', request=request)

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
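Launching a visible browser window for every intercepted response is slow. A minimal sketch of a headless setup for the spider's __init__, assuming Selenium 4 (where the executable_path keyword was replaced by a Service object); the chromedriver path is the one from the spider above:

myspider.py (headless variant, sketch)

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service

def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    options = Options()
    options.add_argument('--headless')  # run Chrome without opening a window
    # Selenium 4 passes the driver path via a Service object
    self.bro = webdriver.Chrome(
        service=Service(r'D:\Day\爬蟲存儲\chromedriver.exe'),
        options=options,
    )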
A variant where the middleware decides whether a page needs Selenium to be fetched
import time

from scrapy.http import HtmlResponse

# (This method lives inside WangyiproDownloaderMiddleware.)
# Parameters:
# request: the request object that produced the intercepted response
# response: the intercepted response object (the response the downloader passes towards the spider)
# spider: the spider instance from the spider file
def process_response(self, request, response, spider):
    # Tamper with the page data stored in the response object, but only
    # for the pages that load their news entries dynamically.
    if request.url in ['http://news.163.com/domestic/', 'http://news.163.com/world/',
                       'http://news.163.com/air/', 'http://war.163.com/']:
        spider.bro.get(url=request.url)
        js = 'window.scrollTo(0,document.body.scrollHeight)'
        spider.bro.execute_script(js)
        time.sleep(2)  # the browser must be given some time to load the data
        # page_source now includes the dynamically loaded news data
        page_text = spider.bro.page_source
        # replace the original response with one built from the rendered page
        return HtmlResponse(url=spider.bro.current_url, body=page_text, encoding='utf-8', request=request)
    else:
        return response
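For this conditional middleware to do anything, the spider has to request those four section pages in the first place. A hypothetical sketch of that spider side; the class name WangyiSpider and the parse_section callback are illustrative, not from the original project, and the spider still needs the bro attribute and closed() method shown in the first spider above:

myspider.py (sketch of the matching spider side)

import scrapy

class WangyiSpider(scrapy.Spider):
    name = 'wangyi'
    start_urls = ['https://news.163.com/']

    # the four dynamically loaded sections the middleware intercepts
    section_urls = [
        'http://news.163.com/domestic/',
        'http://news.163.com/world/',
        'http://news.163.com/air/',
        'http://war.163.com/',
    ]

    def parse(self, response):
        # request each section; the middleware swaps in the Selenium-rendered body
        for url in self.section_urls:
            yield scrapy.Request(url=url, callback=self.parse_section)

    def parse_section(self, response):
        # response here is the HtmlResponse built from page_source
        for title in response.xpath('//div[@class="news_title"]/h3/a/text()').extract():
            print(title)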