# Incremental Crawlers
Concept: monitor a website for data updates and crawl only the newly added data.
Core: deduplication.
Implementation: a Redis set works well for this.
-- How do we implement deduplication with Redis? --
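The whole scheme rests on one Redis command: `sadd` adds a member to a set and returns the number of members actually added, so 1 means "never seen before" and 0 means "duplicate". A minimal sketch of URL deduplication built on that, assuming a local Redis server and a hypothetical `seen_urls` key:

```python
# URL-level dedup with a Redis set: sadd doubles as the "seen before?" test.
from redis import Redis

conn = Redis(host='127.0.0.1', port=6379)

def is_new(url):
    # 1 -> the url was just added (new); 0 -> it was already in the set
    return conn.sadd('seen_urls', url) == 1

for url in ['http://example.com/a', 'http://example.com/a', 'http://example.com/b']:
    print(url, '->', 'crawl' if is_new(url) else 'skip')
```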
Example: crawl a movie site's updated data with URL deduplication (e.g. https://www.4567tv.tv/frim/index1.html).
The code below uses http://www.922dyy.com/dianying/dongzuopian/ as the start page.
```python
# spider.py (the crawler file)
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from redis import Redis
from shipin.items import ShipinItem


class DianyingSpider(CrawlSpider):
    conn = Redis(host='127.0.0.1', port=6379)  # Redis connection object
    name = 'dianying'
    # allowed_domains = ['www.xx.com']
    start_urls = ['http://www.922dyy.com/dianying/dongzuopian/']
    rules = (
        # Extract pagination URLs only; change follow to True to reach every page
        Rule(LinkExtractor(allow=r'/dongzuopian/index\d+\.html'),
             callback='parse_item', follow=False),
    )

    def parse_item(self, response):
        # Parse the movie detail-page URLs out of the current listing page
        li_list = response.xpath('/html/body/div[2]/div[2]/div[2]/ul/li')
        for li in li_list:
            detail_url = 'http://www.922dyy.com' + li.xpath('./div/a/@href').extract_first()
            # sadd returns 1 if the URL has not been requested before,
            # 0 if it is already in the set (requested before)
            ex = self.conn.sadd('mp4_detail_url', detail_url)
            if ex == 1:
                print('New data to crawl.....')
                yield scrapy.Request(url=detail_url, callback=self.parse_detail)
            else:
                print('No new data to crawl')

    def parse_detail(self, response):
        name = response.xpath('//*[@id="film_name"]/text()').extract_first()
        m_type = response.xpath('//*[@id="left_info"]/p[1]/text()').extract_first()
        print(name, '--', m_type)
        item = ShipinItem()  # instantiate the item
        item['name'] = name
        item['m_type'] = m_type
        yield item
```
```python
# items.py
# -*- coding: utf-8 -*-
import scrapy


class ShipinItem(scrapy.Item):
    name = scrapy.Field()
    m_type = scrapy.Field()
```
```python
# pipelines.py (the pipeline)
# -*- coding: utf-8 -*-
class ShipinPipeline(object):
    def process_item(self, item, spider):
        conn = spider.conn
        dic = {
            'name': item['name'],
            'm_type': item['m_type']
        }
        # Without str() this usually raises an error about the dict data type:
        # lpush does not accept a dict directly
        conn.lpush('movie_data', str(dic))
        return item
```
```python
# settings.py
ITEM_PIPELINES = {
    'shipin.pipelines.ShipinPipeline': 300,
}
BOT_NAME = 'shipin'
SPIDER_MODULES = ['shipin.spiders']
NEWSPIDER_MODULE = 'shipin.spiders'
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
ROBOTSTXT_OBEY = False
LOG_LEVEL = 'ERROR'
```
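Because the pipeline stores every record as `str(dic)`, reading the results back means parsing those repr strings. A minimal read-back sketch, assuming the same local Redis instance and the `movie_data` key used by the pipeline above:

```python
# Read the stored movie records back out of the Redis list.
import ast

from redis import Redis

conn = Redis(host='127.0.0.1', port=6379, decode_responses=True)
print(conn.llen('movie_data'))                # how many records are stored
for raw in conn.lrange('movie_data', 0, -1):  # newest first, since lpush prepends
    record = ast.literal_eval(raw)            # turn the str(dic) repr back into a dict
    print(record['name'], '--', record['m_type'])
```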

Workflow:

```
scrapy startproject Name
cd Name
scrapy genspider -t crawl spider_name www.example.com
```

(`spider_name` is whatever you want the spider file to be called.)
Key point: the incremental crawler tests whether a URL is already in the set. `sadd` (a set command) returns 1 when the member was not yet present, which means the data is new.
`lpush`, `lrange`, and `llen` (each taking a key) are Redis list commands; they are what the pipeline uses to store results and what the read-back sketch above uses to inspect them.
Below, the same idea applied to the Qiushibaike jokes page (author/joke content), crawled incrementally.
```python
# spider.py (the crawler file)
# -*- coding: utf-8 -*-
import hashlib

import scrapy
from redis import Redis

from qbPro.items import QbproItem


# Only crawls the current page
class QiubaiSpider(scrapy.Spider):
    name = 'qiubai'
    conn = Redis(host='127.0.0.1', port=6379)
    start_urls = ['https://www.qiushibaike.com/text/']

    def parse(self, response):
        div_list = response.xpath('//*[@id="content-left"]/div')
        for div in div_list:
            author = div.xpath('./div/a[2]/h2/text() | ./div/span[2]/h2/text()').extract_first().strip()
            content = div.xpath('./a/div/span[1]//text()').extract()
            content = ''.join(content).replace('\n', '')
            item = QbproItem()  # instantiate the item
            item['author'] = author
            item['content'] = content
            # Data fingerprint: a unique identifier for one scraped record
            data = author + content
            hash_key = hashlib.sha256(data.encode()).hexdigest()
            ex = self.conn.sadd('hash_key', hash_key)  # store the fingerprint in a set
            if ex == 1:
                print('New data found')
                yield item
            else:
                print('No new data')
```
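Note the design difference between the two examples: the movie spider dedups on detail-page URLs, which works because every new movie shows up under a new URL. The joke listing, by contrast, keeps serving fresh content at the same URL, so the dedup key is a data fingerprint, a sha256 over the fields that identify one record (author + content), rather than the URL itself.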
```python
# items.py
# -*- coding: utf-8 -*-
import scrapy


class QbproItem(scrapy.Item):
    author = scrapy.Field()
    content = scrapy.Field()
```
```python
# pipelines.py (the pipeline)
# -*- coding: utf-8 -*-
class QbproPipeline(object):
    def process_item(self, item, spider):
        conn = spider.conn
        dic = {
            'author': item['author'],
            'content': item['content']
        }
        conn.lpush('qiubai', str(dic))  # same str() workaround as above
        return item
```
```python
# settings.py
ITEM_PIPELINES = {
    'qbPro.pipelines.QbproPipeline': 300,
}
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
ROBOTSTXT_OBEY = False
LOG_LEVEL = 'ERROR'
```
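The `str(dic)` workaround in both pipelines exists because redis-py's `lpush` only accepts bytes, strings, and numbers, not dicts. A common alternative, sketched below under the same class and key names as the qbPro project (a suggested variation, not part of the original code), is to serialize with `json.dumps` so the stored records parse back cleanly:

```python
# pipelines.py -- variation: store JSON instead of str(dic)
# -*- coding: utf-8 -*-
import json


class QbproPipeline(object):
    def process_item(self, item, spider):
        conn = spider.conn
        record = json.dumps({
            'author': item['author'],
            'content': item['content'],
        }, ensure_ascii=False)  # keep non-ASCII text readable in Redis
        conn.lpush('qiubai', record)
        return item
```

Each stored element can then be recovered with `json.loads` instead of parsing a Python repr.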

