
# -*- coding: utf-8 -*-
# Scrapy settings for the AMAZON project.
#
# Only the settings this project actually uses are kept active here; for
# the full list consult:
#   https://doc.scrapy.org/en/latest/topics/settings.html
#   https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#   https://doc.scrapy.org/en/latest/topics/spider-middleware.html

# --- Persistence configuration, read by the pipelines via from_crawler() ---
DB = "amazon"               # MongoDB database name
COLLECTION = "goods"        # MongoDB collection name
HOST = "localhost"          # MongoDB host
PORT = 27017                # MongoDB port
USER = "root"               # MongoDB user
PWD = "123456"              # MongoDB password
FILE_PATH = "goods.txt"     # output file used by FilePipeline

BOT_NAME = 'AMAZON'

SPIDER_MODULES = ['AMAZON.spiders']
NEWSPIDER_MODULE = 'AMAZON.spiders'

# Deliberately ignore robots.txt for this crawl.
ROBOTSTXT_OBEY = False

# Send a browser-like User-Agent with every request.
DEFAULT_REQUEST_HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36",
}

# Downloader middlewares (lower number = closer to the engine).
DOWNLOADER_MIDDLEWARES = {
    'AMAZON.middlewares.DownMiddleware1': 200,
    # 'AMAZON.middlewares.DownMiddleware2': 300,
}

# Item pipelines, executed in ascending priority order.
ITEM_PIPELINES = {
    'AMAZON.pipelines.MongoPipeline': 200,
    'AMAZON.pipelines.FilePipeline': 300,
}

# Use the project's custom request-duplicate filter instead of the default
# scrapy.dupefilter.RFPDupeFilter.
DUPEFILTER_CLASS = 'AMAZON.cumstomdupefilter.MyDupeFilter'

from scrapy.cmdline import execute # execute(['scrapy', 'crawl', 'amazon','--nolog']) # execute(['scrapy', 'crawl', 'amazon',]) #scrapy crawl amazon -a keyword=iphone8手機 execute(['scrapy', 'crawl', 'amazon','-a','keyword=iphone8手機','--nolog']) # execute(['scrapy', 'crawl', 'baidu',]) # execute(['scrapy', 'crawl', 'baidu','--nolog'])
# -*- coding: utf-8 -*-
import scrapy
from urllib.parse import urlencode
from AMAZON.items import AmazonItem


class AmazonSpider(scrapy.Spider):
    """Search amazon.cn for a keyword, follow every search result to its
    detail page and yield one AmazonItem per product."""

    name = 'amazon'
    allowed_domains = ['www.amazon.cn']
    start_urls = ['http://www.amazon.cn/', ]
    # Per-spider overrides of the project settings (readable via
    # self.settings.get()).
    custom_settings = {
        "BOT_NAME": 'EGON_AMAZON',
        'REQUSET_HEADERS': {},
    }

    def __init__(self, keyword, *args, **kwargs):
        # `keyword` is supplied on the command line:
        #   scrapy crawl amazon -a keyword=...
        super(AmazonSpider, self).__init__(*args, **kwargs)
        self.keyword = keyword

    def start_requests(self):
        """Build and yield the first search request for self.keyword."""
        url = 'https://www.amazon.cn/s/ref=nb_sb_noss_1/461-4093573-7508641?'
        # urlencode handles the non-ASCII keyword safely.
        url += urlencode({"field-keywords": self.keyword})
        print(url)
        yield scrapy.Request(url,
                             callback=self.parse_index,  # parse the result listing
                             dont_filter=False,
                             )

    def parse_index(self, response):
        """Parse one search-result page: request every product detail page,
        then follow the "next page" link when present."""
        detail_urls = response.xpath(
            '//*[contains(@id,"result_")]/div/div[3]/div[1]/a/@href').extract()
        for detail_url in detail_urls:
            # One request per product; handled by parse_detail.
            yield scrapy.Request(url=detail_url,
                                 callback=self.parse_detail,
                                 )
        # BUG FIX: on the last results page there is no "next" link, so
        # extract_first() returns None and response.urljoin(None) raised
        # TypeError.  Only follow the link when it exists.
        next_href = response.xpath('//*[@id="pagnNextLink"]/@href').extract_first()
        if next_href:
            yield scrapy.Request(url=response.urljoin(next_href),
                                 callback=self.parse_index,
                                 )

    def parse_detail(self, response):
        """Extract name, price and delivery info from one product page."""
        # BUG FIX: extract_first() can return None when the page layout
        # differs; the original .extract_first().strip() then raised
        # AttributeError.
        name = response.xpath('//*[@id="productTitle"]/text()').extract_first()
        if name:
            name = name.strip()
        price = response.xpath(
            '//*[@id="price"]//*[@class="a-size-medium a-color-price"]/text()'
        ).extract_first()
        # Join every descendant text node of the merchant-message element.
        delivery_method = ''.join(
            response.xpath('//*[@id="ddmMerchantMessage"]//text()').extract())
        print(response.url)
        print(name)
        print(price)
        print(delivery_method)
        item = AmazonItem()
        item["name"] = name                          # keys must match AmazonItem fields
        item["price"] = price
        item["delivery_method"] = delivery_method
        return item

    def close(spider, reason):
        # Scrapy invokes Spider.close(spider, reason) at shutdown; the
        # unusual signature (no self) matches the base-class hook.
        print('結束啦')

# -*- coding: utf-8 -*- # Define here the models for your scraped items # # See documentation in: # https://doc.scrapy.org/en/latest/topics/items.html import scrapy class AmazonItem(scrapy.Item): # define the fields for your item here like: name = scrapy.Field() price = scrapy.Field() delivery_method = scrapy.Field()

# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.exceptions import DropItem
from pymongo import MongoClient


class MongoPipeline(object):
    """Persist fully-populated items to MongoDB."""

    def __init__(self, db, collection, host, port, user, pwd):
        self.db = db
        self.collection = collection
        self.host = host
        self.port = port
        self.user = user
        self.pwd = pwd

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy checks (via getattr) whether from_crawler is defined and,
        if so, uses it to instantiate the pipeline — here it pulls the
        connection parameters from settings.py."""
        db = crawler.settings.get('DB')
        collection = crawler.settings.get('COLLECTION')
        host = crawler.settings.get('HOST')
        port = crawler.settings.getint('PORT')
        user = crawler.settings.get('USER')
        pwd = crawler.settings.get('PWD')
        return cls(db, collection, host, port, user, pwd)

    def open_spider(self, spider):
        """Run once when the spider starts: open the MongoDB connection."""
        print('==============>爬蟲程序剛剛啟動,自動開始鏈接數據庫')
        self.client = MongoClient("mongodb://%s:%s@%s:%s" % (
            self.user,
            self.pwd,
            self.host,
            self.port
        ))

    def process_item(self, item, spider):
        """Store the item; returning it lets later pipelines see it too."""
        d = dict(item)
        # Only persist items whose every field was scraped successfully.
        if all(d.values()):
            # BUG FIX: Collection.save() is deprecated and was removed in
            # pymongo 4; insert_one() performs the same plain insert here
            # (the dict carries no _id, so save() never upserted anyway).
            self.client[self.db][self.collection].insert_one(d)
        return item
        # raise DropItem() instead to discard the item from later pipelines.

    def close_spider(self, spider):
        """Run once when the spider closes: release the connection."""
        print('==============>爬蟲程序運行完畢,數據庫鏈接關閉')
        self.client.close()


class FilePipeline(object):
    """Append fully-populated items, one per line, to a text file."""

    def __init__(self, file_path):
        self.file_path = file_path

    @classmethod
    def from_crawler(cls, crawler):
        """Instantiate the pipeline from the FILE_PATH setting."""
        file_path = crawler.settings.get('FILE_PATH')
        return cls(file_path)

    def open_spider(self, spider):
        """Run once when the spider starts: open the output file."""
        print('==============>爬蟲程序剛剛啟動,打開一個文件准備寫入數據')
        self.fileobj = open(self.file_path, 'w', encoding='utf-8')

    def process_item(self, item, spider):
        """Write the item and pass it on to later pipelines."""
        d = dict(item)
        if all(d.values()):
            # BUG FIX: the original used a raw string r"%s\n", which wrote a
            # literal backslash-n instead of a newline, so every item ended
            # up on one giant line.
            self.fileobj.write("%s\n" % str(d))
        return item
        # raise DropItem() instead to discard the item.

    def close_spider(self, spider):
        """Run once when the spider closes: close the file."""
        print('==============>爬蟲程序運行完畢')
        self.fileobj.close()
This is only simple example code — you can extend it yourself, e.g. with proxy support.

# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
from scrapy.http import Response
from scrapy.exceptions import IgnoreRequest
from AMAZON.proxy_handle import get_proxy,delete_proxy  # helpers that fetch / discard proxy IPs


class AmazonSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class DownMiddleware1(object):
    def process_request(self, request, spider):
        """
        Called for every request before it is downloaded; runs through all
        downloader middlewares' process_request hooks.
        :param request:
        :param spider:
        :return:
            None                -> continue to the next middleware / download
            Response object     -> stop the process_request chain, start process_response
            Request object      -> stop the chain, reschedule the returned request
            raise IgnoreRequest -> stop process_request, run process_exception
        """
        # spider.name
        print('下載中間件1')
        # request.meta['proxy']='http://user:pwd@ip:port'
        request.meta['download_timeout']=10  # give up on the download after 10s
        request.meta['proxy']='http://'+get_proxy()  # attach a fresh proxy IP
        print(request.meta)
        # return Response('http://www.xxx.com')
        # print(request.dont_filter)
        # return request
        # raise IgnoreRequest
        # raise TimeoutError

    def process_response(self, request, response, spider):
        """
        Called on the way back, once the download has finished.
        :param response:
        :param result:
        :param spider:
        :return:
            Response object     -> handed to the other middlewares' process_response
            Request object      -> stop the chain; the request is rescheduled for download
            raise IgnoreRequest -> Request.errback is called
        """
        print('response1')
        return response

    def process_exception(self, request, exception, spider):
        """
        Called when the download handler or a process_request hook
        (downloader middleware) raises an exception.
        :param response:
        :param exception:
        :param spider:
        :return:
            None            -> pass the exception on to later middlewares
            Response object -> stop running later process_exception methods
            Request object  -> stop the chain; the request is re-downloaded
        """
        print('異常1')
        # return None
        # Drop the dead proxy, then retry the same request with a new one:
        # delete request.meta['proxy'] from the pool first.
        old_proxy=request.meta['proxy'].split("//")[-1]  # strip the scheme -> "ip:port"
        delete_proxy(old_proxy)  # remove the broken proxy from the pool
        request.meta['proxy']='http://'+get_proxy()  # pick a new proxy and reissue the request
        return request


# DownMiddleware2 (disabled; see the commented entry in settings.py):
# a second example downloader middleware identical in shape to
# DownMiddleware1 — process_request printed '下載中間件2' and returned None,
# process_response printed 'response2' and returned the response, and
# process_exception printed '異常2' and returned None.  Kept only as a
# reference for the hook signatures and their return-value semantics,
# which are documented on DownMiddleware1 above.
Note: Scrapy's middleware ordering differs slightly from Django's. With three downloader middlewares, if the second one returns a Response from its process_request, the third middleware's process_request is skipped; the calls run as request1, request2, then response3, response2, response1.