Customizing the start URLs (scrapy_redis)


The spider (no start_urls is configured here; the start URLs are taken directly from Redis, and there can be more than one):

from scrapy_redis.spiders import RedisSpider

# class ChoutiSpider(scrapy.Spider):
class ChoutiSpider(RedisSpider):
    # With name set, RedisSpider can find the corresponding values in Redis
    # under the matching key (the start URLs, possibly several).
    # The key format is: self.redis_key = self.redis_key % {'name': self.name}
    name = 'baidu'
    allowed_domains = ['baidu.com']

    def parse(self, response):
        print('executing')
        print(response)
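As a quick illustration of that key lookup (plain Python, no Scrapy needed; the template string is the scrapy_redis default quoted above):

START_URLS_KEY = '%(name)s:start_urls'          # default template in scrapy_redis
redis_key = START_URLS_KEY % {'name': 'baidu'}  # filled in with the spider's name
print(redis_key)                                # -> baidu:start_urls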

Configuration in settings:

# True means the key is a set, False means it is a list
REDIS_START_URLS_AS_SET = False  # the default is False, i.e. data is taken off a list
# REDIS_START_URLS_KEY = '%(name)s:start_urls'  # the default if unset; this is the Redis key the start URLs are stored under, so you can fetch the values by it, e.g. baidu:start_urls

If it is a list, data is fetched with lpop(key), where key is the one above.
If it is a set, data is fetched with spop(key), e.g. spop('baidu:start_urls') yields all the start URLs (there can be several).
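To see that difference from the client side, here is a minimal sketch of the two pop operations the spider ends up using (assuming a local Redis on 127.0.0.1:6379, as in the examples below):

import redis

conn = redis.Redis(host='127.0.0.1', port=6379)
# REDIS_START_URLS_AS_SET = False: the key is a list, popped with lpop
url = conn.lpop('baidu:start_urls')
# REDIS_START_URLS_AS_SET = True: the key is a set, popped with spop
# url = conn.spop('baidu:start_urls')
print(url)  # e.g. b'http://www.baidu.com', or None if the key is empty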

Storing the start URLs in Redis:

Push the start URLs from a separate .py file.

List:

import redis

conn = redis.Redis(host='127.0.0.1', port=6379)
conn.lpush('baidu:start_urls', 'http://www.baidu.com')

With REDIS_START_URLS_AS_SET = False in settings the key is a list, so you store with lpush, rpush, etc.; if it is True, the key is a set and you store with sadd and similar operations.

Set:

import redis

conn = redis.Redis(host='127.0.0.1', port=6379)
conn.sadd('baidu:start_urls', 'http://www.baidu.com')  # store the data under this key format
print(conn.smembers('baidu:start_urls'))
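For completeness, a minimal sketch of the settings that wire the spider to Redis; these are standard scrapy_redis settings, and the host/port values are assumptions matching the push scripts above:

# settings.py
SCHEDULER = "scrapy_redis.scheduler.Scheduler"               # schedule requests through Redis
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"   # dedupe request fingerprints in Redis
REDIS_HOST = '127.0.0.1'   # assumption: local Redis, matching the examples above
REDIS_PORT = 6379
REDIS_START_URLS_AS_SET = False  # list semantics, as discussed above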

Source-code analysis of the spider in scrapy_redis:

class RedisMixin(object):
    """Mixin class to implement reading urls from a redis queue."""
    redis_key = None
    redis_batch_size = None
    redis_encoding = None
    # Redis client placeholder.
    server = None

    def start_requests(self):
        """Returns a batch of start requests from redis."""
        return self.next_requests()

    def setup_redis(self, crawler=None):
        """Setup redis connection and idle signal.
        This should be called after the spider has set its crawler object.
        """
        if self.server is not None:
            return
        if crawler is None:
            # We allow optional crawler argument to keep backwards
            # compatibility.
            # XXX: Raise a deprecation warning.
            crawler = getattr(self, 'crawler', None)
        if crawler is None:
            raise ValueError("crawler is required")
        settings = crawler.settings
        # Look the start-URL key up in the settings; if REDIS_START_URLS_KEY
        # is not configured, the default START_URLS_KEY = '%(name)s:start_urls'
        # is used. You can set REDIS_START_URLS_KEY in settings to whatever
        # format you want, but then you must also store the URLs under that
        # same format, since it is used as the lookup key below.
        if self.redis_key is None:
            self.redis_key = settings.get(
                'REDIS_START_URLS_KEY', defaults.START_URLS_KEY,
            )
        # Fill the spider name into the template to get this spider's Redis
        # key (e.g. 'baidu:start_urls'); if that key exists in Redis, its
        # values are taken as the start URLs. You can push one or many URLs
        # under this key yourself; the format is fixed:
        # START_URLS_KEY = '%(name)s:start_urls'
        self.redis_key = self.redis_key % {'name': self.name}
        if not self.redis_key.strip():
            raise ValueError("redis_key must not be empty")
        if self.redis_batch_size is None:
            # TODO: Deprecate this setting (REDIS_START_URLS_BATCH_SIZE).
            # Read the batch size from the settings, converted to int.
            self.redis_batch_size = settings.getint(
                'REDIS_START_URLS_BATCH_SIZE',
                settings.getint('CONCURRENT_REQUESTS'),
            )
        try:
            self.redis_batch_size = int(self.redis_batch_size)
        except (TypeError, ValueError):
            raise ValueError("redis_batch_size must be an integer")
        if self.redis_encoding is None:
            self.redis_encoding = settings.get('REDIS_ENCODING', defaults.REDIS_ENCODING)
        self.logger.info("Reading start URLs from redis key '%(redis_key)s' "
                         "(batch size: %(redis_batch_size)s, encoding: %(redis_encoding)s",
                         self.__dict__)
        self.server = connection.from_settings(crawler.settings)
        # The idle signal is called when the spider has no requests left,
        # that's when we will schedule new requests from redis queue.
        crawler.signals.connect(self.spider_idle, signal=signals.spider_idle)

    def next_requests(self):
        """Returns a request to be scheduled or none."""
        use_set = self.settings.getbool('REDIS_START_URLS_AS_SET', defaults.START_URLS_AS_SET)
        # If REDIS_START_URLS_AS_SET=True, the key is a set (spop);
        # if False, it is a list (lpop).
        fetch_one = self.server.spop if use_set else self.server.lpop
        # XXX: Do we need to use a timeout here?
        found = 0
        # TODO: Use redis pipeline execution.
        # Keep looping and popping start URLs from Redis until the key is
        # empty or the batch is full. With the default key format
        # REDIS_START_URLS_KEY = '%(name)s:start_urls', False means the URLs
        # were pushed as a list (conn.lpush('baidu:start_urls', ...)),
        # True means they were added as a set.
        while found < self.redis_batch_size:
            data = fetch_one(self.redis_key)  # either spop or lpop
            if not data:
                # Queue empty.
                break
            req = self.make_request_from_data(data)
            if req:
                yield req
                found += 1
            else:
                self.logger.debug("Request not made from data: %r", data)
        if found:
            self.logger.debug("Read %s requests from '%s'", found, self.redis_key)

    def make_request_from_data(self, data):
        """Returns a Request instance from data coming from Redis.
        By default, ``data`` is an encoded URL. You can override this method
        to provide your own message decoding.

        Parameters
        ----------
        data : bytes
            Message from redis.
        """
        url = bytes_to_str(data, self.redis_encoding)
        return self.make_requests_from_url(url)

    def schedule_next_requests(self):
        """Schedules a request if available"""
        # TODO: While there is capacity, schedule a batch of redis requests.
        for req in self.next_requests():
            self.crawler.engine.crawl(req, spider=self)

    def spider_idle(self):
        """Schedules a request if available, otherwise waits."""
        # XXX: Handle a sentinel to close the spider.
        self.schedule_next_requests()
        raise DontCloseSpider
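Since the docstring above explicitly invites overriding make_request_from_data, here is a sketch of a spider that reads JSON messages from the key instead of bare URLs. The class name and the payload shape {'url': ..., 'meta': ...} are assumptions for illustration, not part of scrapy_redis:

import json

from scrapy import Request
from scrapy_redis.spiders import RedisSpider


class JsonChoutiSpider(RedisSpider):  # hypothetical spider name
    name = 'baidu'

    def make_request_from_data(self, data):
        # data arrives as bytes from spop/lpop; decode it first.
        payload = json.loads(data.decode(self.redis_encoding))
        # Build the Request ourselves instead of passing a plain URL string
        # to make_requests_from_url.
        return Request(url=payload['url'], meta=payload.get('meta', {}),
                       dont_filter=True)

    def parse(self, response):
        print(response)

The producer side would then push e.g. conn.lpush('baidu:start_urls', json.dumps({'url': 'http://www.baidu.com'})) instead of a bare URL.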