Scrapy crawler components: passing data between requests, POST requests, and middleware

POST requests

To issue a POST request in Scrapy, override the spider's start_requests method:

def start_requests(self):

and have it yield

yield scrapy.FormRequest(url=url, formdata=data, callback=self.parse)

scrapy.FormRequest sends the request as an HTTP POST, with formdata carrying the form body.

import scrapy

class PostSpider(scrapy.Spider):
    name = 'post'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://fanyi.baidu.com/sug']

    def start_requests(self):
        # Form body for the POST request
        data = {
            'kw': 'dog'
        }
        for url in self.start_urls:
            # scrapy.FormRequest issues an HTTP POST
            yield scrapy.FormRequest(url=url, formdata=data, callback=self.parse)

    def parse(self, response):
        print(response.text)
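
The sug endpoint answers with JSON rather than HTML, so a variant of the parse callback might decode it explicitly (a minimal sketch; the exact shape of the response body is Baidu's and may change):

import json

# Drop-in replacement for the parse method above
def parse(self, response):
    # The endpoint returns JSON; decode it instead of printing the raw text
    result = json.loads(response.text)
    print(result)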


Passing data between requests

The heart of passing data between Scrapy requests is the meta argument:

meta={'item': item}

meta is a plain dict used to carry the item (or anything else) along with the request; the callback registered for the requested url receives it again as response.meta.

import scrapy
from moviePro.items import MovieproItem

class MovieSpider(scrapy.Spider):
    name = 'movie'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://www.4567tv.tv/frim/index1.html']

    # Parse the data on the detail page
    def parse_detail(self, response):
        # response.meta returns the meta dict passed with the request
        item = response.meta['item']
        actor = response.xpath('/html/body/div[1]/div/div/div/div[2]/p[3]/a/text()').extract_first()
        item['actor'] = actor

        yield item

    def parse(self, response):
        # start_urls are requested directly by the default start_requests
        li_list = response.xpath('//li[@class="col-md-6 col-sm-4 col-xs-3"]')
        for li in li_list:
            item = MovieproItem()
            name = li.xpath('./div/a/@title').extract_first()
            detail_url = 'https://www.4567tv.tv' + li.xpath('./div/a/@href').extract_first()
            item['name'] = name
            # meta: the dict travels with the request and surfaces as response.meta in the callback
            yield scrapy.Request(url=detail_url, callback=self.parse_detail, meta={'item': item})


Note: the field names stored on the item must match the ones declared in items.py; items.py is the authority on field names.

items.py

import scrapy


class MovieproItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    name = scrapy.Field()
    actor = scrapy.Field()


pipelines.py

import pymysql

class MovieproPipeline(object):
    conn = None
    cursor = None

    def open_spider(self, spider):
        print('spider opened')
        self.conn = pymysql.Connect(host='127.0.0.1', port=3306, user='root',
                                    password='', db='movie', charset='utf8')

    def process_item(self, item, spider):
        self.cursor = self.conn.cursor()
        try:
            # Parameterized query: never interpolate values into SQL by hand
            self.cursor.execute('insert into av values(%s, %s)',
                                (item['name'], item['actor']))
            self.conn.commit()
        except Exception as e:
            print(e)
            self.conn.rollback()
        return item  # hand the item on to any later pipeline

    def close_spider(self, spider):
        print('spider closed')
        self.cursor.close()
        self.conn.close()
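
The pipeline assumes the movie database already contains an av table with two string columns. A minimal one-time setup sketch (the column names and types here are assumptions inferred from the INSERT above):

import pymysql

# Hypothetical schema inferred from the pipeline's INSERT statement
conn = pymysql.Connect(host='127.0.0.1', port=3306, user='root',
                       password='', db='movie', charset='utf8')
cursor = conn.cursor()
cursor.execute('create table if not exists av (name varchar(255), actor varchar(255))')
conn.commit()
cursor.close()
conn.close()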

When running the spider you can skip the --nolog flag by setting LOG_LEVEL = "ERROR" in settings, which silences everything but errors.

You can also send the log to a file by setting LOG_FILE = "./log.txt" in settings.
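
Both options live in the project's settings.py:

# settings.py
LOG_LEVEL = 'ERROR'     # print only errors, so --nolog is no longer needed
LOG_FILE = './log.txt'  # optional: write the log to a file instead of the console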


The five core components


Scrapy's five core components are the engine, the scheduler, the downloader, the spiders, and the item pipelines. Requests and responses flowing to and from the downloader pass through the downloader middleware, which defines three key methods:

(1)
def process_request(self, request, spider):
    return None

It should return None, much like Django middleware, and runs before each request leaves for the downloader. A typical use is rotating the User-Agent before the request goes out:
request.headers['User-Agent'] = random.choice([...])


(2)
def process_response(self, request, response, spider):
    return response

It must return a response, which is handed to the spider for parsing. A typical use is selenium-rendered pages: if the browser lived in the spider, each request would need its own browser object (bro); putting it in the middleware means one browser serves every request. The middleware wraps the rendered page and returns

return HtmlResponse(url=spider.bro.current_url, body=page_text, encoding='utf-8', request=request)

to the spider for data parsing.

(3)
def process_exception(self, request, exception, spider):
    pass

It is called for requests that raised an exception. A typical use is a proxy pool: when a request fails, swap in a proxy IP:

request.meta['proxy'] = random.choice([...])



Usage
import random

class MiddleproDownloaderMiddleware(object):
    user_agent_list = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 "
        "(KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 "
        "(KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 "
        "(KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 "
        "(KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 "
        "(KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 "
        "(KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 "
        "(KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 "
        "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 "
        "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
    ]
    # Pool of candidate proxy IPs
    PROXY_http = [
        '153.180.102.104:80',
        '195.208.131.189:56055',
    ]
    PROXY_https = [
        '120.83.49.90:9000',
        '95.189.112.214:35508',
    ]

    # Intercepts every request that did not raise an exception
    def process_request(self, request, spider):
        # Disguise the request with a User-Agent drawn from the UA pool
        print('this is process_request')
        request.headers['User-Agent'] = random.choice(self.user_agent_list)
        print(request.headers['User-Agent'])
        return None

    # Intercepts every response
    def process_response(self, request, response, spider):
        return response

    # Intercepts requests that raised an exception
    def process_exception(self, request, exception, spider):
        print('this is process_exception!')
        # meta['proxy'] expects a full URL, scheme included
        if request.url.split(':')[0] == 'http':
            request.meta['proxy'] = 'http://' + random.choice(self.PROXY_http)
        else:
            request.meta['proxy'] = 'https://' + random.choice(self.PROXY_https)
        # Return the request so it is rescheduled with the new proxy
        return request

Using selenium through the downloader middleware

Note: before a middleware takes effect you must enable it in settings.py by uncommenting the DOWNLOADER_MIDDLEWARES block (around lines 56-58 of the generated file):

DOWNLOADER_MIDDLEWARES = {
    'wangyiPro.middlewares.WangyiproDownloaderMiddleware': 543,
}


The spider instantiates a browser object (bro). Once the downloader has handled a request, the downloader middleware's process_response method runs; there the browser-rendered page is collected and

return HtmlResponse(url=spider.bro.current_url, body=page_text, encoding='utf-8', request=request)

returns it to the spider for parsing.

import scrapy
from selenium import webdriver


'''
Workflow for using selenium inside scrapy:
    1. Create a browser object in the spider's constructor (as an attribute of the spider)
    2. Override the spider's closed(self, spider) method and quit the browser there
    3. In the downloader middleware's process_response, fetch the browser via the spider argument
    4. In process_response, drive the browser to obtain the dynamically loaded page source
    5. Instantiate a new response object and wrap the page_source into it
    6. Return that new response object
'''

class WangyiSpider(scrapy.Spider):
    name = 'wangyi'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['http://war.163.com/']

    def __init__(self):
        self.bro = webdriver.Chrome(executable_path=r'C:\Users\Administrator\Desktop\爬蟲+數據\day_03_爬蟲\chromedriver.exe')

    def parse(self, response):
        div_list = response.xpath('//div[@class="data_row news_article clearfix "]')
        for div in div_list:
            title = div.xpath('.//div[@class="news_title"]/h3/a/text()').extract_first()
            print(title)

    def closed(self, spider):
        print('closing the browser object!')
        self.bro.quit()

middlewares.py

from time import sleep
from scrapy.http import HtmlResponse

class WangyiproDownloaderMiddleware(object):

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        print('about to return a new response object!!!')
        # Drive the spider's browser to fetch the dynamically loaded data
        bro = spider.bro
        bro.get(url=request.url)
        sleep(3)  # give the page time to render its dynamic content
        # page_source now contains the dynamically loaded news data
        page_text = bro.page_source
        # Wrap the rendered source in a new response and hand it to the spider
        return HtmlResponse(url=spider.bro.current_url, body=page_text, encoding='utf-8', request=request)