一、介紹
由於頭條現在採取了動態js渲染的反爬措施,還有其他各種js加密反爬,使用簡單的requests非常困難。
Puppeteer 是 Google 基於 Node.js 開發的一個工具,有了它我們可以通過 JavaScript 來控制 Chrome 瀏覽器的一些操作,當然也可以用作網絡爬蟲上,其 API 極其完善,功能非常強大。 而 Pyppeteer 又是什麼呢?它實際上是 Puppeteer 的 Python 版本的實現,但它不是 Google 開發的,是一位來自於日本的工程師依據 Puppeteer 的一些功能開發出來的非官方版本。
詳細使用方法,官方文檔: https://miyakogi.github.io/pyppeteer/reference.html
二、簡單的使用
import asyncio

from pyppeteer import launch


async def main():
    """Open headless Chromium, install anti-bot-detection patches, visit
    Toutiao's search page, and dump the rendered HTML to toutiao.html."""
    # browser = await launch()
    browser = await launch(headless=True)
    try:
        page = await browser.newPage()
        await page.setViewport(viewport={'width': 1366, 'height': 768})
        # await page.setUserAgent('Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3494.0 Safari/537.36')
        # Enable JS before navigating, not after, so the first page renders.
        await page.setJavaScriptEnabled(enabled=True)
        # BUG FIX: the original used page.evaluate(), whose effect lives only
        # in the CURRENT page context and is wiped out by the next goto().
        # evaluateOnNewDocument() re-runs the stealth script on every
        # navigation, so the search page is also protected.
        await page.evaluateOnNewDocument('''() => {
            Object.defineProperties(navigator, {
                webdriver: { get: () => false }
            });
            window.navigator.chrome = { runtime: {} };
            Object.defineProperty(navigator, 'languages', {
                get: () => ['en-US', 'en']
            });
            Object.defineProperty(navigator, 'plugins', {
                get: () => [1, 2, 3, 4, 5, 6],
            });
        }''')
        await page.goto('https://www.toutiao.com')
        await page.goto('https://www.toutiao.com/search/?keyword=%E5%B0%8F%E7%B1%B310')
        # Print the cookies for the current page.
        print(await page.cookies())
        await asyncio.sleep(5)
        # Print the rendered page HTML.
        print(await page.content())
        # Print the current page title.
        print(await page.title())
        with open('toutiao.html', 'w', encoding='utf-8') as f:
            f.write(await page.content())
    finally:
        # Always release the Chromium process, even if a step above raises.
        await browser.close()


loop = asyncio.get_event_loop()
task = asyncio.ensure_future(main())
loop.run_until_complete(task)
三、多次請求的使用
import asyncio

from pyppeteer import launch


async def main(url):
    """Fetch *url* in a fresh headless Chromium instance and return the
    rendered HTML. Each call launches (and now reliably closes) its own
    browser so calls can run concurrently via asyncio.gather."""
    browser = await launch(headless=True)
    try:
        page = await browser.newPage()
        await page.setViewport(viewport={'width': 1366, 'height': 768})
        await page.setUserAgent(
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
            '(KHTML, like Gecko) Chrome/69.0.3494.0 Safari/537.36')
        # Enable JS so the article pages render.
        await page.setJavaScriptEnabled(enabled=True)
        # BUG FIX: page.evaluate() only patches the CURRENT (about:blank)
        # context and is discarded when goto() navigates; use
        # evaluateOnNewDocument() so the stealth overrides apply to the
        # actual target page.
        await page.evaluateOnNewDocument('''() => {
            Object.defineProperties(navigator, {
                webdriver: { get: () => false }
            });
            window.navigator.chrome = { runtime: {} };
            Object.defineProperty(navigator, 'languages', {
                get: () => ['en-US', 'en']
            });
            Object.defineProperty(navigator, 'plugins', {
                get: () => [1, 2, 3, 4, 5, 6],
            });
        }''')
        await page.goto(url, options={'timeout': 5000})
        # await asyncio.sleep(5)
        # Return the rendered page HTML.
        return await page.content()
    finally:
        # BUG FIX: the original never closed the browser, leaking one
        # Chromium process per URL.
        await browser.close()


tlist = ["https://www.toutiao.com/a6794863795366789636/",
         "https://www.toutiao.com/a6791790405059871236/",
         "https://www.toutiao.com/a6792756350095983104/",
         "https://www.toutiao.com/a6792852490845946376/",
         "https://www.toutiao.com/a6795883286729064964/",
         ]
task = [main(url) for url in tlist]
loop = asyncio.get_event_loop()
results = loop.run_until_complete(asyncio.gather(*task))
for res in results:
    print(res)
四、scrapy+pyppeteer
github地址:https://github.com/fengfumin/Toutiao_pyppeteer
主要功能:
1、指定搜索單詞
from scrapy.cmdline import execute

# Launch the "toutiao" spider programmatically; equivalent to running
#   scrapy crawl toutiao -a keyword=小米10
# on the command line (-a forwards the keyword to the spider's __init__).
crawl_argv = ['scrapy', 'crawl', 'toutiao', '-a', 'keyword=小米10']
execute(crawl_argv)
2、爬取頁面內容保存為csv
3、生成詞雲圖片
4、主要代碼scrapy中間件
from scrapy import signals
import pyppeteer
import asyncio
import os
from scrapy.http import HtmlResponse

# Silence pyppeteer's debug output.
pyppeteer.DEBUG = False


class ToutiaoPyppeteerDownloaderMiddleware(object):
    """Scrapy downloader middleware that renders each request in a headless
    Chromium (pyppeteer) page so JS-rendered Toutiao pages can be scraped.

    NOTE(review): one browser Page is created at init and shared by every
    request, so this only behaves correctly when requests are processed
    sequentially (e.g. CONCURRENT_REQUESTS=1) — confirm project settings.
    NOTE(review): driving the asyncio loop with run_until_complete() inside
    process_request() blocks Scrapy's reactor for the whole page load.
    """

    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    def __init__(self):
        # Synchronously launch the browser during construction by driving the
        # event loop to completion; getbrowser() stores self.browser and
        # self.page as side effects.
        print("Init downloaderMiddleware use pypputeer.")
        # Pin the Chromium build pyppeteer downloads/uses.
        os.environ['PYPPETEER_CHROMIUM_REVISION'] = '588429'
        # pyppeteer.DEBUG = False
        print(os.environ.get('PYPPETEER_CHROMIUM_REVISION'))
        loop = asyncio.get_event_loop()
        task = asyncio.ensure_future(self.getbrowser())
        loop.run_until_complete(task)
        # self.browser = task.result()
        print(self.browser)
        print(self.page)
        # self.page = await browser.newPage()

    async def getbrowser(self):
        """Launch headless Chromium, open the single shared page, and inject
        anti-bot-detection overrides (webdriver/chrome/languages/plugins).

        Returns the shared Page; also kept on self.page for later requests.
        """
        self.browser = await pyppeteer.launch(headless=True)
        self.page = await self.browser.newPage()
        await self.page.setViewport(viewport={'width': 1366, 'height': 768})
        await self.page.setUserAgent(
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3494.0 Safari/537.36')
        await self.page.setJavaScriptEnabled(enabled=True)
        # Hide the navigator.webdriver flag exposed by automated browsers.
        await self.page.evaluate(
            '''() =>{ Object.defineProperties(navigator,{ webdriver:{ get: () => false } }) }''')
        # The overrides below spoof properties that browser-fingerprinting JS
        # (originally written against Taobao's detection) inspects, so the
        # headless browser looks like a normal one.
        await self.page.evaluate('''() =>{ window.navigator.chrome = { runtime: {}, }; }''')
        await self.page.evaluate(
            '''() =>{ Object.defineProperty(navigator, 'languages', { get: () => ['en-US', 'en'] }); }''')
        await self.page.evaluate(
            '''() =>{ Object.defineProperty(navigator, 'plugins', { get: () => [1, 2, 3, 4, 5,6], }); }''')
        return self.page

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        #
        # Render the request in the shared browser page, blocking until the
        # page load finishes, then short-circuit Scrapy's downloader by
        # returning the rendered HTML directly.
        loop = asyncio.get_event_loop()
        task = asyncio.ensure_future(self.usePypuppeteer(request))
        loop.run_until_complete(task)
        # return task.result()
        return HtmlResponse(url=request.url, body=task.result(), encoding="utf-8", request=request)

    async def usePypuppeteer(self, request):
        """Navigate the shared page to request.url, scroll to the bottom to
        trigger lazy-loaded content, and return the rendered HTML string."""
        print(request.url)
        # self.page = await self.getbrowser()
        await self.page.goto(request.url)
        await asyncio.sleep(3)
        # Scroll the mouse wheel to the bottom of the page.
        await self.page.evaluate('window.scrollBy(0, document.body.scrollHeight)')
        await asyncio.sleep(3)
        content = await self.page.content()
        return content

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
5、主要代碼請求與處理
import csv
import time
from scrapy import Spider, Request
from bs4 import BeautifulSoup
from Toutiao_pyppeteer.cloud_word import *


class TaobaoSpider(Spider):
    """Spider that searches Toutiao for a keyword, saves article titles and
    comment blurbs to train/data.csv, then segments the words and renders a
    word-cloud image."""

    name = 'toutiao'
    allowed_domains = ['www.toutiao.com']
    # Second URL is a template; {keyword} is filled in start_requests().
    start_url = ['https://www.toutiao.com/', 'https://www.toutiao.com/search/?keyword={keyword}']

    def __init__(self, keyword, *args, **kwargs):
        # *keyword* is supplied on the command line: scrapy crawl toutiao -a keyword=...
        super(TaobaoSpider, self).__init__(*args, **kwargs)
        self.keyword = keyword

    def open_csv(self):
        """Open train/data.csv for writing and emit the header row."""
        self.csvfile = open('train/data.csv', 'w', newline='', encoding='utf-8')
        fieldnames = ['title', 'comment']
        self.dict_writer = csv.DictWriter(self.csvfile, delimiter=',', fieldnames=fieldnames)
        self.dict_writer.writeheader()

    def close_csv(self):
        """Close the CSV file opened by open_csv()."""
        self.csvfile.close()

    def start_requests(self):
        """Yield the home page and the keyword search page."""
        for url in self.start_url:
            r_url = url.format(keyword=self.keyword) if 'search' in url else url
            yield Request(r_url, callback=self.parse_list)

    def parse_list(self, response):
        """Extract article cards from a rendered page into the CSV, then run
        word segmentation and word-cloud generation.

        Fixes vs. the original: match self.keyword instead of the hard-coded
        "小米10"; write ONE row per article (title + comment together) instead
        of two half-empty rows; skip cards missing either node instead of
        crashing on None.text; always close the CSV via try/finally.
        """
        if self.keyword in response.text:
            soup = BeautifulSoup(response.text, 'lxml')  # lxml: tolerant parser
            div_list = soup.find_all('div', class_='articleCard')
            print(div_list)
            print(len(div_list))
            self.open_csv()
            try:
                for div in div_list:
                    title = div.find('span', class_='J_title')
                    con = div.find('div', class_='y-left')
                    if title is None or con is None:
                        continue  # malformed card — nothing usable to save
                    self.dict_writer.writerow({"title": title.text, "comment": con.text})
            finally:
                print("關閉csv")
                self.close_csv()
            print("開始分詞")
            text_segment()
            print("開始制圖")
            chinese_jieba()