

import time
from pyquery import PyQuery
import os
import asyncio
import aiohttp
import warnings
# Producer: collect every image detail-page link found on one toplist page.
async def url_pages(page):
    """Fetch toplist page *page* and push its detail-page links onto the
    shared ``img_url`` queue (globals: semaphore, session, lock, img_url)."""
    async with semaphore:
        page_url = 'https://wallhaven.cc/toplist?page={}'.format(page)
        async with session.get(page_url) as response:
            body = await response.text()
            code = response.status
        # Back off and refetch while the server rate-limits us (HTTP 429).
        while code == 429:
            await asyncio.sleep(2)
            async with session.get(page_url) as retry:
                body = await retry.text()
                code = retry.status
        listing = PyQuery(body)('.thumb-listing-page li')
        for entry in listing.items():
            link = entry('.preview').attr('href')
            async with lock:
                img_url.append(link)
# Resolve one detail page into the full-size image URL and its file name.
async def get_img_link(_url):
    """GET the detail page at *_url*, retrying on HTTP 429, then return
    ``(image_src_url, file_name)`` scraped from the ``#wallpaper`` element.
    Uses the module-global aiohttp ``session``."""
    async with session.get(_url) as response:
        page_html = await response.text()
        code = response.status
    # Sleep-and-retry loop while rate-limited.
    while code == 429:
        await asyncio.sleep(2)
        async with session.get(_url) as retry:
            page_html = await retry.text()
            code = retry.status
    src = PyQuery(page_html)('#wallpaper').attr('src')
    return src, os.path.basename(src)
# Consumer: pull detail-page links from the shared queue and download images.
async def img_save():
    """Repeatedly pop a link from the global ``img_url`` queue, resolve it
    via ``get_img_link`` and write the image to the current directory.

    Gives up after 3 consecutive empty-queue polls (5 s apart).
    Fixes vs. original: the counter/print now happen AFTER the
    already-on-disk check (skipped files were counted and reported as
    downloads), and the patience counter is reset whenever work is found
    (it previously never recovered, so an early lull killed the consumer).
    """
    global number
    async with semaphore:
        patience = 3  # empty-queue polls left before giving up
        while patience:
            if not img_url:
                await asyncio.sleep(5)
                patience -= 1
                continue
            patience = 3  # queue had work: reset the give-up countdown
            _url = img_url.pop(0)
            url, name = await get_img_link(_url)
            if os.path.exists(name):
                continue  # already on disk: don't re-download or count it
            number = number + 1
            print("下載第{}張 --> {}".format(number, name))
            async with session.get(url) as res:
                with open(name, 'wb') as f:
                    f.write(await res.read())
# Entry coroutine: spin up downloaders and page producers, await them all.
async def scrape_main():
    """Create the shared aiohttp session, launch 20 ``img_save`` consumers
    plus one ``url_pages`` producer per page, wait for every task, then
    close the session and report the total (globals: session, pages, number)."""
    global session
    session = aiohttp.ClientSession()
    tasks = [asyncio.ensure_future(img_save()) for _ in range(20)]
    for page in range(1, pages + 1):
        tasks.append(asyncio.ensure_future(url_pages(page)))
    # asyncio.wait (not gather): keep the original "don't abort on first
    # task exception" semantics.
    await asyncio.wait(tasks)
    await session.close()
    print("任務爬取結束O(∩_∩)O~ 共爬取{}張圖片".format(number))
if __name__ == '__main__':
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    # At most 30 coroutines may hold the semaphore at once.
    semaphore = asyncio.Semaphore(30)
    start_time = time.time()
    # Fix: the original used os.getcwd() + "\wallpaper" — "\w" is an invalid
    # escape sequence (SyntaxWarning on 3.12+) and a hard-coded Windows
    # separator; os.path.join is correct on every platform.
    save_path = os.path.join(os.getcwd(), "wallpaper")
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    os.chdir(save_path)
    number = 0      # downloaded-image counter, mutated by img_save()
    img_url = []    # producer/consumer queue of detail-page links
    session = None  # aiohttp session, created inside scrape_main()
    lock = asyncio.Lock()
    # Number of toplist pages to scrape (site max 201, 24 images per page).
    pages = 3
    # NOTE(review): get_event_loop() is deprecated since 3.10; kept as-is
    # because the Semaphore/Lock above may bind to this loop on older
    # Pythons, and asyncio.run() would create a different loop.
    asyncio.get_event_loop().run_until_complete(scrape_main())
    print(f"累計耗時{time.time()-start_time:.2f}s")