Prerequisites:
A machine that can reach sites that would otherwise just 404 on you (in other words, a working proxy)..
Process:
Since I'm only just getting started with crawlers and not yet fluent in the tricks of the trade, I leaned on a lot of documentation and blog posts for syntax and usage while writing this. My approach was to inspect the search results page with F12, find each gallery's address, follow it down level by level, and finally download the images... then walk the tags layer by layer and save the files locally. The script can crawl every gallery on a full page of search results and saves them in the directory the script lives in. It's fine for playing around with; my IP even got banned by E-Hentai once along the way. Looking back now I can see plenty of room for improvement (being bad just means there's room to grow), and I can't rule out it breaking at some point.
That's just a test page, don't read too much into it.
Code:
from bs4 import BeautifulSoup
import re
import requests
import os
import urllib.request

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.82 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Upgrade-Insecure-Requests': '1'}

r = requests.get("https://e-hentai.org/", headers=headers)
soup = BeautifulSoup(r.text, 'lxml')
divs = soup.find_all(class_='gl3c glname')
# collect the gallery links on the current search page
for div in divs:
    url = div.a.get('href')
    r2 = requests.get(url, headers=headers)
    soup2 = BeautifulSoup(r2.text, 'lxml')
    manga = soup2.find_all(class_='gdtm')
    title = soup2.title.get_text()  # gallery title
    # walk through every page of this gallery
    for div2 in manga:
        picurl = div2.a.get('href')
        picr = requests.get(picurl, headers=headers)
        soup3 = BeautifulSoup(picr.text, 'lxml')
        downurl = soup3.find_all(id='img')
        page = 0
        for dur in downurl:
            # print(dur.get('src'))
            purl = dur.get('src')
            fold_path = './' + title
            # create the folder if it does not exist yet
            if not os.path.exists(fold_path):
                print("Creating folder...")
                os.makedirs(fold_path)
            print("Trying to download image....: {}".format(purl))
            # append the original file name so the extension is kept
            filename = title + str(page) + purl.split('/')[-1]
            filepath = fold_path + '/' + filename
            page = page + 1
            if os.path.exists(filepath):
                print("File already exists, skipping")
            else:
                try:
                    urllib.request.urlretrieve(purl, filename=filepath)
                except Exception as e:
                    print("An error occurred:")
                    print(e)
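Since testing this got my IP banned once, the requests really ought to be throttled instead of fired back to back. Here is a minimal sketch of one way to do that, assuming the same headers dict as above; polite_get is a hypothetical helper that is not part of the original script, and the one-second delay is an arbitrary choice rather than any documented rate limit.

import time
import requests

def polite_get(url, headers, delay=1.0, retries=3):
    # hypothetical helper: sleep before every request and back off linearly on failure
    for attempt in range(retries):
        time.sleep(delay * (attempt + 1))
        try:
            resp = requests.get(url, headers=headers, timeout=10)
            if resp.status_code == 200:
                return resp
        except requests.RequestException as e:
            print("Request failed, retrying:", e)
    return None

Every requests.get(...) call in the script could then be routed through this helper instead.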
After that I also used pyinstaller to package it into an exe.
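For reference, the usual single-file build is a one-liner (the script name here is just a placeholder):

pyinstaller -F ehentai_crawler.py

and the standalone executable ends up in the dist/ folder it creates.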
Update 1:
I realized I had forgotten to account for a gallery's sub-pages, so at most forty images per gallery were ever downloaded. Also, a search page holds far too many galleries of very mixed quality; I happened to be testing the crawler in the middle of the night, living alone, and a horror doujin slipped into the batch and cost me a good night's sleep. So the script now crawls a single gallery from the link you get after opening it. In principle the same code can also crawl a whole gallery starting from one of its sub-page links (I hadn't tried at first, but I have since and it does work), and I fixed a bug where certain titles caused the crawl to fail.
# coding:utf-8
# author:graykido
# date=2020.5.3

from bs4 import BeautifulSoup
import re
import requests
import os
import urllib.request
import threading
import time

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.82 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Upgrade-Insecure-Requests': '1'}

# batch mode: collect gallery links from stdin
urls = []
temp = input('Enter a gallery link to crawl (blank line to finish): ')
while temp != "":
    urls.append(temp)
    temp = input('Added. Enter another link, or a blank line to finish: ')

for url in urls:
    start = time.perf_counter()
    r = requests.get(url, headers=headers)
    soup = BeautifulSoup(r.text, 'lxml')
    manga = soup.find_all(class_='gdtm')
    title = soup.title.get_text()  # gallery title
    # strip characters that are not allowed in file names
    for ch in '!"#$%&()*+,-./:;<=>?@\\^_`{|}~ ':
        title = title.replace(ch, "")
    # truncate over-long titles so the path stays usable
    if len(title) > 50:
        title = title[:50]
    # read the pagination bar to find how many sub-pages the gallery has
    pagetag = soup.find(class_='ptt').find_all('a')
    mxpage = 0
    baseurl = ""
    for page in pagetag:
        temstr = str(page.get('href'))
        temspl = temstr.split('?p=')
        if len(temspl) > 1:
            mxpage = max(mxpage, int(temspl[1]))
        else:
            baseurl = page.get('href')
    pages = [baseurl]
    for i in range(1, mxpage + 1):
        pages.append(baseurl + '?p=' + str(i))
    if mxpage == 0:
        mxpage = 1
    print("Fetching gallery: {0:}, {1:} sub-page(s) in total".format(soup.title.get_text(), mxpage))
    count = 0
    # walk through every sub-page
    for page in pages:
        r = requests.get(page, headers=headers)
        soup = BeautifulSoup(r.text, 'lxml')
        manga = soup.find_all(class_='gdtm')
        # walk through every image on this sub-page
        for div in manga:
            picurl = div.a.get('href')
            picr = requests.get(picurl, headers=headers)
            soup2 = BeautifulSoup(picr.text, 'lxml')
            downurl = soup2.find_all(id='img')
            for dur in downurl:
                purl = dur.get('src')
                fold_path = './' + title
                # create the folder if it does not exist yet
                if not os.path.exists(fold_path):
                    print("Creating folder...")
                    os.makedirs(fold_path)
                print("Trying to download image....: {}".format(purl))
                # keep the file extension
                filename = title + str(count) + '.' + purl.split('.')[-1]
                filepath = fold_path + '/' + filename
                count = count + 1
                if os.path.exists(filepath):
                    print("File already exists, skipping")
                else:
                    try:
                        urllib.request.urlretrieve(purl, filename=filepath)
                        print("Done")
                    except Exception as e:
                        print("An error occurred:")
                        print(e)
    print('———— All done ————')
    end = time.perf_counter()
    print("Total download time: {} seconds".format(end - start))
What came after:
I later found out that E-Hentai actually has its own API. It isn't great to work with either, but at least it's a bit more convenient than hand-rolling the whole scraper.
Documentation
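For a rough idea of what that API looks like: it is a JSON endpoint at api.e-hentai.org. Below is a minimal sketch of the metadata ("gdata") request, assuming the gid and token are taken from a gallery URL of the form https://e-hentai.org/g/<gid>/<token>/; the concrete values here are placeholders, not a real gallery.

import requests

# gid and token come straight from the gallery URL (placeholder values below)
payload = {
    "method": "gdata",
    "gidlist": [[618395, "0439fa3666"]],
    "namespace": 1,
}
resp = requests.post("https://api.e-hentai.org/api.php", json=payload)
meta = resp.json()["gmetadata"][0]
print(meta["title"], meta["filecount"], meta["tags"])

As far as I know this only returns metadata (title, tags, file count and so on); downloading the images themselves still goes through page scraping like the scripts above.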