源码地址: https://gitee.com/Black-sky-cloud/python-spider/tree/master/bqg_Spider
exe 下载地址: https://www.lanzouw.com/iKz7gxdhsne 密码:8d9f
不愿意下载的可以直接复制下面代码:
完整代码如下:
"""
这个爬虫脚本可以再笔趣阁中搜索相应的小说并爬取
"""
import requests
import time
from prettytable import PrettyTable
from lxml import etree
headers = {
# 设置 UA 反爬
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36",
}
def search():
    """
    Prompt for a book title, search the site, print the matches and hand
    the user's pick over to ``Save`` for downloading.

    :return: None
    """
    se = requests.Session()
    # Warm-up request to pick up the site's session cookies.  Send the same
    # browser headers as every other request (the original omitted them here).
    se.get("https://www.biqugeq.com/", headers=headers)
    name = input("请输入你要查找的书名: \t")
    # NOTE(review): the endpoint declares ie=gbk; the title is appended as
    # typed (matching the original) — confirm encoding if results look wrong.
    url = "https://www.biqugeq.com/search/?ie=gbk&siteid=xszww.com&q=" + name
    # Fetch and parse the search-result page.
    res = etree.HTML(se.get(url, headers=headers).text)
    bookIndex = output(res)
    # ``Save`` historically defines ``__int__`` (a typo for ``__init__``);
    # calling it explicitly keeps compatibility with that definition.
    Save().__int__(se, bookIndex, name)
def output(res):
    """
    Print a numbered table of search results parsed from *res* and prompt
    the user to choose one.

    :param res: lxml HTML element of the search-result page.
    :return: absolute URL of the chosen book's index page.
    :raises ValueError: if the user's input is not an integer.
    """
    result_list = res.xpath('//div[@class="l bd"]/ul')[0]
    bookNames = result_list.xpath("li/span[2]/a/text()")
    authors = result_list.xpath("li/span[4]/text()")
    # Collect hrefs up front so the chosen one is a plain list lookup.
    hrefs = result_list.xpath("li/span[2]/a/@href")
    table = PrettyTable(['序号', '书名', '作者名'])
    for idx, (title, author) in enumerate(zip(bookNames, authors), start=1):
        table.add_row([idx, title, author])
    # PrettyTable's ``align`` is keyed by field *name*, not column index;
    # the original ``table.align[1]`` addressed a nonexistent field.
    table.align['书名'] = "c"
    print(table)
    num = int(input("请输入图书序号开始下载: \t"))
    # Table numbering is 1-based, list indexing 0-based.
    return "https://www.biqugeq.com" + hrefs[num - 1]
class Save():
    """
    Download every chapter of a book and append it to one text file.

    The original code named its initializer ``__int__`` (a typo for
    ``__init__``) and call sites invoked it explicitly as
    ``Save().__int__(...)``.  A real ``__init__`` is provided here, and
    ``__int__`` is kept as a backward-compatible alias so both the legacy
    call style and the normal ``Save(session, url, book)`` work.
    """
    # Kept for backward compatibility; nothing in this file reads it.
    pageDict = {}

    def __init__(self, session=None, url=None, book=None):
        """
        :param session: requests.Session carrying the site cookies.
        :param url: index-page URL of the book to download.
        :param book: book title, used as the output file name.
        """
        # Tolerate ``Save()`` with no arguments so the legacy
        # ``Save().__int__(...)`` call pattern still works.
        if session is None:
            return
        self.session = session
        self.url = url
        self.book = book
        self.path = input("请输入你要保存的位置路径, 输入0或按回车 默认保存到D盘根目录: \t")
        for page_url in self.getHeadHtml():
            self.save(self.getText(page_url), self.book)
            # Throttle between chapter downloads to be polite to the server.
            time.sleep(2)

    def __int__(self, session, url, book):
        # Backward-compatible alias for the original (typo'd) initializer.
        self.__init__(session, url, book)

    def getHeadHtml(self):
        """
        Fetch the book's index page and build the absolute URL of every
        chapter.

        :return: list of chapter URLs.
        """
        res = self.session.get(self.url, headers=headers)
        # The site serves GBK.  The original set the invalid codec name
        # "gbk123", which made requests silently fall back to its guessed
        # encoding instead.
        res.encoding = "gbk"
        etreeHtml = etree.HTML(res.text)
        urlHead = "https://www.biqugeq.com"
        # Skip the first 12 anchors: presumably "latest chapter" shortcuts,
        # not part of the sequential chapter list -- TODO confirm on site.
        urlNoHeadList = etreeHtml.xpath('//div[@class="listmain"]/dl/dd/a/@href')[12:]
        return [urlHead + uri for uri in urlNoHeadList]

    def getText(self, href):
        """
        Download and clean one chapter page.

        :param href: absolute chapter URL.
        :return: ``[chapter_title, chapter_text]``
        """
        res = self.session.get(href, headers=headers)
        res.encoding = "gbk"  # was the invalid codec name "gbk123"
        etreeHtml = etree.HTML(res.text)
        pageName = etreeHtml.xpath('//div[@class="content"]/h1/text()')[0]
        pageTextList = etreeHtml.xpath('//div[@id="content"]/text()')
        # Site-injected boilerplate that must be stripped from the body text
        # (order matches the original chained .replace() calls).
        junk = (
            "\u3000",
            "\n",
            "(https://www.biqumo.com/0_269/2243417.html)",
            "请记住本书首发域名:https://www.biqumo.com。笔趣阁手机版阅读网址:https://m.biqumo.com",
            "(https://www.biqumo.com/2_2784/57553374.html)",
        )
        parts = []
        for fragment in pageTextList:
            for token in junk:
                fragment = fragment.replace(token, "")
            parts.append(fragment)
        return [pageName, "".join(parts)]

    def save(self, page, bookName):
        """
        Append one chapter (title then body) to the book's output file.

        :param page: ``[chapter_title, chapter_text]`` as built by getText.
        :param bookName: book title used for the file name.
        """
        # "0" or empty input means "use the default D:\\ root".
        if self.path in ("0", ""):
            savePath = "D://" + bookName + ".txt"
        else:
            savePath = self.path + "/" + bookName + ".txt"
        pageName, pageText = page
        print("开始保存 {}".format(pageName))
        with open(savePath, "a", encoding="utf8") as f:
            f.write(pageName)
            f.write("\n\n")
            f.write(pageText)
            f.write("\n\n")
# Script entry point: run the interactive search-and-download flow.
if __name__ == '__main__':
    search()
注: 此爬虫是练习爬虫,侵删.