# Fixing garbled Chinese text when scraping — three approaches.
# NOTE(fix): these reference snippets previously ran as live code at module
# top, before `import requests` (below) and with undefined names (`url`,
# `headers`, `li`), so the file crashed with NameError on import. They are
# kept here as comments for reference; the script below applies approach 2.
#
# 1) Re-encode the whole response:
#    response = requests.get(url=url, headers=headers)
#    response.encoding = 'utf-8'
#    page_text = response.text
#
# 2) When 1) does not help, re-encode only the text you need
#    (requests decoded as ISO-8859-1, the page was actually GBK):
#    title = li.xpath('./a/h3/text()')[0].encode('iso-8859-1').decode('gbk')
#
# 3) Re-encode the entire body string:
#    response = requests.get(url=url).text.encode('iso-8859-1').decode('utf-8')
import requests
from lxml import etree

# Target site (placeholder domain — replace with the real one).
url = 'https://www.xxx.com.cn'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
}

# Fetch the listing page. A timeout keeps the script from hanging forever
# if the server never responds (requests has no default timeout).
page_text = requests.get(url=url, headers=headers, timeout=10).text
tree = etree.HTML(page_text)
li_list = tree.xpath('//div[@id="auto-channel-lazyload-article"]/ul/li')
for li in li_list:
    try:
        # requests decoded the body as ISO-8859-1 (its default for text/html
        # without an explicit charset), but the page content is actually GBK —
        # round-trip the bytes to recover readable Chinese text.
        title = li.xpath('./a/h3/text()')[0].encode('iso-8859-1').decode('gbk')
        a_url = f"https:{li.xpath('./a/@href')[0]}"
        img_src = f"https:{li.xpath('./a/div/img/@src')[0]}"
        desc = li.xpath('./a/p/text()')[0].encode('iso-8859-1').decode('gbk')
    except IndexError:
        # Some <li> entries (e.g. ad separators) lack the expected child
        # nodes; xpath returns an empty list there — skip them.
        continue
    print(title)
    print(a_url)
    print(img_src)
    print(desc)