Fixing Chinese encoding problems in a web crawler


# Fixing Chinese encoding problems in a crawler
# 1. Re-encode the entire response
response = requests.get(url=url, headers=headers)
response.encoding = 'utf-8'
page_text = response.text
# The above does not always fix the encoding; fall back to a local fix
# 2. Re-encode only the pieces of text you need
title = li.xpath('./a/h3/text()')[0].encode('iso-8859-1').decode('gbk')
# 3. Re-encode the whole page text
page_text = requests.get(url=url).text.encode('iso-8859-1').decode('utf-8')
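
Methods 2 and 3 work because, when the server does not declare a charset, requests falls back to decoding the body as ISO-8859-1, so .text contains GBK (or UTF-8) bytes mis-read as Latin-1. Encoding back to 'iso-8859-1' recovers the original bytes losslessly, and decoding with the real charset gives readable text. A minimal, self-contained sketch of that round trip (no network needed; the sample string is just a placeholder):

# Sketch of the mojibake round trip, assuming the server sent GBK bytes
# that requests decoded as ISO-8859-1 (latin-1)
original = '汽车之家'                                # placeholder Chinese text
gbk_bytes = original.encode('gbk')                   # what the server actually sends
mojibake = gbk_bytes.decode('iso-8859-1')            # what response.text looks like
fixed = mojibake.encode('iso-8859-1').decode('gbk')  # the repair used in methods 2 and 3
print(mojibake)   # garbled, e.g. 'Æû³µÖ®¼Ò'
print(fixed)      # 汽车之家
assert fixed == original

The full example below applies method 2 field by field:
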
import requests
from lxml import etree

url = 'https://www.xxx.com.cn'

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
}

# .text may be mis-decoded here if the server omits the charset;
# the Chinese fields are repaired one by one inside the loop below
page_text = requests.get(url=url, headers=headers).text


# Parse the HTML and select every news <li> from the article list
tree = etree.HTML(page_text)

li_list = tree.xpath('//div[@id="auto-channel-lazyload-article"]/ul/li')
for li in li_list:
    try:
        # Re-encode the mis-decoded text back to bytes, then decode as GBK
        title = li.xpath('./a/h3/text()')[0].encode('iso-8859-1').decode('gbk')
        a_url = f"https:{li.xpath('./a/@href')[0]}"
        img_src = f"https:{li.xpath('./a/div/img/@src')[0]}"
        desc = li.xpath('./a/p/text()')[0].encode('iso-8859-1').decode('gbk')
    except IndexError:
        # Some list items (e.g. ad slots) lack these nodes; skip them
        continue
    print(title)
    print(a_url)
    print(img_src)
    print(desc)
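
If hard-coding 'gbk' or 'utf-8' feels brittle, requests can also guess the charset from the response body itself via response.apparent_encoding. This is not part of the original write-up, just a commonly used alternative, sketched with the same url and headers as above:

# Alternative: let requests detect the charset from the bytes instead of
# trusting (or hard-coding) the HTTP header
response = requests.get(url=url, headers=headers)
response.encoding = response.apparent_encoding
page_text = response.text  # decoded with the detected charset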

