Python Crawler: Scraping Post Content for a Weibo Topic


# -*- coding: utf-8 -*-
# @Time : 2020/8/18 15:39
# @Author : Chunfang
# @Email : 3470959534@qq.com
# @File : Weibo_content.py
# @Software: PyCharm

from urllib.parse import urlencode
import requests
from pyquery import PyQuery as pq
import time
import os
import csv
import json

base_url = 'https://m.weibo.cn/api/container/getIndex?'

headers = {
    'Host': 'm.weibo.cn',
    'Referer': 'https://m.weibo.cn/u/2830678474',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest',
}

class SaveCSV(object):

    def save(self, keyword_list, path, item):
        """
        Save one record to a CSV file.
        :param keyword_list: field names, used as the CSV header
        :param path: path and name of the output file
        :param item: dict to write as one row
        :return:
        """
        try:
            # On first use, create the file and write the header row
            if not os.path.exists(path):
                with open(path, "w", newline='', encoding='utf-8-sig') as csvfile:  # newline='' avoids blank lines between rows
                    writer = csv.DictWriter(csvfile, fieldnames=keyword_list)  # write dicts keyed by field name
                    writer.writeheader()  # header row

            # Append the record
            with open(path, "a", newline='', encoding='utf-8-sig') as csvfile:  # newline='' is required, otherwise blank rows appear
                writer = csv.DictWriter(csvfile, fieldnames=keyword_list)
                writer.writerow(item)  # write one row
                print("^_^ write success")

        except Exception as e:
            print("write error==>", e)
            # log the record that failed to write; append so earlier errors are kept
            with open("error.txt", "a") as f:
                f.write(json.dumps(item) + ",\n")

def get_page(page, title):  # build and send the page request; params mirror the Query String parameters of the search request as seen in the browser's developer tools
    params = {
        'containerid': '100103type=1&q=' + title,
        'page': page,  # current page number, the only field that changes when paging through results
        'type': 'all',
        'queryVal': title,
        'featurecode': '20000320',
        'luicode': '10000011',
        'lfid': '106003type=1',
        'title': title
    }
    url = base_url + urlencode(params)
    print(url)
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            print(page)
            return response.json()
    except requests.ConnectionError as e:
        print('Error', e.args)

# Parse the JSON returned by the API
def parse_page(json_data, label):
    res = []
    if json_data:
        items = json_data.get('data').get('cards')
        for i in items:
            if i is None:
                continue
            item = i.get('mblog')
            if item is None:
                continue
            weibo = {}
            weibo['id'] = item.get('id')
            weibo['label'] = label
            weibo['text'] = pq(item.get('text')).text().replace(" ", "").replace("\n", "")
            res.append(weibo)
    return res

if __name__ == '__main__':

    title = input("Enter a search keyword: ")
    path = "article.csv"
    item_list = ['id', 'text', 'label']
    s = SaveCSV()
    for page in range(10, 20):  # loop over result pages
        try:
            time.sleep(1)  # pause between requests to reduce the risk of being blocked
            json_data = get_page(page, title)
            results = parse_page(json_data, title)
            if not results:  # skip pages that returned nothing usable
                continue
            for result in results:
                if result is None:
                    continue
                print(result)
                s.save(item_list, path, result)
        except TypeError:
            print("Done")
            continue
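
To sanity-check the output, the CSV written above can be read back with the standard library. The following is a minimal sketch, not part of the original script; it assumes the scraper has already produced article.csv in the working directory with the id/text/label columns used here.

# -*- coding: utf-8 -*-
# check_output.py -- hypothetical helper for inspecting the scraper's CSV output
import csv

path = "article.csv"  # same output path used by Weibo_content.py

with open(path, newline='', encoding='utf-8-sig') as csvfile:
    reader = csv.DictReader(csvfile)  # the header row supplies the field names id, text, label
    rows = list(reader)

print("total rows:", len(rows))
for row in rows[:5]:  # preview the first few scraped posts
    print(row['id'], row['label'], row['text'][:40])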

 

