Scraping a user's Weibo data with Selenium, the procedural way


Scrape one user's Weibo posts across every time period.
The overall flow:
create the driver ----- get the page ----- locate and extract the data ----- save to CSV ----- turn the page ----- get the page (loop restarts) ---- ... ---- stop when there is no "next page" link.
A while True loop is used rather than a self-calling function.
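For contrast, the self-calling version this script avoids would look roughly like the sketch below (it reuses the function names defined later; crawl is a hypothetical name). Each page would add a stack frame, so a profile with more than about a thousand pages would hit Python's default recursion limit; the while True loop has no such ceiling.

def crawl(url):                      # hypothetical recursive variant, not used in this script
    get_web(url)
    save_csv(get_data(), csv_name)
    next_url = next_page_url()
    if next_url:
        crawl(next_url)              # recursion standing in for the while True loop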

嘟大海's Weibo: https://weibo.com/u/1623915527
辦公室小野's Weibo: https://weibo.com/bgsxy
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import csv
import os
import time

# Only these two settings need changing: to crawl someone else, swap in their profile URL and a target CSV filename
weibo_url = 'https://weibo.com/bgsxy?profile_ftype=1&is_all=1#_0'
csv_name = 'bgsxy_allweibo.csv'

def start_chrome():
    print('Starting the browser')
    driver = webdriver.Chrome(executable_path='C:/Users/lori/Desktop/python52project/chromedriver_win32/chromedriver.exe')
    return driver
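Note that webdriver.Chrome(executable_path=...) is the Selenium 3 API. In Selenium 4 that argument is deprecated (and removed in later 4.x releases); a rough equivalent there, assuming the same chromedriver path, is:

from selenium.webdriver.chrome.service import Service

def start_chrome_v4():              # sketch of the Selenium 4 equivalent
    # Selenium 4 wraps the chromedriver path in a Service object;
    # from Selenium 4.6 on the path can be omitted entirely, because
    # Selenium Manager downloads a matching driver automatically
    service = Service('C:/Users/lori/Desktop/python52project/chromedriver_win32/chromedriver.exe')
    return webdriver.Chrome(service=service)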

def get_web(url):      # open the page and scroll to the bottom
    print('Opening the target page')
    driver.get(url)
    time.sleep(7)      # give the feed time to load
    scroll_down()
    time.sleep(5)

def scroll_down():   # press END repeatedly to reach the bottom of the page
    html_page = driver.find_element_by_tag_name('html')
    for i in range(7):
        print(i)
        html_page.send_keys(Keys.END)
        time.sleep(1)   # pause so lazy-loaded posts can render
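If sending END keystrokes ever stops working (it depends on the html element accepting key events), an equivalent approach is to scroll via JavaScript; a sketch:

def scroll_down_js(rounds=7):        # alternative sketch, not called in this script
    for _ in range(rounds):
        # jump straight to the current bottom of the document
        driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')
        time.sleep(1)                # give the lazy-loaded feed time to render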

def get_data():
    print('Locating and extracting data')
    card_sel = 'div.WB_cardwrap.WB_feed_type'
    time_sel = 'a.S_txt2[node-type="feed_list_item_date"]'
    source_sel = 'a.S_txt2[suda-uatrack="key=profile_feed&value=pubfrom_guest"]'
    content_sel = 'div.WB_text.W_f14'
    interact_sel = 'span.line.S_line1>span>em:nth-child(2)'

    cards = driver.find_elements_by_css_selector(card_sel)
    info_list = []

    for card in cards:
        pub_time = card.find_elements_by_css_selector(time_sel)[0].text  # a card may contain two date elements; the first is the post's own timestamp
        if card.find_elements_by_css_selector(source_sel):
            source = card.find_elements_by_css_selector(source_sel)[0].text
        else:
            source = ''
        content = card.find_elements_by_css_selector(content_sel)[0].text
        link = card.find_elements_by_css_selector(time_sel)[0].get_attribute('href')
        trans = card.find_elements_by_css_selector(interact_sel)[1].text
        comment = card.find_elements_by_css_selector(interact_sel)[2].text
        like = card.find_elements_by_css_selector(interact_sel)[3].text
        info_list.append([pub_time,source,content,link,trans,comment,like])

    return info_list
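The find_elements_by_css_selector helpers also belong to the Selenium 3 API; Selenium 4 removed them in favour of a single find_elements method. The lookup above would become, roughly:

from selenium.webdriver.common.by import By

cards = driver.find_elements(By.CSS_SELECTOR, card_sel)   # Selenium 4 style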

def save_csv(info_list,csv_name):
    csv_path = './' + csv_name
    print('Writing to the CSV file')
    if os.path.exists(csv_path):
        with open(csv_path,'a',newline='',encoding='utf-8-sig') as f: # newline='' avoids blank rows; utf-8-sig adds a BOM so Excel displays Chinese text correctly
            writer = csv.writer(f)
            writer.writerows(info_list)
    else:
        with open(csv_path,'w',newline='',encoding='utf-8-sig') as f:
            writer = csv.writer(f)
            writer.writerow(['Publish time','Source','Content','Link','Reposts','Comments','Likes'])
            writer.writerows(info_list)
    time.sleep(5)
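The os.path.exists branch duplicates the writer setup. The same behaviour fits into a single open if the header row is written only when the file is brand new, since append mode creates a missing file anyway; a sketch:

def save_csv_compact(info_list, csv_name):   # equivalent sketch, not called in this script
    csv_path = './' + csv_name
    new_file = not os.path.exists(csv_path)  # remember whether a header is needed
    with open(csv_path, 'a', newline='', encoding='utf-8-sig') as f:
        writer = csv.writer(f)
        if new_file:
            writer.writerow(['Publish time','Source','Content','Link','Reposts','Comments','Likes'])
        writer.writerows(info_list)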

def next_page_url():
    next_page_sel = 'a.page.next'
    next_page_ele = driver.find_elements_by_css_selector(next_page_sel)
    if next_page_ele:
        return next_page_ele[0].get_attribute('href')
    else:
        return None


driver = start_chrome()
input('Log in to weibo.com in the Chrome window, then press Enter')     # pause the script for a manual login

while True:
    get_web(weibo_url)
    info_list = get_data()
    save_csv(info_list,csv_name)
    next_url = next_page_url()   # query the "next page" link just once per pass
    if next_url:
        weibo_url = next_url
    else:
        print('Crawl finished')
        break
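The fixed time.sleep calls are the fragile part of this script: too short and the elements have not loaded yet, too long and the crawl drags. One alternative is an explicit wait on the feed cards (reusing the selector from get_data); a sketch:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# after driver.get(url), block until at least one feed card is present
# (up to 15 seconds) instead of always sleeping a fixed 7 seconds
WebDriverWait(driver, 15).until(
    EC.presence_of_element_located((By.CSS_SELECTOR, 'div.WB_cardwrap.WB_feed_type')))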
