Financial Data (1): Scraping Kaipanla Brokerage-Branch Tags and Dragon-Tiger (LongHuBang) List Data


Goal: scrape Kaipanla's featured brokerage-branch tag data and the daily Dragon-Tiger (LongHuBang) list data.

 

Here is the code:

# -*- coding: utf-8 -*-
import requests
import json
import pandas as pd
from sqlalchemy import create_engine
import time
import datetime
from requests.packages.urllib3.exceptions import InsecureRequestWarning


# ==================== Kaipanla hot-concept (FengKou) data ============================================================================================
def fengkoulSpider(flag, engine, *args):
    print("Fetching Dragon-Tiger list hot-concept data")
    url = 'https://pclhb.kaipanla.com/w1/api/index.php'
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3314.0 Safari/537.36 SE 2.X MetaSr 1.0'}

    # Empty DataFrame for the hot-concept rows; empty lists for the stock codes and trade dates
    tuyere = pd.DataFrame()
    code_list = []
    date_list = []

    # Create a session to keep the connection alive
    session = requests.Session()
    # Suppress insecure-request warnings
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
    if flag == "Y":
        print("Fetching today's Kaipanla Dragon-Tiger hot-concept data")
        cur_date = str(args[0])
        date_list.append(cur_date)

    elif flag == "N":
        print("Fetching historical Kaipanla Dragon-Tiger hot-concept data")
        st = datetime.datetime.strptime(args[0], '%Y-%m-%d')
        ed = datetime.datetime.strptime(args[1], '%Y-%m-%d')
        for i in range((ed - st).days + 1):
            cur_date = (st + datetime.timedelta(days=i)).strftime('%Y-%m-%d')
            date_list.append(cur_date)

    # For each trade date, build the POST form that fetches that day's Dragon-Tiger list
    # (the 'Index': 0 parameter is ignored for now)
    for cur_date in date_list:
        data = {'c': 'LongHuBang', 'a': 'GetStockList', 'st': 300, 'Time': cur_date, 'UserID': 399083, 'Token': '71aef0e806e61ad3169ddc9473e37886'}

        # Send the POST request and decode the JSON response
        html_list = json.loads(session.post(url=url, headers=headers, data=data).text)['list']

        # Parse the Dragon-Tiger list entries
        for html in html_list:
            code = html['ID']
            fengkou = html['FengKou']
            # Store the parsed code and hot-concept label
            code_list.append(code)
            tuyere = tuyere.append({'code': code, 'trade_date': cur_date, 'fengkou': fengkou}, ignore_index=True)
    print("Kaipanla Dragon-Tiger hot-concept data parsed")
    print("Storing Kaipanla Dragon-Tiger hot-concept data")
    # Append the existing rows twice: anything already stored then occurs more than once
    # and is dropped by drop_duplicates(keep=False), so only genuinely new rows get inserted
    exist_tuyere = pd.read_sql('select * from inst_tuyere_concept', engine)
    tuyere = tuyere[['code', 'trade_date', 'fengkou']]
    tuyere = tuyere.append(exist_tuyere, ignore_index=True, sort=False)
    tuyere = tuyere.append(exist_tuyere, ignore_index=True, sort=False)
    tuyere.drop_duplicates(keep=False, inplace=True)
    tuyere.to_sql('inst_tuyere_concept', engine, if_exists='append', index=False, chunksize=10000)

    print(tuyere)
    print("Stored %s new rows of Kaipanla Dragon-Tiger hot-concept data this run" % tuyere.shape[0])
    print("Kaipanla Dragon-Tiger hot-concept data stored")
    print("---------------------------------")

    return date_list, code_list


def tagSpider(engine, date_list, code_list):
    print("Fetching Kaipanla Dragon-Tiger brokerage-branch tag data")
    url = 'https://pclhb.kaipanla.com/w1/api/index.php'
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3314.0 Safari/537.36 SE 2.X MetaSr 1.0'}

    # Empty DataFrame for the brokerage-branch tags
    depart_tag = pd.DataFrame()

    # Create a session to keep the connection alive
    session = requests.Session()
    print(date_list)
    print(code_list)
    for cur_date in date_list:

        for code in code_list:
            print("Fetching Dragon-Tiger brokerage-branch details for %s-%s" % (cur_date, code))
            # Build the POST form that fetches the Dragon-Tiger details of a single stock
            data = {'c': 'Stock', 'a': 'GetNewOneStockInfo', 'StockID': code, 'Time': cur_date, 'UserID': '399083', 'Token': '71aef0e806e61ad3169ddc9473e37886'}

            # Send the POST request and decode the JSON response
            html_list = json.loads(session.post(url=url, headers=headers, data=data).text)['List'][0]

            # Parse the buy and sell sides and merge them into a single list
            buy_list = html_list['BuyList']
            sell_list = html_list['SellList']
            buy_list.extend(sell_list)

            # Some branches carry no tag, which raises an exception, so wrap the parsing in try...except
            for depart in buy_list:
                try:
                    tag = depart['GroupIcon'][0]
                    yybname = depart['Name']
                    depart_tag = depart_tag.append({'yybname': yybname, 'tag': tag}, ignore_index=True)
                except Exception as parse_error:
                    print("Error while parsing the response: %s" % parse_error)

    print("Storing the Dragon-Tiger brokerage-branch tag data")
    # Read the rows already stored in SQL and keep a single copy of every
    # branch/tag pair, since the table is rebuilt below with if_exists='replace'
    exist_tag = pd.read_sql('select * from department_label', engine)
    depart_tag = depart_tag[['yybname', 'tag']]
    depart_tag = depart_tag.append(exist_tag, ignore_index=True)
    depart_tag.drop_duplicates(inplace=True)

    # Store the merged data, replacing the table
    depart_tag.to_sql('department_label', engine, if_exists='replace', index=False)

    print(depart_tag)
    print("Stored %s rows of Kaipanla brokerage-branch tag data" % depart_tag.shape[0])
    print("---------------------------------")


# ==================== Main ============================================================================================================================
if __name__ == '__main__':
    print("Kaipanla brokerage-branch tag spider started")
    print("--------------------------------------------")
    begin = time.time()

    # Create the SQLAlchemy engine pandas uses for database reads and writes
    engine = create_engine('mysql://root:123456@127.0.0.1/quant?charset=utf8')

    flag = input("Fetch today's data? Enter Y/N: ")
    if flag == "Y":
        current = time.strftime("%Y-%m-%d", time.localtime())
        date_list, code_list = fengkoulSpider(flag, engine, current)

    elif flag == "N":
        start = input("Enter the start date (format 1949-10-01): ")
        end = input("Enter the end date (format 1949-10-01): ")
        date_list, code_list = fengkoulSpider(flag, engine, start, end)

    else:
        raise SystemExit("Invalid choice, please enter Y or N")

    tagSpider(engine, date_list, code_list)
    ed = time.time()
    print('This run took %0.2f seconds.' % (ed - begin))
    print("Kaipanla brokerage-branch tag spider finished")
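
Two notes on the storage logic above. The script accumulates rows with DataFrame.append and deduplicates with drop_duplicates(keep=False); DataFrame.append still works on pandas 1.x but was removed in pandas 2.0, and the keep=False trick is easy to get wrong. Below is a minimal sketch of an equivalent "insert only new rows" step, assuming pandas >= 2.0: the table and column names come from the script, while the example row and the str casts are illustrative assumptions.

# -*- coding: utf-8 -*-
import pandas as pd
from sqlalchemy import create_engine

# Same connection string as the script above
engine = create_engine('mysql://root:123456@127.0.0.1/quant?charset=utf8')

# Collect plain dicts while parsing instead of calling DataFrame.append row by row;
# the single row below is only a placeholder for the parsed JSON data
rows = [{'code': '600000', 'trade_date': '2021-01-04', 'fengkou': 'example concept'}]
tuyere = pd.DataFrame(rows, columns=['code', 'trade_date', 'fengkou'])

# Anti-join against the existing table: keep only rows that are not stored yet,
# so if_exists='append' cannot re-insert anything that is already there.
# Both sides are cast to str so that dtype differences (e.g. DATE vs text)
# do not defeat the comparison.
exist = pd.read_sql('select code, trade_date, fengkou from inst_tuyere_concept', engine)
merged = tuyere.astype(str).merge(exist.astype(str), how='left', indicator=True)
new_rows = merged[merged['_merge'] == 'left_only'].drop(columns='_merge')
new_rows.to_sql('inst_tuyere_concept', engine, if_exists='append', index=False)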

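Both spiders fire one POST per trade date (and, in tagSpider, per stock code) at the same endpoint, so a long historical run can be aborted by a single transient network error or by throttling. One way to harden this is a small wrapper around session.post with a timeout, a few retries and a short pause. The sketch below reuses the endpoint, headers and form fields from the script; the retry count, timeout, pause and the example date are arbitrary assumptions, not values required by the Kaipanla API.

# -*- coding: utf-8 -*-
import json
import time
import requests

URL = 'https://pclhb.kaipanla.com/w1/api/index.php'
HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3314.0 Safari/537.36 SE 2.X MetaSr 1.0'}


def post_json(session, data, retries=3, timeout=10, pause=1.0):
    """POST the form data and return the decoded JSON, retrying on failure."""
    for attempt in range(1, retries + 1):
        try:
            resp = session.post(URL, headers=HEADERS, data=data, timeout=timeout)
            resp.raise_for_status()
            return json.loads(resp.text)
        except (requests.RequestException, ValueError) as exc:
            print("Request failed (attempt %d/%d): %s" % (attempt, retries, exc))
            time.sleep(pause)
    return {}


if __name__ == '__main__':
    session = requests.Session()
    # Example call with the same form fields fengkoulSpider uses; the date is a placeholder
    form = {'c': 'LongHuBang', 'a': 'GetStockList', 'st': 300, 'Time': '2021-01-04',
            'UserID': 399083, 'Token': '71aef0e806e61ad3169ddc9473e37886'}
    print(post_json(session, form).get('list', []))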