Chinese Word Frequency Statistics and Word Cloud Generation


1. Download a long Chinese novel

  The novel used here is The Three-Body Problem (三體).

2. Read the text to analyze from a file

text = open("C:三體.txt", "r", encoding="UTF-8").read()      # read the novel text
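  The one-liner above leaves the file handle open; a minimal equivalent that closes the file automatically (same path and encoding as above):

with open("C:三體.txt", "r", encoding="UTF-8") as f:         # file is closed when the block ends
    text = f.read()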

3. Install and use jieba for Chinese word segmentation

  Install jieba from the command line with: pip install jieba
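
  To confirm the install and see what segmentation produces, a quick check like the following can be run (the sample sentence is made up for illustration):

import jieba

sample = "三體艦隊正在向太陽系進發"          # made-up sample sentence
print(jieba.lcut(sample))                   # default (precise) mode
print(jieba.lcut(sample, cut_all=True))     # full mode: lists every possible word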

import jieba


text = open("C:三體.txt", "r", encoding="UTF-8").read()      # read the novel text
word_txt = jieba.lcut(text)                                 # segment the Chinese text

4. Update the dictionary with vocabulary specific to the text being analyzed

jieba.load_userdict(r'C:三體詞匯.txt')                       # load words specific to the novel
jieba.add_word("量子力學")                                   # add individual words
jieba.add_word("萬有引力")

  Dictionaries can be downloaded from: https://pinyin.sogou.com/dict/
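
  jieba.load_userdict expects a UTF-8 text file with one entry per line: the word itself, optionally followed by a frequency and a part-of-speech tag, separated by spaces. The lines below are hypothetical examples of what 三體詞匯.txt could contain:

三體 10 n
智子 10 n
面壁者 10 n
黑暗森林 10 n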

  Dictionary format conversion code (scel_to_text, converts Sogou .scel files to .txt):

# -*- coding: utf-8 -*-
import struct
import os

# offset of the pinyin table
startPy = 0x1540

# offset of the Chinese word table
startChinese = 0x2628

# global pinyin table
GPy_Table = {}

# parsing result:
# a list of tuples (word frequency, pinyin, Chinese word)


# convert raw bytes to a string
def byte2str(data):
    pos = 0
    str = ''
    while pos < len(data):
        c = chr(struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0])
        if c != chr(0):
            str += c
        pos += 2
    return str

# read the pinyin table
def getPyTable(data):
    data = data[4:]
    pos = 0
    while pos < len(data):
        index = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
        pos += 2
        lenPy = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
        pos += 2
        py = byte2str(data[pos:pos + lenPy])

        GPy_Table[index] = py
        pos += lenPy

# get the pinyin of one word
def getWordPy(data):
    pos = 0
    ret = ''
    while pos < len(data):
        index = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
        ret += GPy_Table[index]
        pos += 2
    return ret

# read the Chinese word table
def getChinese(data):
    GTable = []
    pos = 0
    while pos < len(data):
        # number of homophones
        same = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]

        # length of the pinyin index table
        pos += 2
        py_table_len = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]

        # pinyin index table
        pos += 2
        py = getWordPy(data[pos: pos + py_table_len])

        # Chinese words
        pos += py_table_len
        for i in range(same):
            # length of the Chinese word
            c_len = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
            # the Chinese word itself
            pos += 2
            word = byte2str(data[pos: pos + c_len])
            # length of the extended data
            pos += c_len
            ext_len = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
            # word frequency
            pos += 2
            count = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]

            # save the entry
            GTable.append((count, py, word))

            # advance to the next word
            pos += ext_len
    return GTable


def scel2txt(file_name):
    print('-' * 60)
    with open(file_name, 'rb') as f:
        data = f.read()

    print("Dictionary name:", byte2str(data[0x130:0x338]))
    print("Dictionary type:", byte2str(data[0x338:0x540]))
    print("Description:", byte2str(data[0x540:0xd40]))
    print("Examples:", byte2str(data[0xd40:startPy]))

    getPyTable(data[startPy:startChinese])
    return getChinese(data[startChinese:])

if __name__ == '__main__':
    # folder containing the downloaded .scel files
    in_path = r"C:\Users\Administrator\Downloads"   # change to the folder holding your dictionary files
    # folder for the converted dictionaries
    out_path = r"C:\Users\Administrator\Downloads\text"  # folder for the converted .txt files
    fin = [fname for fname in os.listdir(in_path) if fname[-5:] == ".scel"]
    for f in fin:
        try:
            for word in scel2txt(os.path.join(in_path, f)):
                file_path = os.path.join(out_path, str(f).split('.')[0] + '.txt')
                # save the result
                with open(file_path, 'a+', encoding='utf-8') as file:
                    file.write(word[2] + '\n')
            os.remove(os.path.join(in_path, f))
        except Exception as e:
            print(e)
            pass
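  To convert a single dictionary without the batch loop, scel2txt can also be called directly; a minimal sketch (the .scel file name here is hypothetical):

entries = scel2txt(r"C:\Users\Administrator\Downloads\三體詞匯.scel")   # hypothetical .scel file
for count, py, word in entries[:10]:
    print(word, count, py)                                             # preview the first 10 entries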

5. Generate word frequency statistics

word_dict = {}                                                             # word -> frequency
word_lists = []                                                            # filtered words, kept for the word cloud
for word in word_list:                                                     # word_list: the stop-word-filtered tokens from step 7
    if len(word) == 1:
        continue
    else:
        word_lists.append(word)
        word_dict[word] = word_dict.get(word, 0)+1                         # get() returns the value for the key, or the default if it is missing
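
  The counting idiom above is easy to check on a tiny made-up token list:

tokens = ["地球", "三體", "地球", "文明"]        # made-up tokens for illustration
counts = {}
for t in tokens:
    counts[t] = counts.get(t, 0) + 1
print(counts)                                   # {'地球': 2, '三體': 1, '文明': 1}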

6. Sort

wd = list(word_dict.items())                                               # convert the dict to a list so it can be sorted
wd.sort(key=lambda x: x[1], reverse=True)                                  # sort by the values (frequencies), descending
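
  The counting in step 5 and the sort above can also be done in one step with collections.Counter; a minimal equivalent, assuming word_lists is the list of kept words built in step 5:

from collections import Counter

wd = Counter(word_lists).most_common()          # list of (word, count), already sorted by count, descending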

7. Remove function words: pronouns, articles, conjunctions and other stop words

stops_word = open("C:stops_chinese.txt", "r", encoding="UTF-8").read()     # read the stop-word file
exclude = {'兩個', '東西', '很快', '一種', '這是', '看着', '真的', '發出', '回答',
           '感覺', '仿佛', '\u3000', '\n', ''}                             # custom stop words
stop_list = stops_word.split()
stops_all = set(stop_list).union(exclude)                                  # union of file-based and custom stop words
word_list = [element for element in word_txt if element not in stops_all]  # drop the stop words
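
  A quick, optional sanity check on how much the filter removed:

print(len(word_txt), "tokens before filtering,", len(word_list), "after")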

8. Output the top 20 most frequent words and save the results to a file

for i in range(20):                                                        # print the 20 most frequent words
    print(wd[i])
word_csv = wd                                                              # save the result as a csv file
pd.DataFrame(data=word_csv[0:20]).to_csv('The_three_body.csv', encoding='UTF-8')
mywc.to_file('三體詞雲.png')                                               # save the word cloud image (mywc is built in step 9)
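
  The CSV above is written with unnamed columns; an optional variant that labels them (the column names are illustrative, not from the original):

pd.DataFrame(wd[0:20], columns=['word', 'count']).to_csv('The_three_body.csv', encoding='UTF-8', index=False)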

9. Generate the word cloud

   Complete source code:

from wordcloud import WordCloud

import matplotlib.pyplot as plt
import jieba
import pandas as pd

text = open("C:三體.txt", "r", encoding="UTF-8").read()                    # read the novel text
text = text.strip()
jieba.load_userdict(r'C:三體詞匯.txt')                                     # load novel-specific words (before segmentation, so they take effect)
jieba.add_word("量子力學")                                                 # add individual words
jieba.add_word("萬有引力")
# jieba.add_word('')                                                       # add more novel-specific words here
word_txt = jieba.lcut(text)                                                # segment the Chinese text
stops_word = open("C:stops_chinese.txt", "r", encoding="UTF-8").read()     # read the stop-word file
exclude = {'兩個', '東西', '很快', '一種', '這是', '看着', '真的', '發出', '回答',
           '感覺', '仿佛', ''}                                           # custom stop words
stop_list = stops_word.split()
stops_all = set(stop_list).union(exclude)                                  # union of file-based and custom stop words
word_list = [element for element in word_txt if element not in stops_all]  # drop the stop words
word_dict = {}
word_lists = []
for word in word_list:
    if len(word) == 1:
        continue
    else:
        word_lists.append(word)
        word_dict[word] = word_dict.get(word, 0)+1                         # get() returns the value for the key, or the default if it is missing
wd = list(word_dict.items())                                               # convert the dict to a list so it can be sorted
wd.sort(key=lambda x: x[1], reverse=True)                                  # sort by frequency, descending
for i in range(20):                                                        # print the 20 most frequent words
    print(wd[i])
word_csv = wd                                                              # save the result as a csv file
pd.DataFrame(data=word_csv[0:20]).to_csv('The_three_body.csv', encoding='UTF-8')

mywc = WordCloud(font_path="C:/Windows/Fonts/msyh.ttc", background_color='black', margin=2, width=1800, height=800, random_state=42).generate(str(word_lists))
plt.imshow(mywc, interpolation='bilinear')
plt.axis("off")
plt.tight_layout()
mywc.to_file('三體詞雲.png')                                               # save the word cloud image
plt.show()
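
  generate(str(word_lists)) feeds WordCloud a stringified Python list, which it then re-tokenises with its own regular expression. Since the word frequencies are already computed in word_dict, an optional variant is to pass them to WordCloud directly:

mywc = WordCloud(font_path="C:/Windows/Fonts/msyh.ttc", background_color='black', margin=2,
                 width=1800, height=800, random_state=42).generate_from_frequencies(word_dict)
mywc.to_file('三體詞雲.png')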