Python: Word Segmentation and Word-Frequency Counting



# First gather the source text into a single txt file, then segment it
# and write the tokens to another txt file
import matplotlib.pyplot as plt  # data visualization (used in the word-cloud step)
import jieba  # word segmentation
from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS  # word cloud, color generator, stopwords
import numpy as np  # numeric arrays
from PIL import Image  # image handling

with open('D:/SAB/Desktop/res.txt', encoding='utf-8') as f:
    textfile = f.read()  # read the full text
wordlist = jieba.cut_for_search(textfile)  # segment the text (search-engine mode)
space_list = ' '.join(wordlist)  # join the tokens with spaces
with open("D:/SAB/Desktop/word.txt", "w", encoding='utf-8') as f:
    f.write(space_list)  # save the tokens for the counting step below
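
The matplotlib, wordcloud, numpy, and PIL imports above are not exercised by this snippet; they belong to the word-cloud rendering step. A minimal sketch of that step, assuming a hypothetical mask image (mask.png) and font path, neither of which comes from the original post:

# Word-cloud sketch; mask.png and the font path are assumptions for illustration
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS

mask = np.array(Image.open('D:/SAB/Desktop/mask.png'))  # hypothetical shape mask
wc = WordCloud(
    font_path='C:/Windows/Fonts/simhei.ttf',  # a CJK-capable font is needed for Chinese words
    background_color='white',
    mask=mask,
    stopwords=STOPWORDS,
)
wc.generate(space_list)  # space_list is the space-joined token string from above
wc.recolor(color_func=ImageColorGenerator(mask))  # take word colors from the mask image
plt.imshow(wc, interpolation='bilinear')
plt.axis('off')
plt.show()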

# Count the frequency of the words segmented above
import jieba

txt = open("D:/SAB/Desktop/word.txt", "r", encoding='utf-8').read()
words = jieba.lcut(txt)
counts = {}
for word in words:
    if len(word) == 1:  # skip single-character tokens (mostly punctuation and particles)
        continue
    counts[word] = counts.get(word, 0) + 1
items = list(counts.items())
items.sort(key=lambda x: x[1], reverse=True)  # sort by frequency, descending
for i in range(15):  # print the 15 most frequent words
    word, count = items[i]
    print("{0:<10}{1:>5}".format(word, count))
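
The manual dictionary loop above is equivalent to collections.Counter from the standard library; a minimal alternative sketch, assuming the same word.txt path:

import jieba
from collections import Counter

txt = open("D:/SAB/Desktop/word.txt", "r", encoding='utf-8').read()
counts = Counter(w for w in jieba.lcut(txt) if len(w) > 1)  # skip 1-char tokens
for word, count in counts.most_common(15):  # 15 most frequent words
    print("{0:<10}{1:>5}".format(word, count))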

 

Result: the 15 most frequent words and their counts (the original screenshot of the output is not preserved).