Python jieba 庫分詞結合 WordCloud 詞雲統計


"""Word-frequency statistics for the novel 《平凡的世界》 using jieba.

Reads the novel text, segments it with jieba's exact mode, merges
character-name aliases into canonical full names, removes common stop
words, and prints the 30 most frequent words.
"""
import jieba

# Register names that jieba's default dictionary may otherwise split apart.
jieba.add_word("福軍")
jieba.add_word("少安")

# Common filler words excluded from the frequency ranking.
excludes = {"一個", "他們", "自己", "現在", "已經", "什么", "這個", "沒有", "這樣", "知道", "兩個"}

# Raw string avoids the invalid "\平" escape in the Windows path;
# `with` guarantees the file handle is closed.
with open(r"D:\Users\Benny\平凡的世界.txt", "r", encoding="utf-8") as f:
    txt = f.read()

words = jieba.lcut(txt)  # exact-mode segmentation of the full text

# Map aliases / partial names onto one canonical full name per character.
# NOTE: the original mapped "少安" to "孫少平" — a copy-paste bug that
# merged two different characters; fixed to "孫少安".
aliases = {
    "少平": "孫少平",
    "少安": "孫少安",
    "玉厚": "孫玉厚",
    "父親": "孫玉厚",
    "福軍": "田福軍",
}

counts = {}  # word -> occurrence count
for word in words:
    if len(word) == 1:  # skip single characters (mostly particles/punctuation)
        continue
    rword = aliases.get(word, word)
    counts[rword] = counts.get(rword, 0) + 1

# pop(..., None) tolerates stop words that never appear in the text
# (a bare `del counts[word]` would raise KeyError).
for word in excludes:
    counts.pop(word, None)

# Sort by frequency, descending.
items = sorted(counts.items(), key=lambda x: x[1], reverse=True)

# Slicing guards against texts that yield fewer than 30 distinct words
# (indexing items[i] would raise IndexError).
for word, count in items[:30]:
    print("{0:<5}{1:>5}".format(word, count))

  

 


免責聲明!

本站轉載的文章為個人學習借鑒使用,本站對版權不負任何法律責任。如果侵犯了您的隱私權益,請聯系本站郵箱yoyou2525@163.com刪除。



 
粵ICP備18138465號   © 2018-2025 CODEPRJ.COM