Overall approach
- Load the article or passage you want to turn into a word cloud
- Segment the loaded text with jieba
- Count the word frequencies of the segmented words (the demo below lets WordCloud do this internally; see the sketch after this list for an explicit version)
- Generate and plot the word cloud
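In the demo that follows, WordCloud.generate counts the word frequencies internally, so step three needs no extra code. If you want the counts in your own hands, here is a minimal sketch using collections.Counter and WordCloud.generate_from_frequencies; the sample sentence and the single-character filter are illustrative assumptions, not part of the original demo.

from collections import Counter
import jieba
from wordcloud import WordCloud

def count_words(text):
    # Segment with jieba, then drop single-character tokens
    # (mostly punctuation and particles) before counting
    words = [w for w in jieba.cut(text) if len(w.strip()) > 1]
    return Counter(words)

# Illustrative sample text (assumption, not from the original demo)
freqs = count_words("自然語言處理很有趣，自然語言處理很實用")
# generate_from_frequencies bypasses WordCloud's built-in tokenizer
# and sizes each word by the counts we computed
wc = WordCloud(font_path="/home/alan/.local/share/fonts/STKAITI.TTF",
               background_color="white").generate_from_frequencies(freqs)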
Demo
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import jieba

# Note: 'word.txt' does not exist at this path yet; put your own text file there
path_txt = "/home/alan/Desktop/word.txt"
with open(path_txt, "r", encoding="UTF-8") as f:
    text = f.read()

# jieba segments the Chinese text; join with spaces so WordCloud can split it back into words
cut_text = " ".join(jieba.cut(text))

wordcloud = WordCloud(
    # A font with Chinese glyphs is required, otherwise the words render as boxes
    font_path="/home/alan/.local/share/fonts/STKAITI.TTF",
    background_color="white",
    width=1000,
    height=800,
).generate(cut_text)

plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
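If you also want the image on disk rather than only in a matplotlib window, WordCloud can write a PNG directly; the output path below is just an example.

# Save the rendered cloud to disk (example path)
wordcloud.to_file("/home/alan/Desktop/wordcloud.png")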