jieba分词

import jieba
# Count word frequencies in "聊斋志异" (Strange Tales from a Chinese Studio)
# with jieba segmentation, then print the 20 most frequent multi-character words.

# Read the text; a context manager guarantees the file is closed even on error.
path = "聊斋志异.txt"
with open(path, "r", encoding="utf-8") as file:
    text = file.read()

# Segment the full text into a list of words.
words = jieba.lcut(text)

# Tally word frequencies, skipping single-character tokens
# (mostly punctuation and function words).
counts = {}
for word in words:
    if len(word) == 1:
        continue
    counts[word] = counts.get(word, 0) + 1

# BUG FIX: the original called counts.iteams(), which raises AttributeError;
# the correct dict method is items().
items = sorted(counts.items(), key=lambda x: x[1], reverse=True)

# Print the top 20 entries; slicing (unlike range(20) indexing) is safe
# when the text yields fewer than 20 distinct words.
for word, count in items[:20]:
    print(f"{word:<10}{count:>5}")

posted @ 2023-12-18 21:20  1無灬  阅读(14)  评论(0)    收藏  举报