# gensim 主题模型示例 (gensim topic-model demo)

# https://blog.csdn.net/whzhcahzxh/article/details/17528261
# gensim包中引用corpora,models, similarities,分别做语料库建立,模型库和相似度比较库

from gensim import corpora, models, similarities
import jieba
# Sample corpus: three short Chinese sentences.
sentences = ["我喜欢吃土豆","土豆是个百搭的东西","我不喜欢今天雾霾的北京"]

# Tokenize each sentence with jieba.  `words` is a list of token lists,
# one inner list per document.
# (Bug fix: the original loop body was not indented, which is an
# IndentationError; a comprehension replaces the loop entirely.)
words = [list(jieba.cut(doc)) for doc in sentences]

# Build the dictionary (token -> integer id) from the tokenized documents.
dic = corpora.Dictionary(words)

# Convert each document into its bag-of-words representation:
# a list of (token_id, count) pairs — this is the gensim corpus.
corpus = [dic.doc2bow(word) for word in words]

# TF-IDF transform: reweight raw term counts by inverse document frequency.
tfidf = models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus]


# Train an LSI model, assuming the three sentences span 2 latent topics.
print('>>>>>>LSI>>>>>>>')
lsi = models.LsiModel(corpus_tfidf, id2word=dic, num_topics=2)
## To inspect the two learned topics:
## lsiout=lsi.print_topics(2)
# Project each document into the 2-D LSI topic space and print it.
# (Bug fix: the original loop body was not indented — IndentationError.)
corpus_lsi = lsi[corpus_tfidf]
for doc in corpus_lsi:
    print(doc)

'''
Sample output (sign/magnitude may vary between runs):
[(0, -0.70861576320682107), (1, 0.1431958007198823)]
[(0, -0.42764142348481798), (1, -0.88527674470703799)]
[(0, -0.66124862582594512), (1, 0.4190711252114323)]

'''
print('>>>>>>LDA>>>>>>>')
# Train an LDA model on the same TF-IDF corpus, again assuming 2 topics.
lda = models.LdaModel(corpus_tfidf, id2word=dic, num_topics=2)
## To inspect the two learned topics:
## ldaOut=lda.print_topics(2)
# Project each document into LDA topic space and print its topic mixture.
# (Bug fix: the original loop body was not indented — IndentationError.)
corpus_lda = lda[corpus_tfidf]
for doc in corpus_lda:
    print(doc)

# Query: given a new sentence, find which training sentence it is most
# similar to in the LSI topic space.  First build a similarity index over
# the LSI projection of the whole corpus.

index = similarities.MatrixSimilarity(lsi[corpus])
query = "雾霾很严重"
# Tokenize the query, convert to bag-of-words, and project it into the
# same 2-D LSI space as the training documents.
query_bow = dic.doc2bow(list(jieba.cut(query)))
query_lsi = lsi[query_bow]
print('query_lsi',query_lsi)


# Cosine similarity of the query against every training sentence, then
# sort as (sentence_index, score) pairs, most similar first.

sims = index[query_lsi]
sort_sims = sorted(enumerate(sims), key=lambda pair: pair[1], reverse=True)
print('sort_sims',sort_sims)







# 猜你喜欢
#
# 转载自 www.cnblogs.com/hapyygril/p/9970160.html
# 今日推荐