Word-level one-hot encoding of raw text with Keras's Tokenizer class

from keras.preprocessing.text import Tokenizer

text_corpus = ['The cat sat on the mat.', 'The dog ate my homework.'] # the corpus
tokenizer = Tokenizer(num_words=12) # keep only the num_words most frequent words
tokenizer.fit_on_texts(text_corpus) # build the word index from the corpus

print('index_word:\n', tokenizer.index_word) # the fitted index-to-word mapping

samples = ['cat and dog', 'the mat'] # the samples to vectorize
print('sequences:\n', tokenizer.texts_to_sequences(samples)) # convert to integer sequences
print('matrix:\n', tokenizer.texts_to_matrix(samples, mode='binary')) # convert to a matrix: words that occur are marked 1, all others 0

Output:

index_word:
 {1: 'the', 2: 'cat', 3: 'sat', 4: 'on', 5: 'mat', 6: 'dog', 7: 'ate', 8: 'my', 9: 'homework'}
sequences:
 [[2, 6], [1, 5]]
matrix:
 [[0. 0. 1. 0. 0. 0. 1. 0. 0. 0. 0. 0.]
 [0. 1. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]]
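Two details of the example are easy to miss: the word 'and' in the first sample never appeared in the corpus, so the Tokenizer silently drops it, and mode='binary' is only one of the vectorization modes texts_to_matrix accepts. A minimal sketch of both points, assuming the same corpus as above; oov_token and the 'count' mode are standard Tokenizer features, and '<UNK>' is just an illustrative placeholder name:

from keras.preprocessing.text import Tokenizer

corpus = ['The cat sat on the mat.', 'The dog ate my homework.']

# oov_token reserves index 1 for out-of-vocabulary words,
# so unseen words map to <UNK> instead of being dropped
tokenizer = Tokenizer(num_words=12, oov_token='<UNK>')
tokenizer.fit_on_texts(corpus)

# 'and' was never seen during fitting, so it becomes index 1;
# every known word shifts up by one compared to the run above
print(tokenizer.texts_to_sequences(['cat and dog'])) # [[3, 1, 7]]

# besides 'binary', texts_to_matrix supports 'count', 'freq', and 'tfidf';
# 'count' stores how often each word occurs instead of a 0/1 flag
print(tokenizer.texts_to_matrix(['the cat sat on the mat'], mode='count'))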

Reprinted from blog.csdn.net/chouchoubuchou/article/details/104941019