"Deep Learning with Python": study notes and code (Chapter 6, Section 6.1, Working with Text Data)

"Deep Learning with Python", Chapter 6: Deep Learning for Text and Sequences

6.1 Working with Text Data

Applications of natural language processing include document classification, sentiment analysis, author identification, and even question answering.

Working with text data means turning it into numeric tensors, a process known as text vectorization.

# One-hot encoding
# One-hot encoding is the most common, most basic way to encode text

# A simple example of one-hot encoding

# Word-level one-hot encoding
import numpy as np

samples = ['the cat sat on the mat.', 'the dog ate my homework.']

token_index = {}  # build an index of all tokens in the data
for sample in samples:
    for word in sample.split():
        if word not in token_index:
            token_index[word] = len(token_index) + 1  # assign a unique index to each unique word

# Vectorize the samples; only keep the first 10 words of each sample
max_length = 10

# Store the results in `results`
results = np.zeros(shape=(len(samples), max_length, max(token_index.values()) + 1))

for i, sample in enumerate(samples):
    for j, word in list(enumerate(sample.split()))[:max_length]:
        index = token_index.get(word)
        results[i, j, index] = 1
print(results)

One-hot encoding output:

[[[0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
  [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.]
  [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]
  [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]
  [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
  [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]
  [0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
  [0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
  [0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
  [0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]

 [[0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
  [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.]
  [0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]
  [0. 0. 0. 0. 0. 0. 0. 0. 1. 0.]
  [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]
  [0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
  [0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
  [0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
  [0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
  [0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]]
# Character-level one-hot encoding

import string

samples = ['the cat sat on the mat.', 'the dog ate my homework.']
characters = string.printable  # all printable ASCII characters
token_index = dict(zip(characters, range(1, len(characters) + 1)))  # map each character to a unique index

max_length = 50
results = np.zeros((len(samples), max_length, max(token_index.values()) + 1))

for i, sample in enumerate(samples):
    for j, character in enumerate(sample):
        index = token_index.get(character)
        results[i, j, index] = 1.

print(results)

Character-level encoding output: a tensor of shape (2, 50, 101); for each sample, each of the first len(sample) timesteps contains a single 1 at the index of the corresponding character, and the remaining timesteps stay all zeros.
# Word-level one-hot encoding with Keras's built-in Tokenizer

from keras.preprocessing.text import Tokenizer

samples = ['The cat sat on the mat.', 'The dog ate my homework.']

tokenizer = Tokenizer(num_words=1000)  # create a tokenizer configured to only take into account the 1,000 most common words
tokenizer.fit_on_texts(samples)  # build the word index

sequences = tokenizer.texts_to_sequences(samples)  # turn strings into lists of integer indices
print(sequences)

one_hot_results = tokenizer.texts_to_matrix(samples, mode='binary')  # you can also get the one-hot binary representations directly
print(one_hot_results)

word_index = tokenizer.word_index

print('Found %s unique tokens.' % len(word_index))

[[1, 2, 3, 4, 1, 5], [1, 6, 7, 8, 9]]
[[0. 1. 1. ... 0. 0. 0.]
 [0. 1. 0. ... 0. 0. 0.]]
Found 9 unique tokens.
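
As a quick check (my own addition, not from the book), the integer sequences can be decoded back into words by reversing tokenizer.word_index:

# Reverse the word -> index mapping and decode the first sequence (illustrative sketch)
reverse_word_index = {index: word for word, index in tokenizer.word_index.items()}
print(' '.join(reverse_word_index.get(i, '?') for i in sequences[0]))  # expected: "the cat sat on the mat"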
# Word-level one-hot encoding with the hashing trick
samples = ['The cat sat on the mat.', 'The dog ate my homework.']

# We will store our words as vectors of size 1000.
# Note that if you have close to 1000 words (or more)
# you will start seeing many hash collisions, which
# will decrease the accuracy of this encoding method.
dimensionality = 1000
max_length = 10

results = np.zeros((len(samples), max_length, dimensionality))
for i, sample in enumerate(samples):
    for j, word in list(enumerate(sample.split()))[:max_length]:
        # Hash the word into a "random" integer index
        # that is between 0 and 1000
        index = abs(hash(word)) % dimensionality
        results[i, j, index] = 1.
        
print(results)
[[[0. 0. 0. ... 0. 0. 0.]
  [0. 0. 0. ... 0. 0. 0.]
  [0. 0. 0. ... 0. 0. 0.]
  ...
  [0. 0. 0. ... 0. 0. 0.]
  [0. 0. 0. ... 0. 0. 0.]
  [0. 0. 0. ... 0. 0. 0.]]

 [[0. 0. 0. ... 0. 0. 0.]
  [0. 0. 0. ... 0. 0. 0.]
  [0. 0. 0. ... 0. 0. 0.]
  ...
  [0. 0. 0. ... 0. 0. 0.]
  [0. 0. 0. ... 0. 0. 0.]
  [0. 0. 0. ... 0. 0. 0.]]]
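
Keras also provides a built-in helper for this hashing approach; a minimal sketch, assuming keras.preprocessing.text.hashing_trick is available in the Keras version used here:

from keras.preprocessing.text import hashing_trick

# Hash each word of a sample into an integer index in [1, 1000) using a stable md5 hash
print(hashing_trick('The cat sat on the mat.', n=1000, hash_function='md5'))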
# Using word embeddings
# There are two ways to obtain word embeddings

# Method 1: learn word embeddings with an Embedding layer, jointly with the task you care about
from keras.layers import Embedding
embedding_layer = Embedding(1000, 64)  # arguments: number of possible tokens (1,000) and embedding dimensionality (64)
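
The Embedding layer is best understood as a dictionary lookup that maps integer indices to dense vectors. A minimal sketch of my own showing the shapes involved: a batch of integer sequences of shape (samples, sequence_length) comes out as a float tensor of shape (samples, sequence_length, embedding_dim).

from keras.models import Sequential
from keras.layers import Embedding
import numpy as np

demo = Sequential()
demo.add(Embedding(1000, 64, input_length=10))  # 1,000 possible tokens, 64-dimensional vectors
demo.compile('rmsprop', 'mse')

dummy_batch = np.random.randint(0, 1000, size=(32, 10))  # (samples, sequence_length) of word indices
print(demo.predict(dummy_batch).shape)  # (32, 10, 64)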

from keras.datasets import imdb
from keras import preprocessing
# Consider only the 10,000 most common words; cut reviews off after 20 words
max_features = 10000
maxlen = 20

(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(x_train.shape)

x_train = preprocessing.sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = preprocessing.sequence.pad_sequences(x_test, maxlen=maxlen)
print(x_train.shape)

(25000,)
(25000, 20)
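
pad_sequences left-pads shorter sequences with zeros and truncates longer ones (both at the front by default) so that every sample has length maxlen. A toy illustration of my own:

from keras.preprocessing.sequence import pad_sequences

print(pad_sequences([[1, 2, 3], [1, 2, 3, 4, 5, 6]], maxlen=5))
# [[0 0 1 2 3]
#  [2 3 4 5 6]]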
# Using an Embedding layer and classifier on the IMDB data
from keras.models import Sequential
from keras.layers import Flatten,Dense,Embedding

model = Sequential()
# Take sequences of 20 words as input and embed each word as an 8-dimensional vector
model.add(Embedding(10000,8,input_length = maxlen))

model.add(Flatten())

model.add(Dense(1,activation = 'sigmoid'))
model.compile(optimizer = 'rmsprop',loss = 'binary_crossentropy',metrics = ['acc'])

model.summary()

history = model.fit(x_train,y_train,epochs = 10,batch_size = 32,validation_split = 0.2)
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_2 (Embedding)      (None, 20, 8)             80000     
_________________________________________________________________
flatten_1 (Flatten)          (None, 160)               0         
_________________________________________________________________
dense_1 (Dense)              (None, 1)                 161       
=================================================================
Total params: 80,161
Trainable params: 80,161
Non-trainable params: 0
_________________________________________________________________
Train on 20000 samples, validate on 5000 samples
Epoch 1/10
20000/20000 [==============================] - 18s 891us/step - loss: 0.6759 - acc: 0.6050 - val_loss: 0.6398 - val_acc: 0.6814
Epoch 2/10
20000/20000 [==============================] - 4s 189us/step - loss: 0.5657 - acc: 0.7427 - val_loss: 0.5467 - val_acc: 0.7206
Epoch 3/10
20000/20000 [==============================] - 4s 183us/step - loss: 0.4752 - acc: 0.7808 - val_loss: 0.5113 - val_acc: 0.7384
Epoch 4/10
20000/20000 [==============================] - 4s 181us/step - loss: 0.4263 - acc: 0.8077 - val_loss: 0.5008 - val_acc: 0.7452
Epoch 5/10
20000/20000 [==============================] - 4s 182us/step - loss: 0.3930 - acc: 0.8258 - val_loss: 0.4981 - val_acc: 0.7538
Epoch 6/10
20000/20000 [==============================] - 3s 174us/step - loss: 0.3668 - acc: 0.8395 - val_loss: 0.5014 - val_acc: 0.7530
Epoch 7/10
20000/20000 [==============================] - 4s 177us/step - loss: 0.3435 - acc: 0.8533 - val_loss: 0.5052 - val_acc: 0.7520
Epoch 8/10
20000/20000 [==============================] - 3s 174us/step - loss: 0.3223 - acc: 0.8657 - val_loss: 0.5132 - val_acc: 0.7486
Epoch 9/10
20000/20000 [==============================] - 3s 175us/step - loss: 0.3022 - acc: 0.8766 - val_loss: 0.5213 - val_acc: 0.7490
Epoch 10/10
20000/20000 [==============================] - 4s 177us/step - loss: 0.2839 - acc: 0.8860 - val_loss: 0.5303 - val_acc: 0.7466

Instead of learning word embeddings jointly with the problem you want to solve, you can load embedding vectors from a precomputed embedding space that is known to be highly structured and to exhibit useful properties, one that captures generic aspects of language structure. The rationale for using pretrained word embeddings in natural language processing is much the same as for using pretrained convnets in image classification: you don't have enough data to learn truly powerful features on your own, but you expect the features you need to be fairly generic, that is, common visual features or semantic features. In that case it makes sense to reuse features learned on a different problem.

Such word embeddings are generally computed from word-occurrence statistics (observations about which words co-occur in sentences or documents), using a variety of techniques, some involving neural networks and some not. The idea of a dense, low-dimensional embedding space for words, computed in an unsupervised way, was first explored by Bengio et al. in the early 2000s, but it only began to take off in research and industry after the 2013 release of the Word2vec algorithm, developed by Mikolov at Google, one of the most famous and successful word-embedding schemes. Word2vec dimensions capture specific semantic properties, such as gender.

There are various precomputed databases of word embeddings that you can download and use in a Keras Embedding layer. Word2vec is one of them. Another popular one is GloVe ("Global Vectors for Word Representation"), developed by Stanford researchers in 2014; it is an embedding technique based on factorizing a matrix of word co-occurrence statistics. Its developers have made available precomputed embeddings for millions of English tokens, obtained from Wikipedia data or Common Crawl data.

Let's look at how to get started using GloVe embeddings in a Keras model. The same method is of course valid for Word2vec embeddings or any other downloadable word-embedding database. We'll also use this example to refresh the text-tokenization techniques introduced a few paragraphs ago: we'll start from raw text and work our way up.

# Method 2: use pretrained word embeddings
# Putting it all together: from raw text to word embeddings

import os

imdb_dir = r'D:\study\Python\Deeplearning\Untitled Folder\aclImdb'
train_dir = os.path.join(imdb_dir, 'train')

labels = []
texts = []

for label_type in ['neg', 'pos']:
    dir_name = os.path.join(train_dir, label_type)
    for fname in os.listdir(dir_name):
        if fname[-4:] == '.txt':
            f = open(os.path.join(dir_name, fname),encoding='gb18030', errors='ignore')
            texts.append(f.read())
            f.close()
            if label_type == 'neg':
                labels.append(0)
            else:
                labels.append(1)
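
A quick sanity check of my own on the loaded corpus: the aclImdb training set contains 25,000 reviews, split evenly between negative and positive.

# Verify that the raw IMDB training data loaded as expected
print(len(texts), len(labels))            # expected: 25000 25000
print(labels.count(0), labels.count(1))   # expected: 12500 12500
print(texts[0][:100])                     # first 100 characters of the first review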

Let's vectorize the collected texts and prepare a training and validation split, using only the concepts introduced earlier in this section. Because pretrained word embeddings are meant to be particularly useful on problems where little training data is available (otherwise, task-specific embeddings are likely to outperform them), we'll add the following twist: we restrict the training data to the first 200 samples. So we'll be learning to classify movie reviews after looking at just 200 examples.

from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import numpy as np

maxlen = 100  # cut reviews off after 100 words
training_samples = 200  # train on 200 samples
validation_samples = 10000  # validate on 10,000 samples
max_words = 10000  # consider only the top 10,000 most common words in the dataset

tokenizer = Tokenizer(num_words = max_words)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
# print(sequences)

word_index = tokenizer.word_index
print('Found %s unique tokens.'%len(word_index))

data = pad_sequences(sequences,maxlen = maxlen)

labels = np.asarray(labels)
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)

indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]

x_train = data[:training_samples]
y_train = labels[:training_samples]

x_val = data[training_samples:training_samples+validation_samples]
y_val = labels[training_samples:training_samples + validation_samples]

Found 87396 unique tokens.
Shape of data tensor: (25000, 100)
Shape of label tensor: (25000,)

glove_dir = r'D:\study\Python\Deeplearning\Untitled Folder\glove.6B'

embeddings_index = {}
f = open(os.path.join(glove_dir,'glove.6B.100d.txt'),encoding='gb18030', errors='ignore')
for line in f:
    values = line.split()
    word = values[0]
    coefs = np.asarray(values[1:],dtype = 'float32')
    embeddings_index[word] = coefs
f.close()

print('Found %s word vectors.'% len(embeddings_index))
Found 399917 word vectors.

Now we can build an embedding matrix to load into an Embedding layer. It must be a matrix of shape (max_words, embedding_dim), where each entry i contains the embedding_dim-dimensional vector for the word of index i in the reference word index (built during tokenization). Note that index 0 isn't supposed to stand for any word or token; it's a placeholder.

embedding_dim = 100

embedding_matrix = np.zeros((max_words,embedding_dim))

for word,i in word_index.items():
    if i < max_words:
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
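
As an optional check of my own (not in the book), you can count how many of the top max_words tokens actually received a GloVe vector; words absent from embeddings_index keep an all-zero row in embedding_matrix:

# Count how many of the top max_words tokens are covered by the pretrained embeddings
hits = sum(1 for word, i in word_index.items() if i < max_words and word in embeddings_index)
print('%d of the top %d words have a GloVe vector.' % (hits, max_words))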
            
# Model definition
from keras.models import Sequential
from keras.layers import Embedding,Flatten,Dense

model = Sequential()
# Sequences of 100 words; each word embedded as a 100-dimensional vector
model.add(Embedding(max_words, embedding_dim, input_length=maxlen))
model.add(Flatten())
model.add(Dense(32,activation = 'relu'))
model.add(Dense(1,activation='sigmoid'))
model.summary()
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_7 (Embedding)      (None, 100, 100)          1000000   
_________________________________________________________________
flatten_6 (Flatten)          (None, 10000)             0         
_________________________________________________________________
dense_10 (Dense)             (None, 32)                320032    
_________________________________________________________________
dense_11 (Dense)             (None, 1)                 33        
=================================================================
Total params: 1,320,065
Trainable params: 1,320,065
Non-trainable params: 0
_________________________________________________________________

Loading the GloVe embeddings into the model

The Embedding layer has a single weight matrix: a 2D float matrix in which each entry i is the word vector associated with index i. Let's load the prepared GloVe matrix into the Embedding layer, the first layer of the model. Additionally, we freeze the Embedding layer (we set its trainable attribute to False), following the same rationale you're already familiar with from pretrained convnet features: when parts of a model are pretrained (like the Embedding layer) and parts are randomly initialized (like the classifier), the pretrained parts shouldn't be updated during training, to avoid forgetting what they already know. The large gradient updates triggered by the randomly initialized layers would disrupt the already-learned features.

model.layers[0].set_weights([embedding_matrix])
model.layers[0].trainable = False
# Train and evaluate the model
model.compile(optimizer = 'rmsprop',loss = 'binary_crossentropy',metrics = ['acc'])

history = model.fit(x_train,y_train,epochs = 10,batch_size = 32,validation_data = (x_val,y_val))

model.save_weights('pre_trained_glove_model.h5')
Train on 200 samples, validate on 10000 samples
Epoch 1/10
200/200 [==============================] - 1s 7ms/step - loss: 1.6734 - acc: 0.4850 - val_loss: 0.6934 - val_acc: 0.5043
Epoch 2/10
200/200 [==============================] - 1s 6ms/step - loss: 0.6637 - acc: 0.6150 - val_loss: 0.7098 - val_acc: 0.5083
Epoch 3/10
200/200 [==============================] - 1s 5ms/step - loss: 0.6759 - acc: 0.6200 - val_loss: 0.7333 - val_acc: 0.4974
Epoch 4/10
200/200 [==============================] - 1s 5ms/step - loss: 0.4815 - acc: 0.7650 - val_loss: 0.8634 - val_acc: 0.4938
Epoch 5/10
200/200 [==============================] - 1s 4ms/step - loss: 0.3846 - acc: 0.8100 - val_loss: 0.7980 - val_acc: 0.4927
Epoch 6/10
200/200 [==============================] - 1s 4ms/step - loss: 0.2463 - acc: 0.9150 - val_loss: 1.1067 - val_acc: 0.5082
Epoch 7/10
200/200 [==============================] - 1s 5ms/step - loss: 0.1543 - acc: 0.9550 - val_loss: 1.1594 - val_acc: 0.4917
Epoch 8/10
200/200 [==============================] - 1s 4ms/step - loss: 0.3597 - acc: 0.8000 - val_loss: 0.7827 - val_acc: 0.5008
Epoch 9/10
200/200 [==============================] - 1s 5ms/step - loss: 0.0655 - acc: 1.0000 - val_loss: 0.8562 - val_acc: 0.5041
Epoch 10/10
200/200 [==============================] - 1s 5ms/step - loss: 0.0439 - acc: 1.0000 - val_loss: 1.7667 - val_acc: 0.4927
import matplotlib.pyplot as plt 

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(1,len(acc)+1)

plt.plot(epochs,acc,'b',label = 'training_acc')
plt.plot(epochs,val_acc,'r',label = 'Validation_acc')
plt.title('training and validation acc')
plt.legend()

plt.figure()

plt.plot(epochs,loss,'b',label = 'training_loss')
plt.plot(epochs,val_loss,'r',label = 'validation_loss')
plt.title('training and validation loss')
plt.legend()

plt.show()

[Figure: training and validation accuracy]
[Figure: training and validation loss]

# Training the same model without pretrained word embeddings
model = Sequential()
# Sequences of 100 words; each word embedded as a 100-dimensional vector, learned from scratch this time
model.add(Embedding(max_words, embedding_dim, input_length=maxlen))
model.add(Flatten())
model.add(Dense(32,activation = 'relu'))
model.add(Dense(1,activation='sigmoid'))
model.summary()
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_6 (Embedding)      (None, 100, 100)          1000000   
_________________________________________________________________
flatten_5 (Flatten)          (None, 10000)             0         
_________________________________________________________________
dense_8 (Dense)              (None, 32)                320032    
_________________________________________________________________
dense_9 (Dense)              (None, 1)                 33        
=================================================================
Total params: 1,320,065
Trainable params: 1,320,065
Non-trainable params: 0
_________________________________________________________________
model.compile(optimizer = 'rmsprop',loss = 'binary_crossentropy',metrics = ['acc'])

history = model.fit(x_train,y_train,epochs = 10,batch_size = 32,validation_data = (x_val,y_val))

Train on 200 samples, validate on 10000 samples
Epoch 1/10
200/200 [==============================] - 1s 7ms/step - loss: 0.6977 - acc: 0.4600 - val_loss: 0.6931 - val_acc: 0.5046
Epoch 2/10
200/200 [==============================] - 1s 5ms/step - loss: 0.5455 - acc: 0.9850 - val_loss: 0.6961 - val_acc: 0.5021
Epoch 3/10
200/200 [==============================] - 1s 4ms/step - loss: 0.3479 - acc: 0.9950 - val_loss: 0.7009 - val_acc: 0.5034
Epoch 4/10
200/200 [==============================] - 1s 5ms/step - loss: 0.1636 - acc: 1.0000 - val_loss: 0.7082 - val_acc: 0.5029
Epoch 5/10
200/200 [==============================] - 1s 5ms/step - loss: 0.0773 - acc: 1.0000 - val_loss: 0.7152 - val_acc: 0.5046
Epoch 6/10
200/200 [==============================] - 1s 6ms/step - loss: 0.0401 - acc: 1.0000 - val_loss: 0.7301 - val_acc: 0.4958
Epoch 7/10
200/200 [==============================] - 1s 5ms/step - loss: 0.0219 - acc: 1.0000 - val_loss: 0.7309 - val_acc: 0.5031
Epoch 8/10
200/200 [==============================] - 1s 5ms/step - loss: 0.0125 - acc: 1.0000 - val_loss: 0.7391 - val_acc: 0.5032
Epoch 9/10
200/200 [==============================] - 1s 4ms/step - loss: 0.0074 - acc: 1.0000 - val_loss: 0.7439 - val_acc: 0.4986
Epoch 10/10
200/200 [==============================] - 1s 5ms/step - loss: 0.0045 - acc: 1.0000 - val_loss: 0.7528 - val_acc: 0.4984
import matplotlib.pyplot as plt 

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(1,len(acc)+1)

plt.plot(epochs,acc,'b',label = 'training_acc')
plt.plot(epochs,val_acc,'r',label = 'Validation_acc')
plt.title('training and validation acc')
plt.legend()


plt.figure()

plt.plot(epochs,loss,'b',label = 'training_loss')
plt.plot(epochs,val_loss,'r',label = 'validation_loss')
plt.title('training and validation loss')
plt.legend()

plt.show()

[Figure: training and validation accuracy]
[Figure: training and validation loss]

# Tokenizing the data of the test set
test_dir = os.path.join(imdb_dir,'test')

labels = []
texts = []

for label_type in ['neg','pos']:
    dir_name = os.path.join(test_dir,label_type)
    for fname in sorted(os.listdir(dir_name)):
        if fname[-4:] == '.txt':
            f = open(os.path.join(dir_name,fname),encoding='gb18030', errors='ignore')
            texts.append(f.read())
            f.close()
            if label_type == 'neg':
                labels.append(0)
            else:
                labels.append(1)
                
sequences = tokenizer.texts_to_sequences(texts)
x_test = pad_sequences(sequences, maxlen=maxlen)
y_test = np.asarray(labels)
# Evaluating the model on the test set
model.load_weights('pre_trained_glove_model.h5')
model.evaluate(x_test, y_test)
24736/25000 [============================>.] - ETA: 0s
[0.93747248332977295, 0.53659999999999997]
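
Finally, a hedged sketch of my own showing how the trained model could score a brand-new review (the review text below is made up for illustration): apply the same tokenizer, pad to maxlen, and call predict.

# Score a new raw review with the fitted tokenizer and the trained model (illustrative)
new_review = ['this movie was a wonderful surprise, great acting and a touching story']
new_data = pad_sequences(tokenizer.texts_to_sequences(new_review), maxlen=maxlen)
print(model.predict(new_data))  # close to 1 -> predicted positive, close to 0 -> predicted negative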

Summary:

1. Converting raw text into a format that a neural network can process.

2. Using the Embedding layer in a Keras model to learn task-specific token embeddings.

3. Using pretrained word embeddings to get an extra boost on small natural-language-processing problems.

Reposted from blog.csdn.net/qq_41718518/article/details/89599012