Building a CNN with Keras on the MNIST Handwritten Digit Dataset

import tensorflow as tf
F:\Anaconda3\envs\tensorflow-gpu\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters
tf.__version__
'1.10.0'
import keras
keras.__version__
Using TensorFlow backend.
'2.2.2'
import numpy as np
import pandas as pd
from keras.utils import np_utils
np.random.seed(10)
from keras.datasets import mnist
mnist.load_data()
(Output truncated: mnist.load_data() returns a nested tuple ((train_images, train_labels), (test_images, test_labels)) of uint8 NumPy arrays with shapes (60000, 28, 28), (60000,), (10000, 28, 28), and (10000,); the visible tail array([7, 2, 1, ..., 4, 5, 6], dtype=uint8) is the test labels.)
(x_Train, y_Train), (x_Test, y_Test) = mnist.load_data()
# Data preprocessing
# (Note: despite the "4D" suffix, these arrays are flattened to (n, 784) for the dense network.)
x_Train4D = x_Train.reshape(x_Train.shape[0], 784).astype('float32')
x_Test4D = x_Test.reshape(x_Test.shape[0], 784).astype('float32')
# Scale pixel values from [0, 255] down to [0, 1]
x_Train4D_normalize = x_Train4D / 255
x_Test4D_normalize = x_Test4D / 255
# One-hot encode the labels
y_TrainOneHot = np_utils.to_categorical(y_Train)
y_TestOneHot = np_utils.to_categorical(y_Test)
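As a quick illustration of what the one-hot encoding produces (a minimal sketch, not part of the original notebook):

```python
from keras.utils import np_utils
import numpy as np

# Each label becomes a length-10 vector with a single 1 at the label's index
labels = np.array([0, 3, 9])
print(np_utils.to_categorical(labels, num_classes=10))
# [[1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
#  [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]
#  [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]]
```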
# Build the model
from keras.models import Sequential
from keras.layers import Dense
# Add Dropout layers to reduce overfitting
from keras.layers import Dropout
# Two hidden layers of 1000 units, each followed by a Dropout layer
model = Sequential()
model.add(Dense(units=1000, input_dim=784, kernel_initializer='normal', activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(units=1000, kernel_initializer='normal', activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(units=10, kernel_initializer='normal', activation='softmax'))
print(model.summary())
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_1 (Dense)              (None, 1000)              785000    
_________________________________________________________________
dropout_1 (Dropout)          (None, 1000)              0         
_________________________________________________________________
dense_2 (Dense)              (None, 1000)              1001000   
_________________________________________________________________
dropout_2 (Dropout)          (None, 1000)              0         
_________________________________________________________________
dense_3 (Dense)              (None, 10)                10010     
=================================================================
Total params: 1,796,010
Trainable params: 1,796,010
Non-trainable params: 0
_________________________________________________________________
None
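The parameter counts in the summary follow the standard Dense-layer formula, inputs × units + units (one bias per unit). A quick sanity check:

```python
# Dense layer parameters = inputs * units + units (bias)
assert 784 * 1000 + 1000 == 785000    # dense_1
assert 1000 * 1000 + 1000 == 1001000  # dense_2
assert 1000 * 10 + 10 == 10010        # dense_3
```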
# Train the model
model.compile(loss='categorical_crossentropy',
              optimizer='adam',metrics=['accuracy']) 
train_history=model.fit(x=x_Train4D_normalize, 
                        y=y_TrainOneHot,validation_split=0.2, 
                        epochs=10, batch_size=200,verbose=2)
Train on 48000 samples, validate on 12000 samples
Epoch 1/10
 - 15s - loss: 0.3684 - acc: 0.8844 - val_loss: 0.1362 - val_acc: 0.9611
Epoch 2/10
 - 2s - loss: 0.1580 - acc: 0.9519 - val_loss: 0.0988 - val_acc: 0.9706
Epoch 3/10
 - 2s - loss: 0.1172 - acc: 0.9637 - val_loss: 0.0900 - val_acc: 0.9727
Epoch 4/10
 - 2s - loss: 0.0996 - acc: 0.9695 - val_loss: 0.0858 - val_acc: 0.9733
Epoch 5/10
 - 2s - loss: 0.0845 - acc: 0.9734 - val_loss: 0.0859 - val_acc: 0.9750
Epoch 6/10
 - 2s - loss: 0.0740 - acc: 0.9765 - val_loss: 0.0756 - val_acc: 0.9778
Epoch 7/10
 - 2s - loss: 0.0653 - acc: 0.9781 - val_loss: 0.0760 - val_acc: 0.9785
Epoch 8/10
 - 2s - loss: 0.0609 - acc: 0.9810 - val_loss: 0.0807 - val_acc: 0.9788
Epoch 9/10
 - 2s - loss: 0.0570 - acc: 0.9813 - val_loss: 0.0693 - val_acc: 0.9804
Epoch 10/10
 - 2s - loss: 0.0521 - acc: 0.9829 - val_loss: 0.0799 - val_acc: 0.9787
# Evaluate model accuracy on the test data
scores = model.evaluate(x_Test4D_normalize, y_TestOneHot)
print()
print('acc='+str(scores[1]))
10000/10000 [==============================] - 0s 40us/step

acc=0.9799
import matplotlib.pyplot as plt
def show_train_history(train_history, train, validation):
    plt.plot(train_history.history[train])
    plt.plot(train_history.history[validation])
    plt.title('Train History')
    plt.ylabel(train)  # use the metric name, not a hard-coded 'acc'; this function is also used for loss below
    plt.xlabel('Epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()
show_train_history(train_history, 'acc', 'val_acc')

[figure: training vs. validation accuracy over the 10 epochs]

from keras.utils import np_utils
import numpy as np
from keras.datasets import mnist

np.random.seed(10)
# Convolutional neural network
(x_Train, y_Train), (x_Test, y_Test) = mnist.load_data()
# Data preprocessing: reshape to (n, 28, 28, 1) so Conv2D receives a single grayscale channel
x_Train4D=x_Train.reshape(x_Train.shape[0], 28, 28, 1).astype('float32')
x_Test4D=x_Test.reshape(x_Test.shape[0], 28, 28, 1).astype('float32')
x_Train4D_normalize = x_Train4D / 255
x_Test4D_normalize = x_Test4D / 255
y_TrainOneHot = np_utils.to_categorical(y_Train)
y_TestOneHot = np_utils.to_categorical(y_Test)
print(x_Train[:1].shape)
print(x_Test[:1].shape)
print(x_Train4D[:1].shape)
(1, 28, 28)
(1, 28, 28)
(1, 28, 28, 1)
print(x_Train[:1])
print(x_Train4D[:1])
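The printed arrays confirm that the reshape only adds a trailing channel axis; the pixel values themselves are unchanged. A quick check (a sketch, not in the original notebook):

```python
# The 4-D version is the same data with an extra channel axis of size 1
assert (x_Train4D[:1].reshape(1, 28, 28) == x_Train[:1]).all()
```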
# Convolutional neural network
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D

# (Note: despite the _rnn suffix, this model is a CNN, not an RNN.)
model_rnn = Sequential()

# Convolutional layer 1: grows the number of feature maps
# filters=16: turns the single input image into 16 feature maps
# kernel_size=(5,5): 5x5 convolution kernel
# padding='same': output spatial size equals the input size
# input_shape=(28,28,1): 28x28 grayscale images with 1 channel
model_rnn.add(Conv2D(filters=16,
                 kernel_size=(5,5),
                 padding='same',
                 input_shape=(28,28,1), 
                 activation='relu'))

# Max-pooling layer 1: shrinks the feature maps
# Splits each map into 2x2 windows and keeps the maximum of each, so 28x28x16 becomes 14x14x16
model_rnn.add(MaxPooling2D(pool_size=(2, 2)))

# Convolutional layer 2: 16 feature maps become 36
model_rnn.add(Conv2D(filters=36,
                 kernel_size=(5,5),
                 padding='same',
                 activation='relu'))

# Pooling layer 2: shrinks the feature maps to 7x7x36
model_rnn.add(MaxPooling2D(pool_size=(2, 2)))

# Randomly drop 25% of the neurons in each training iteration
model_rnn.add(Dropout(0.25))

# Flatten layer: 7x7x36 feature maps flattened into 1,764 values
model_rnn.add(Flatten())

# Hidden layer: 128 neurons
model_rnn.add(Dense(128, activation='relu'))

model_rnn.add(Dropout(0.5))

model_rnn.add(Dense(10,activation='softmax'))

print(model_rnn.summary())
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_1 (Conv2D)            (None, 28, 28, 16)        416       
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 14, 14, 16)        0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 14, 14, 36)        14436     
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 7, 7, 36)          0         
_________________________________________________________________
dropout_3 (Dropout)          (None, 7, 7, 36)          0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 1764)              0         
_________________________________________________________________
dense_5 (Dense)              (None, 128)               225920    
_________________________________________________________________
dropout_4 (Dropout)          (None, 128)               0         
_________________________________________________________________
dense_6 (Dense)              (None, 10)                1290      
=================================================================
Total params: 242,062
Trainable params: 242,062
Non-trainable params: 0
_________________________________________________________________
None
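The counts again match the standard formulas: a Conv2D layer has kernel_height × kernel_width × input_channels × filters weights plus one bias per filter, and the Dense layers follow inputs × units + units. A quick sanity check:

```python
# Conv2D parameters = kh * kw * in_channels * filters + filters (bias)
assert 5 * 5 * 1 * 16 + 16 == 416        # conv2d_1
assert 5 * 5 * 16 * 36 + 36 == 14436     # conv2d_2
# Dense parameters = inputs * units + units (bias)
assert 7 * 7 * 36 * 128 + 128 == 225920  # dense_5 (1764 flattened inputs)
assert 128 * 10 + 10 == 1290             # dense_6
```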
model_rnn.compile(loss='categorical_crossentropy',
              optimizer='adam',metrics=['accuracy']) 
train_history=model_rnn.fit(x=x_Train4D_normalize, 
                        y=y_TrainOneHot,validation_split=0.2, 
                        epochs=20, batch_size=300,verbose=2)
Train on 48000 samples, validate on 12000 samples
Epoch 1/20
 - 6s - loss: 0.5052 - acc: 0.8405 - val_loss: 0.1111 - val_acc: 0.9664
Epoch 2/20
 - 2s - loss: 0.1513 - acc: 0.9545 - val_loss: 0.0783 - val_acc: 0.9761
Epoch 3/20
 - 2s - loss: 0.1092 - acc: 0.9674 - val_loss: 0.0558 - val_acc: 0.9826
Epoch 4/20
 - 2s - loss: 0.0897 - acc: 0.9725 - val_loss: 0.0521 - val_acc: 0.9844
Epoch 5/20
 - 2s - loss: 0.0769 - acc: 0.9767 - val_loss: 0.0465 - val_acc: 0.9863
Epoch 6/20
 - 2s - loss: 0.0683 - acc: 0.9799 - val_loss: 0.0423 - val_acc: 0.9876
Epoch 7/20
 - 2s - loss: 0.0632 - acc: 0.9814 - val_loss: 0.0408 - val_acc: 0.9891
Epoch 8/20
 - 2s - loss: 0.0539 - acc: 0.9832 - val_loss: 0.0413 - val_acc: 0.9888
Epoch 9/20
 - 2s - loss: 0.0498 - acc: 0.9852 - val_loss: 0.0362 - val_acc: 0.9897
Epoch 10/20
 - 2s - loss: 0.0443 - acc: 0.9864 - val_loss: 0.0360 - val_acc: 0.9901
Epoch 11/20
 - 2s - loss: 0.0444 - acc: 0.9865 - val_loss: 0.0346 - val_acc: 0.9908
Epoch 12/20
 - 2s - loss: 0.0415 - acc: 0.9873 - val_loss: 0.0315 - val_acc: 0.9915
Epoch 13/20
 - 2s - loss: 0.0367 - acc: 0.9882 - val_loss: 0.0359 - val_acc: 0.9902
Epoch 14/20
 - 2s - loss: 0.0358 - acc: 0.9883 - val_loss: 0.0324 - val_acc: 0.9905
Epoch 15/20
 - 2s - loss: 0.0332 - acc: 0.9899 - val_loss: 0.0330 - val_acc: 0.9917
Epoch 16/20
 - 2s - loss: 0.0312 - acc: 0.9898 - val_loss: 0.0321 - val_acc: 0.9913
Epoch 17/20
 - 2s - loss: 0.0297 - acc: 0.9903 - val_loss: 0.0304 - val_acc: 0.9921
Epoch 18/20
 - 2s - loss: 0.0288 - acc: 0.9911 - val_loss: 0.0303 - val_acc: 0.9917
Epoch 19/20
 - 2s - loss: 0.0252 - acc: 0.9915 - val_loss: 0.0315 - val_acc: 0.9920
Epoch 20/20
 - 2s - loss: 0.0258 - acc: 0.9916 - val_loss: 0.0299 - val_acc: 0.9926
# Evaluate model accuracy on the test data
scores = model_rnn.evaluate(x_Test4D_normalize, y_TestOneHot)
print()
print('acc='+str(scores[1]))
10000/10000 [==============================] - 1s 125us/step

acc=0.9919
show_train_history(train_history, 'acc', 'val_acc')

[figure: CNN training vs. validation accuracy over the 20 epochs]

print(scores)
[0.023901841590534968, 0.9919]
show_train_history(train_history, 'loss', 'val_loss')

[figure: CNN training vs. validation loss over the 20 epochs]

Prediction

print(x_Test4D_normalize[:1].shape)
print(model_rnn.predict_classes(x_Test4D_normalize[:1]))
print(y_Test[:1])
(1, 28, 28, 1)
[7]
[7]
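In this Keras version, predict_classes is a Sequential-only convenience; it is equivalent to taking the argmax of the per-class probabilities returned by predict (a minimal sketch):

```python
# predict returns the softmax probabilities, shape (1, 10)
probs = model_rnn.predict(x_Test4D_normalize[:1])
print(probs.argmax(axis=1))  # same result as predict_classes: [7]
```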
prediction=model_rnn.predict_classes(x_Test4D_normalize)
import matplotlib.pyplot as plt
def plot_images_labels_prediction(images, labels, prediction, idx, num=10):
    fig = plt.gcf()
    fig.set_size_inches(12, 14)
    if num > 25: num = 25  # cap at a 5x5 grid
    for i in range(0, num):
        ax = plt.subplot(5, 5, 1 + i)
        ax.imshow(images[idx], cmap='binary')
        ax.set_title("label=" + str(labels[idx]) +
                     ",predict=" + str(prediction[idx]),
                     fontsize=10)
        ax.set_xticks([]); ax.set_yticks([])  # hide axis ticks
        idx += 1
    plt.show()
plot_images_labels_prediction(x_Test,y_Test,prediction,idx=0)

[figure: the first 10 test images with their labels and predictions]

import pandas as pd
pd.crosstab(y_Test,prediction,
            rownames=['label'],colnames=['predict'])
| label \ predict | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
|---:|---:|---:|---:|---:|---:|---:|---:|---:|---:|---:|
| 0 | 976 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 2 | 0 |
| 1 | 0 | 1132 | 3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
| 2 | 1 | 1 | 1029 | 0 | 0 | 0 | 0 | 1 | 0 | 0 |
| 3 | 0 | 0 | 1 | 1000 | 0 | 5 | 0 | 1 | 2 | 1 |
| 4 | 0 | 0 | 1 | 0 | 976 | 0 | 0 | 1 | 2 | 2 |
| 5 | 1 | 0 | 0 | 4 | 0 | 884 | 2 | 1 | 0 | 0 |
| 6 | 4 | 2 | 1 | 0 | 3 | 1 | 947 | 0 | 0 | 0 |
| 7 | 0 | 2 | 3 | 0 | 0 | 0 | 0 | 1021 | 1 | 1 |
| 8 | 2 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 963 | 4 |
| 9 | 1 | 3 | 0 | 0 | 4 | 4 | 0 | 5 | 1 | 991 |
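The off-diagonal cells mark the confusions; for example, five 3s were predicted as 5. One way to look up those specific test samples (a sketch using the y_Test and prediction arrays from above):

```python
import pandas as pd

df = pd.DataFrame({'label': y_Test, 'predict': prediction})
# Indices of test images whose true label is 3 but were predicted as 5
print(df[(df.label == 3) & (df.predict == 5)].index.tolist())
```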

Saving and Loading the Model

model_rnn.save_weights("drguo_model/cnn_mnist.h5")
print("save weights to drguo_model")
save weights to drguo_model
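Note that save_weights raises an error if the target directory does not exist. One way to guard against that (a sketch, not in the original notebook):

```python
import os

# Create the output directory first if it might be missing
os.makedirs("drguo_model", exist_ok=True)
model_rnn.save_weights("drguo_model/cnn_mnist.h5")
```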
from keras.models import Sequential
from keras.layers import Dense,Dropout,Flatten,Conv2D,MaxPooling2D

load_model = Sequential()

# Convolutional layer 1 (the architecture must match the saved weights exactly)
# filters=16: turns the single input image into 16 feature maps
# kernel_size=(5,5): 5x5 convolution kernel
# padding='same': output spatial size equals the input size
# input_shape=(28,28,1): 28x28 grayscale images with 1 channel
load_model.add(Conv2D(filters=16,
                 kernel_size=(5,5),
                 padding='same',
                 input_shape=(28,28,1), 
                 activation='relu'))

# Max-pooling layer 1: 2x2 windows, 28x28x16 becomes 14x14x16
load_model.add(MaxPooling2D(pool_size=(2, 2)))

# Convolutional layer 2: 16 feature maps become 36
load_model.add(Conv2D(filters=36,
                 kernel_size=(5,5),
                 padding='same',
                 activation='relu'))

# Pooling layer 2: shrinks the feature maps to 7x7x36
load_model.add(MaxPooling2D(pool_size=(2, 2)))

# Randomly drop 25% of the neurons in each training iteration
load_model.add(Dropout(0.25))

# Flatten layer: 7x7x36 flattened into 1,764 values
load_model.add(Flatten())

# Hidden layer: 128 neurons
load_model.add(Dense(128, activation='relu'))

load_model.add(Dropout(0.5))

load_model.add(Dense(10,activation='softmax'))

load_model.compile(loss='categorical_crossentropy',
              optimizer='adam',metrics=['accuracy']) 

try:
    load_model.load_weights("drguo_model/cnn_mnist.h5")
    print("model load success!")
except Exception:  # avoid a bare except; only catch ordinary errors
    print("model load fail!")
model load success!
print(load_model.predict_classes(x_Test4D_normalize[:1]))
[7]
train_history=load_model.fit(x=x_Train4D_normalize, 
                        y=y_TrainOneHot,validation_split=0.2, 
                        epochs=10, batch_size=300,verbose=2)
Train on 48000 samples, validate on 12000 samples
Epoch 1/10
 - 3s - loss: 0.0254 - acc: 0.9916 - val_loss: 0.0353 - val_acc: 0.9914
Epoch 2/10
 - 2s - loss: 0.0243 - acc: 0.9921 - val_loss: 0.0309 - val_acc: 0.9923
Epoch 3/10
 - 2s - loss: 0.0220 - acc: 0.9925 - val_loss: 0.0317 - val_acc: 0.9919
Epoch 4/10
 - 2s - loss: 0.0237 - acc: 0.9918 - val_loss: 0.0291 - val_acc: 0.9928
Epoch 5/10
 - 2s - loss: 0.0207 - acc: 0.9933 - val_loss: 0.0337 - val_acc: 0.9923
Epoch 6/10
 - 2s - loss: 0.0203 - acc: 0.9937 - val_loss: 0.0297 - val_acc: 0.9928
Epoch 7/10
 - 2s - loss: 0.0177 - acc: 0.9941 - val_loss: 0.0334 - val_acc: 0.9927
Epoch 8/10
 - 2s - loss: 0.0192 - acc: 0.9936 - val_loss: 0.0328 - val_acc: 0.9912
Epoch 9/10
 - 2s - loss: 0.0171 - acc: 0.9943 - val_loss: 0.0279 - val_acc: 0.9934
Epoch 10/10
 - 2s - loss: 0.0160 - acc: 0.9946 - val_loss: 0.0280 - val_acc: 0.9927
load_model.save_weights("drguo_model/cnn_mnist.h5")
print("save weights to drguo_model")
save weights to drguo_model
show_train_history(train_history, 'acc', 'val_acc')

[figure: training vs. validation accuracy for the 10 continued epochs]

load_model.save("drguo_model/cnn_mnist_model.h5")
print("save model to drguo_model")
save model to drguo_model
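Unlike save_weights, model.save stores the architecture, weights, and optimizer state together, so the model can be restored without rebuilding the layers by hand. A minimal sketch (aliasing Keras's load_model function, since load_model is already used as a variable name above):

```python
from keras.models import load_model as keras_load_model

# Restores architecture + weights + optimizer state in one call
restored = keras_load_model("drguo_model/cnn_mnist_model.h5")
print(restored.predict_classes(x_Test4D_normalize[:1]))  # [7]
```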
!jupyter nbconvert --to markdown "keras_gpu.ipynb"
Source: blog.csdn.net/Dr_Guo/article/details/89375375