Tensorflow 2.0 : FCNN

版权声明:本文为博主原创文章,未经博主允许不得转载。 https://blog.csdn.net/u014281392/article/details/89049128

全连接神经网络 FCNN (Fully-Connected Neural Network)

Tensorflow 2.0

import warnings
import seaborn as sns
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras import layers, optimizers, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
warnings.filterwarnings('ignore')
%matplotlib inline
# Min-Max Normalization
def min_max_normalization(x, y):
    """Scale pixel values into [0, 1] and coerce labels to int64.

    Args:
        x: image tensor with raw pixel values in [0, 255].
        y: label tensor of class indices.

    Returns:
        Tuple of (x / 255 as float32, y as int64).
    """
    scaled = tf.cast(x, tf.float32) / 255.0
    labels = tf.cast(y, tf.int64)
    return scaled, labels

# https://www.kaggle.com/c/digit-recognizer/data
def load_data(path):
    """Load the Kaggle digit-recognizer CSV and build train/valid sets.

    Args:
        path: path to train.csv ('label' column + 784 pixel columns).

    Returns:
        (train_ds, valid_ds): train_ds is a normalized, shuffled, batched
        tf.data.Dataset; valid_ds is a plain (features, labels) tuple with
        features already scaled into [0, 1].
    """
    df = pd.read_csv(path)
    features = df.drop(['label'], axis=1).values
    labels = df.label.values
    train_x, valid_x, train_y, valid_y = train_test_split(
        features, labels, test_size=100)
    # Slice along the first axis to get one (image, label) pair per element.
    train_ds = tf.data.Dataset.from_tensor_slices((train_x, train_y))
    # Validation split is normalized eagerly and kept as plain arrays.
    valid_ds = (valid_x / 255.0, valid_y)
    train_ds = (train_ds.map(min_max_normalization)
                        .take(10000)
                        .shuffle(10000)
                        .batch(1000))
    return train_ds, valid_ds

# softmax cross entropy
def loss_function(labels, pred_proba):
    """Mean sparse softmax cross-entropy between labels and logits.

    Args:
        labels: integer class indices, shape (batch,).
        pred_proba: raw (unnormalized) logits from the model.

    Returns:
        Scalar mean loss over the batch.
    """
    per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=pred_proba)
    return tf.reduce_mean(per_example)


def compute_accuracy(pred_proba, labels):
    """Fraction of rows whose argmax over logits equals the label.

    Args:
        pred_proba: logits, shape (batch, n_classes).
        labels: int64 class indices, shape (batch,).

    Returns:
        Scalar float32 accuracy in [0, 1].
    """
    preds = tf.argmax(pred_proba, axis=1)
    correct = tf.cast(tf.equal(preds, labels), tf.float32)
    return tf.reduce_mean(correct)
def train(epoch):
    """Train a 3-layer FCNN on the MNIST CSV data for *epoch* epochs.

    Args:
        epoch: number of full passes over the training dataset.

    Returns:
        (train_losses, train_accuracies, valid_accuracies, cfm) — the
        first three are per-step lists; cfm is the validation confusion
        matrix computed with the final weights.
    """
    model = keras.Sequential([layers.Dense(128, activation='relu'),
                              layers.Dense(128, activation='relu'),
                              layers.Dense(10)])  # raw logits, no softmax
    optimizer = optimizers.Adam()
    trainloss_list = []      # every step loss
    trainaccuracy_list = []  # every step train accuracy
    validaccuracy_list = []  # every step valid accuracy
    train_ds, valid_ds = load_data('../DataSet/mnist_csv/train.csv')
    for i in range(epoch):
        for step, (x, y) in enumerate(train_ds):
            # FIX: only the forward pass and loss belong on the tape.
            # The original also ran the validation forward pass and all
            # metric bookkeeping inside GradientTape, which made the tape
            # record (and hold memory for) ops that are never
            # differentiated, on every single step.
            with tf.GradientTape() as gdt:
                pred_proba = model(x)
                tloss_ = loss_function(y, pred_proba)
            # compute gradient
            grads = gdt.gradient(tloss_, model.trainable_variables)
            # update weights
            optimizer.apply_gradients(zip(grads, model.trainable_variables))
            # Metrics are computed outside the tape.
            predictions = tf.argmax(pred_proba, axis=1)
            valid_preds = tf.argmax(model(valid_ds[0]), axis=1)
            taccuracy_ = accuracy_score(y, predictions)
            vaccuracy_ = accuracy_score(valid_ds[1], valid_preds)
            # Store a plain float instead of a live tf.Tensor so the
            # returned lists are cheap to keep and to plot.
            trainloss_list.append(float(tloss_))
            trainaccuracy_list.append(taccuracy_)
            validaccuracy_list.append(vaccuracy_)
            if step % 1000 == 0:
                print('Train loss is : %f, Train accuracy is : %f, Valid accuracy is : %f'
                      % (tloss_, taccuracy_, vaccuracy_))
    # Confusion matrix on the validation split with the final weights.
    cfm = confusion_matrix(valid_ds[1], tf.argmax(model(valid_ds[0]), axis=1))
    return trainloss_list, trainaccuracy_list, validaccuracy_list, cfm
def main():
    """Train for 20 epochs, then plot curves and the confusion matrix."""
    tloss, taccu, vaccu, cfm = train(20)
    plt.figure(figsize=(20, 11))
    plt.subplot(2, 2, 1)
    plt.title('Train loss & accuracy')
    plt.plot(tloss, label='loss')
    plt.plot(taccu, label='accuracy')
    plt.grid(axis='y')
    plt.legend()
    plt.subplot(2, 2, 2)
    plt.title('Train valid accuracy')
    plt.plot(taccu, label='Train')
    plt.plot(vaccu, label='Valid')
    plt.legend()
    plt.grid(axis='y')
    plt.subplot(2, 2, 3)
    plt.title('Confusion matrix')
    sns.heatmap(cfm)
    plt.subplot(2, 2, 4)
    plt.title('Tensorflow 2.0')
    # BUG FIX: the original called plt.imshow(tf_pic), but `tf_pic` is
    # never defined anywhere in this file, so main() always crashed with
    # NameError on its last panel. The decorative image is dropped and
    # the panel left blank.
    plt.axis('off')
    # Show explicitly so the figure also appears outside a notebook
    # (the script relies on `%matplotlib inline` otherwise).
    plt.show()

if __name__ == '__main__':
    main()
Train loss is : 2.306837, Train accuracy is : 0.057000, Valid accuracy is : 0.080000
Train loss is : 1.265308, Train accuracy is : 0.763000, Valid accuracy is : 0.710000
Train loss is : 0.618489, Train accuracy is : 0.846000, Valid accuracy is : 0.770000
Train loss is : 0.425268, Train accuracy is : 0.870000, Valid accuracy is : 0.870000
Train loss is : 0.340226, Train accuracy is : 0.896000, Valid accuracy is : 0.870000
Train loss is : 0.288782, Train accuracy is : 0.915000, Valid accuracy is : 0.880000
Train loss is : 0.255068, Train accuracy is : 0.922000, Valid accuracy is : 0.920000
Train loss is : 0.234775, Train accuracy is : 0.930000, Valid accuracy is : 0.940000
Train loss is : 0.219541, Train accuracy is : 0.937000, Valid accuracy is : 0.940000
Train loss is : 0.195807, Train accuracy is : 0.941000, Valid accuracy is : 0.940000
Train loss is : 0.174473, Train accuracy is : 0.958000, Valid accuracy is : 0.940000
Train loss is : 0.160328, Train accuracy is : 0.963000, Valid accuracy is : 0.940000
Train loss is : 0.148957, Train accuracy is : 0.966000, Valid accuracy is : 0.940000
Train loss is : 0.138590, Train accuracy is : 0.965000, Valid accuracy is : 0.940000
Train loss is : 0.129336, Train accuracy is : 0.968000, Valid accuracy is : 0.950000
Train loss is : 0.120952, Train accuracy is : 0.968000, Valid accuracy is : 0.950000
Train loss is : 0.113196, Train accuracy is : 0.970000, Valid accuracy is : 0.950000
Train loss is : 0.106510, Train accuracy is : 0.971000, Valid accuracy is : 0.950000
Train loss is : 0.099868, Train accuracy is : 0.973000, Valid accuracy is : 0.950000
Train loss is : 0.093243, Train accuracy is : 0.975000, Valid accuracy is : 0.950000

[图: main() 输出的训练曲线、准确率曲线与混淆矩阵热力图 / Figure: training curves, accuracy curves, and confusion-matrix heatmap produced by main()]

猜你喜欢

转载自blog.csdn.net/u014281392/article/details/89049128
今日推荐