卷积神经网络-第一周作业2 (基于卷积神经网络的手势分类)

直接上code

import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
import tensorflow as tf
from tensorflow.python.framework import ops
from cnn_utils import *
# Load the SIGNS dataset (hand-gesture images, 6 classes) via cnn_utils helper.
X_train_orig,Y_train_orig,X_test_orig,Y_test_orig,classes=load_dataset()
# Scale pixel values from [0, 255] to [0, 1].
X_train=X_train_orig/255
X_test=X_test_orig/255
# One-hot encode labels into shape (m, 6); convert_to_one_hot returns (6, m), so transpose.
Y_train=convert_to_one_hot(Y_train_orig,6).T
Y_test=convert_to_one_hot(Y_test_orig,6).T

def create_placeholders(n_H0,n_W0,n_C0,n_y):
    """Create TF1 placeholders for a batch of input images and one-hot labels.

    Arguments:
        n_H0, n_W0, n_C0 -- height, width and channel count of each input image
        n_y -- number of classes

    Returns:
        (X, Y) -- float32 placeholders of shape (None, n_H0, n_W0, n_C0) and (None, n_y);
                  the batch dimension is left open (None) so any batch size can be fed.
    """
    images = tf.placeholder(tf.float32, [None, n_H0, n_W0, n_C0], name='X')
    labels = tf.placeholder(tf.float32, [None, n_y], name='Y')
    return images, labels
def initialize_parameters():
    """Initialize the two convolutional filter banks with Xavier initialization.

    Returns:
        dict with:
            "W1" -- filters of shape (4, 4, 3, 8)   for the first conv layer
            "W2" -- filters of shape (2, 2, 8, 16)  for the second conv layer
    Seeds are fixed (graph seed 1, initializer seed 0) for reproducibility.
    """
    tf.set_random_seed(1)
    W1 = tf.get_variable(
        'W1', [4, 4, 3, 8], tf.float32,
        initializer=tf.contrib.layers.xavier_initializer(seed=0))
    W2 = tf.get_variable(
        'W2', [2, 2, 8, 16], tf.float32,
        initializer=tf.contrib.layers.xavier_initializer(seed=0))
    return {"W1": W1, "W2": W2}
def forward_propagation(X,parameters):
    """Forward pass: CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FC.

    Arguments:
        X -- input placeholder, shape (batch, n_H0, n_W0, 3)
        parameters -- dict with conv filters "W1" (4,4,3,8) and "W2" (2,2,8,16)

    Returns:
        Z3 -- raw logits of the last fully-connected layer, shape (batch, 6);
              no softmax here (it is applied inside the cost function).
    """
    W1=parameters['W1']
    W2=parameters['W2']
    # Conv layer 1: stride 1, SAME padding keeps spatial size.
    Z1=tf.nn.conv2d(input=X,filter=W1,strides=[1,1,1,1],padding='SAME')
    A1=tf.nn.relu(Z1)
    # Max-pool with 8x8 window and 8x8 stride: downsamples H and W by 8.
    P1=tf.nn.max_pool(value=A1,ksize=[1,8,8,1],strides=[1,8,8,1],padding='SAME')
    # Conv layer 2: stride 1, SAME padding.
    Z2=tf.nn.conv2d(input=P1,filter=W2,strides=[1,1,1,1],padding='SAME')
    A2=tf.nn.relu(Z2)
    # Max-pool with 4x4 window and 4x4 stride: downsamples by a further factor of 4.
    P2=tf.nn.max_pool(value=A2,ksize=[1,4,4,1],strides=[1,4,4,1],padding='SAME')
    # Flatten to (batch, features) and map to 6 class logits (no activation).
    F=tf.contrib.layers.flatten(P2)
    Z3=tf.contrib.layers.fully_connected(F,6,activation_fn=None)
    return Z3
def compute_cost(Z3,Y):
    """Mean softmax cross-entropy between logits Z3 (batch, 6) and one-hot labels Y."""
    per_example_loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=Z3, labels=Y)
    cost = tf.reduce_mean(per_example_loss)
    return cost
def model(X_train,Y_train,X_test,Y_test,learning_rate=0.009,
          num_epochs=100,minibatch_size=64,print_cost=True):
    """Build, train and evaluate the two-layer ConvNet in TensorFlow 1.x.

    Arguments:
        X_train -- training images, shape (m, n_H0, n_W0, n_C0), values in [0, 1]
        Y_train -- one-hot training labels, shape (m, n_y)
        X_test, Y_test -- test set in the same format
        learning_rate -- Adam learning rate
        num_epochs -- number of passes over the training set
        minibatch_size -- minibatch size for SGD
        print_cost -- if True, print the cost every 5 epochs and plot the curve

    Returns:
        (train_accuracy, test_accuracy, parameters)
    """
    ops.reset_default_graph()   # allow re-running without variable-name clashes
    tf.set_random_seed(1)       # reproducible graph-level randomness
    seed = 3                    # numpy seed for minibatch shuffling
    (m, n_H0, n_W0, n_C0) = X_train.shape
    n_y = Y_train.shape[1]
    costs = []

    # Build the computation graph.
    X, Y = create_placeholders(n_H0, n_W0, n_C0, n_y)
    parameters = initialize_parameters()
    Z3 = forward_propagation(X, parameters)
    cost = compute_cost(Z3, Y)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(num_epochs):
            minibatch_cost = 0.0
            num_minibatches = m // minibatch_size  # floor division (was int(m/minibatch_size))
            seed = seed + 1  # different shuffle each epoch, deterministic across runs
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)
            for minibatch_X, minibatch_Y in minibatches:
                _, temp_cost = sess.run([optimizer, cost],
                                        feed_dict={X: minibatch_X, Y: minibatch_Y})
                minibatch_cost += temp_cost / num_minibatches
            if print_cost and epoch % 5 == 0:
                print("Cost after epoch %i:%f" % (epoch, minibatch_cost))
            if print_cost:  # `epoch % 1 == 0` was always true; one cost per epoch
                costs.append(minibatch_cost)

        # Plot the learning curve: one point per epoch.
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('epochs')  # fixed: old label 'iterations (per tens)' was inaccurate
        plt.title("learning rate=" + str(learning_rate))
        plt.show()

        # Evaluation ops: predicted class = argmax over the 6 logits.
        predict_op = tf.argmax(Z3, 1)
        correct_prediction = tf.equal(predict_op, tf.argmax(Y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        # (removed stray debug `print(accuracy)` — it printed the Tensor object, not a value)
        train_accuracy = accuracy.eval({X: X_train, Y: Y_train})
        test_accuracy = accuracy.eval({X: X_test, Y: Y_test})
        print("Train Accuracy:", train_accuracy)
        print("Test Accuracy:", test_accuracy)
        return train_accuracy, test_accuracy, parameters

现在精确度有点低,调整一下学习率和迭代次数再试试

def model(X_train,Y_train,X_test,Y_test,learning_rate=0.01,
          num_epochs=150,minibatch_size=64,print_cost=True):

Cost after epoch 100:1.004017
Cost after epoch 105:0.916604
Cost after epoch 110:0.957652
Cost after epoch 115:0.908503
Cost after epoch 120:0.914575
Cost after epoch 125:0.938486
Cost after epoch 130:0.898564
Cost after epoch 135:0.881808
Cost after epoch 140:0.926435
Cost after epoch 145:0.878047
Tensor("Mean_1:0", shape=(), dtype=float32)
Train Accuracy: 0.68425924
Test Accuracy: 0.69166666
是 bias(偏差)问题,需要增加数据或者增加网络深度


猜你喜欢

转载自blog.csdn.net/qq_31119155/article/details/80899195