Deep Learning from Scratch (深度学习入门：基于Python的理论与实现), Chapter 4: A Complete Walkthrough of two_layer_net.py

# coding: utf-8
import sys, os
sys.path.append(os.pardir)  # make files in the parent directory (common/) importable
import numpy as np
from common.functions import *  # sigmoid, softmax, cross_entropy_error, sigmoid_grad
from common.gradient import numerical_gradient

class TwoLayerNet:

    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        # Initialize weights.
        # input_size / hidden_size / output_size: number of neurons in each layer;
        # weight_init_std: standard deviation of the Gaussian initialization (default 0.01)
        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)  # Gaussian random values
        self.params['b1'] = np.zeros(hidden_size)  # initialized to zeros
        self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)  # Gaussian random values
        self.params['b2'] = np.zeros(output_size)  # initialized to zeros

    def predict(self, x):  # inference (forward pass)
        W1, W2 = self.params['W1'], self.params['W2']  # weights
        b1, b2 = self.params['b1'], self.params['b2']  # biases

        a1 = np.dot(x, W1) + b1   # affine transform: dot product plus bias
        z1 = sigmoid(a1)          # sigmoid activation
        a2 = np.dot(z1, W2) + b2  # affine transform: dot product plus bias
        y = softmax(a2)           # softmax turns scores into probabilities

        return y  # predicted probabilities
        
    # x: input data, t: teacher labels (one-hot)
    def loss(self, x, t):  # value of the loss function
        y = self.predict(x)  # run inference

        return cross_entropy_error(y, t)  # cross-entropy error between output y and labels t
    
    def accuracy(self, x, t):  # recognition accuracy
        y = self.predict(x)  # run inference
        y = np.argmax(y, axis=1)  # predicted class: index of the max probability per sample
        t = np.argmax(t, axis=1)  # true class: index of the 1 in each one-hot label

        accuracy = np.sum(y == t) / float(x.shape[0])  # fraction of correct predictions
        return accuracy

    # x: input data, t: teacher labels (one-hot)
    def numerical_gradient(self, x, t):  # numerical gradient (slow; useful for gradient checking)
        loss_W = lambda W: self.loss(x, t)  # the loss as a function of the weights, for the finite-difference routine

        grads = {}
        # note: these calls resolve to the global numerical_gradient imported from
        # common.gradient, not to this method, despite the shared name
        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])  # gradient of the loss w.r.t. W1
        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])  # gradient of the loss w.r.t. b1
        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])  # gradient of the loss w.r.t. W2
        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])  # gradient of the loss w.r.t. b2

        return grads
        
    def gradient(self, x, t):  # analytic gradient via backpropagation (treated in detail in Chapter 5)
        W1, W2 = self.params['W1'], self.params['W2']  # weights
        b1, b2 = self.params['b1'], self.params['b2']  # biases
        grads = {}

        batch_num = x.shape[0]  # mini-batch size
        
        # forward
        a1 = np.dot(x, W1) + b1   # affine transform: dot product plus bias
        z1 = sigmoid(a1)          # sigmoid activation
        a2 = np.dot(z1, W2) + b2  # affine transform: dot product plus bias
        y = softmax(a2)           # softmax output

        # backward
        dy = (y - t) / batch_num  # softmax-with-cross-entropy backward: (y - t), averaged over the batch
        grads['W2'] = np.dot(z1.T, dy)    # dL/dW2
        grads['b2'] = np.sum(dy, axis=0)  # dL/db2

        da1 = np.dot(dy, W2.T)             # propagate back through the second affine layer
        dz1 = sigmoid_grad(a1) * da1       # backward through the sigmoid: sigmoid'(a1) * upstream gradient
        grads['W1'] = np.dot(x.T, dz1)     # dL/dW1
        grads['b1'] = np.sum(dz1, axis=0)  # dL/db1

        return grads
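
As a sanity check, the slow numerical gradient can be compared against the backprop gradient. Below is a minimal sketch; the layer sizes, batch size, and random data are illustrative, not part of the original script:

# Gradient check: the two gradient methods should agree closely
import numpy as np

net = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
x = np.random.rand(3, 784)               # a tiny fake batch of 3 samples
t = np.eye(10)[np.random.choice(10, 3)]  # 3 fake one-hot labels

grad_numerical = net.numerical_gradient(x, t)  # slow finite differences
grad_backprop = net.gradient(x, t)             # fast backpropagation

for key in grad_numerical:
    diff = np.average(np.abs(grad_backprop[key] - grad_numerical[key]))
    print(key + ":" + str(diff))  # expect a very small value, on the order of 1e-8 or below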

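To actually train the network, the standard recipe from this chapter is mini-batch SGD. Here is a minimal sketch, assuming the MNIST loader from the book's dataset/mnist.py is available; the hyperparameters (learning rate, batch size, iteration count) are illustrative:

# Mini-batch SGD training sketch
import numpy as np
from dataset.mnist import load_mnist

(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)

net = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
learning_rate = 0.1
batch_size = 100

for i in range(1000):
    batch_mask = np.random.choice(x_train.shape[0], batch_size)  # sample a random mini-batch
    x_batch, t_batch = x_train[batch_mask], t_train[batch_mask]

    grads = net.gradient(x_batch, t_batch)  # backprop gradient (use numerical_gradient only for checking)
    for key in ('W1', 'b1', 'W2', 'b2'):
        net.params[key] -= learning_rate * grads[key]  # SGD parameter update

    if i % 100 == 0:
        print(net.loss(x_batch, t_batch))  # loss should trend downward
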

Reposted from blog.csdn.net/weixin_33595571/article/details/83552514