NN Implementation

#coding=utf-8


'''
Goal: implement a neural network model from scratch, covering:
    1. weight and bias initialization
    2. activation function
    3. forward propagation
    4. backpropagation
    5. dropout to reduce overfitting

Example task: learn the 3-input XOR (parity) function with a small
feed-forward network (the script below builds a 3-10-1 network).

input1  input2  input3  out
  0       0       0      0
  0       0       1      1
  0       1       0      1
  0       1       1      0
  1       0       0      1
  1       0       1      0
  1       1       0      0

  1       1       1      ?
'''
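# The target column in the table above is the 3-input XOR (parity) of the
# inputs, e.g. 0 ^ 1 ^ 1 = 0 and 1 ^ 0 ^ 0 = 1.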

import numpy as np


class NN:
    # running sum of the per-sample error for the current epoch
    averge_error = 0

    def actvition(self, x):
        # sigmoid activation: 1 / (1 + e^(-x))
        return 1/(1+np.exp(-x))

    def activition_direct(self, a):
        # derivative of the sigmoid; a is assumed to already be the sigmoid
        # output, so sigma'(z) = sigma(z) * (1 - sigma(z)) = a * (1 - a)
        return a*(1-a)
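    # sanity check: sigmoid(0) = 0.5, and the derivative there is
    # 0.5 * (1 - 0.5) = 0.25, the sigmoid's maximum slope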

    def __init__(self,layer):

        # initialize the weights (uniform in [-0.25, 0.25)), one matrix per pair of adjacent layers
        self.weights=[]
        for i in range(len(layer)-1):
            w_tmp = (np.random.random([layer[i],layer[i+1]])*2-1)*0.25
            self.weights.append(w_tmp)
        # initialize the biases (uniform in [-0.25, 0.25)), one vector per non-input layer
        self.bias = []
        for i in range(1,len(layer)):
            b_tmp = (np.random.random(layer[i])*2-1)*0.25
            self.bias.append(b_tmp)
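        # e.g. with layer = [3, 10, 1] (the network built at the bottom of this
        # script) this gives weight matrices of shape (3, 10) and (10, 1) and
        # bias vectors of length 10 and 1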


    def train(self,x,y,learn_rate=0.01,epoch=1000):
        for k in range(epoch):
            self.averge_error = 0
            for j in range(x.shape[0]):
                x_input = [x[j]]
                # forward propagation
                for i in range(len(self.weights)):
                    # each layer's output: f(w * x + b)
                    l_tmp = self.actvition(np.dot(x_input[i],self.weights[i])+self.bias[i])
                    # dropout: each hidden neuron is dropped with probability 0.2
                    # (the returned, masked activations must be kept, otherwise
                    # dropout has no effect; the output layer is not masked)
                    if i < len(self.weights) - 1:
                        l_tmp = self.drop_out(l_tmp, 0.2)
                    x_input.append(l_tmp)
                # x_input now holds every layer's output (including the raw input)
                # error at the output layer
                error = y[j]-x_input[-1]
                self.averge_error+=error
                # delta of the output layer: error times the activation
                # derivative, element-wise
                delta =  [error*self.activition_direct(x_input[-1])]
                # propagate the deltas backwards through the weights; the output
                # layer is already done, so the loop stops before the input layer
                for m in range(len(x_input)-2,0,-1):
                    delta_tmp = np.dot(delta[-1],self.weights[m].T)*self.activition_direct(x_input[m])
                    delta.append(delta_tmp)
                delta.reverse()

                # gradient descent step on the weights: W += lr * x^T . delta
                for n in range(len(self.weights)):
                    x_input_2d  = np.atleast_2d(x_input[n])
                    delta_2d = np.atleast_2d(delta[n])
                    self.weights[n] += learn_rate*np.dot(x_input_2d.T,delta_2d)

                # gradient descent step on the biases
                for n in range(len(self.bias)):
                    self.bias[n] += learn_rate * delta[n]


    # inverted dropout: disable each neuron with probability p during training
    def drop_out(self, x, p):
        if p < 0 or p >= 1:
            raise ValueError('dropout probability p must be in [0, 1)')
        retain_prob = 1 - p
        # sample a 0/1 mask; each element is 1 (neuron kept) with probability retain_prob
        sample = np.random.binomial(n=1, p=retain_prob, size=x.shape)
        # zero out the dropped neurons
        x = x * sample
        # scale up the surviving activations so the expected value is unchanged
        x = x * (1 / retain_prob)
        return x
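    # Example: with p = 0.2 each activation survives with probability 0.8 and the
    # survivors are scaled by 1 / 0.8 = 1.25, so the expected value of every
    # activation is unchanged and no extra rescaling is needed at predict time.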

    def predict(self,x):
        # plain forward pass; dropout is only applied during training
        x_input = x
        for j in range(len(self.weights)):
            x_input = self.actvition(np.dot(x_input,self.weights[j])+self.bias[j])
            print(x_input)
        return x_input

x_train=np.array([
    [0,0,0],
    [0,0,1],
    [0,1,0],
    [0,1,1],
    [1,0,0],
    [1,0,1],
    [1,1,0],
])



x_label=np.array([
    [0],
    [1],
    [1],
    [0],
    [1],
    [0],
    [0]
])

x_test=np.array([
    [1,0,1]
])
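# note: the test pattern [1, 0, 1] also appears in the training data (label 0);
# the truly unseen combination from the truth table is [1, 1, 1]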



# build a 3-10-1 network and train it for 1000 epochs with learning rate 0.01
nn = NN([3,10,1])
nn.train(x_train,x_label,0.01,1000)
print(nn.predict(x_test))
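
Since the network ends in a sigmoid, predict returns a value in (0, 1); thresholding at 0.5 turns it into a binary label. A minimal usage sketch (not from the original post; the result for the unseen pattern depends on how well this tiny network happens to train):

# threshold the sigmoid output at 0.5 to get a 0/1 label
print((nn.predict(x_test) > 0.5).astype(int))

# the held-out pattern from the docstring: 3-bit parity would label [1, 1, 1]
# as 1, though this toy network is not guaranteed to learn that
print((nn.predict(np.array([[1, 1, 1]])) > 0.5).astype(int))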

Reposted from blog.csdn.net/lylclz/article/details/79921049