Machine Learning (7) -- Neural Network Applications

A Simple Neural Network Implementation

The class below implements a fully connected feed-forward network trained by backpropagation with stochastic gradient descent. The activation function (tanh or logistic) is chosen at construction time, and a bias term is handled by appending a column of ones to the input.

"""  NerualNetwork.py
"""
import numpy as np


def tanh(x):
    return np.tanh(x)


def tanh_deriv(a):
    # derivative of tanh written in terms of the output a = tanh(z):
    # d/dz tanh(z) = 1 - a**2 (fit() calls this on the activated values)
    return 1.0 - a * a


def logistic(x):
    return 1/(1+np.exp(-x))


def logistic_deriv(a):
    # derivative of the logistic function in terms of its output
    # a = logistic(z): d/dz logistic(z) = a * (1 - a)
    return a * (1 - a)


class NeuralNetwork:
    def __init__(self, layers, activation="tanh"):
        # layers: list of layer sizes, e.g. [2, 10, 1] means 2 inputs,
        # one hidden layer of 10 units and 1 output
        self.activation = activation
        if activation == "tanh":
            self.active_fun = tanh
            self.activa_der = tanh_deriv
        elif activation == "logistic":
            self.active_fun = logistic
            self.activa_der = logistic_deriv
        else:
            raise ValueError("unknown activation: " + activation)

        # keep the weights on the instance (a class-level list would be
        # shared by every NeuralNetwork object); each hidden layer gets
        # one extra unit that plays the role of a bias, matching the
        # column of ones appended in fit(); init in (-0.25, 0.25)
        self.weights = []
        for i in range(1, len(layers) - 1):
            self.weights.append(
                (2 * np.random.random((layers[i - 1] + 1, layers[i] + 1)) - 1) * 0.25
            )
        self.weights.append(
            (2 * np.random.random((layers[-2] + 1, layers[-1])) - 1) * 0.25
        )

    def fit(self, x, y, learn_rate=0.2, epochs=10000):
        # convert to numpy arrays and append a bias column of ones to x
        x = np.atleast_2d(x)
        tmp = np.ones([x.shape[0], x.shape[1]+1])
        tmp[:, 0:-1] = x
        x = tmp
        y = np.array(y)
        """
        共计训练epochs次,每次随机抽取一条数据
        """
        for k in range(epochs):
            i = np.random.randint(x.shape[0])  # draw one random sample
            # result holds one array per layer: that layer's output;
            # keep the whole row of x, including the bias column
            result = [x[i]]
            # forward pass from the input layer to the output layer
            for layer in range(len(self.weights)):
                result.append(self.active_fun(np.dot(result[layer], self.weights[layer])))
            error = y[i] - result[-1]
            deltas = [error * self.activa_der(result[-1])]   # delta at the output layer

            # backpropagate: each hidden layer's delta comes from the
            # layer above; the deltas give the weight updates below
            for layer in range(len(result)-2, 0, -1):
                deltas.append(
                    np.dot(deltas[-1], self.weights[layer].T)
                    * self.activa_der(result[layer])
                )
            deltas.reverse()

            # walk forward through the layers and apply the updates
            for layer in range(len(self.weights)):
                result_layer = np.atleast_2d(result[layer])
                delta = np.atleast_2d(deltas[layer])
                self.weights[layer] += learn_rate * np.dot(result_layer.T, delta)

    def predict(self, x):
        x = np.atleast_2d(x)  # accept a single sample or a batch
        # append the bias column, as in fit (np.ones takes a shape list)
        tmp = np.ones([x.shape[0], x.shape[1] + 1])
        tmp[:, 0:-1] = x
        for layer in range(len(self.weights)):
            tmp = self.active_fun(np.dot(tmp, self.weights[layer]))
        return tmp
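
One subtlety worth checking: fit() applies the derivative helpers to values that have already been passed through the activation, which is why tanh_deriv and logistic_deriv above are written in terms of the output a rather than the pre-activation z. A minimal numerical check of that convention, assuming the module is importable as nn.NeuralNetwork (the layout test.py below uses):

import numpy as np
from nn.NeuralNetwork import tanh, tanh_deriv

z = np.linspace(-2.0, 2.0, 9)
eps = 1e-6
# central-difference approximation of d/dz tanh(z)
numeric = (tanh(z + eps) - tanh(z - eps)) / (2 * eps)
# analytic derivative computed from the activated output a = tanh(z)
analytic = tanh_deriv(tanh(z))
print(np.allclose(numeric, analytic, atol=1e-8))  # expect True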

"""  test.py
"""
from nn.NeuralNetwork import NeuralNetwork
import numpy as np

nn = NeuralNetwork([2, 10, 1], "tanh")

print(nn.weights)

x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 0])
nn.fit(x, y)
print(nn.weights)
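
Printing the weight matrices only shows that training changed them. To see whether the network actually learned XOR, print a prediction per input as well (my addition, not part of the original test); after 10000 epochs each output should be close to the targets 0, 1, 1, 0:

for sample in x:
    print(sample, nn.predict(sample))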

Reposted from blog.csdn.net/qq_38876114/article/details/94409601