# A simple neural-network implementation
""" NerualNetwork.py
"""
import numpy as np
def tanh(x):
    """Hyperbolic tangent activation, applied element-wise."""
    y = np.tanh(x)
    return y
def tanh_deriv(x):
    """Derivative of tanh: 1 - tanh(x)**2.

    Evaluates np.tanh once instead of twice as the original did.
    """
    t = np.tanh(x)
    return 1.0 - t * t
def logistic(x):
    """Sigmoid activation 1 / (1 + e**-x), applied element-wise."""
    denom = 1 + np.exp(-x)
    return 1 / denom
def logistic_deriv(x):
    """Derivative of the sigmoid: s(x) * (1 - s(x)).

    The original evaluated logistic(x) twice (two exp calls); the
    sigmoid is computed once here and reused.
    """
    s = 1 / (1 + np.exp(-x))
    return s * (1 - s)
class NeuralNetwork:
    """A minimal fully connected feed-forward network trained with
    stochastic gradient descent and back-propagation.

    The activation function is selected by name at construction time
    ("tanh" or "logistic").
    """

    def __init__(self, layers, activation="tanh"):
        """
        layers: layer sizes, input first, e.g. [2, 10, 1].
        activation: "tanh" or "logistic".
        Raises ValueError for an unknown activation name (the original
        silently left the function hooks as None and crashed later in fit).
        """
        self.activation = activation
        if activation == "tanh":
            self.active_fun = tanh
            self.activa_der = tanh_deriv
        elif activation == "logistic":
            self.active_fun = logistic
            self.activa_der = logistic_deriv
        else:
            raise ValueError("unknown activation: %r" % (activation,))
        # Per-instance weights. The original used a CLASS-level list, so
        # every instance shared (and appended to) the same weights.  It
        # also appended the matrix for each middle layer twice, breaking
        # any network deeper than three layers; one matrix per
        # consecutive layer pair is correct for any depth.
        self.weights = [
            np.random.random((layers[i - 1], layers[i]))
            for i in range(1, len(layers))
        ]

    def fit(self, x, y, learn_rate=0.2, epochs=10000):
        """Train on (x, y): `epochs` SGD steps, each using one randomly
        chosen sample.

        x: 2-D array-like of input rows; y: matching targets.
        """
        x = np.atleast_2d(x)
        y = np.array(y)
        # The original padded x with a bias column and then immediately
        # sliced it off again per sample (x[i][0:-1]); that dead
        # round-trip is removed — the forward pass never saw the bias.
        for _ in range(epochs):
            i = np.random.randint(x.shape[0])
            # Forward pass, keeping every layer's activation.
            result = [x[i]]
            for w in self.weights:
                result.append(self.active_fun(np.dot(result[-1], w)))
            # Backward pass: deltas from the output layer inwards.
            # NOTE(review): the derivative is evaluated at the *activated*
            # output (e.g. tanh_deriv(tanh(z))) rather than at the
            # pre-activation; kept as-is to preserve the original
            # training dynamics — confirm intended.
            error = y[i] - result[-1]
            deltas = [error * self.activa_der(result[-1])]
            for layer in range(len(result) - 2, 0, -1):
                deltas.append(
                    np.dot(deltas[-1], self.weights[layer].T)
                    * self.activa_der(result[layer])
                )
            deltas.reverse()
            # Gradient step for every weight matrix.
            for layer, delta in enumerate(deltas):
                act = np.atleast_2d(result[layer])
                d = np.atleast_2d(delta)
                self.weights[layer] += learn_rate * np.dot(act.T, d)

    def predict(self, x):
        """Forward-propagate `x` (one sample or a batch of rows) and
        return the network output as a 2-D array.

        Fixes two bugs in the original: np.ones was called as
        np.ones(n, m) — the second positional argument of np.ones is the
        dtype, so this raised a TypeError — and the appended bias column
        made the input width layers[0] + 1, which cannot be multiplied
        with the first weight matrix of shape (layers[0], layers[1]).
        fit() never used the bias either, so the raw features are
        forwarded directly.
        """
        out = np.atleast_2d(x)
        for w in self.weights:
            out = self.active_fun(np.dot(out, w))
        return out
""" test.py
"""
from nn.NeuralNetwork import NeuralNetwork
import numpy as np
# XOR demo: build a 2-10-1 network, train it on the four XOR
# patterns, and show the weights before and after fitting.
nn = NeuralNetwork([2, 10, 1], activation="tanh")
print(nn.weights)
x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 0])
nn.fit(x, y)
print(nn.weights)