MXNet: Linear Regression from Scratch

Copyright notice: this is the blogger's original article; do not repost without permission. https://blog.csdn.net/wanzew/article/details/82285209
#coding=utf-8

from mxnet import ndarray as nd
from mxnet import autograd
import random

# data dimensions: number of input features and number of training examples
num_inputs = 2
num_examples = 1000

# ground-truth parameters used to generate the synthetic data
true_w = [2, -3.4]
true_b = 4.2

X = nd.random_normal(shape=(num_examples, num_inputs))
y = true_w[0] * X[:, 0] + true_w[1] * X[:, 1] + true_b
# add Gaussian noise with standard deviation 0.01
y += 0.01 * nd.random_normal(shape=y.shape)

batch_size = 10
def data_iter():
    # shuffle the example indices so each epoch visits them in random order
    idx = list(range(num_examples))
    random.shuffle(idx)
    for i in range(0, num_examples, batch_size):
        j = nd.array(idx[i:min(i + batch_size, num_examples)])
        # nd.take gathers the rows of X and y at the indices in j
        yield nd.take(X, j), nd.take(y, j)

for data, label in data_iter():
    print(data, label)
    break
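# each minibatch yields `data` of shape (batch_size, num_inputs) and
# `label` of shape (batch_size,)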

# initialize the model parameters: weight vector w and bias b
w = nd.random_normal(shape=(num_inputs, 1))
b = nd.zeros((1,))
params = [w, b]

for param in params:
    # attach_grad() allocates the gradient buffer that backward() writes into
    param.attach_grad()

def net(X):
    # linear model: yhat = X · w + b
    return nd.dot(X, w) + b

def square_loss(yhat, y):
    # reshape y to yhat's shape here to avoid unintended broadcasting
    return (yhat - y.reshape(yhat.shape)) ** 2
    # return (yhat - y)**2
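# Why the reshape matters: yhat has shape (batch_size, 1) while y has shape
# (batch_size,), so without the reshape (yhat - y) would broadcast to
# (batch_size, batch_size) and silently compute the wrong loss. For example,
# nd.zeros((10, 1)) - nd.zeros(10) has shape (10, 10).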

def SGD(loss, params, lr):
    # backward() on the (non-scalar) loss computes gradients of its sum over the batch
    loss.backward()
    for param in params:
        # update in place with [:] so the arrays keep their attached gradient buffers
        param[:] = param - lr * param.grad

epochs = 5
learning_rate = .001
for e in range(epochs):
    total_loss = 0
    for data, label in data_iter():
        # record the forward pass so autograd can later compute gradients
        with autograd.record():
            output = net(data)
            loss = square_loss(output, label)
        SGD(loss, params, learning_rate)
        total_loss += nd.sum(loss).asscalar()

    print("Epoch %d, average loss: %f" % (e, total_loss / num_examples))
