PyTorch Learning Journey (2): Gradient Descent


Solving a Simple Linear Regression Problem

1. Assume the regression function is y = w * x + b. For a single point (x, y) the squared error is Loss = (w*x + b - y)^2, and the quantity actually minimized is its average over all points, as written out below.
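For reference, here is the averaged loss over N points together with its two partial derivatives, which the step function in step 3 implements directly (in LaTeX notation):

\mathrm{Loss}(w, b) = \frac{1}{N} \sum_{i=1}^{N} \left( w x_i + b - y_i \right)^2

\frac{\partial \mathrm{Loss}}{\partial b} = -\frac{2}{N} \sum_{i=1}^{N} \left( y_i - (w x_i + b) \right)

\frac{\partial \mathrm{Loss}}{\partial w} = -\frac{2}{N} \sum_{i=1}^{N} \left( y_i - (w x_i + b) \right) x_i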
2. Define the function that computes the loss (the mean squared error).

import numpy as np

def compute_error_for_line_given_points(b, w, points):
    # Mean squared error of y = w*x + b over an (N, 2) array of (x, y) points.
    totalError = 0
    for i in range(0, len(points)):
        x = points[i, 0]
        y = points[i, 1]
        totalError += (y - (w*x + b)) ** 2
    return totalError / float(len(points))
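As a quick sanity check (my own example, not from the original post): points must be an (N, 2) NumPy array, since the function indexes it as points[i, 0].

# Two points lying exactly on y = 2*x + 1, so the error should be 0.
pts = np.array([[1.0, 3.0],
                [2.0, 5.0]])
print(compute_error_for_line_given_points(b=1, w=2, points=pts))  # prints 0.0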

3. Define the single-step gradient descent function.

def step_gradient(b_current, w_current, points, learningRate):
    b_gradient = 0
    w_gradient = 0
    N = float(len(points))
    for i in range(0, len(points)):
        x = points[i, 0]
        y = points[i, 1]
        # Accumulate the partial derivatives of the mean squared error:
        # dLoss/db = -(2/N) * sum(y - (w*x + b))
        # dLoss/dw = -(2/N) * sum(y - (w*x + b)) * x
        b_gradient -= (2/N) * (y - (w_current*x + b_current))
        w_gradient -= (2/N) * (y - (w_current*x + b_current)) * x

    # Step against the gradient, scaled by the learning rate.
    new_b = b_current - (learningRate * b_gradient)
    new_w = w_current - (learningRate * w_gradient)

    return [new_b, new_w]
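The loop above can also be written with NumPy broadcasting. Here is a minimal vectorized sketch that computes the same update (the name step_gradient_vectorized is mine, not from the original post):

def step_gradient_vectorized(b_current, w_current, points, learningRate):
    x, y = points[:, 0], points[:, 1]
    N = float(len(points))
    error = y - (w_current * x + b_current)        # residuals for all points at once
    b_gradient = -(2 / N) * error.sum()            # dLoss/db
    w_gradient = -(2 / N) * (error * x).sum()      # dLoss/dw
    return [b_current - learningRate * b_gradient,
            w_current - learningRate * w_gradient]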

4. Define the function that runs gradient descent for many iterations.

def gradient_descent_runner(points, starting_b, starting_w,
                            learning_rate, num_iterations):
    b = starting_b
    w = starting_w
    # Repeatedly apply one gradient step, starting from (starting_b, starting_w).
    for i in range(num_iterations):
        b, w = step_gradient(b, w, np.array(points), learning_rate)

    return [b, w]
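The main function in step 5 below reads a data.csv of comma-separated "x,y" rows. If you do not have the file from the original post, a hypothetical stand-in with noisy points around a known line can be generated like this (the slope 1.5, intercept 4, and noise level are arbitrary choices of mine):

# Write 100 synthetic "x,y" rows around y = 1.5*x + 4.
rng = np.random.default_rng(0)
x = rng.uniform(0, 100, size=100)
y = 1.5 * x + 4 + rng.normal(0, 10, size=100)
np.savetxt("data.csv", np.column_stack([x, y]), delimiter=",")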

5. Main function.

def run():
    # Each row of data.csv is one "x,y" sample.
    points = np.genfromtxt("data.csv", delimiter=',')
    learning_rate = 0.0001
    initial_b = 0
    initial_w = 0
    num_iterations = 1000

    print("Starting gradient descent at b = {0}, w = {1}, error = {2}"
          .format(initial_b, initial_w, compute_error_for_line_given_points(initial_b, initial_w, points)))

    print("Running...")

    [b, w] = gradient_descent_runner(points, initial_b, initial_w, learning_rate, num_iterations)
    # Report the error at the learned (b, w).
    print("Ending gradient descent at b = {0}, w = {1}, error = {2}"
          .format(b, w, compute_error_for_line_given_points(b, w, points)))


if __name__ == '__main__':
    run()
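Since this series is about PyTorch, it is worth noting that the same fit can be done with tensors and autograd instead of hand-derived gradients. The following is a minimal sketch of my own, not part of the original post; it assumes the same data.csv:

import numpy as np
import torch

points = np.genfromtxt("data.csv", delimiter=',')
x = torch.tensor(points[:, 0], dtype=torch.float32)
y = torch.tensor(points[:, 1], dtype=torch.float32)

w = torch.zeros(1, requires_grad=True)
b = torch.zeros(1, requires_grad=True)
learning_rate = 0.0001

for _ in range(1000):
    loss = ((w * x + b - y) ** 2).mean()  # same MSE as compute_error_for_line_given_points
    loss.backward()                       # autograd fills w.grad and b.grad
    with torch.no_grad():
        w -= learning_rate * w.grad
        b -= learning_rate * b.grad
        w.grad.zero_()
        b.grad.zero_()

print("b = {0}, w = {1}".format(b.item(), w.item()))

Here torch.no_grad() keeps the parameter updates themselves out of the autograd graph, and zeroing the .grad buffers prevents gradients from accumulating across iterations.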

Reposted from blog.csdn.net/qq_41320782/article/details/103930824