Python Learning Diary 3 - The First TensorFlow Program

y=2x
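
The program below is ordinary linear regression: fit W and b so that the prediction z tracks noisy samples of y = 2x, with mean squared error as the cost (this is exactly the `tf.reduce_mean(tf.square(Y - z))` line in the code):

$$z = Wx + b$$
$$\mathrm{cost} = \frac{1}{N}\sum_{i=1}^{N}\left(y_i - z_i\right)^2$$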

1 Generate noisy y = 2x data: 100 points with X in (-1, 1), then plot them
Running this showed that matplotlib was missing.
Install the packages into the tensorflow environment from the Anaconda Prompt:
pip install cython
pip install matplotlib

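Step 1 on its own is only a few lines; a minimal sketch (the same lines appear again in the full program of section 2):

import numpy as np
import matplotlib.pyplot as plt

# 100 points of X in (-1, 1); y = 2x plus Gaussian noise scaled by 0.3
train_X = np.linspace(-1, 1, 100)
train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.3

plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.legend()
plt.show()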

2 Full code and its output

“”"
Created on Fri Oct 12 14:08:10 2018
@author: Julie
“”"
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
def moving_average (a,w=10):
if len(a)<w:
return a[:]
return [val if idx<w else sum(a[(idx-w):idx])/w for idx,
val in enumerate(a)]
#生成模拟数据
train_X = np.linspace(-1,1,100)
train_Y = 2*train_X+ np.random.randn(*train_X.shape)*0.3 #y=2x
#display
plt.plot(train_X,train_Y,‘ro’,label=‘Original data’)
plt.legend()
plt.show()
#linear regression
#占位符定义输入节点
X = tf.placeholder(“float”)
Y = tf.placeholder(“float”)
#模型的变量,使用回归求估计值
W = tf.Variable(tf.random_normal([1]),name = “weight”)
b = tf.Variable(tf.zeros([1]),name = “bias”)
print(W)
print(b)
#foward
#前向传播网络,?只定义,不计算
z=tf.multiply(X,W)+b
print(z)
#backward
#后向传播
#代价函数
cost = tf.reduce_mean(tf.square(Y-z))
print(cost)
#优化速率
learning_rate = 0.01
#优化策略,梯度下降
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
#init
init = tf.global_variables_initializer()
#times
training_epochs = 25
display_step = 2
#start session
with tf.Session() as sess:
sess.run(init)
plotdata ={“batchsize”:[],“loss”:[]}
#input data
for epoch in range(training_epochs):
for (x,y) in zip(train_X,train_Y):
sess.run(optimizer,feed_dict={X:train_X,Y:train_Y})
#display
if epoch % display_step ==0:
# loss = sess.run(cost,feed_dict={X:train_X,Y:train_Y})
loss = sess.run(cost,feed_dict={X:x,Y:y})
print (“Epoch:”,epoch+1,“cost=”,loss,“W=”,
sess.run(W),“b=”,sess.run(b))
if not(loss == “NA”):
plotdata[“batchsize”].append(epoch)
plotdata[“loss”].append(loss)
print(“Finished!”)
print(“cost”,sess.run(cost,feed_dict={X:train_X,Y:train_Y}),
“W=”,sess.run(W),“b=”,sess.run(b))
plt.plot(train_X,train_Y,‘ro’,label=‘Original data’)
plt.plot(train_X,sess.run(W)*train_X+sess.run(b),label=‘Fittedline’)
plt.legend()
plt.show()
plotdata[“avgloss”]=moving_average(plotdata[“loss”])
plt.figure(1)
plt.subplot(211)
plt.plot(plotdata[“batchsize”],plotdata[“avgloss”],‘b–’)
plt.xlabel(‘Minibatch number’)
plt.ylabel(‘Loss’)
plt.title(‘Minibatch run vs. Training loss’)
plt.show()
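
Not part of the original listing, but once training has finished the same graph can be queried for a prediction; a minimal sketch that would go inside the with tf.Session() block, after the training loop (the value 0.2 is just an example input):

    # Feed a single x into the placeholder and evaluate the forward node z = W*x + b
    print("x = 0.2, predicted z =", sess.run(z, feed_dict={X: 0.2}))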

<tf.Variable ‘weight_12:0’ shape=(1,) dtype=float32_ref>
<tf.Variable ‘bias_12:0’ shape=(1,) dtype=float32_ref>
Tensor(“add_11:0”, dtype=float32)
Tensor(“Mean_11:0”, shape=(), dtype=float32)
Epoch: 1 cost= 0.3982751 W= [1.0541376] b= [-0.00260837]
Epoch: 3 cost= 0.0068122474 W= [1.768156] b= [-0.00300017]
Epoch: 5 cost= 0.0701677 W= [1.9505185] b= [-0.00300706]
Epoch: 7 cost= 0.097012095 W= [1.9970944] b= [-0.00300719]
Epoch: 9 cost= 0.104563825 W= [2.00899] b= [-0.0030072]
Epoch: 11 cost= 0.10653761 W= [2.0120277] b= [-0.0030072]
Epoch: 13 cost= 0.10704498 W= [2.012804] b= [-0.0030072]
Epoch: 15 cost= 0.10717357 W= [2.0130005] b= [-0.0030072]
Epoch: 17 cost= 0.107207604 W= [2.0130525] b= [-0.0030072]
Epoch: 19 cost= 0.107207604 W= [2.0130525] b= [-0.0030072]
Epoch: 21 cost= 0.107207604 W= [2.0130525] b= [-0.0030072]
Epoch: 23 cost= 0.107207604 W= [2.0130525] b= [-0.0030072]
Epoch: 25 cost= 0.107207604 W= [2.0130525] b= [-0.0030072]
Finished!
cost 0.097434156 W= [2.0130525] b= [-0.0030072]
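
As an optional cross-check (not in the original post), NumPy's closed-form least-squares fit on the same train_X/train_Y should land near the same values, i.e. a slope close to 2 and an intercept close to 0:

# np.polyfit(x, y, 1) returns [slope, intercept] of the least-squares line
slope, intercept = np.polyfit(train_X, train_Y, 1)
print("closed-form fit: W =", slope, "b =", intercept)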


[Figures: the fitted line plotted over the original data, and the smoothed training-loss curve]


Reposted from blog.csdn.net/weixin_43387285/article/details/83026321