002-TensorFlow Basics

Hands-on practice with the basic concepts:

import tensorflow as tf
# create a constant op
m1 = tf.constant([[3,3]])
# create a constant op
m2 = tf.constant([[2],[3]])
# create a matrix-multiplication op, passing in m1 and m2
product = tf.matmul(m1,m2)
print(product)
Tensor("MatMul:0", shape=(1, 1), dtype=int32)

Note that printing product shows only the tensor's metadata; the multiplication has not actually run yet. Ops execute only when evaluated inside a session:
# define a session and launch the default graph
sess = tf.Session()
# call sess.run to execute the matrix-multiplication op
# run(product) triggers the 3 ops in the graph
result = sess.run(product)
print(result)
sess.close()
[[15]]
The same thing with a context manager, which closes the session automatically:

with tf.Session() as sess:
    # call sess.run to execute the matrix-multiplication op
    # run(product) triggers the 3 ops in the graph
    result = sess.run(product)
    print(result)
[[15]]
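
The code above uses the TensorFlow 1.x graph-and-session API. As a hedged aside (not in the original post): on TensorFlow 2.x the same example can still be run through the tf.compat.v1 compatibility module, roughly like this:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # restore 1.x graph/session semantics

m1 = tf.constant([[3,3]])
m2 = tf.constant([[2],[3]])
product = tf.matmul(m1,m2)

with tf.Session() as sess:
    print(sess.run(product))  # [[15]]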
import tensorflow as tf
x = tf.Variable([1,2])
a = tf.constant([3,3])
# add a subtraction op
sub = tf.subtract(x,a)
# add an addition op
add = tf.add(x,a)
# initialize the variables
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    print(sess.run(sub))
    print(sess.run(add)) 
[-2 -1]
[4 5]
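
Variables must be initialized before use. A related pattern worth a quick sketch (my addition, standard 1.x API, not in the original post): a variable can be initialized from another variable's initial value via initialized_value(), which makes the dependency explicit:

w1 = tf.Variable([1.0, 2.0])
# w2's initial value depends on w1; initialized_value() declares that
# dependency so global_variables_initializer runs them in a safe order
w2 = tf.Variable(w1.initialized_value() * 2.0)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    print(sess.run(w2))  # [2. 4.]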
# create a variable initialized to 0
state = tf.Variable(0,name="counter")
# create an op that adds 1 to state
new_value = tf.add(state,1)
# assignment op: assign new_value to state
update = tf.assign(state,new_value)
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    print(sess.run(state))
    for i in range(5):
        sess.run(update)
        print(sess.run(state))
0
1
2
3
4
5
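
The add-then-assign pair can be written more compactly with tf.assign_add, which fuses the two steps into one op; a minimal sketch (this op is standard 1.x API, though the post does not use it):

state = tf.Variable(0, name="counter")
# assign_add adds 1 to state and returns the updated value
update = tf.assign_add(state, 1)
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for i in range(5):
        print(sess.run(update))  # prints 1, 2, 3, 4, 5
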
import tensorflow as tf
#Fetch
input1 = tf.constant(3.0)
input2 = tf.constant(2.0)
input3 = tf.constant(5.0)

add = tf.add(input2,input3)
mul = tf.multiply(input1,add)

with tf.Session() as sess:
    result = sess.run([mul,add])
    print(result)
[21.0, 7.0]
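
sess.run is not limited to a flat list of fetches. As an aside (standard 1.x behavior, not shown in the original): fetches can be nested structures, including dicts, and the result mirrors that structure:

with tf.Session() as sess:
    # fetching a dict returns a dict with the same keys
    result = sess.run({'mul': mul, 'add': add})
    print(result)  # {'mul': 21.0, 'add': 7.0}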

#Feed
# create placeholders
input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)
output = tf.multiply(input1,input2)
with tf.Session() as sess:
    # feed the data in as a dictionary
    print(sess.run(output,feed_dict={input1:[8.],input2:[2.]}))
[ 16.]
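
A placeholder can also declare its shape up front, which lets TensorFlow catch shape mismatches when the graph is built rather than at run time. A minimal sketch (the shapes here are illustrative, not from the original):

import numpy as np

# None in the first dimension means "any batch size"
x = tf.placeholder(tf.float32, shape=[None, 3])
doubled = x * 2.0

with tf.Session() as sess:
    batch = np.ones((4, 3), dtype=np.float32)
    print(sess.run(doubled, feed_dict={x: batch}))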
import tensorflow as tf
import numpy as np
# use numpy to generate 100 random points
x_data = np.random.rand(100)
y_data = x_data*0.1+0.2

# build a linear model
b = tf.Variable(32.11) # arbitrary float32 value
k = tf.Variable(88.23) # arbitrary float32 value
y = k*x_data+b

# quadratic (mean-squared-error) cost function
loss = tf.reduce_mean(tf.square(y_data-y))
# define a gradient-descent optimizer for training
optimizer = tf.train.GradientDescentOptimizer(0.2)
# minimize the cost function
train = optimizer.minimize(loss)
# initialize the variables
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for x in range(2001):
        sess.run(train)
        if x%20==0:
            print(x,sess.run([loss,k,b]))
0 [1763.568, 68.205513, 0.53934586]
20 [78.176331, 29.277592, -16.521879]
40 [25.467432, 16.753452, -9.3442173]
60 [8.2964983, 9.6051531, -5.2474737]
80 [2.7027426, 5.5251789, -2.9092102]
100 [0.88046998, 3.196485, -1.5746186]
120 [0.28682989, 1.8673555, -0.81288457]
140 [0.093440302, 1.108739, -0.37811583]
160 [0.030439962, 0.67574972, -0.12996645]
180 [0.0099163931, 0.4286159, 0.011667784]
200 [0.0032304539, 0.28756142, 0.092507161]
220 [0.0010523816, 0.20705287, 0.1386472]
240 [0.00034283331, 0.16110168, 0.16498217]
260 [0.00011168456, 0.13487452, 0.18001315]
280 [3.6383361e-05, 0.11990504, 0.18859227]
300 [1.1852552e-05, 0.11136103, 0.19348891]
320 [3.861187e-06, 0.10648444, 0.19628373]
340 [1.2578519e-06, 0.10370106, 0.1978789]
360 [4.097719e-07, 0.10211243, 0.19878934]
380 [1.3349e-07, 0.10120568, 0.19930901]
400 [4.3487045e-08, 0.10068816, 0.19960561]
420 [1.4166082e-08, 0.10039277, 0.19977491]
440 [4.6153357e-09, 0.10022418, 0.19987151]
460 [1.5033901e-09, 0.10012795, 0.19992667]
480 [4.8975296e-10, 0.10007302, 0.19995815]
500 [1.5952369e-10, 0.10004168, 0.19997612]
520 [5.199877e-11, 0.10002379, 0.19998637]
540 [1.6902115e-11, 0.10001357, 0.19999222]
560 [5.5140779e-12, 0.10000775, 0.19999556]
580 [1.7957324e-12, 0.10000442, 0.19999747]
600 [5.8751669e-13, 0.10000253, 0.19999856]
620 [1.9196201e-13, 0.10000144, 0.19999917]
640 [6.716627e-14, 0.10000085, 0.19999951]
660 [2.0625723e-14, 0.10000047, 0.19999973]
680 [7.2808428e-15, 0.10000028, 0.19999984]
700 [3.474998e-15, 0.10000018, 0.19999988]
... (the same values repeat every 20 steps from here on)
2000 [3.474998e-15, 0.10000018, 0.19999988]

You can see that after roughly 700 iterations the loss barely changes, and k and b are essentially fixed at k ≈ 0.1, b ≈ 0.2, matching the line y = 0.1x + 0.2 used to generate the data.
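
As a quick sanity check (my addition, not part of the original post), the same coefficients can be recovered in closed form with numpy, since the data is a noiseless line:

import numpy as np

x_data = np.random.rand(100)
y_data = x_data * 0.1 + 0.2

# least-squares fit of degree 1: polyfit returns [slope, intercept]
k_fit, b_fit = np.polyfit(x_data, y_data, 1)
print(k_fit, b_fit)  # ~0.1, ~0.2 (up to floating-point error)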
Reposted from www.cnblogs.com/Mjerry/p/9823980.html