TensorFlow Examples

Simple Linear Regression

Let's warm up by writing a simple linear regression.

1. Data preparation

In practice you can read data in with packages such as pandas, or use the built-in Boston House Price dataset. To keep things simple here, we hand-craft a small dataset ourselves.
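As a rough sketch of the pandas route (the file name data.csv and the column names x and y are hypothetical; adjust them to your data):

import pandas as pd

df = pd.read_csv('data.csv')  # hypothetical file with columns 'x' and 'y'
xs = df['x'].values
ys = df['y'].values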

In [1]:
# display figures inline in the notebook
%matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (14, 8)  # default figure size in inches

n_observations = 100
xs = np.linspace(-3, 3, n_observations)  # linspace generates an evenly spaced sequence; the first two arguments are the start and end, and the third sets the number of elements (default 50)
ys = np.sin(xs) + np.random.uniform(-0.5, 0.5, n_observations)  # add noise sampled uniformly from [low, high)
plt.scatter(xs, ys)
plt.show()


2. Prepare placeholders

In [2]:
X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')

3. Initialize parameters/weights

In [3]:
W = tf.Variable(tf.random_normal([1]), name='weight')  # tf.random_normal draws the requested number of values from the specified normal distribution
b = tf.Variable(tf.random_normal([1]), name='bias')

4. Compute the prediction

In [4]:
Y_pred = tf.add(tf.multiply(X, W), b)#y=wx+b

5. Compute the loss

In [5]:
loss = tf.square(Y - Y_pred, name='loss')
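Note that loss here is the squared error of a single sample, because X and Y are fed one value at a time in the training loop below; the averaging over samples happens there. If you fed the whole xs/ys arrays at once, a hedged batched alternative would be:

loss = tf.reduce_mean(tf.square(Y - Y_pred), name='loss')  # mean squared error over the whole batch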

6. Initialize the optimizer

In [6]:
learning_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
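minimize() bundles two steps, gradient computation and the parameter update; a rough sketch of the expanded form on the same graph:

opt = tf.train.GradientDescentOptimizer(learning_rate)
grads_and_vars = opt.compute_gradients(loss)     # list of (gradient, variable) pairs
optimizer = opt.apply_gradients(grads_and_vars)  # one gradient-descent step per run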

7. Set the number of iterations and run the graph in a session

In [7]:
n_samples = xs.shape[0]
with tf.Session() as sess:
	# remember to initialize all variables
	sess.run(tf.global_variables_initializer()) 
	
	writer = tf.summary.FileWriter('./graphs/linear_reg', sess.graph)  # write the graph to a log directory; the dot refers to the current working directory
	
	# train the model
	for i in range(50):
		total_loss = 0
		for x, y in zip(xs, ys):  # zip() pairs up corresponding elements of its iterable arguments into tuples
			# feed the data in through the feed_dict dictionary
			_, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})  # _ and l receive the results of optimizer and loss respectively


			total_loss += l
		if i % 5 == 0:
			print('Epoch {0}: {1}'.format(i, total_loss/n_samples))  # report the average loss, total_loss/n_samples


	# close the writer
	writer.close() 
	
	# fetch the values of W and b
	W, b = sess.run([W, b]) 
Epoch 0: [ 0.16139977]
Epoch 5: [ 0.18130365]
Epoch 10: [ 0.18130423]
Epoch 15: [ 0.18130423]
Epoch 20: [ 0.18130423]
Epoch 25: [ 0.18130423]
Epoch 30: [ 0.18130423]
Epoch 35: [ 0.18130423]
Epoch 40: [ 0.18130423]
Epoch 45: [ 0.18130423]
In [8]:
print(W,b)
print("W:"+str(W[0]))
print("b:"+str(b[0]))
(array([ 0.22108963], dtype=float32), array([-0.11899097], dtype=float32))
W:0.22109
b:-0.118991
In [9]:
plt.plot(xs, ys, 'bo', label='Real data')
plt.plot(xs, xs * W + b, 'r', label='Predicted data')
plt.legend()
plt.show()

plt.plot(x, y, format_string, **kwargs)
x is the x-axis data, y is the y-axis data, and format_string is a format string controlling the curve's appearance, composed of a color character, a line-style character, and a marker character.
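For example, two illustrative format strings:

plt.plot(xs, ys, 'g^')   # green triangle markers
plt.plot(xs, ys, 'r--')  # red dashed line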


Polynomial Regression

by @寒小阳
For a nonlinear data distribution, a linear regression fit is mediocre; let's try polynomial regression.

1. Data preparation

In practice you can read data in with packages such as pandas, or use the built-in Boston House Price dataset. To keep things simple here, we hand-craft a small dataset ourselves.

In [1]:
%matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (14,8)

n_observations = 100
xs = np.linspace(-3, 3, n_observations)
ys = np.sin(xs) + np.random.uniform(-0.5, 0.5, n_observations)
plt.scatter(xs, ys)
plt.show()

2. Prepare placeholders

In [2]:
X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')

3. Initialize parameters/weights

In [3]:
#tf.random_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)
  •     shape: shape of the output tensor (required)
  •     mean: mean of the normal distribution, default 0
  •     stddev: standard deviation of the normal distribution, default 1.0
  •     dtype: output type, default tf.float32
  •     seed: an integer random seed; once set, the same random numbers are generated every time
  •     name: a name for the operation
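A small illustration of the seed argument (separate from the model code):

a = tf.random_normal([3], seed=42)  # op-level seed: the same draws on every fresh run of the script
with tf.Session() as sess:
    print(sess.run(a))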

W = tf.Variable(tf.random_normal([1]), name='weight')

b = tf.Variable(tf.random_normal([1]), name='bias')

4. Compute the prediction

In [4]:
#y = W*x + W_2*x^2 + W_3*x^3 + b
Y_pred = tf.add(tf.multiply(X, W), b)
#add the higher-order terms
W_2 = tf.Variable(tf.random_normal([1]), name='weight_2')
Y_pred = tf.add(tf.multiply(tf.pow(X, 2), W_2), Y_pred)  # tf.pow raises to a power
W_3 = tf.Variable(tf.random_normal([1]), name='weight_3')
Y_pred = tf.add(tf.multiply(tf.pow(X, 3), W_3), Y_pred)
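Equivalently, once W, W_2, W_3 and b are all defined, the polynomial terms could be accumulated in a loop (a hedged sketch, not the original code):

Y_pred = b
for degree, w in enumerate([W, W_2, W_3], start=1):
    Y_pred = tf.add(Y_pred, tf.multiply(tf.pow(X, degree), w))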

5. Compute the loss

In [5]:
sample_num = xs.shape[0]
loss = tf.reduce_sum(tf.pow(Y_pred - Y, 2)) / sample_num

6. Initialize the optimizer

In [6]:
learning_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

7. Set the number of iterations and run the graph in a session

In [7]:
n_samples = xs.shape[0]
with tf.Session() as sess:
	# remember to initialize all variables
	sess.run(tf.global_variables_initializer()) 
	
	writer = tf.summary.FileWriter('./graphs/polynomial_reg', sess.graph)
	
	# train the model
	for i in range(1000):
		total_loss = 0
		for x, y in zip(xs, ys):
			# feed the data in through the feed_dict dictionary
			_, l = sess.run([optimizer, loss], feed_dict={X: x, Y:y}) 
			total_loss += l
		if i % 20 == 0:
			print('Epoch {0}: {1}'.format(i, total_loss/n_samples))

	# close the writer
	writer.close()
	# fetch the values of the weights and b
	W, W_2, W_3, b = sess.run([W, W_2, W_3, b])
Epoch 0: 0.150352864606
Epoch 20: 0.00355805616073
Epoch 40: 0.00275296724522
Epoch 60: 0.00219133220779
Epoch 80: 0.00179779622967
Epoch 100: 0.00152215424918
Epoch 120: 0.00132919847058
Epoch 140: 0.00119423839119
Epoch 160: 0.00109995152057
Epoch 180: 0.00103417843925
Epoch 200: 0.00098838469456
Epoch 220: 0.000956582921146
Epoch 240: 0.000934567380848
Epoch 260: 0.000919388579573
Epoch 280: 0.000908977913666
Epoch 300: 0.000901885387353
Epoch 320: 0.000897095015746
Epoch 340: 0.000893896470687
Epoch 360: 0.000891793329443
Epoch 380: 0.000890440036316
Epoch 400: 0.000889595619036
Epoch 420: 0.000889093668069
Epoch 440: 0.000888818895999
Epoch 460: 0.000888692155277
Epoch 480: 0.000888659526047
Epoch 500: 0.000888684939841
Epoch 520: 0.000888743188066
Epoch 540: 0.000888818862286
Epoch 560: 0.000888901214453
Epoch 580: 0.000888983756354
Epoch 600: 0.000889062528231
Epoch 620: 0.000889135822454
Epoch 640: 0.000889201900063
Epoch 660: 0.000889261205175
Epoch 680: 0.000889313147823
Epoch 700: 0.000889358760833
Epoch 720: 0.00088939824022
Epoch 740: 0.000889432252548
Epoch 760: 0.000889461824831
Epoch 780: 0.000889486788392
Epoch 800: 0.000889508205019
Epoch 820: 0.000889526253386
Epoch 840: 0.00088954167843
Epoch 860: 0.000889555067311
Epoch 880: 0.00088956627741
Epoch 900: 0.000889575722044
Epoch 920: 0.000889583687085
Epoch 940: 0.000889590321462
Epoch 960: 0.000889596035263
Epoch 980: 0.000889601055151
In [8]:
print("W:"+str(W[0]))  # element 0
print("W_2:"+str(W_2[0]))
print("W_3:"+str(W_3[0]))
print("b:"+str(b[0]))
W:0.841851
W_2:0.0138749
W_3:-0.095953
b:-0.0433892
In [9]:
plt.plot(xs, ys, 'bo', label='Real data')
plt.plot(xs, xs*W + np.power(xs,2)*W_2 + np.power(xs,3)*W_3 + b, 'r', label='Predicted data')
plt.legend()
plt.show()




Logistic Regression

by @寒小阳
The most common baseline model for classification problems is logistic regression: it is simple and interpretable at the same time, which makes it hugely popular. Let's build this model with tensorflow.

0. Environment setup

In [1]:
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import time

1. Data loading

In [2]:
# load the MNIST handwritten digit dataset with tensorflow's built-in utility
mnist = input_data.read_data_sets('/data/mnist', one_hot=True) 
Extracting /data/mnist/train-images-idx3-ubyte.gz
Extracting /data/mnist/train-labels-idx1-ubyte.gz
Extracting /data/mnist/t10k-images-idx3-ubyte.gz
Extracting /data/mnist/t10k-labels-idx1-ubyte.gz
In [3]:
# check the data dimensions
mnist.train.images.shape
Out[3]:
(55000, 784)  # 55000 samples, each of dimension 784
In [4]:
# check the target dimensions
mnist.train.labels.shape
Out[4]:
(55000, 10)
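Because read_data_sets was called with one_hot=True, each label is a length-10 indicator vector rather than a single digit; for example, the digit 3 is encoded as:

np.eye(10)[3]  # array([0., 0., 0., 1., 0., 0., 0., 0., 0., 0.])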

2. Prepare placeholders

In [5]:
batch_size = 128
X = tf.placeholder(tf.float32, [batch_size, 784], name='X_placeholder')
# alternatively: X = tf.placeholder(tf.float32, [None, 784], name='X_placeholder')
# None fixes only the feature dimension and leaves the number of samples unspecified
Y = tf.placeholder(tf.int32, [batch_size, 10], name='Y_placeholder')

3. Prepare parameters/weights

In [6]:
w = tf.Variable(tf.random_normal(shape=[784, 10], stddev=0.01), name='weights')  # for x*w + b the matrix shapes must line up, hence [784, 10]
b = tf.Variable(tf.zeros([1, 10]), name="bias")

4. Get the score for each class

In [7]:
logits = tf.matmul(X, w) + b 

5. Compute the multi-class softmax loss

In [8]:
# cross-entropy loss
entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y, name='loss')
# take the mean
loss = tf.reduce_mean(entropy)
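The fused softmax_cross_entropy_with_logits op is preferred because it is numerically stable; a rough sketch of what it computes (Y is cast because the placeholder is int32):

softmax = tf.nn.softmax(logits)
entropy_manual = -tf.reduce_sum(tf.cast(Y, tf.float32) * tf.log(softmax), axis=1)
loss_manual = tf.reduce_mean(entropy_manual)  # matches loss up to numerical error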

6. Prepare the optimizer

The optimization here is stochastic, mini-batch gradient based; we can choose an optimizer such as AdamOptimizer, which is what the code below uses.

In [9]:
learning_rate = 0.01
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
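If you wanted plain stochastic gradient descent instead, a drop-in alternative would be:

optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)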

7. Run the operations defined in the graph in a session

In [10]:
# total number of epochs
n_epochs = 30

with tf.Session() as sess:
	# the structure of the graph can be viewed in TensorBoard
	writer = tf.summary.FileWriter('./graphs/logistic_reg', sess.graph)

	start_time = time.time()
	sess.run(tf.global_variables_initializer())	
	n_batches = int(mnist.train.num_examples/batch_size)
	for i in range(n_epochs): # iterate for this many epochs
		total_loss = 0

		for _ in range(n_batches):
			X_batch, Y_batch = mnist.train.next_batch(batch_size)
			_, loss_batch = sess.run([optimizer, loss], feed_dict={X: X_batch, Y:Y_batch}) 
			total_loss += loss_batch
		print('Average loss epoch {0}: {1}'.format(i, total_loss/n_batches))

	print('Total time: {0} seconds'.format(time.time() - start_time))

	print('Optimization Finished!')

	# test the model
	
	preds = tf.nn.softmax(logits)
	correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(Y, 1))
	# tf.argmax(preds, 1) is the predicted digit (0-9); tf.argmax(Y, 1) is the actual digit (0-9)
	accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32))  # number of correct predictions in the batch

tf.cast(x, dtype, name=None)

This function converts its input to another type.

Parameters
  • x: the input
  • dtype: the target type
  • name: a name for the operation
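A standalone illustration of tf.cast (separate from the session above):

flags = tf.constant([True, False, True])
with tf.Session() as sess:
    print(sess.run(tf.cast(flags, tf.float32)))  # [1. 0. 1.]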
	n_batches = int(mnist.test.num_examples/batch_size)
	total_correct_preds = 0
	for i in range(n_batches):
		X_batch, Y_batch = mnist.test.next_batch(batch_size)
		accuracy_batch = sess.run([accuracy], feed_dict={X: X_batch, Y: Y_batch})
		total_correct_preds += accuracy_batch[0]

	print('Accuracy {0}'.format(total_correct_preds/mnist.test.num_examples))

	writer.close()
Average loss epoch 0: 0.367760838988
Average loss epoch 1: 0.298558457956
Average loss epoch 2: 0.287027260189
Average loss epoch 3: 0.278150600014
Average loss epoch 4: 0.278299358254
Average loss epoch 5: 0.27258301403
Average loss epoch 6: 0.267979631022
Average loss epoch 7: 0.269774100389
Average loss epoch 8: 0.265927433273
Average loss epoch 9: 0.264164017509
Average loss epoch 10: 0.263760118374
Average loss epoch 11: 0.262523819378
Average loss epoch 12: 0.263911345518
Average loss epoch 13: 0.26254081634
Average loss epoch 14: 0.260421709234
Average loss epoch 15: 0.259092338157
Average loss epoch 16: 0.258972431136
Average loss epoch 17: 0.256881248006
Average loss epoch 18: 0.256846180694
Average loss epoch 19: 0.256935099327
Average loss epoch 20: 0.256742743675
Average loss epoch 21: 0.255526978688
Average loss epoch 22: 0.257270330576
Average loss epoch 23: 0.255813773973
Average loss epoch 24: 0.256732740637
Average loss epoch 25: 0.2530989521
Average loss epoch 26: 0.253306594276
Average loss epoch 27: 0.253756374329
Average loss epoch 28: 0.251982488884
Average loss epoch 29: 0.250331024902
Total time: 13.8942499161 seconds
Optimization Finished!
Accuracy 0.9146



Multilayer Perceptron

by @寒小阳
Let's build our first neural network to tackle a classification problem, again using the handwritten digit recognition task from before as the example.

0. Environment setup

In [1]:
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import time

1. Data preparation

In [2]:
# load the MNIST handwritten digit dataset with tensorflow's built-in utility
mnist = input_data.read_data_sets('/data/mnist', one_hot=True) 
Extracting /data/mnist/train-images-idx3-ubyte.gz
Extracting /data/mnist/train-labels-idx1-ubyte.gz
Extracting /data/mnist/t10k-images-idx3-ubyte.gz
Extracting /data/mnist/t10k-labels-idx1-ubyte.gz
In [3]:
# check the data dimensions
mnist.train.images.shape
Out[3]:
(55000, 784)
In [4]:
# check the target dimensions
mnist.train.labels.shape
Out[4]:
(55000, 10)

2. Prepare placeholders

In [5]:
X = tf.placeholder(tf.float32, [None, 784], name='X_placeholder') 
Y = tf.placeholder(tf.int32, [None, 10], name='Y_placeholder')

3. Prepare parameters/weights

In [6]:
# network parameters
n_hidden_1 = 256 # size of hidden layer 1
n_hidden_2 = 256 # size of hidden layer 2
n_input = 784 # MNIST data input (28*28*1 = 784)
n_classes = 10 # MNIST has 10 handwritten digit classes in total

weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1]), name='W1'),  # a dict of weight matrices
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]), name='W2'),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]), name='W')
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1]), name='b1'),
    'b2': tf.Variable(tf.random_normal([n_hidden_2]), name='b2'),
    'out': tf.Variable(tf.random_normal([n_classes]), name='bias')
}
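The shapes are chosen so that every matmul lines up: (batch, 784) x (784, 256) -> (batch, 256), then (256, 256), then (256, 10). A quick sanity check (illustrative only):

print(weights['h1'].get_shape())   # (784, 256)
print(weights['h2'].get_shape())   # (256, 256)
print(weights['out'].get_shape())  # (256, 10)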

4. Build the network computation graph

In [7]:
def multilayer_perceptron(x, weights, biases):
    # hidden layer 1, with ReLU activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'], name='fc_1')  # x*W1 + b1
    layer_1 = tf.nn.relu(layer_1, name='relu_1')
    # hidden layer 2, with ReLU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'], name='fc_2')
    layer_2 = tf.nn.relu(layer_2, name='relu_2')
    # output layer
    out_layer = tf.add(tf.matmul(layer_2, weights['out']), biases['out'], name='fc_3')
    return out_layer

5. Get the predicted class scores

In [8]:
pred = multilayer_perceptron(X, weights, biases)

6. Compute the loss and initialize the optimizer

In [9]:
learning_rate = 0.001
loss_all = tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=Y, name='cross_entropy_loss')  # cross entropy
loss = tf.reduce_mean(loss_all, name='avg_loss')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)

7. Initialize variables

In [10]:
init = tf.global_variables_initializer()

8. Run the graph-defined operations in a session

In [11]:
# total number of training epochs
training_epochs = 15
# batch size
batch_size = 128
# how often to display progress
display_step = 1

with tf.Session() as sess:
    sess.run(init)
    writer = tf.summary.FileWriter('./graphs/MLP_DNN', sess.graph)

    # training
    for epoch in range(training_epochs):
        avg_loss = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        # loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # run one optimization step
            _, l = sess.run([optimizer, loss], feed_dict={X: batch_x, Y: batch_y})
            # accumulate the average loss
            avg_loss += l / total_batch
        # display progress every display_step epochs
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=", \
                "{:.9f}".format(avg_loss))
    print("Optimization Finished!")

    # evaluate on the test set
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(Y, 1))
    # compute the accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Accuracy:", accuracy.eval({X: mnist.test.images, Y: mnist.test.labels}))
    writer.close()
('Epoch:', '0001', 'cost=', '216.058385404')
('Epoch:', '0002', 'cost=', '49.464269667')
('Epoch:', '0003', 'cost=', '31.474119233')
('Epoch:', '0004', 'cost=', '22.352105056')
('Epoch:', '0005', 'cost=', '16.458953918')
('Epoch:', '0006', 'cost=', '12.631599525')
('Epoch:', '0007', 'cost=', '9.714879267')
('Epoch:', '0008', 'cost=', '7.663560503')
('Epoch:', '0009', 'cost=', '5.726646216')
('Epoch:', '0010', 'cost=', '4.465609000')
('Epoch:', '0011', 'cost=', '3.477879983')
('Epoch:', '0012', 'cost=', '2.538919657')
('Epoch:', '0013', 'cost=', '2.105529374')
('Epoch:', '0014', 'cost=', '1.542090875')
('Epoch:', '0015', 'cost=', '1.206216523')
Optimization Finished!
('Accuracy:', 0.93959999)
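A note on accuracy.eval above: for a tensor, eval(feed_dict) is shorthand for running it in the default session, roughly:

acc = sess.run(accuracy, feed_dict={X: mnist.test.images, Y: mnist.test.labels})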




Reposted from blog.csdn.net/qq_40213457/article/details/80752239