TensorFlow Learning Notes Demo002

It has been a while since I last updated these TensorFlow tutorials, so today let's use TensorFlow to build a neural network. The detailed construction process won't be covered here; just follow the underlying principles step by step.

Without further ado:

The first example uses TensorFlow to fit a linear regression model by least squares (even though sklearn has packages that implement the same functionality).
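For reference, the closed-form least-squares solution (the normal equation) that the code below computes is:

$$\hat{\theta} = (X^T X)^{-1} X^T y$$

where X is the feature matrix with a bias column prepended and y is the target vector.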

import tensorflow as tf
import numpy as np
from sklearn.datasets import fetch_california_housing
The data used is the California housing dataset, which ships with sklearn.

housing=fetch_california_housing()
m,n=housing.data.shape
housing_data_plus_bias=np.c_[np.ones((m,1)),housing.data]
X=tf.constant(housing_data_plus_bias,dtype=tf.float32,name="X")
#y cannot be a flat list; it must be a column vector of shape (m,1), i.e. [[...],[...],[...]]
y=tf.constant(housing.target.reshape(-1,1),dtype=tf.float32,name="y")

XT=tf.transpose(X)
#solve with the normal equation (least squares)
theta=tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT,X)),XT),y)
with tf.Session() as sess:
    theta_value=sess.run(theta)
    #or evaluate the tensor directly with eval():
    #theta_value=theta.eval()
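As a sanity check (a minimal sketch using the sklearn package the introduction mentions), the same fit can be obtained with LinearRegression, and its coefficients should match theta_value (bias first, then the feature weights):

from sklearn.linear_model import LinearRegression

lin_reg=LinearRegression()
lin_reg.fit(housing.data,housing.target)
#intercept_ corresponds to the bias row of theta, coef_ to the remaining rows
print(lin_reg.intercept_,lin_reg.coef_)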

Gradient descent also works; here is a TensorFlow implementation.
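One caveat first: on the raw California housing features, gradient descent tends to diverge because the feature scales differ wildly, so standardizing the inputs beforehand is advisable. A minimal sketch using sklearn's StandardScaler (not part of the original code):

from sklearn.preprocessing import StandardScaler

scaler=StandardScaler()
scaled_housing_data=scaler.fit_transform(housing.data)
#prepend the bias column after scaling
scaled_housing_data_plus_bias=np.c_[np.ones((m,1)),scaled_housing_data]

The code below keeps the original housing_data_plus_bias; substitute the scaled version if the MSE blows up.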

#gradient descent
learning_rate=0.01
n_epochs=1000
X=tf.constant(housing_data_plus_bias,dtype=tf.float32,name="X")
#reshape to (m,1) so ypredict-y does not broadcast to (m,m)
y=tf.constant(housing.target.reshape(-1,1),dtype=tf.float32,name="y")
#two kinds of tensors here: the constants above and a trainable Variable
theta=tf.Variable(tf.random_uniform((n+1,1),-1.0,1.0),name="theta")
ypredict=tf.matmul(X,theta)
#tf.square() or tf.squared_difference() can compute the squared error
error=ypredict-y
mse=tf.reduce_mean(tf.square(error))

#manually derived gradient of the MSE: (2/m) * X^T * (X*theta - y)
gradients=2.0/m*tf.matmul(tf.transpose(X),error)
#one descent step: theta <- theta - learning_rate*gradients
training_op=tf.assign(theta,theta-learning_rate*gradients)

init=tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
#initialize the variables
    for epoch in range(n_epochs):
        if epoch%100==0:
            print("while the epoch is :%s, the mse is %s"%(epoch,mse.eval()))
        sess.run(training_op)
    best_theta=theta.eval()
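Deriving the gradient by hand only works for simple formulas. TensorFlow can also compute it via automatic differentiation; the following line is a drop-in replacement for the manual gradients definition above:

#let TensorFlow differentiate mse with respect to theta
gradients=tf.gradients(mse,[theta])[0]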

Of course, instead of hand-coding the update you can use one of TensorFlow's built-in optimizers; here, the gradient descent optimizer:

#using the TensorFlow optimizer
optimizer=tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
training_op=optimizer.minimize(mse)
#usage
init=tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    #chu shi hua shuju
    for epoch in range(n_epochs):
        if epoch%100==0:
            print("while the epoch is :%s, the mse is %s"%(epoch,mse.eval()))
        sess.run(training_op)
    best_theta=theta.eval()

Using the momentum optimizer:

#momentum update: v <- momentum*v + gradients
#                 theta <- theta - learning_rate*v
optimizer2=tf.train.MomentumOptimizer(learning_rate=learning_rate,momentum=0.9)
training_op=optimizer2.minimize(mse)

The loss function is the MSE.
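For reference, the mean squared error minimized throughout is:

$$\mathrm{MSE}(\theta) = \frac{1}{m}\sum_{i=1}^{m}\left(\theta^T x^{(i)} - y^{(i)}\right)^2$$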


#use mini-batches (stochastic gradient descent to optimize the MSE)

#mini-batch Gradient Descent 
#feed the batches in through feed_dict

X=tf.placeholder(tf.float32,shape=(None,n+1),name="X")
y=tf.placeholder(tf.float32,shape=(None,1),name="y")

#define the min batch
batch_size=100
n_batches=int(np.ceil(m/batch_size))

#epoch counts full passes over the data; batch_index selects the slice within a pass
def fetch_batch(epoch,batch_index,batch_size):
    #slice the numpy arrays, not the placeholders X and y
    start=batch_index*batch_size
    end=start+batch_size
    return housing_data_plus_bias[start:end],housing.target.reshape(-1,1)[start:end]
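A common variant (a hypothetical sketch, not in the original post) samples a random mini-batch on each call instead of slicing sequentially, which is what makes the method stochastic:

def fetch_random_batch(epoch,batch_index,batch_size):
    #seed per (epoch,batch) so runs are reproducible
    rng=np.random.RandomState(epoch*n_batches+batch_index)
    indices=rng.randint(m,size=batch_size)
    return housing_data_plus_bias[indices],housing.target.reshape(-1,1)[indices]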

#note: ypredict, mse and training_op must be rebuilt on top of these placeholders
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        for i in range(n_batches):
            X_train,y_train=fetch_batch(epoch,i,batch_size)
            sess.run(training_op,feed_dict={X:X_train,y:y_train})
        if epoch%10==0:
            #mse depends on the placeholders, so it needs a feed_dict too
            print("the loss function is %s"%sess.run(mse,feed_dict={X:X_train,y:y_train}))
    best_theta=theta.eval()
#save model: how to persist the trained parameters
init=tf.global_variables_initializer()
saver=tf.train.Saver()
with tf.Session() as sess:
    sess.run(init)
    #...
    for epoch in range(n_epochs):
        save_path=saver.save(sess,"./temp/my_model.ckpt")
    best_theta=theta.eval()
    save_path=saver.save(sess,"./temp/my_model.ckpt")
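Restoring is symmetric: on a fresh session, call restore() instead of running the initializer (standard tf.train.Saver usage):

with tf.Session() as sess:
    #load the saved variable values back into the graph
    saver.restore(sess,"./temp/my_model.ckpt")
    best_theta_restored=theta.eval()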



Reposted from blog.csdn.net/hufanglei007/article/details/79702472