Simple TensorFlow Examples for Deep Learning


1. Basic TensorFlow operations

import tensorflow as tf
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"
a=tf.constant(2)
b=tf.constant(3)
with tf.Session() as sess:
    print("a:%i" % sess.run(a),"b:%i" % sess.run(b))
    print("Addition with constants: %i" % sess.run(a+b))
    print("Multiplication with constant:%i" % sess.run(a*b))

a=tf.placeholder(tf.int16)
b=tf.placeholder(tf.int16)
add=tf.add(a,b)
mul=tf.multiply(a,b)
with tf.Session() as sess:
    print("Addition with variables: %i" % sess.run(add,feed_dict={a:2,b:3}))
    print("Multiplication with variables: %i" % sess.run(mul,feed_dict={a:2,b:3}))

with tf.Session() as sess:
    matrix1=tf.constant([[3.,3.]])
    matrix2=tf.constant([[2.],[2.]])
    product=tf.matmul(matrix1,matrix2)
    result=sess.run(product)
    print(result)

Result screenshot

 

2. Linear regression

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"

# Set the training parameters: learning_rate=0.01, training_epochs=1000, display_step=50.
# Parameters
learning_rate=0.01
training_epochs=1000
display_step=50

# Create the training data.
# Training data
train_X=np.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,
                    7.042,10.791,5.313,7.997,5.654,9.27,3.1])
train_Y=np.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,
                    2.827,3.465,1.65,2.904,2.42,2.94,1.3])
n_samples=train_X.shape[0]

# Build the computation graph: X and Y are placeholders that will be fed with the training data.
# tf Graph Input
X=tf.placeholder("float")
Y=tf.placeholder("float")

# Initialize the model weights randomly.
# Set model weights
W=tf.Variable(np.random.randn(),name="weight")
b=tf.Variable(np.random.randn(),name='bias')

# Build the linear regression model: pred = W*X + b.
# Construct a linear model
pred=tf.add(tf.multiply(X,W),b)

# Loss function: mean squared error.
# Mean squared error
cost=tf.reduce_sum(tf.pow(pred-Y,2))/(2*n_samples)

# Minimize the loss with gradient descent.
# Gradient descent
optimizer=tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# Initialize all variables.
# Initialize the variables
init=tf.global_variables_initializer()

# tf.Session() creates a Session object; the session encapsulates the state and control of the TensorFlow runtime.
# Start training
with tf.Session() as sess:
    sess.run(init)

    # Call sess.run to execute the graph, i.e. train the model.
    # Fit all training data
    for epoch in range(training_epochs):
        for (x,y) in zip(train_X,train_Y):
            sess.run(optimizer,feed_dict={X:x,Y:y})
        # Display logs per epoch step
        if (epoch+1) % display_step==0:
            c=sess.run(cost,feed_dict={X:train_X,Y:train_Y})
            print("Epoch:",'%04d' % (epoch+1),"cost=","{:.9f}".format(c),"W=",sess.run(W),"b=",sess.run(b))
    print("Optimization Finished!")

    # Print the final training cost.
    training_cost=sess.run(cost,feed_dict={X:train_X,Y:train_Y})
    print("Training cost=",training_cost,"W=",sess.run(W),"b=",sess.run(b))

    # Visualize the fitted line against the original data.
    # Graphic display
    plt.plot(train_X,train_Y,'ro',label='Original data')
    plt.plot(train_X,sess.run(W)*train_X+sess.run(b),label="Fitted line")
    plt.legend()
    plt.show()

Result screenshot

 

3. Logistic regression

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist=input_data.read_data_sets("/home/yxcx/tf_data",one_hot=True)
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"

#Parameters
learning_rate=0.01
training_epochs=25
batch_size=100
display_step=1

#tf Graph Input
x=tf.placeholder(tf.float32,[None,784])
y=tf.placeholder(tf.float32,[None,10])

#Set model weights
W=tf.Variable(tf.zeros([784,10]))
b=tf.Variable(tf.zeros([10]))

#Construct model
pred=tf.nn.softmax(tf.matmul(x,W)+b)

#Minimize error using cross entropy
cost=tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred),reduction_indices=1))

#Gradient Descent
optimizer=tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

#Initialize the variables
init=tf.global_variables_initializer()

#Start training
with tf.Session() as sess:
    sess.run(init)
    #Training cycle
    for epoch in range(training_epochs):
        avg_cost=0
        total_batch=int(mnist.train.num_examples/batch_size)
        # loop over all batches
        for i in range(total_batch):
            batch_xs,batch_ys=mnist.train.next_batch(batch_size)
            #Fit training using batch data
            _,c=sess.run([optimizer,cost],feed_dict={x:batch_xs,y:batch_ys})

            # Compute the average loss over the epoch
            avg_cost+=c/total_batch
        if (epoch+1) % display_step==0:
            print("Epoch:",'%04d' % (epoch+1),"Cost:","{:.9f}".format(avg_cost))
    print("Optimization Finished!")
    # Test model
    correct_prediction=tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
    # Calculate accuracy for the first 3000 test examples
    accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
    print("Accuracy:",accuracy.eval({x:mnist.test.images[:3000],y:mnist.test.labels[:3000]}))

Result screenshot:

 

4. K-nearest neighbors

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"
mnist=input_data.read_data_sets("/home/yxcx/tf_data/MNIST_data",one_hot=True)
# mnist=input_data.read_data_sets("MNIST_data",one_hot=True,source_url='http://yann.lecun.com/exdb/mnist/')
Xtr,Ytr=mnist.train.next_batch(5000)
Xte,Yte=mnist.test.next_batch(200)

# tf Graph Input
xtr=tf.placeholder("float",[None,784])
xte=tf.placeholder("float",[784])
# L1 (Manhattan) distance between the test image and every training image
distance=tf.reduce_sum(tf.abs(tf.add(xtr,tf.negative(xte))),reduction_indices=1)
# Nearest neighbor: index of the training image with the smallest distance
pred=tf.argmin(distance,0)
accuracy=0
init=tf.global_variables_initializer()
# Start training
with tf.Session() as sess:
    sess.run(init)
    for i in range(len(Xte)):
        # Get nearest neighbor
        nn_index=sess.run(pred,feed_dict={xtr:Xtr,xte:Xte[i,:]})
        print("Test",i,"Prediction:",np.argmax(Ytr[nn_index]),"True Class:",np.argmax(Yte[i]))
        if np.argmax(Ytr[nn_index])==np.argmax(Yte[i]):
            accuracy+=1./len(Xte)
    print("Done!")
    print("Accuracy:",accuracy)

Result screenshot:

 

Note: the dataset paths used in experiments 3 and 4 do not need to be changed. The program must be run with an Internet connection, because it downloads the dataset automatically. The download may fail for network reasons; if it does, run the program a few more times and it will eventually succeed.

 

Notes on some of the methods and parameters used in the code:

1. x=tf.placeholder(dtype,shape,name)

The three parameters mean (see the sketch below):

  1. dtype: the data type, most commonly a numeric type such as tf.float32 or tf.float64.
  2. shape: the shape of the data. It defaults to None (no fixed shape); it can also be multi-dimensional, e.g. [None,784] means an unspecified number of rows and 784 columns.
  3. name: the name of the operation.
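A minimal sketch of feeding a placeholder at run time; the names and the dummy batch below are illustrative, not from the code above:

import numpy as np
import tensorflow as tf

# A placeholder is a graph node whose value is supplied at run time through feed_dict.
x = tf.placeholder(tf.float32, shape=[None, 784], name="x")  # any number of rows, 784 columns
doubled = x * 2.0

with tf.Session() as sess:
    batch = np.ones((3, 784), dtype=np.float32)              # dummy batch of 3 rows
    print(sess.run(doubled, feed_dict={x: batch}).shape)     # -> (3, 784)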

2. tf.Variable(initializer,name)

The parameters mean:

  1. initializer: the initial value of the variable (a constant, a NumPy value, a tf.zeros tensor, etc.).

  2. name: the name of the variable.
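A small sketch of creating, initializing, and reading a variable; the names are illustrative:

import tensorflow as tf

# The first argument is the initializer, i.e. the variable's initial value.
w = tf.Variable(tf.zeros([2, 2]), name="w")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # variables must be initialized before use
    print(sess.run(w))                           # -> a 2x2 matrix of zeros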

3. tf.zeros(shape, dtype=tf.float32, name=None)

The parameters mean:

  1. shape can be read as the shape of an array; tf.zeros([784,10]) in the code above is a tensor with 784 rows and 10 columns.

  2. dtype: the element type.

  3. name: the name of the operation.
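A quick check of the resulting shape, as a sketch:

import tensorflow as tf

z = tf.zeros([784, 10])          # 784 rows, 10 columns; dtype defaults to tf.float32
with tf.Session() as sess:
    print(sess.run(z).shape)     # -> (784, 10)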

4. tf.nn.softmax()

Meaning: maps each element of a vector into the range 0-1 so that the elements sum to 1, turning raw scores into a probability distribution in which the dominant class stands out.
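A small sketch of what softmax does to a vector of scores; the values are illustrative:

import tensorflow as tf

logits = tf.constant([2.0, 1.0, 0.1])
probs = tf.nn.softmax(logits)

with tf.Session() as sess:
    print(sess.run(probs))       # -> roughly [0.659 0.242 0.099], which sums to 1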

5. tf.reduce_mean(input_tensor, axis=None, keep_dims=False, name=None, reduction_indices=None)

Computes the mean of the elements of a tensor along the given dimensions (see the sketch below).

  1. input_tensor: the tensor to reduce; it should have a numeric type.

  2. axis: the dimensions to reduce. If None (the default), all dimensions are reduced. Must be in the range [-rank(input_tensor), rank(input_tensor)).

  3. keep_dims: if true, the reduced dimensions are retained with length 1.

  4. name: the name of the operation (optional).

  5. reduction_indices: the old, deprecated name for axis.
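A small example of reducing over different axes; the matrix is illustrative:

import tensorflow as tf

t = tf.constant([[1.0, 2.0],
                 [3.0, 4.0]])

with tf.Session() as sess:
    print(sess.run(tf.reduce_mean(t)))           # -> 2.5       (mean over all elements)
    print(sess.run(tf.reduce_mean(t, axis=0)))   # -> [2. 3.]   (mean of each column)
    print(sess.run(tf.reduce_mean(t, axis=1)))   # -> [1.5 3.5] (mean of each row)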

6. tf.train.GradientDescentOptimizer

  Implements plain gradient descent with a fixed learning rate (it is not an adaptive optimizer); calling minimize(cost) adds both the gradient computation and the parameter-update step to the graph.
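A minimal sketch of wiring the optimizer into a graph; the toy variable and loss are illustrative:

import tensorflow as tf

w = tf.Variable(5.0)
loss = tf.square(w - 3.0)                        # minimum at w = 3
train_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(50):
        sess.run(train_op)                       # one gradient-descent step per call
    print(sess.run(w))                           # -> close to 3.0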

