TensorFlow: Multilayer Perceptron for MNIST Handwritten Digit Recognition



import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
import matplotlib.pyplot as plt
import numpy as np

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)   # download the MNIST data
print('train images:', mnist.train.images.shape,                 # inspect the data shapes
      'labels:', mnist.train.labels.shape)
print('validation images:', mnist.validation.images.shape,
      'labels:', mnist.validation.labels.shape)
print('test images:', mnist.test.images.shape,
      'labels:', mnist.test.labels.shape)

# Define a function that displays multiple images with their labels and predictions
def plot_images_labels_prediction_3(images, labels, prediction, idx, num=10):
    fig = plt.gcf()
    fig.set_size_inches(12, 14)
    if num > 25: num = 25
    for i in range(0, num):
        ax = plt.subplot(5, 5, i + 1)
        ax.imshow(np.reshape(images[idx], (28, 28)), cmap='binary')
        title = 'label=' + str(np.argmax(labels[idx]))
        if len(prediction) > 0:
            title += ",prediction=" + str(prediction[idx])
        ax.set_title(title, fontsize=10)
        ax.set_xticks([]); ax.set_yticks([])
        idx += 1
    plt.show()

plot_images_labels_prediction_3(mnist.train.images, mnist.train.labels, [], 0)

# Define a layer function to build the multilayer perceptron
def layer(output_dim, input_dim, inputs, activation=None):
    W = tf.Variable(tf.random_normal([input_dim, output_dim]))
    b = tf.Variable(tf.random_normal([1, output_dim]))
    XWb = tf.matmul(inputs, W) + b
    if activation is None:
        outputs = XWb
    else:
        outputs = activation(XWb)
    return outputs

# Build the input layer
x = tf.placeholder("float", [None, 784])
# Build the hidden layer (256 neurons, ReLU activation)
h1 = layer(output_dim=256, input_dim=784, inputs=x,
           activation=tf.nn.relu)
# Build the output layer (10 neurons, one per digit class)
y_predict = layer(output_dim=10, input_dim=256, inputs=h1,
                  activation=None)
y_label = tf.placeholder("float", [None, 10])

# Define the loss function
loss_function = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=y_predict,
                                            labels=y_label))
# Define the optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss_function)
# Check whether each prediction matches its label
correct_prediction = tf.equal(tf.argmax(y_label, 1),
                              tf.argmax(y_predict, 1))
# Average the correct predictions to get the accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

# 1. Define the training parameters
trainEpochs = 15   # run 15 training epochs
batchSize = 100    # 100 samples per batch
totalBatchs = int(mnist.train.num_examples / batchSize)   # number of batches per epoch
loss_list = []; epoch_list = []; accuracy_list = []       # lists recording epoch, loss and accuracy
from time import time    # import the time module
startTime = time()       # record the start time
sess = tf.Session()      # create a Session
sess.run(tf.global_variables_initializer())   # initialize the TensorFlow global variables

# 2. Train the model
for epoch in range(trainEpochs):
    for i in range(totalBatchs):
        batch_x, batch_y = mnist.train.next_batch(batchSize)   # read a batch of batchSize (100) samples
        sess.run(optimizer, feed_dict={x: batch_x,
                                       y_label: batch_y})      # run one training step on the batch
    loss, acc = sess.run([loss_function, accuracy],             # compute loss and accuracy on the validation data
                         feed_dict={x: mnist.validation.images,
                                    y_label: mnist.validation.labels})
    epoch_list.append(epoch)      # record the epoch
    loss_list.append(loss)        # record the loss
    accuracy_list.append(acc)     # record the accuracy
    print("Train Epoch:", '%02d' % (epoch + 1), "Loss=",
          "{:.9f}".format(loss), "Accuracy=", acc)
duration = time() - startTime
print("Train Finished takes:", duration)   # total training time

# Plot the loss curve
fig = plt.gcf()
fig.set_size_inches(4, 2)
plt.plot(epoch_list, loss_list, label='loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['loss'], loc='upper left')
plt.show()   # show the loss plot before drawing the accuracy plot
# Plot the accuracy curve
plt.plot(epoch_list, accuracy_list, label="accuracy")
fig = plt.gcf()
fig.set_size_inches(4, 2)
plt.ylim(0.8, 1)
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend()
plt.show()
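
# Optional sketch (an addition, not from the original listing): before evaluating, the
# trained variables could be saved with tf.train.Saver so that evaluation and prediction
# can later run on a restored session without retraining. The checkpoint name
# "mlp_mnist.ckpt" is only a hypothetical example.
# saver = tf.train.Saver()
# saver.save(sess, "./mlp_mnist.ckpt")
# # Later, in a new session: saver.restore(sess, "./mlp_mnist.ckpt")
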
print("accuracy:",sess.run(accuracy, 103 feed_dict={x:mnist.test.images, 104 y_label:mnist.test.labels})) 105 #進行預測 106 #1.執行預測 107 prediction_result=sess.run(tf.argmax(y_predict,1), 108 feed_dict={x:mnist.test.images}) 109 #2.預測結果 110 print(prediction_result[:10]) 111 #3.顯示前10項預測結果 112 plot_images_labels_prediction_3(mnist.test.images, 113 mnist.test.labels, 114 prediction_result,0)

Execution results (shown as screenshots in the original post): the printed dataset shapes, the per-epoch training log with loss and accuracy, the loss and accuracy curves, and the first 10 test-set predictions.
