TensorFlow handwritten digit recognition (with comments)


import tensorflow as tf
import numpy as np
# const = tf.constant(2.0, name='const')
# b = tf.placeholder(tf.float32, [None, 1], name='b')
# # b = tf.Variable(2.0, dtype=tf.float32, name='b')
# c = tf.Variable(1.0, dtype=tf.float32, name='c')
#
# d = tf.add(b, c, name='d')
# e = tf.add(c, const, name='e')
# a = tf.multiply(d, e, name='a')
# init = tf.global_variables_initializer()
#
# print(a)
# with tf.Session() as sess:
#     sess.run(init)
#     ans = sess.run(a, feed_dict={b: np.arange(0, 10)[:, np.newaxis]})
# print(a)
# print(ans)

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)  # load the dataset

learning_rate = 0.5  # learning rate
epochs = 10  # train over the full training set 10 times
batch_size = 100  # number of samples per training batch

x = tf.placeholder(tf.float32, [None, 784])  # placeholder for the training features
y = tf.placeholder(tf.float32, [None, 10])  # placeholder for the training labels

W1 = tf.Variable(tf.random_normal([784, 300], stddev=0.03), name='W1')  # initialize the hidden layer's W1 parameters
b1 = tf.Variable(tf.random_normal([300]), name='b1')  # initialize the hidden layer's b1 parameters
W2 = tf.Variable(tf.random_normal([300, 10], stddev=0.03), name='W2')  # initialize the fully connected output layer's W2 parameters
b2 = tf.Variable(tf.random_normal([10]), name='b2')  # initialize the fully connected output layer's b2 parameters

hidden_out = tf.add(tf.matmul(x, W1), b1)  # linear part of the hidden layer
hidden_out = tf.nn.relu(hidden_out)  # hidden layer output after the ReLU activation

y_ = tf.nn.softmax(tf.add(tf.matmul(hidden_out, W2), b2))  # output of the fully connected layer (softmax probabilities)
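# Shape check for the 784-300-10 network defined above (None = batch dimension):
#   x (None, 784) @ W1 (784, 300) + b1 (300,)        -> hidden_out (None, 300) after ReLU
#   hidden_out (None, 300) @ W2 (300, 10) + b2 (10,) -> y_ (None, 10) softmax probabilities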

y_clipped = tf.clip_by_value(y_, 1e-10, 0.9999999)
cross_entropy = -tf.reduce_mean(tf.reduce_sum(y * tf.log(y_clipped) + (1 - y) * tf.log(1 - y_clipped), axis=1))
# cross-entropy loss
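# The line above implements the element-wise cross-entropy
#   -mean_over_batch( sum_j [ y_j * log(p_j) + (1 - y_j) * log(1 - p_j) ] )
# summed over the 10 output units; the clipping keeps log() away from 0 and 1.
# An optional, numerically stabler variant (a sketch, not what this script uses)
# is to feed the pre-softmax logits to tf.nn.softmax_cross_entropy_with_logits:
# logits = tf.add(tf.matmul(hidden_out, W2), b2)
# cross_entropy = tf.reduce_mean(
#     tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))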

optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cross_entropy)
# gradient descent optimizer; the argument passed to minimize() is the cross-entropy

init = tf.global_variables_initializer()  # initialize all variables

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))  # True/False for each sample
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # cast True to 1 and False to 0, then average
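# Illustrative example of what the accuracy graph computes (made-up values):
#   one-hot label row   [0, 0, 1, 0, ...]    -> tf.argmax(y, 1)  = 2
#   softmax output row  [.01, .02, .9, ...]  -> tf.argmax(y_, 1) = 2
#   tf.equal -> True, tf.cast -> 1.0; averaging over all samples gives the accuracy.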

# start training
with tf.Session() as sess:
    sess.run(init)
    total_batch = int(len(mnist.train.labels) / batch_size)  # number of iterations per epoch
    for epoch in range(epochs):
        avg_cost = 0
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size=batch_size)
            _, c = sess.run([optimizer, cross_entropy], feed_dict={x: batch_x, y: batch_y})
            # Running only the optimizer would be enough here, since the cross-entropy
            # is evaluated as part of that run anyway; we also fetch cross_entropy
            # explicitly because we want its value as the loss.
            avg_cost += c / total_batch
        print("Epoch:", (epoch + 1), "cost = ", "{:.3f}".format(avg_cost))  # loss after each full pass over the training set
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))
    # The parameters have already been trained above, so evaluating the test set
    # only requires running the final accuracy op.
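To also inspect an individual prediction, a minimal sketch is shown below. It reuses only names defined in the script above (mnist, x, y_, sess, np) and assumes it is placed inside the same with tf.Session() as sess: block, after the training loop (the indentation reflects that):

    # Predict the digit for the first test image (illustrative sketch).
    sample = mnist.test.images[0:1]                     # shape (1, 784)
    probs = sess.run(y_, feed_dict={x: sample})         # softmax output, shape (1, 10)
    predicted_digit = int(np.argmax(probs, axis=1)[0])  # most probable class
    true_digit = int(np.argmax(mnist.test.labels[0]))   # one-hot label -> class index
    print("predicted:", predicted_digit, "actual:", true_digit)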

 

