1. 創建meter
2. 添加數據
3. 展示結果
4. 清除meter
以下代碼是在前面隨筆中代碼的基礎上添加的meter相關操作:
-
import tensorflow as tf
import datetime


def preporocess(x, y):
    """Normalize one MNIST image to a flat float vector and cast its label.

    NOTE(review): the name is a typo of "preprocess"; it is kept unchanged
    so any external importers of this module keep working.

    Args:
        x: a single 28x28 uint8 image tensor.
        y: the integer class label.

    Returns:
        Tuple of (float32 tensor of shape (784,), int32 scalar label).
    """
    x = tf.cast(x, dtype=tf.float32) / 255  # scale pixel values into [0, 1]
    x = tf.reshape(x, (-1, 28 * 28))        # flatten to shape (1, 784)
    x = tf.squeeze(x, axis=0)               # drop the leading axis -> (784,)
    y = tf.cast(y, dtype=tf.int32)
    return x, y


def main():
    """Train a small dense network on MNIST, tracking loss/accuracy via Keras metrics.

    Demonstrates the metric ("meter") lifecycle: create, update_state,
    result, reset_states.
    """
    # Load the handwritten-digit data.
    mnist = tf.keras.datasets.mnist
    (train_x, train_y), (test_x, test_y) = mnist.load_data()

    # Training pipeline: pair up (x, y), preprocess each sample, shuffle, batch.
    db = tf.data.Dataset.from_tensor_slices((train_x, train_y))
    db = db.map(preporocess)
    db = db.shuffle(60000).batch(2000)

    # Test pipeline: one batch holding the entire 10k-sample test set.
    db_test = tf.data.Dataset.from_tensor_slices((test_x, test_y))
    db_test = db_test.map(preporocess)
    db_test = db_test.shuffle(10000).batch(10000)

    # Hyper-parameters.
    iter_num = 2000  # number of passes (epochs) over the training set
    lr = 0.01        # learning rate

    # Model: a stack of dense layers ending in 10 raw logits (no softmax).
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(256, activation='relu'),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(32, activation='relu'),
        tf.keras.layers.Dense(10),
    ])
    # model.build(input_shape=[None, 28 * 28])  # uncomment to inspect the network
    # model.summary()

    # Optimizer.
    # optimizer = tf.keras.optimizers.SGD(learning_rate=lr)
    optimizer = tf.keras.optimizers.Adam(learning_rate=lr)

    # Create the meters that accumulate loss and accuracy across steps.
    acc_meter = tf.keras.metrics.Accuracy()
    loss_meter = tf.keras.metrics.Mean()

    for i in range(iter_num):
        # --- training pass over all batches ---
        for step, (x, y) in enumerate(db):
            with tf.GradientTape() as tape:
                logits = model(x)
                y_onehot = tf.one_hot(y, depth=10)
                # loss = tf.reduce_mean(tf.losses.MSE(y_onehot, logits))  # squared-error loss
                # Cross-entropy on raw logits (from_logits=True).
                loss = tf.reduce_mean(
                    tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True))
            loss_meter.update_state(loss)  # add this batch's loss into the meter
            grads = tape.gradient(loss, model.trainable_variables)
            grads, _ = tf.clip_by_global_norm(grads, 15)  # gradient clipping
            optimizer.apply_gradients(zip(grads, model.trainable_variables))
            # TensorBoard logging (requires the summary_writer set up in __main__):
            # if step % 10 == 0:
            #     with summary_writer.as_default():
            #         tf.summary.scalar('loss', float(loss), step=step)

        # --- evaluation: accumulate test-set accuracy via the meter API ---
        for (x, y) in db_test:
            logits = model(x)
            out = tf.nn.softmax(logits, axis=1)
            pre = tf.argmax(out, axis=1)
            pre = tf.cast(pre, dtype=tf.int32)
            acc_meter.update_state(y, pre)  # labels vs. predicted classes
            # TensorBoard logging of accuracy:
            # with summary_writer.as_default():
            #     tf.summary.scalar('acc', float(acc_meter.result()), step=i)

        # Show the accumulated results, then clear both meters for the next epoch.
        print('loss_meter.result().numpy():', loss_meter.result().numpy())
        print('acc_meter.result().numpy():', acc_meter.result().numpy())
        loss_meter.reset_states()
        acc_meter.reset_states()
        print('第{}次迭代結束'.format(i))


if __name__ == '__main__':
    # TensorBoard log-file setup (uncomment together with the logging code above):
    # current_time = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')  # timestamp
    # log_dir = 'tb_data/logs/' + current_time                          # log dir named by time
    # summary_writer = tf.summary.create_file_writer(log_dir)           # create the writer
    main()