Dataset: flower_photos
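The flower_photos dataset contains five flower classes (daisy, dandelion, roses, sunflowers, tulips), one subdirectory per class. A minimal sketch to confirm the expected layout, assuming the archive has already been extracted to the same `F://dl_dataset//flower_photos` path the preprocessing script below uses:

```python
import os

INPUT_DATA = 'F://dl_dataset//flower_photos'  # Same path as below.

# Each class lives in its own subdirectory; count the images per class.
for name in sorted(os.listdir(INPUT_DATA)):
    class_dir = os.path.join(INPUT_DATA, name)
    if os.path.isdir(class_dir):
        n = len([f for f in os.listdir(class_dir) if f.endswith('.jpg')])
        print('%s: %d images' % (name, n))
```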
Data preprocessing
```python
import os
import numpy as np
from PIL import Image

INPUT_DATA = 'F://dl_dataset//flower_photos'
OUTPUT_FILE = 'flower_processed_data.npy'

training_images = []
training_labels = []
testing_images = []
testing_labels = []
validation_images = []
validation_labels = []

current_label = 0
for i in os.listdir(INPUT_DATA):
    path = os.path.join(INPUT_DATA, i).replace('//', '\\')
    if not os.path.isdir(path):
        continue
    for j in os.walk(path):
        for k in j[2]:
            filename = os.path.join(j[0], k)
            img = Image.open(filename)
            img = img.resize((299, 299))
            image_value = np.array(img)
            # Randomly split the data: ~10% validation, ~10% test,
            # the rest training.
            chance = np.random.randint(100)
            if chance < 10:
                validation_images.append(image_value)
                validation_labels.append(current_label)
            elif chance < 20:
                testing_images.append(image_value)
                testing_labels.append(current_label)
            else:
                training_images.append(image_value)
                training_labels.append(current_label)
    current_label += 1

# Shuffle the training data; saving and restoring the RNG state keeps
# images and labels aligned after the two shuffles.
state = np.random.get_state()
np.random.shuffle(training_images)
np.random.set_state(state)
np.random.shuffle(training_labels)

out = np.asarray([training_images, training_labels,
                  validation_images, validation_labels,
                  testing_images, testing_labels])
np.save(OUTPUT_FILE, out)
```
All six splits are stored together in a single .npy file.
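A quick sanity check is to load the file back and print the split sizes. A minimal sketch; the `allow_pickle=True` argument is an assumption needed only on newer NumPy versions, because the saved array holds Python lists:

```python
import numpy as np

# allow_pickle=True is required on NumPy >= 1.16.3 for object arrays.
data = np.load('flower_processed_data.npy', allow_pickle=True)
train_x, train_y, val_x, val_y, test_x, test_y = data
print('training: %d, validation: %d, testing: %d'
      % (len(train_x), len(val_x), len(test_x)))
print('labels seen:', sorted(set(train_y)))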
Transfer learning: fine-tuning
```python
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
# Load the Inception-v3 model definition shipped with TensorFlow-Slim.
import tensorflow.contrib.slim.python.slim.nets.inception_v3 as inception_v3

INPUT_DATA = 'flower_processed_data.npy'  # Preprocessed data file.
TRAIN_FILE = 'train_dir/model'            # Where the trained model is saved.
CKPT_FILE = 'inception_v3.ckpt'           # Pre-trained model parameters.

# Training hyperparameters.
LEARNING_RATE = 0.0001
STEPS = 300
BATCH = 32
N_CLASSES = 5

# Scopes excluded when loading the pre-trained checkpoint.
CHECKPOINT_EXCLUDE_SCOPES = 'InceptionV3/Logits,InceptionV3/AuxLogits'
# Scopes of the layers to train; for fine-tuning this is the final
# fully connected layers.
TRAINABLE_SCOPES = 'InceptionV3/Logits,InceptionV3/AuxLogits'


def get_tuned_variables():
    # Collect all parameters to load from Google's pre-trained model.
    exclusions = [scope.strip() for scope in CHECKPOINT_EXCLUDE_SCOPES.split(',')]
    variables_to_restore = []
    # Enumerate all parameters of the Inception-v3 model and skip
    # those on the exclusion list.
    for var in slim.get_model_variables():
        excluded = False
        for exclusion in exclusions:
            if var.op.name.startswith(exclusion):
                excluded = True
                break
        if not excluded:
            variables_to_restore.append(var)
    return variables_to_restore


def get_trainable_variables():
    # Collect all variables that need to be trained.
    scopes = [scope.strip() for scope in TRAINABLE_SCOPES.split(',')]
    variables_to_train = []
    # For each trainable scope prefix, gather the matching variables.
    for scope in scopes:
        variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
        variables_to_train.extend(variables)
    return variables_to_train


def main():
    # Load the preprocessed data.
    # (On newer NumPy, np.load may need allow_pickle=True here.)
    processed_data = np.load(INPUT_DATA)
    training_images = processed_data[0]
    training_labels = processed_data[1]
    validation_images = processed_data[2]
    validation_labels = processed_data[3]
    testing_images = processed_data[4]
    testing_labels = processed_data[5]
    n_training_example = len(training_images)

    images = tf.placeholder(tf.float32, [None, 299, 299, 3], name='input_images')
    labels = tf.placeholder(tf.int64, [None], name='labels')

    # Define the Inception-v3 model. Google only ships the parameter
    # values, so the model structure must be defined here. In theory the
    # training and evaluation models should differ (evaluation should use
    # is_training=False), but because the batch normalization statistics
    # in the pre-trained checkpoint differ from those of the new data,
    # the same model is used for evaluation as well.
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits, _ = inception_v3.inception_v3(
            images, num_classes=N_CLASSES, is_training=True)

    # Variables to train during fine-tuning.
    trainable_variables = get_trainable_variables()
    tf.losses.softmax_cross_entropy(
        tf.one_hot(labels, N_CLASSES), logits, weights=1.0)

    # Option 1: freeze the pre-trained layers and optimize only the
    # final fully connected layers ("partial update" results below).
    # train_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(
    #     tf.losses.get_total_loss(), var_list=trainable_variables)
    # Option 2: optimize all parameters ("full update" results below).
    train_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(
        tf.losses.get_total_loss())

    with tf.name_scope('evaluation'):
        # Compute accuracy.
        correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
        evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Function that loads Google's pre-trained Inception-v3 checkpoint.
    load_fn = slim.assign_from_checkpoint_fn(
        CKPT_FILE, get_tuned_variables(), ignore_missing_vars=True)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        load_fn(sess)  # Load the pre-trained parameters.

        start = 0
        end = BATCH
        for i in range(STEPS):
            sess.run(train_step, feed_dict={
                images: training_images[start:end],
                labels: training_labels[start:end]})

            if i % 30 == 0 or i + 1 == STEPS:
                saver.save(sess, TRAIN_FILE, global_step=i)
                validation_accuracy = sess.run(evaluation_step, feed_dict={
                    images: validation_images, labels: validation_labels})
                print('Step %d: Validation accuracy = %.1f%%'
                      % (i, validation_accuracy * 100.0))

            # Advance the sliding batch window, wrapping around.
            start = end
            if start == n_training_example:
                start = 0
            end = start + BATCH
            if end > n_training_example:
                end = n_training_example

        # Evaluate accuracy on the test data.
        test_accuracy = sess.run(evaluation_step, feed_dict={
            images: testing_images, labels: testing_labels})
        print('Final test accuracy = %.1f%%' % (test_accuracy * 100))


if __name__ == '__main__':
    main()
```
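The checkpoints written by `saver.save` end up under `train_dir/` (one per logged step). Below is a minimal restore sketch for running the fine-tuned model on new images, assuming the training script above has already run; the commented-out `batch` is a hypothetical `[N, 299, 299, 3]` float array:

```python
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.python.slim.nets.inception_v3 as inception_v3

# Rebuild the same graph so variable names match the checkpoint.
# is_training=True mirrors the batch-norm caveat discussed above.
images = tf.placeholder(tf.float32, [None, 299, 299, 3], name='input_images')
with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
    logits, _ = inception_v3.inception_v3(images, num_classes=5,
                                          is_training=True)
predictions = tf.argmax(logits, 1)

saver = tf.train.Saver()
with tf.Session() as sess:
    # Picks up the newest model-* checkpoint written during training.
    ckpt = tf.train.latest_checkpoint('train_dir')
    saver.restore(sess, ckpt)
    # preds = sess.run(predictions, feed_dict={images: batch})
```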
Updating all parameters: training is slow, but the results are decent.
```
Step 0: Validation accuracy = 25.6%
Step 30: Validation accuracy = 26.4%
Step 60: Validation accuracy = 48.0%
Step 90: Validation accuracy = 79.3%
Step 120: Validation accuracy = 88.6%
Step 150: Validation accuracy = 92.3%
Step 180: Validation accuracy = 93.2%
Step 210: Validation accuracy = 96.0%
Step 240: Validation accuracy = 94.9%
Step 270: Validation accuracy = 94.6%
Step 299: Validation accuracy = 94.6%
Final test accuracy = 92.4%
```
Updating only the final layers: training is fast, but the accuracy falls short; of course, you can keep training and see whether it improves.
```
Step 0: Validation accuracy = 25.0%
Step 30: Validation accuracy = 25.3%
Step 60: Validation accuracy = 30.1%
Step 90: Validation accuracy = 32.7%
Step 120: Validation accuracy = 42.0%
Step 150: Validation accuracy = 52.8%
Step 180: Validation accuracy = 53.7%
Step 210: Validation accuracy = 59.7%
Step 240: Validation accuracy = 61.9%
Step 270: Validation accuracy = 66.5%
Step 299: Validation accuracy = 67.0%
Final test accuracy = 67.0%
```