In reverse image search, a model is needed to extract image features, and similar images are then found by computing the Euclidean distance between those feature vectors.
This post focuses on how to build the model used for reverse image search.
For the image preprocessing utilities, see: https://keras.io/zh/preprocessing/image/
This post draws heavily on the following article (link): InceptionV3進行fine-tuning
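Before getting to the model, here is what the retrieval step looks like on its own. This is only a minimal sketch using NumPy, and the feature vectors are random stand-ins for real model outputs; it just shows how the Euclidean distances are computed and how the most similar images are ranked.

import numpy as np

# Stand-in data: assume every indexed image already has a 2048-d feature vector
gallery_features = np.random.rand(1000, 2048)   # features of the image library
query_feature = np.random.rand(2048)            # feature of the query image

# Euclidean distance between the query and every indexed image
distances = np.linalg.norm(gallery_features - query_feature, axis=1)

# Indices of the 5 most similar images, smallest distance first
top5 = np.argsort(distances)[:5]
print(top5, distances[top5])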
The code to train the model is as follows:

# Basic workflow #
import os
import sys
import glob
import argparse
import matplotlib.pyplot as plt
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD

# 1. Constants
IM_WIDTH, IM_HEIGHT = 299, 299  # input size expected by InceptionV3
FC_SIZE = 1024                  # number of nodes in the new fully connected layer

# 2. Data preparation
# Images are grouped into one sub-folder per class
train_dir = 'E:/Project/Image/data/finetune/train'  # training set
val_dir = 'E:/Project/Image/data/finetune/test'     # validation set
nb_epoch = 1
batch_size = 15
nb_classes = len(glob.glob(train_dir + "/*"))  # number of classes

# Data augmentation
# ImageDataGenerator derives the labels from the sub-folder names,
# which is why only x appears in the code and y is never built by hand
train_datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    rotation_range=30,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True
)
train_generator = train_datagen.flow_from_directory(
    train_dir, target_size=(IM_WIDTH, IM_HEIGHT),
    batch_size=batch_size, class_mode='categorical'
)
validation_generator = train_datagen.flow_from_directory(
    val_dir, target_size=(IM_WIDTH, IM_HEIGHT),
    batch_size=batch_size, class_mode='categorical'
)

# 3. Fine-tune on the bottleneck features
# Drop the model's original top (fully connected) layers and add our own

# Add the new top layers
def add_new_last_layer(base_model, nb_classes):
    x = base_model.output
    x = GlobalAveragePooling2D()(x)  # global average pooling over the spatial dimensions
    x = Dense(FC_SIZE, activation='relu')(x)
    predict_bottle_feat = Dense(nb_classes, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predict_bottle_feat)
    return model

# Freeze every layer of base_model
def setup_to_transfer_learn(model, base_model):
    for layer in base_model.layers:
        layer.trainable = False
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
                  metrics=['accuracy'])

# Build the network
base_model = InceptionV3(weights='imagenet', include_top=False)
model = add_new_last_layer(base_model, nb_classes)
setup_to_transfer_learn(model, base_model)

# Training (transfer-learning mode)
steps = 20  # tune freely: more steps per epoch means more data seen, but too many makes overfitting more likely
history_tl = model.fit_generator(
    train_generator,
    epochs=nb_epoch,
    steps_per_epoch=steps,
    validation_data=validation_generator,
    validation_steps=steps,
    class_weight='auto')

# Save the model
model.save("my_inceptionV3.h5")
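The prediction script at the end of this post reads an img_classes.json file that maps each class index to a class name. How that file is produced is not shown in the original code; one possible way, assuming it is simply the inverted class_indices dict of the training generator, is to dump it right after training while train_generator is still in scope:

import json

# train_generator.class_indices maps class name -> class index;
# invert it so the prediction code can look up a class name by index
index_to_class = {str(v): k for k, v in train_generator.class_indices.items()}
with open("img_classes.json", "w") as f:
    json.dump(index_to_class, f, ensure_ascii=False)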
Using the model to extract features from a specified layer:

from keras.preprocessing import image
from keras.applications.inception_v3 import preprocess_input
from keras.models import Model, load_model
import numpy as np

target_size = (299, 299)  # fixed input size for the InceptionV3 architecture

base_model = load_model(filepath="my_inceptionV3.h5")
# Put the name of the layer whose features you want to extract here
# (model.summary() lists all layer names available in the loaded model)
model = Model(inputs=base_model.input,
              outputs=base_model.get_layer('block4_pool').output)

img_path = "C:/Users/Administrator/Pictures/搜圖/horse.jpg"
img = image.load_img(img_path, target_size=target_size)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
block4_pool_features = model.predict(x)
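To turn this into an actual image search, the same truncated model can be run over every image in a gallery folder, and the query can then be compared against those vectors by Euclidean distance. A rough sketch, reusing model, target_size, preprocess_input and block4_pool_features from above; the E:/Project/Image/gallery folder is a hypothetical example path:

import glob
import numpy as np

def extract_feature(model, img_path):
    # Load and preprocess one image, run it through the truncated model,
    # and flatten the layer output into a 1-D feature vector
    img = image.load_img(img_path, target_size=target_size)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return model.predict(x).flatten()

# Build the index: one feature vector per gallery image
gallery_paths = glob.glob("E:/Project/Image/gallery/*.jpg")  # hypothetical folder
gallery_features = np.array([extract_feature(model, p) for p in gallery_paths])

# Use the horse image from above as the query and rank by Euclidean distance
query_feature = block4_pool_features.flatten()
distances = np.linalg.norm(gallery_features - query_feature, axis=1)
for i in np.argsort(distances)[:5]:
    print(gallery_paths[i], distances[i])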
Using the model to make predictions:

from keras.preprocessing import image
from keras.applications.inception_v3 import preprocess_input
from keras.models import load_model
import numpy as np
import json


def predict(model, img, target_size):
    """Run model prediction on image
    Args:
      model: keras model
      img: PIL format image
      target_size: (w,h) tuple
    Returns:
      numpy array of class probabilities, shape (nb_classes,)
    """
    if img.size != target_size:
        img = img.resize(target_size)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)  # preds has shape (1, nb_classes)
    return preds[0]


def decode_predict(probabilities_list):
    # img_classes.json maps the class index (as a string) to the class name
    with open("img_classes.json", 'r') as load_f:
        load_dict = json.load(load_f)
    index = probabilities_list.index(max(probabilities_list))
    target_class = load_dict[str(index)]
    return target_class


target_size = (299, 299)  # fixed input size for the InceptionV3 architecture
model = load_model(filepath="my_inceptionV3.h5")
img = image.load_img("C:/Users/Administrator/Pictures/搜圖/horse.jpg")
res_numpy = predict(model, img, target_size=target_size)
res_list = res_numpy.tolist()
target_class = decode_predict(res_list)
print(target_class)