keras_cnn: training a face classification model
No more rambling, let's get straight to the point! Today I trained on face images I cropped out myself and the results looked quite good, so I'm sharing my notes here in the hope that beginners will find them useful.
1. First, use the face detection classifier from Python's OpenCV library to extract the faces you want to train and test on. This step matters: deep learning is not a cure-all, and raw face photos contain a lot of interference, so training a model on them directly gives very poor results. One more reminder: the lighting across the face images within each class should not vary too much. Even though everything is converted to grayscale, large lighting differences will still hurt your results; I have verified this over many runs. A minimal extraction sketch is shown below.
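For reference, here is a minimal sketch of that extraction step using OpenCV's Haar cascade detector. The input folder img_raw/ and the output file naming are illustrative only; cv2.data.haarcascades ships with recent opencv-python builds, while older installs may need an explicit path to the XML file.

import os
import cv2

# Load OpenCV's bundled frontal-face Haar cascade (path is an assumption; adjust if needed)
cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

for name in os.listdir('img_raw/'):          # img_raw/ is a hypothetical source folder
    img = cv2.imread('img_raw/%s' % name)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    for k, (x, y, w, h) in enumerate(faces):
        # Crop each detected face and save it into the folder used for training below
        cv2.imwrite('img_test/%s_%d.jpg' % (name.split('.')[0], k), gray[y:y + h, x:x + w])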
2. Resize all the extracted faces to [100x100] with OpenCV's resize. I also tried RGB images, but the results were worse, so I recommend converting everything to grayscale: the data is smaller and computation is faster. For the Keras backend I recommend TensorFlow-GPU, which makes training dramatically faster than running on the CPU; a quick sanity check follows.
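Before training, it is worth confirming that Keras is really sitting on the TensorFlow backend and that a GPU is visible. A small sketch; the exact GPU check depends on your TensorFlow version.

import keras.backend as K
import tensorflow as tf

print(K.backend())                 # expect 'tensorflow'
print(tf.test.is_gpu_available())  # True if a CUDA device is visible (TF 1.x / early 2.x API)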
3. My time was limited, so the dataset has only six images: the first three are one person's face and the last three are another person's, giving just two classes. At this point many readers will be incredulous: with a dataset this small, how do you train at all, and can the results be any good? Don't rush, just keep reading. I replicated the first two images of each class to build up 100 training samples, and the last image of each class, replicated 50 times, serves as the test set. The answer is revealed below; please refer to the following code:
(1) Imports
# coding:utf-8
import numpy as np
import os
import cv2
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import optimizers
import pandas as pd
import matplotlib.pyplot as plt
(2) Read the image data
filePath = os.listdir('img_test/')
print(filePath)
img_data = []
for i in filePath:
    img_data.append(cv2.resize(cv2.cvtColor(cv2.imread('img_test/%s' % i), cv2.COLOR_BGR2GRAY),
                               (100, 100), interpolation=cv2.INTER_AREA))
(3) Build the training and test sets
x_train = np.zeros([100, 100, 100, 1])
y_train = []
x_test = np.zeros([50, 100, 100, 1])
y_test = []

for i in range(100):
    if i < 25:
        x_train[i, :, :, 0] = img_data[2]
        y_train.append(1)
    elif 25 <= i < 50:
        x_train[i, :, :, 0] = img_data[4]
        y_train.append(2)
    elif 50 <= i < 75:
        x_train[i, :, :, 0] = img_data[1]
        y_train.append(1)
    else:
        x_train[i, :, :, 0] = img_data[5]
        y_train.append(2)

for j in range(50):
    if j % 2 == 0:
        x_test[j, :, :, 0] = img_data[0]
        y_test.append(1)
    else:
        x_test[j, :, :, 0] = img_data[3]  # np.ones((100,100))
        y_test.append(2)

y_train = np.array(pd.get_dummies(y_train))
y_ts = np.array(y_test)
y_test = np.array(pd.get_dummies(y_test))
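As an aside, the same one-hot encoding can be done with Keras' own helper instead of pd.get_dummies. A small sketch, where raw_labels stands for the 1/2 integer label list built above before encoding (the name is illustrative only):

import numpy as np
from keras.utils import to_categorical

raw_labels = [1, 1, 2, 2]                                           # e.g. the integer labels before encoding
one_hot = to_categorical(np.array(raw_labels) - 1, num_classes=2)   # shift to 0-based class indices
print(one_hot)                                                      # rows like [1. 0.] and [0. 1.]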
(4) Build the keras_cnn model
model = Sequential()
# First layer:
model.add(Conv2D(32, (3, 3), input_shape=(100, 100, 1), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
# model.add(Conv2D(64,(3,3),activation='relu'))
# Second layer:
# model.add(Conv2D(32,(3,3),activation='relu'))
# model.add(Dropout(0.25))
# model.add(MaxPooling2D(pool_size=(2,2)))
# model.add(Dropout(0.25))

# Fully connected and output layers:
model.add(Flatten())
# model.add(Dense(500,activation='relu'))
# model.add(Dropout(0.5))
model.add(Dense(20, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2, activation='softmax'))

model.summary()
model.compile(loss='categorical_crossentropy',  # or 'binary_crossentropy'
              optimizer=optimizers.Adadelta(lr=0.01, rho=0.95, epsilon=1e-06),  # or 'Adadelta'
              metrics=['accuracy'])
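The comments in compile() hint at a binary alternative. If you prefer that route, a sketch (not the configuration used above) is to end the network in a single sigmoid unit, feed it 0/1 labels instead of one-hot vectors, and compile with binary_crossentropy:

model_bin = Sequential()
model_bin.add(Conv2D(32, (3, 3), input_shape=(100, 100, 1), activation='relu'))
model_bin.add(MaxPooling2D(pool_size=(2, 2)))
model_bin.add(Flatten())
model_bin.add(Dense(20, activation='relu'))
model_bin.add(Dense(1, activation='sigmoid'))      # single unit for the two-class case
model_bin.compile(loss='binary_crossentropy', optimizer='Adadelta', metrics=['accuracy'])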
(5) Train the model and output the score
# Train the model
model.fit(x_train,y_train,batch_size=30,epochs=100)
y_predict = model.predict(x_test)
score = model.evaluate(x_test, y_test)
print(score)
y_pred = np.argmax(y_predict,axis=1)
plt.figure('keras')
plt.scatter(list(range(len(y_pred))), y_pred, c=y_pred)
plt.show()
The output comes out at loss = 0.0018 and acc = 1.0, which looks very good; the result mainly comes down to how thoroughly you train the model.
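Once you are happy with the score, the trained network can be saved and reloaded to predict a single face, as hinted at by the commented-out save/load lines in the full listing below. A minimal sketch, assuming the model was saved to model/my_model.h5 and with a purely illustrative image file name:

import cv2
import numpy as np
from keras.models import load_model

model = load_model('model/my_model.h5')            # path is an assumption; match your model.save() call
face = cv2.resize(cv2.cvtColor(cv2.imread('img_test/some_face.jpg'),   # illustrative file name
                               cv2.COLOR_BGR2GRAY), (100, 100), interpolation=cv2.INTER_AREA)
pred = model.predict(face.reshape(1, 100, 100, 1))
print(np.argmax(pred, axis=1) + 1)                 # map back to the 1/2 class labels used above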


The complete code is as follows:
# coding:utf-8
import numpy as np
import os
import cv2
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import optimizers
import pandas as pd
import matplotlib.pyplot as plt

filePath = os.listdir('img_test/')
print(filePath)
img_data = []
for i in filePath:
    img_data.append(cv2.resize(cv2.cvtColor(cv2.imread('img_test/%s' % i), cv2.COLOR_BGR2GRAY),
                               (100, 100), interpolation=cv2.INTER_AREA))


x_train = np.zeros([100, 100, 100, 1])
y_train = []
x_test = np.zeros([50, 100, 100, 1])
y_test = []

for i in range(100):
    if i < 25:
        x_train[i, :, :, 0] = img_data[2]
        y_train.append(1)
    elif 25 <= i < 50:
        x_train[i, :, :, 0] = img_data[4]
        y_train.append(2)
    elif 50 <= i < 75:
        x_train[i, :, :, 0] = img_data[1]
        y_train.append(1)
    else:
        x_train[i, :, :, 0] = img_data[5]
        y_train.append(2)

for j in range(50):
    if j % 2 == 0:
        x_test[j, :, :, 0] = img_data[0]
        y_test.append(1)
    else:
        x_test[j, :, :, 0] = img_data[3]  # np.ones((100,100))
        y_test.append(2)

y_train = np.array(pd.get_dummies(y_train))
y_ts = np.array(y_test)
y_test = np.array(pd.get_dummies(y_test))
'''
from keras.models import load_model
from sklearn.metrics import accuracy_score

model = load_model('model/my_model.h5')
y_predict = model.predict(x_test)
y_p = np.argmax(y_predict,axis=1)+1
score = accuracy_score(y_ts,y_p)
# score = model.evaluate(x_train,y_train)
print(score)
'''

model = Sequential()
# First layer:
model.add(Conv2D(32, (3, 3), input_shape=(100, 100, 1), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
# model.add(Conv2D(64,(3,3),activation='relu'))
# Second layer:
# model.add(Conv2D(32,(3,3),activation='relu'))
# model.add(Dropout(0.25))
# model.add(MaxPooling2D(pool_size=(2,2)))
# model.add(Dropout(0.25))

# Fully connected and output layers:
model.add(Flatten())
# model.add(Dense(500,activation='relu'))
# model.add(Dropout(0.5))
model.add(Dense(20, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2, activation='softmax'))

model.summary()
model.compile(loss='categorical_crossentropy',  # or 'binary_crossentropy'
              optimizer=optimizers.Adadelta(lr=0.01, rho=0.95, epsilon=1e-06),  # or 'Adadelta'
              metrics=['accuracy'])

# Train the model
model.fit(x_train, y_train, batch_size=30, epochs=150)
y_predict = model.predict(x_test)
score = model.evaluate(x_test, y_test)
print('loss: ', score[0], ' acc: ', score[1])
y_pred = np.argmax(y_predict, axis=1)
plt.figure('keras', figsize=(12, 6))
plt.scatter(list(range(len(y_pred))), y_pred, c=y_pred)
plt.show()

# Save the model
# model.save('test/my_model.h5')

# import matplotlib.pyplot as plt
# plt.imshow(x_train[30,:,:,0].reshape(100,100),cmap='gray')
# plt.figure()
# plt.imshow(x_test[3,:,:,0].reshape(100,100),cmap='gray')
# plt.xticks([]);plt.yticks([])
# plt.show()
