OpenCV face recognition based on the PCA dimensionality-reduction algorithm (att_faces)
1. Data Extraction and Processing
# Import the required modules
import matplotlib.pyplot as plt
import numpy as np
import os
import cv2

# Display a grayscale image with matplotlib
def plt_show(img):
    plt.imshow(img, cmap='gray')
    plt.show()

# Read all images in a folder; the input is the folder name, the return value is a list of file paths
def read_directory(directory_name):
    faces_addr = []
    for filename in os.listdir(directory_name):
        faces_addr.append(directory_name + "/" + filename)
    return faces_addr
# Read every face folder and collect the image paths in a list
faces = []
for i in range(1, 42):
    faces_addr = read_directory('./att_faces/s' + str(i))
    for addr in faces_addr:
        faces.append(addr)

# Read the image data and build the label list
images = []
labels = []
for index, face in enumerate(faces):
    image = cv2.imread(face, 0)          # read as grayscale
    images.append(image)
    labels.append(int(index / 10 + 1))   # 10 images per subject -> labels 1..41

print(len(labels))
print(len(images))
print(type(images[0]))
print(labels)
410
410
<class 'numpy.ndarray'>
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41]
# Plot the last two groups of face images
# Create the figure and the subplot objects
fig, axes = plt.subplots(2, 10,
                         figsize=(15, 4),
                         subplot_kw={"xticks": [], "yticks": []})  # hide the axis ticks
# Fill in the images
for i, ax in enumerate(axes.flat):
    ax.imshow(images[i + 390], cmap="gray")  # grayscale colormap

2. PCA Dimensionality Reduction
# Convert the image data into a feature matrix
image_data = []
for image in images:
    data = image.flatten()
    image_data.append(data)
print(image_data[0].shape)
(10304,)
# Convert to numpy arrays
X = np.array(image_data)
y = np.array(labels)
print(type(X))
print(X.shape)
<class 'numpy.ndarray'>
(410, 10304)
How the PCA transform works: in face recognition an image is usually treated as a vector. In basic mathematics we mostly deal with two- or three-dimensional vectors, and the dimensionality of a vector is determined by the number of components it contains; for example, (x, y) is a two-dimensional vector because it has two components. When an image is abstracted into a vector, every pixel becomes one dimension, so an ordinary 112×92 image like the ones used here ends up as a 10304-dimensional vector. Such a huge dimensionality makes the subsequent image computations very difficult, so it is necessary to reduce the dimensionality while losing as little important information as possible, and PCA is one method for doing exactly that. After the PCA transform you can keep any number of the components that contribute most to the image features, i.e. you can reduce to 30 dimensions, 90 dimensions, or whatever you choose; the more dimensions you keep, the less information the image loses, but the heavier the computation becomes.
Reference blog: https://blog.csdn.net/qq_37791134/article/details/81387813
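To make the idea concrete, here is a minimal numpy sketch of the projection itself (not part of the original project; `pca_project`, `X_small` and `k` are names chosen just for this illustration). It centres the flattened images, obtains the principal directions via SVD, and projects every image onto the top k of them, which is essentially what sklearn's PCA does in the code below.

import numpy as np

def pca_project(X, k):
    """Project the rows of X (n_samples, n_features) onto the top-k principal components."""
    mean = X.mean(axis=0)                  # the "mean face"
    Xc = X - mean                          # centre the data
    # SVD of the centred data; the rows of Vt are the principal directions (eigenfaces)
    _, _, Vt = np.linalg.svd(Xc, full_matrices=False)
    components = Vt[:k]                    # shape (k, n_features)
    return Xc @ components.T, components, mean

# Random data standing in for 10 flattened 112x92 face images
X_small = np.random.rand(10, 112 * 92)
Z, components, mean = pca_project(X_small, k=5)
print(Z.shape)   # (10, 5) -> each image is now described by only 5 numbers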
# Import the PCA module from sklearn
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split

# Show the feature matrix as a table
import pandas as pd
data = pd.DataFrame(X)
data.head()
|   | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | ... | 10294 | 10295 | 10296 | 10297 | 10298 | 10299 | 10300 | 10301 | 10302 | 10303 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 48 | 49 | 45 | 47 | 49 | 57 | 39 | 42 | 53 | 49 | ... | 39 | 44 | 40 | 41 | 49 | 42 | 44 | 47 | 46 | 46 |
| 1 | 34 | 34 | 33 | 32 | 38 | 40 | 39 | 49 | 54 | 57 | ... | 42 | 44 | 38 | 30 | 37 | 30 | 36 | 37 | 40 | 33 |
| 2 | 60 | 60 | 62 | 53 | 48 | 51 | 61 | 60 | 71 | 68 | ... | 27 | 35 | 28 | 33 | 31 | 31 | 37 | 32 | 34 | 34 |
| 3 | 39 | 44 | 53 | 37 | 61 | 48 | 61 | 45 | 35 | 40 | ... | 23 | 30 | 36 | 32 | 28 | 32 | 31 | 29 | 26 | 29 |
| 4 | 63 | 53 | 35 | 36 | 33 | 34 | 31 | 35 | 39 | 43 | ... | 173 | 169 | 166 | 161 | 158 | 169 | 137 | 41 | 10 | 24 |
5 rows × 10304 columns
# Split into training and test sets
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Train the PCA model
pca = PCA(n_components=100)
pca.fit(x_train)
PCA(copy=True, iterated_power='auto', n_components=100, random_state=None,
svd_solver='auto', tol=0.0, whiten=False)
# Project the training set and the test set into the reduced space
x_train_pca = pca.transform(x_train)
x_test_pca = pca.transform(x_test)
print(x_train_pca.shape)
print(x_test_pca.shape)
(328, 100)
(82, 100)
V = pca.components_
V.shape
(100, 10304)
# The 100 eigenfaces
# Create the figure and the subplot objects
fig, axes = plt.subplots(10, 10,
                         figsize=(15, 15),
                         subplot_kw={"xticks": [], "yticks": []})  # hide the axis ticks
# Fill in the images
for i, ax in enumerate(axes.flat):
    ax.imshow(V[i, :].reshape(112, 92), cmap="gray")  # grayscale colormap

# How many components should we keep?
# The attribute explained_variance_ratio_ gives, for each new component after the reduction,
# the fraction of the original data's total information (variance) that it carries,
# also known as the explained variance ratio.
pca.explained_variance_ratio_
array([0.16754406, 0.11712118, 0.08050592, 0.05800583, 0.04899411,
0.03236304, 0.02552568, 0.02246334, 0.02105942, 0.01869678,
0.01492577, 0.01452819, 0.01195689, 0.01106418, 0.01061136,
0.00920361, 0.00893044, 0.00841665, 0.00815548, 0.00745415,
0.00684847, 0.00674609, 0.00641437, 0.00555017, 0.00533678,
0.00511044, 0.00498169, 0.00493545, 0.00477643, 0.0046901 ,
0.00452947, 0.00443995, 0.00424948, 0.00415627, 0.00402244,
0.00391703, 0.00380438, 0.00365518, 0.00347555, 0.00338822,
0.00325 , 0.00306806, 0.00305956, 0.00297671, 0.00286721,
0.00281228, 0.00272433, 0.00266031, 0.00257338, 0.00251557,
0.00247235, 0.00243605, 0.00236254, 0.00232992, 0.00225821,
0.00221418, 0.00217406, 0.00213639, 0.00203163, 0.00199645,
0.00194659, 0.00193678, 0.00187899, 0.00186114, 0.00181597,
0.00178071, 0.0017298 , 0.00171467, 0.00166234, 0.00163148,
0.00160447, 0.00157375, 0.00155019, 0.00154325, 0.00152017,
0.00149426, 0.00147426, 0.00145617, 0.00143343, 0.00140277,
0.00138425, 0.00135825, 0.00134036, 0.00133259, 0.00129024,
0.00126753, 0.00124071, 0.00123078, 0.00121395, 0.00119294,
0.00116697, 0.00115547, 0.00111406, 0.00111104, 0.00109964,
0.00107608, 0.00106702, 0.00105275, 0.00102797, 0.00100745])
# How much of the original information the kept components carry in total
pca.explained_variance_ratio_.sum()
0.9002145083699277
# Plot the number of components against the amount of information they carry
explained_variance_ratio = []
for i in range(1, 151):
    pca = PCA(n_components=i).fit(x_train)
    explained_variance_ratio.append(pca.explained_variance_ratio_.sum())
plt.plot(range(1, 151), explained_variance_ratio)
plt.show()

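As an aside that is not in the original notebook: instead of reading the number of components off this curve by eye, sklearn can pick it for you. If `n_components` is a float between 0 and 1, PCA keeps just enough components to explain that fraction of the variance. A minimal sketch, assuming the same `x_train` as above:

# Let PCA choose however many components explain ~95% of the variance
pca_95 = PCA(n_components=0.95)
pca_95.fit(x_train)
print(pca_95.n_components_)                     # the number of components actually kept
print(pca_95.explained_variance_ratio_.sum())   # should be at least 0.95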
3. Recognition with OpenCV's EigenFace Algorithm
Principle: project both the training images and the test images into the eigenvector space, then use a nearest-neighbour style method (nearest neighbour, k-nearest neighbours, etc.) to find the training image closest to each test image and assign its class (a minimal sketch of this nearest-neighbour idea follows the API notes below).
cv2.face.EigenFaceRecognizer_create() creates the face recognition model, which is trained with an array of images and the corresponding array of labels.
predict() performs the face prediction and returns a two-element result:
- the first element is the label of the recognised individual,
- the second element is the confidence; the smaller it is, the better the match, and 0 means a perfect match.
getEigenValues() returns the eigenvalues
getEigenVectors() returns the eigenvectors
getMean() returns the mean
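For intuition, the prediction step boils down to a nearest-neighbour search in the projected space. The sketch below re-implements that idea by hand on the `x_train_pca`, `x_test_pca` and `y_train` arrays computed earlier; it is an illustration only, not how OpenCV is called in this project, and the distance values will not match OpenCV's confidences exactly because the projections differ.

import numpy as np

def nearest_neighbour_predict(train_feats, train_labels, test_feat):
    """Return the label of the closest training sample and the distance to it."""
    dists = np.linalg.norm(train_feats - test_feat, axis=1)   # Euclidean distance to every training sample
    idx = np.argmin(dists)
    return train_labels[idx], dists[idx]

label, dist = nearest_neighbour_predict(x_train_pca, y_train, x_test_pca[0])
print(label, dist)   # analogous to the (label, confidence) pair returned by predict()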
# Create and train the model
model = cv2.face.EigenFaceRecognizer_create()
model.train(x_train, y_train)

# Predict
res = model.predict(x_test[0])
print(res)
(6, 1786.5044335243144)
y_test[0]
6
# Accuracy on the test set
ress = []
true = 0
for i in range(len(y_test)):
    res = model.predict(x_test[i])
    # print(res[0])
    if y_test[i] == res[0]:
        true = true + 1
    else:
        print(i)   # print the indices that were misclassified
print('Test set recognition accuracy: %.2f' % (true / len(y_test)))
8
35
Test set recognition accuracy: 0.98
# The mean face
mean = model.getMean()
print(mean)
meanFace = mean.reshape(112, 92)
plt_show(meanFace)
[[86.65243902 86.85670732 87.47865854 ... 77.02134146 76.27439024
75.65243902]]

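The API notes above also list getEigenVectors(); purely as an illustration (not in the original notebook), the eigenfaces stored inside the OpenCV model can be visualised the same way as the sklearn eigenfaces. This assumes, as I believe is the case, that OpenCV stores one eigenvector per column:

# Visualise the first eigenface held by the OpenCV model (assumes eigenvectors are stored as columns)
eigenvectors = model.getEigenVectors()          # expected shape: (10304, num_components)
first_eigenface = eigenvectors[:, 0].reshape(112, 92)
plt_show(first_eigenface)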
4. Testing with a Custom Image
# Dimensionality reduction
pca = PCA(n_components=100)
pca.fit(X)
X = pca.transform(X)

# This time use all of the data as the training set
# Create and train the model
model = cv2.face.EigenFaceRecognizer_create()
model.train(X, y)
# Display a colour image with matplotlib
def plt_show0(img):
    # OpenCV loads images as BGR; swap the channels to RGB for matplotlib
    b, g, r = cv2.split(img)
    img = cv2.merge([r, g, b])
    plt.imshow(img)
    plt.show()
# Read the image to be recognised
img = cv2.imread('./att_faces/test.jpg')
plt_show0(img)
print(img.shape)

# Convert to grayscale
img = cv2.imread('./att_faces/test.jpg', 0)
plt_show(img)
imgs = []
imgs.append(img)

# Build the feature matrix
image_data = []
for img in imgs:
    data = img.flatten()
    image_data.append(data)
test = np.array(image_data)
test.shape
(1, 10304)
# Reduce the image with the already trained PCA model
test = pca.transform(test)
test[0].shape
(100,)
res = model.predict(test)
res
(41, 4308.711798033283)
print('Face recognition result:', res[0])
Face recognition result: 41
5. Simple Face Detection in OpenCV
# Load the face detection model
face_engine = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
img = cv2.imread('./image/image.jpg')
plt_show0(img)

# Copy the image and convert it to grayscale (cv2.imread gives BGR, so use COLOR_BGR2GRAY)
img_ = img.copy()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Detect the faces and get the face regions
faces = face_engine.detectMultiScale(gray)

# Visualise the detected faces
for (x, y, w, h) in faces:
    cv2.rectangle(img_, (x, y), (x + w, y + h), (0, 0, 255), 3)
plt_show0(img_)

# Crop the face region (rows are indexed by y/height, columns by x/width)
face = img[y:y + h, x:x + w]
plt_show0(face)


Summary
Face detection can be used to obtain the face image, further processing can then be applied to it, and finally face recognition is performed; pay special attention that the image size must match the size of the training images (a minimal sketch of such a pipeline is given below).
The project can certainly be polished further, but the PCA dimensionality-reduction algorithm is very effective in practice and is well worth mastering.
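A minimal sketch of such a combined pipeline, assuming the `pca` and `model` trained in section 4; the input file name is a placeholder, not part of the original project:

# Detect a face, bring it to the training image size, then recognise it
face_engine = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

img = cv2.imread('./image/someone.jpg')          # hypothetical input image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_engine.detectMultiScale(gray)

for (x, y, w, h) in faces:
    face = gray[y:y + h, x:x + w]
    face = cv2.resize(face, (92, 112))           # (width, height) must match the 112x92 training images
    sample = pca.transform(face.flatten().reshape(1, -1))
    label, confidence = model.predict(sample)
    print('Predicted label:', label, 'confidence:', confidence)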
Notes:
- A face recognition project based on sklearn's PCA algorithm and OpenCV's eigenface algorithm
- 2020-4-28
- 曾強
OpenCV face recognition based on the PCA dimensionality-reduction algorithm (yalefaces)
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import numpy as np
import os
import cv2
import imageio

# Display a grayscale image with matplotlib
def plt_show(img):
    plt.imshow(img, cmap='gray')
    plt.show()
nameList = ['01','02','03','04','05','06','07','08','09','10','11','12','13','14','15']
characteristic = ['centerlight','glasses','happy','leftlight','noglasses','rightlight','sad','sleepy','surprised','wink']
faces = []
for name in nameList:
    for character in characteristic:
        src = './yalefaces/faces/subject' + name + '.' + character
        img = imageio.imread(src)
        faces.append(img)
len(faces)
150
labels = []
for name in nameList:
    for i in range(10):
        labels.append(int(name))
len(labels)
150
image_data = []
for image in faces:
    data = image.flatten()
    image_data.append(data)
X = np.array(image_data)
y = np.array(labels)
X.shape
(150, 77760)
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
import pandas as pd
data = pd.DataFrame(X)
data.head()
|   | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | ... | 77750 | 77751 | 77752 | 77753 | 77754 | 77755 | 77756 | 77757 | 77758 | 77759 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 130 | 130 | 130 | 130 | 130 | 130 | 130 | 130 | 130 | 130 | ... | 68 | 68 | 68 | 68 | 68 | 68 | 68 | 68 | 68 | 68 |
| 1 | 130 | 130 | 130 | 130 | 130 | 130 | 130 | 130 | 130 | 130 | ... | 68 | 68 | 68 | 68 | 68 | 68 | 68 | 68 | 68 | 68 |
| 2 | 130 | 130 | 130 | 130 | 130 | 130 | 130 | 130 | 130 | 130 | ... | 68 | 68 | 68 | 68 | 68 | 68 | 68 | 68 | 68 | 68 |
| 3 | 130 | 130 | 130 | 130 | 130 | 130 | 130 | 130 | 130 | 129 | ... | 68 | 68 | 68 | 68 | 68 | 68 | 68 | 68 | 68 | 68 |
| 4 | 130 | 130 | 130 | 130 | 130 | 130 | 130 | 130 | 130 | 130 | ... | 68 | 68 | 68 | 68 | 68 | 68 | 68 | 68 | 68 | 68 |
5 rows × 77760 columns
x_train,x_test,y_train,y_test = train_test_split(X, y, test_size=0.2)
y_test
array([13, 12, 9, 7, 15, 9, 3, 5, 12, 9, 5, 13, 1, 10, 12, 13, 9,
6, 12, 6, 1, 2, 6, 1, 7, 2, 8, 1, 3, 12])
# Train the PCA model first
pca=PCA(n_components=60)
pca.fit(x_train)
PCA(copy=True, iterated_power='auto', n_components=60, random_state=None,
svd_solver='auto', tol=0.0, whiten=False)
# Project the training set and the test set into the reduced space
x_train_pca = pca.transform(x_train)
x_test_pca = pca.transform(x_test)
print(x_train_pca.shape)
print(x_test_pca.shape)
(120, 60)
(30, 60)
pca.explained_variance_
array([1.08422936e+08, 4.66350537e+07, 3.07918180e+07, 2.64046838e+07,
1.75413992e+07, 1.60852267e+07, 9.78916417e+06, 7.52160385e+06,
6.88799618e+06, 6.66114015e+06, 4.63862806e+06, 3.90812976e+06,
3.48376116e+06, 3.18737747e+06, 3.06254146e+06, 2.75505745e+06,
2.67162174e+06, 2.40860869e+06, 2.29132186e+06, 1.99029494e+06,
1.91040434e+06, 1.62767018e+06, 1.51547238e+06, 1.41752277e+06,
1.37872739e+06, 1.28054296e+06, 1.15090468e+06, 1.10293253e+06,
1.06338859e+06, 1.02508177e+06, 9.37512997e+05, 8.71844988e+05,
8.27272967e+05, 8.00608330e+05, 7.86361786e+05, 7.62182396e+05,
7.22127119e+05, 7.14440493e+05, 6.70731109e+05, 6.51273242e+05,
6.44085041e+05, 6.38427776e+05, 5.90752701e+05, 5.67930714e+05,
5.50766567e+05, 5.43246428e+05, 5.25706169e+05, 5.15642292e+05,
4.87835392e+05, 4.64595068e+05, 4.60028444e+05, 4.52881987e+05,
4.39019101e+05, 4.32071261e+05, 4.29244698e+05, 4.04887637e+05,
3.95972004e+05, 3.84714305e+05, 3.61548759e+05, 3.50453433e+05])
pca.explained_variance_ratio_
array([0.31160122, 0.13402643, 0.0884939 , 0.07588553, 0.05041296,
0.04622801, 0.02813349, 0.02161665, 0.0197957 , 0.01914373,
0.01333115, 0.01123174, 0.01001213, 0.00916034, 0.00880157,
0.00791787, 0.00767809, 0.0069222 , 0.00658513, 0.00571999,
0.00549039, 0.00467783, 0.00435538, 0.00407388, 0.00396238,
0.00368021, 0.00330763, 0.00316976, 0.00305612, 0.00294603,
0.00269436, 0.00250563, 0.00237753, 0.0023009 , 0.00225996,
0.00219047, 0.00207535, 0.00205326, 0.00192764, 0.00187172,
0.00185106, 0.0018348 , 0.00169779, 0.0016322 , 0.00158287,
0.00156126, 0.00151085, 0.00148193, 0.00140201, 0.00133522,
0.0013221 , 0.00130156, 0.00126172, 0.00124175, 0.00123362,
0.00116362, 0.001138 , 0.00110565, 0.00103907, 0.00100718])
pca.explained_variance_ratio_.sum()
0.9713784954856973
X_tr = x_train_pca
y_tr = y_train
X_te = x_test_pca
y_te = y_test
print(len(X_tr))
print(len(y_te))
120
30
model = cv2.face.EigenFaceRecognizer_create()
model.train(X_tr,y_tr)
res = model.predict(X_te[29])
print(res)
(12, 6619.150897246195)
y_te[29]
12
ress = []
true = 0
for i in range(len(X_te)):
    res = model.predict(X_te[i])
    # print(res[0])
    if y_te[i] == res[0]:
        true = true + 1
    else:
        print(i)   # print the indices that were misclassified
print('Test set recognition accuracy: %.2f' % (true / len(y_te)))
0
12
17
20
27
28
Test set recognition accuracy: 0.80
# Recognise the "normal" input images
normal_image = []
for name in nameList:
    img = imageio.imread('./yalefaces/faces/subject' + name + '.normal')
    normal_image.append(img)

normal_image_data = []
for image in normal_image:
    data = image.flatten()
    normal_image_data.append(data)
normal = pca.transform(normal_image_data)
normal.shape
(15, 60)
res = model.predict(normal[14])
res
(15, 8895.523222370097)
img_ = imageio.imread('./yalefaces/faces/subject01.happy')
# img_ = cv2.cvtColor(img_,cv2.COLOR_BGR2GRAY)
plt_show(img_)

img = imageio.imread('./yalefaces/test2.gif')
img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
plt_show(img)
imgs = []
imgs.append(img)
imgs[0].shape

(243, 320)
img = imageio.imread('./yalefaces/test3.gif')
img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
plt_show(img)
imgs = []
imgs.append(img)
imgs[0].shape

test_img = []
for image in imgs:
    data = image.flatten()
    test_img.append(data)
test = pca.transform(test_img)
res = model.predict(test)
res
(2, 19431.70881247637)
Source code:
Link: https://pan.baidu.com/s/1yAG7R0AoxaUvF5k6EuUaQQ
Extraction code: huhr
