效果圖
調用face_recognition.face_landmarks()方法即可得到人臉特征點, 返回一個字典, 下圖是返回的數據, 包括chin(下巴), left_eye(左眼)等.
我畫了兩種圖, 一種是遍歷所有的點, 直接給點畫圖的圖(點用實心圓繪制). 第二個是單獨畫下巴, 連成線, 用的是polylines方法.
我是4.10版本的opencv. 查閱官方py文檔, 這是鏈接
完整代碼:
import face_recognition
import numpy as np
import cv2

# Detect facial landmarks on a static image and visualize them two ways:
#   window 1: every landmark point drawn as a filled circle
#   window 2: only the chin, drawn as an open polyline
image = face_recognition.load_image_file("./data/奧巴馬.png")
image2 = image.copy()

# face_landmarks() returns a list of dicts, one per face,
# mapping feature names ('chin', 'left_eye', ...) to lists of (x, y) points.
face_landmarks_list = face_recognition.face_landmarks(image)
# print(face_landmarks_list)

# Draw every landmark point as a small filled red circle (radius 3).
for each in face_landmarks_list:
    print(each)
    for name, points in each.items():
        print(name, end=': ')
        print(points)
        for pt in points:
            image = cv2.circle(image, pt, 3, (0, 0, 255), -1)
cv2.imshow("奧巴馬", image)

# Draw only the chin, connected into a polyline.
for each in face_landmarks_list:
    pts = np.array(each['chin'])
    # polylines expects an int array of shape (npoints, 1, 2)
    pts = pts.reshape((-1, 1, 2))
    cv2.polylines(image2, [pts], False, (0, 255, 255))  # False => leave the polyline open
cv2.imshow("奧巴馬2", image2)

cv2.waitKey(0)
cv2.destroyAllWindows()
在線攝像機版本:
import face_recognition
import numpy as np
import cv2

# Live-webcam version: detect facial landmarks on each frame and show
#   window 1: every landmark point as a filled circle
#   window 2: only the chin as an open polyline
camera = cv2.VideoCapture(0)
while True:
    ret, image = camera.read()
    if not ret:
        # Camera produced no frame (unplugged / end of stream) — stop
        # instead of passing None to cv2.flip.
        break
    image = cv2.flip(image, 1)  # mirror horizontally for a natural selfie view
    image2 = image.copy()

    face_landmarks_list = face_recognition.face_landmarks(image)
    # print(face_landmarks_list)

    # Draw every landmark point as a small filled red circle.
    for each in face_landmarks_list:
        print(each)
        for name, points in each.items():
            print(name, end=': ')
            print(points)
            for pt in points:
                image = cv2.circle(image, pt, 3, (0, 0, 255), -1)
    cv2.imshow("奧巴馬", image)

    # Draw only the chin, connected into a polyline.
    for each in face_landmarks_list:
        # polylines expects an int array of shape (npoints, 1, 2)
        pts = np.array(each['chin']).reshape((-1, 1, 2))
        cv2.polylines(image2, [pts], False, (0, 255, 255))  # False => leave the polyline open
    cv2.imshow("奧巴馬2", image2)

    # ~12 fps; quit on 'q'
    if cv2.waitKey(1000 // 12) & 0xff == ord("q"):
        break

camera.release()
cv2.destroyAllWindows()
附一份在線的人臉搜索代碼, 人臉數據保存在相對路徑 ./data/mans 下
import cv2
import face_recognition
import numpy as np
import os
import re

# Live face search: known faces are image files under ./data/mans;
# each file name (minus extension) is the person's name.
files = os.listdir("./data/mans")
known_encodings = []
known_names = []

# Compute one 128-d encoding per known image. Skip images where no face
# is detected: the original code stored None in that slot, which would
# crash face_recognition.face_distance later.
for filename in files:
    known_image = face_recognition.load_image_file('./data/mans/' + filename)
    encodings = face_recognition.face_encodings(known_image)
    if encodings:
        known_encodings.append(encodings[0])
        # splitext is the robust way to strip the extension; the original
        # regex r'(.*)..*' lacked an escaped dot and kept part of it.
        known_names.append(os.path.splitext(filename)[0])
print(known_names)

# Face comparison / distance examples:
# results = face_recognition.compare_faces(known_encodings, some_encoding)
# face_distances = face_recognition.face_distance(known_encodings, some_encoding)
# index = np.argmin(face_distances)

# camera = cv2.VideoCapture('./data/test.avi')  # from a video file
camera = cv2.VideoCapture(0)  # from the webcam
while True:
    ret, img = camera.read()
    if not ret:
        # No frame available — stop instead of crashing on cv2.flip(None).
        break
    img = cv2.flip(img, 1)  # mirror horizontally
    # img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # grayscale (optional)

    locations = face_recognition.face_locations(img)
    for top, right, bottom, left in locations:
        cv2.rectangle(img, (left, top), (right, bottom), (255, 0, 0), 2)
        # Encode just the detected region and label it with the closest
        # known face (smallest Euclidean distance between encodings).
        sub_img = img[top:bottom, left:right]
        sub_img_code = face_recognition.face_encodings(sub_img)
        if sub_img_code and known_encodings:
            face_distances = face_recognition.face_distance(known_encodings, sub_img_code[0])
            print(face_distances)
            index = np.argmin(face_distances)
            name = known_names[index]
            cv2.putText(img, name, (left, top - 20), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2)

    cv2.imshow('Face', img)
    if cv2.waitKey(1000 // 12) & 0xff == ord("q"):
        break

camera.release()
cv2.destroyAllWindows()