I. Introduction
Head pose estimation takes a 2D picture and estimates the orientation of the head, producing three parameters: Pitch (nodding), Yaw (shaking the head) and Roll (tilting the head), so that a machine can interpret the pose of the person in the image. This project is built on the face keypoint detection model face_landmark_localization released by PaddleHub, which was converted from https://github.com/lsy17096535/face-landmark. (Sign convention: Pitch is negative looking up and positive looking down, Yaw is positive to the left and negative to the right, Roll is negative to the left and positive to the right; the angles returned by OpenCV here are in degrees.)
II. Basic Approach
The face keypoints detected in the image are matched to corresponding points on a 3D face model; from the transformation relating the 2D and 3D coordinates we solve for the Euler angles, which are the parameters we want.
In detail: from the 3D positions of the reference points in the world coordinate system, the corresponding 2D face keypoints returned by face_landmark_localization, and the camera parameters, we compute the rotation and translation of the head (this walkthrough assumes a camera without distortion). OpenCV provides cv2.solvePnP(), which directly returns the extrinsic parameters of the image: rotation_vector and translation_vector. cv2.Rodrigues() then converts the rotation vector into a rotation matrix. Stacking the rotation matrix with the translation vector gives a projection matrix, which cv2.decomposeProjectionMatrix() decomposes into cameraMatrix, rotMatrix, transVect, rotMatrixX, rotMatrixY, rotMatrixZ and eulerAngles, so the Euler angles are simply its seventh return value. Finally, the parameters are drawn on the image. A compact sketch of this chain of calls is given below.
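To make the chain of calls concrete, here is a minimal sketch under the same no-distortion assumption; the function name euler_from_points and the arrays points_3d, points_2d and camera_matrix are placeholders for illustration, not part of the project code that follows.
import cv2
import numpy as np

def euler_from_points(points_3d, points_2d, camera_matrix):
    # points_3d: (N, 3) float array of reference coordinates on the 3D face model
    # points_2d: (N, 2) float array of matching keypoints detected in the image
    dist_coeffs = np.zeros((4, 1))  # assume no lens distortion
    _, rotation_vector, translation_vector = cv2.solvePnP(
        points_3d, points_2d, camera_matrix, dist_coeffs)
    rotation_matrix = cv2.Rodrigues(rotation_vector)[0]  # 3x1 rotation vector -> 3x3 rotation matrix
    projection_matrix = np.hstack((rotation_matrix, translation_vector))  # 3x4 matrix [R | t]
    # decomposeProjectionMatrix returns 7 values; the 7th is the Euler angles (pitch, yaw, roll) in degrees
    euler_angles = cv2.decomposeProjectionMatrix(projection_matrix)[6]
    return euler_angles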
III. Implementation
1. Import the required modules
import cv2
import numpy as np
import paddlehub as hub
2. Load the face keypoint detection model, and define the 3D keypoint coordinates of the head as well as the 3D corner points of the box that will be projected onto the image
class HeadPost(object):
    def __init__(self):
        self.module = hub.Module(name="face_landmark_localization")
        # 3D keypoint coordinates of the reference head model
        self.model_points = np.array([
            [6.825897, 6.760612, 4.402142],
            [1.330353, 7.122144, 6.903745],
            [-1.330353, 7.122144, 6.903745],
            [-6.825897, 6.760612, 4.402142],
            [5.311432, 5.485328, 3.987654],
            [1.789930, 5.393625, 4.413414],
            [-1.789930, 5.393625, 4.413414],
            [-5.311432, 5.485328, 3.987654],
            [2.005628, 1.409845, 6.165652],
            [-2.005628, 1.409845, 6.165652],
            [2.774015, -2.080775, 5.048531],
            [-2.774015, -2.080775, 5.048531],
            [0.000000, -3.116408, 6.097667],
            [0.000000, -7.415691, 4.070434]
        ], dtype='float')
        # 3D corner points of the box projected onto the image
        self.reprojectsrc = np.float32([
            [10.0, 10.0, 10.0],
            [10.0, -10.0, 10.0],
            [-10.0, 10.0, 10.0],
            [-10.0, -10.0, 10.0]])
        # Pairs of corners to connect when drawing the box
        self.line_pairs = [
            [0, 2], [1, 3], [0, 1], [2, 3]]
3. Extract the keypoints needed for pose estimation from the face_landmark_localization result
    def get_image_points(self, face_landmark):
        # The indices below assume the usual 68-point landmark layout:
        # eyebrow corners (17/21/22/26), eye corners (36/39/42/45),
        # nose wings (31/35), mouth corners (48/54), lower lip (57) and chin (8),
        # in the same order as self.model_points.
        image_points = np.array([
            face_landmark[17], face_landmark[21],
            face_landmark[22], face_landmark[26],
            face_landmark[36], face_landmark[39],
            face_landmark[42], face_landmark[45],
            face_landmark[31], face_landmark[35],
            face_landmark[48], face_landmark[54],
            face_landmark[57], face_landmark[8]
        ], dtype='float')
        return image_points
4. Obtain the rotation and translation vectors
    def get_pose_vector(self, image_points):
        # Approximate the camera: focal length taken as the image width, principal point at the image centre
        center = (self.photo_size[1] / 2, self.photo_size[0] / 2)
        focal_length = self.photo_size[1]
        # Camera intrinsic matrix
        camera_matrix = np.array([
            [focal_length, 0, center[0]],
            [0, focal_length, center[1]],
            [0, 0, 1]],
            dtype="float")
        # Distortion coefficients (assume no distortion)
        dist_coeffs = np.zeros((4, 1))
        # solvePnP takes matching 3D and 2D points plus the intrinsics (camera_matrix, dist_coeffs)
        # and recovers the extrinsics of the image: rotation_vector and translation_vector
        ret, rotation_vector, translation_vector = cv2.solvePnP(self.model_points, image_points, camera_matrix, dist_coeffs)
        # projectPoints maps the given 3D points through the recovered transform back to 2D image coordinates
        reprojectdst, ret = cv2.projectPoints(self.reprojectsrc, rotation_vector, translation_vector, camera_matrix, dist_coeffs)
        return rotation_vector, translation_vector, camera_matrix, dist_coeffs, reprojectdst
5. Compute the Euler angles
    # Convert the rotation vector into Euler angles
    def get_euler_angle(self, rotation_vector, translation_vector):
        # The Rodrigues formula converts between the rotation vector and the rotation matrix
        rvec_matrix = cv2.Rodrigues(rotation_vector)[0]
        # Stack [R | t] into a 3x4 projection matrix and decompose it; the 7th output is the Euler angles
        proj_matrix = np.hstack((rvec_matrix, translation_vector))
        euler_angles = cv2.decomposeProjectionMatrix(proj_matrix)[6]
        return euler_angles
6. Show the parameters and the projection box on the image
    def pose_euler_angle(self, photo):
        self.photo_size = photo.shape
        res = self.module.keypoint_detection(images=[photo], use_gpu=False)
        face_landmark = res[0]['data'][0]
        image_points = self.get_image_points(face_landmark)
        rotation_vector, translation_vector, camera_matrix, dist_coeffs, reprojectdst = self.get_pose_vector(image_points)
        euler_angle = self.get_euler_angle(rotation_vector, translation_vector)
        # Draw the projection box (cast the projected corners to integer pixel coordinates)
        reprojectdst = [tuple(map(int, pt)) for pt in reprojectdst.reshape(4, 2)]
        for start, end in self.line_pairs:
            cv2.line(photo, reprojectdst[start], reprojectdst[end], (0, 0, 255))
        # Mark the 14 face keypoints
        for (x, y) in image_points:
            cv2.circle(photo, (int(x), int(y)), 2, (0, 0, 255), -1)
        # Display the parameters
        cv2.putText(photo, "pitch: " + "{:5.2f}".format(euler_angle[0, 0]), (15, int(self.photo_size[0] / 2 - 30)), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(photo, "yaw: " + "{:6.2f}".format(euler_angle[1, 0]), (15, int(self.photo_size[0] / 2)), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(photo, "roll: " + "{:6.2f}".format(euler_angle[2, 0]), (15, int(self.photo_size[0] / 2 + 30)), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.imshow('headpost', photo)
        cv2.waitKey(0)
7. Feed an image into the estimator
HeadPost().pose_euler_angle(photo=cv2.imread('hbi.jpg'))
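Note that cv2.imread() silently returns None when the file cannot be read, and the failure only surfaces later inside the model call. A small defensive variant of this call (a sketch, using the same file name as above) makes that easier to diagnose:
photo = cv2.imread('hbi.jpg')
if photo is None:
    # cv2.imread returns None instead of raising when the file cannot be read
    raise FileNotFoundError('could not read hbi.jpg')
HeadPost().pose_euler_angle(photo=photo)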
IV. Problems Encountered
1. import paddlehub as hub raised an error, reproduced below:
File "D:/python代码/pycharm代码/头部姿态估计.py", line 3, in <module>
import paddlehub as hub
File "C:\Users\86183\AppData\Roaming\Python\Python37\site-packages\paddlehub\__init__.py", line 12, in <module>
from . import module
File "C:\Users\86183\AppData\Roaming\Python\Python37\site-packages\paddlehub\module\__init__.py", line 16, in <module>
from . import module
File "C:\Users\86183\AppData\Roaming\Python\Python37\site-packages\paddlehub\module\module.py", line 31, in <module>
from paddlehub.common import utils
File "C:\Users\86183\AppData\Roaming\Python\Python37\site-packages\paddlehub\common\__init__.py", line 16, in <module>
from . import utils
File "C:\Users\86183\AppData\Roaming\Python\Python37\site-packages\paddlehub\common\utils.py", line 33, in <module>
from paddlehub.common.logger import logger
File "C:\Users\86183\AppData\Roaming\Python\Python37\site-packages\paddlehub\common\logger.py", line 155, in <module>
logger = Logger()
File "C:\Users\86183\AppData\Roaming\Python\Python37\site-packages\paddlehub\common\logger.py", line 67, in __init__
level = json.load(fp).get("log_level", "DEBUG")
File "D:\Anaconda\lib\json\__init__.py", line 296, in load
parse_constant=parse_constant, object_pairs_hook=object_pairs_hook, **kw)
File "D:\Anaconda\lib\json\__init__.py", line 348, in loads
return _default_decoder.decode(s)
File "D:\Anaconda\lib\json\decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "D:\Anaconda\lib\json\decoder.py", line 355, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
Uninstalling and reinstalling PaddleHub did not help; I finally found a solution in the GitHub community.
Copying the following content into .paddlehub/conf/config.json solved the problem:
{
    "server_url": [
        "http://paddlepaddle.org.cn/paddlehub"
    ],
    "resource_storage_server_url": "https://bj.bcebos.com/paddlehub-data/",
    "debug": false,
    "log_level": "DEBUG"
}
2. When displaying the final result, the window kept showing up blank. It turned out that cv2.imshow() returns immediately, so the image flashed by; adding cv2.waitKey(0) after it fixed the problem (the argument 0 means wait indefinitely for a key press). A minimal sketch of this pattern is given below.
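For reference, a minimal sketch of the display pattern; test.jpg is a placeholder file name, and cv2.destroyAllWindows() is added here as the usual clean-up step:
import cv2

photo = cv2.imread('test.jpg')   # placeholder image path
cv2.imshow('preview', photo)     # opens the window but returns immediately
cv2.waitKey(0)                   # block until a key is pressed; 0 means wait forever
cv2.destroyAllWindows()          # close the window afterwards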
3. At first I used the 19 face keypoints from the head-pose (nodding / shaking) estimation project on AI Studio, but the Roll value came out abnormal, so I switched to the commonly used 14 face keypoints, which gave a better result. The comparison is shown below: 19 keypoints (top) and 14 keypoints (bottom). (Same sign convention as before: Pitch is negative looking up and positive looking down, Yaw is positive to the left and negative to the right, Roll is negative to the left and positive to the right; angles in degrees.)
V. Final Results
VI. Summary
This head pose estimation project based on PaddleHub was not easy for me. After working out how the different coordinate systems are converted into one another, how the rotation and translation are obtained, and how the Euler angles are derived, I have a much deeper appreciation of how important mathematics is for studying artificial intelligence. I am also not yet fluent with the common packages and modules, so I will keep studying Python in more depth and try other projects with PaddleHub to improve myself.
References:
https://blog.csdn.net/cdknight_happy/article/details/79975060
https://zhuanlan.zhihu.com/p/82064640
https://www.sohu.com/a/278664242_100007727
https://aistudio.baidu.com/aistudio/projectdetail/673271