A note on implementing a simple face-thinning feature. The overall workflow is:
1. Detect the facial landmarks with dlib
2. Thin the face with the Interactive Image Warping local translation algorithm
Reference: https://blog.csdn.net/grafx/article/details/70232797?locationNum=11&fps=1
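Briefly, the local translation warp implemented below pulls every pixel X inside a circle of radius r centered at C back toward a source position U, where M is the point the center is dragged to (here C is a cheek landmark and M is the nose tip). Restating the formula that the code's ratio computes:

    U = X - ((r^2 - |X - C|^2) / (r^2 - |X - C|^2 + |M - C|^2))^2 * (M - C)

The pixel value at U is then read back with bilinear interpolation.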
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import dlib
import cv2
import numpy as np
import math

predictor_path = 'data/shape_predictor_68_face_landmarks.dat'

# Use dlib's built-in frontal_face_detector as the face detector
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)


def landmark_dec_dlib_fun(img_src):
    img_gray = cv2.cvtColor(img_src, cv2.COLOR_BGR2GRAY)

    land_marks = []

    rects = detector(img_gray, 0)

    for i in range(len(rects)):
        land_marks_node = np.matrix([[p.x, p.y] for p in predictor(img_gray, rects[i]).parts()])
        # for idx, point in enumerate(land_marks_node):
        #     # coordinates of the 68 landmarks
        #     pos = (point[0, 0], point[0, 1])
        #     print(idx, pos)
        #     # draw a circle on each landmark with cv2.circle, 68 in total
        #     cv2.circle(img_src, pos, 5, color=(0, 255, 0))
        #     # label the landmarks 1-68 with cv2.putText
        #     font = cv2.FONT_HERSHEY_SIMPLEX
        #     cv2.putText(img_src, str(idx + 1), pos, font, 0.8, (0, 0, 255), 1, cv2.LINE_AA)
        land_marks.append(land_marks_node)

    return land_marks


'''
Method: Interactive Image Warping, local translation algorithm
'''
def localTranslationWarp(srcImg, startX, startY, endX, endY, radius):
    ddradius = float(radius * radius)
    copyImg = srcImg.copy()

    # |M - C|^2 in the formula
    ddmc = (endX - startX) * (endX - startX) + (endY - startY) * (endY - startY)
    H, W, C = srcImg.shape
    for i in range(W):
        for j in range(H):
            # Check whether the point lies inside the warp circle.
            # Optimization: skip points outside the bounding box around (startX, startY) first.
            if math.fabs(i - startX) > radius or math.fabs(j - startY) > radius:
                continue

            distance = (i - startX) * (i - startX) + (j - startY) * (j - startY)

            if distance < ddradius:
                # compute the source coordinate of (i, j):
                # the squared term on the right-hand side of the formula
                ratio = (ddradius - distance) / (ddradius - distance + ddmc)
                ratio = ratio * ratio

                # map back to the original position
                UX = i - ratio * (endX - startX)
                UY = j - ratio * (endY - startY)

                # sample the value at (UX, UY) with bilinear interpolation
                value = BilinearInsert(srcImg, UX, UY)
                # write it to the current pixel (i, j)
                copyImg[j, i] = value

    return copyImg


# bilinear interpolation
def BilinearInsert(src, ux, uy):
    h, w, c = src.shape
    if c == 3:
        x1 = int(ux)
        x2 = min(x1 + 1, w - 1)  # clamp to the image border
        y1 = int(uy)
        y2 = min(y1 + 1, h - 1)

        part1 = src[y1, x1].astype(np.float32) * (float(x2) - ux) * (float(y2) - uy)
        part2 = src[y1, x2].astype(np.float32) * (ux - float(x1)) * (float(y2) - uy)
        part3 = src[y2, x1].astype(np.float32) * (float(x2) - ux) * (uy - float(y1))
        part4 = src[y2, x2].astype(np.float32) * (ux - float(x1)) * (uy - float(y1))

        insertValue = part1 + part2 + part3 + part4

        return insertValue.astype(np.uint8)


def face_thin_auto(src):
    landmarks = landmark_dec_dlib_fun(src)

    # skip the thinning step if no facial landmarks were detected
    if len(landmarks) == 0:
        return

    for landmarks_node in landmarks:
        left_landmark = landmarks_node[3]
        left_landmark_down = landmarks_node[5]

        right_landmark = landmarks_node[13]
        right_landmark_down = landmarks_node[15]

        endPt = landmarks_node[30]

        # distance from landmark 4 to landmark 6, used as the left thinning radius
        r_left = math.sqrt(
            (left_landmark[0, 0] - left_landmark_down[0, 0]) * (left_landmark[0, 0] - left_landmark_down[0, 0]) +
            (left_landmark[0, 1] - left_landmark_down[0, 1]) * (left_landmark[0, 1] - left_landmark_down[0, 1]))

        # distance from landmark 14 to landmark 16, used as the right thinning radius
        r_right = math.sqrt(
            (right_landmark[0, 0] - right_landmark_down[0, 0]) * (right_landmark[0, 0] - right_landmark_down[0, 0]) +
            (right_landmark[0, 1] - right_landmark_down[0, 1]) * (right_landmark[0, 1] - right_landmark_down[0, 1]))

        # thin the left cheek
        thin_image = localTranslationWarp(src, left_landmark[0, 0], left_landmark[0, 1], endPt[0, 0], endPt[0, 1], r_left)
        # thin the right cheek
        thin_image = localTranslationWarp(thin_image, right_landmark[0, 0], right_landmark[0, 1], endPt[0, 0], endPt[0, 1], r_right)

    # show and save the result
    cv2.imshow('thin', thin_image)
    cv2.imwrite('thin.jpg', thin_image)


def main():
    src = cv2.imread('img/test6.jpg')
    cv2.imshow('src', src)
    face_thin_auto(src)
    cv2.waitKey(0)


if __name__ == '__main__':
    main()
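The per-pixel Python loop above is slow on larger images. As a possible optimization (not part of the original post, names are my own), the same mapping can be built as coordinate maps and sampled in one call with cv2.remap, which does the bilinear lookup that BilinearInsert performs by hand:

# Vectorized sketch of the same local translation warp.
import cv2
import numpy as np

def local_translation_warp_fast(src_img, start_x, start_y, end_x, end_y, radius):
    h, w = src_img.shape[:2]
    # identity mapping: map_x[j, i] = i, map_y[j, i] = j for every pixel
    map_x, map_y = np.meshgrid(np.arange(w, dtype=np.float32),
                               np.arange(h, dtype=np.float32))
    ddradius = float(radius * radius)
    ddmc = float((end_x - start_x) ** 2 + (end_y - start_y) ** 2)

    distance = (map_x - start_x) ** 2 + (map_y - start_y) ** 2
    inside = distance < ddradius

    # ratio stays zero outside the circle, so those pixels keep the identity mapping
    ratio = np.zeros_like(map_x)
    d = distance[inside]
    ratio[inside] = ((ddradius - d) / (ddradius - d + ddmc)) ** 2

    map_x -= ratio * (end_x - start_x)
    map_y -= ratio * (end_y - start_y)
    return cv2.remap(src_img, map_x, map_y, interpolation=cv2.INTER_LINEAR)

cv2.remap expects float32 coordinate maps, and INTER_LINEAR reproduces the bilinear sampling of the loop version, so the output should match up to rounding.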
Original article: https://blog.csdn.net/u011941438/article/details/82416470