本文讲述从在线直播拉流中通过dlib定位鼻子的demo
拉流地址:rtmp://58.200.131.2:1935/livetv/hunantv 湖南卫视
效果:
代码较容易,主逻辑都是和普通本地摄像头一样的,dlib部分要重点看看:
# -*- coding: utf-8 -*-
"""Locate and outline the nose in an RTMP live stream using dlib's 68-point
facial landmark predictor, displaying the annotated frames in an OpenCV window."""
import os
import threading

import cv2
import dlib
import imutils
from imutils import face_utils

# Model files (download links are given in the accompanying notes).
# os.path.join keeps the paths working on both Windows and POSIX systems
# (the original used hard-coded backslashes).
predictor_path = os.path.join('models', 'shape_predictor_68_face_landmarks.dat')
# Kept for parity with the notes; this demo itself does not use the
# face-recognition model.
face_rec_model_path = os.path.join('models', 'dlib_face_recognition_resnet_model_v1.dat')

predictor = dlib.shape_predictor(predictor_path)
detector = dlib.get_frontal_face_detector()

# Index range of the nose landmarks within the 68-point model.
(noseStart, noseEnd) = face_utils.FACIAL_LANDMARKS_IDXS["nose"]


class Producer(threading.Thread):
    """Thread that pulls frames from a video stream, draws the detected nose
    contour on each frame, and shows the result until 'q' is pressed or the
    stream ends.

    Parameters
    ----------
    rtmp_str : str
        Stream URL (an RTMP address works the same way as a camera index).
    """

    def __init__(self, rtmp_str):
        super(Producer, self).__init__()
        self.rtmp_str = rtmp_str
        # Open the video stream via OpenCV.
        self.cap = cv2.VideoCapture(self.rtmp_str)
        # Frames per second as reported by the source. RTMP streams often
        # report 0 (or NaN), which would make the 1000/fps delay below raise
        # ZeroDivisionError — fall back to a sane default of 25 fps.
        fps = self.cap.get(cv2.CAP_PROP_FPS)
        self.fps = fps if fps and fps > 0 else 25
        print(self.fps)
        # Native frame size of the stream (informational only; frames are
        # resized to width=600 before processing).
        self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.size = (self.width, self.height)
        print(self.size)

    def run(self):
        print('in producer')
        # One waitKey call serves as both the inter-frame delay and the
        # quit-key poll. (The original called waitKey twice per frame, which
        # doubled the delay.) Clamp to at least 1 ms — waitKey(0) blocks forever.
        delay = max(1, int(1000 / self.fps))
        try:
            ret, image = self.cap.read()
            while ret:
                # Downscale for faster detection; dlib works on grayscale.
                frame = imutils.resize(image, width=600)
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                rects = detector(gray, 0)
                for rect in rects:
                    shape = predictor(gray, rect)
                    shape = face_utils.shape_to_np(shape)
                    nose = shape[noseStart:noseEnd]
                    # Draw the convex hull of the nose landmarks and label it.
                    noseHull = cv2.convexHull(nose)
                    cv2.drawContours(frame, [noseHull], -1, (0, 255, 0), 1)
                    cv2.putText(frame, "nose", (nose[0][0], nose[0][1]),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                cv2.imshow("Frame", frame)
                if cv2.waitKey(delay) & 0xFF == ord('q'):
                    break
                ret, image = self.cap.read()
        finally:
            # Release the capture and window even when the stream ends on its
            # own (the original only cleaned up on the 'q' key, leaking the
            # capture handle when ret became False).
            self.cap.release()
            cv2.destroyAllWindows()


if __name__ == '__main__':
    print('run program')
    rtmp_str = 'rtmp://58.200.131.2:1935/livetv/hunantv'  # Hunan TV
    producer = Producer(rtmp_str)
    producer.start()
需要安装的python库
pip install imutils
pip install opencv-python
pip install dlib
依赖的dlib模型文件:
链接:https://pan.baidu.com/s/1hw0bznAO7-7f1AIvYxzVBA
提取码:8691