話不多說,直接提供兩種獲取攝像頭人臉圖片的方式
1. 使用 tracking.js 前端獲取 -- 實際web項目中最常用的
tracking.js 文檔地址: https://trackingjs.com/
獲取步驟:
A: 引入 tracking.js 和 face.js
B: 調用 tracking 獲取攝像頭視頻,放到 <video> 中,設置人臉監測
C: 調用人臉檢測方法,針對視頻每一幀圖片進行監測,當監測到人臉區域時,使用 canvas 繪制人臉區域
D: 然后針對存在人臉的圖片進行截圖,截圖區域為監測到的人臉區域,可設置參數調整截圖范圍,這樣就從當前圖片(該圖片為視頻中的某一幀存在人臉的圖片)中截取到了人臉
代碼如下:
html 代碼
<!DOCTYPE html>
<html lang="zh" xmlns:th="http://www.thymeleaf.org">
<head>
    <meta charset="utf-8">
    <title>人臉識別</title>
    <link rel="stylesheet" th:href="@{/css/layer.css}">
    <link rel="stylesheet" th:href="@{/css/demo.css}">
    <script type="text/javascript" th:src="@{/js/jquery-1.9.1.js}"></script>
    <script type="text/javascript" th:src="@{/js/layer.js}"></script>
    <script th:src="@{/js/tracking-min.js}"></script>
    <script th:src="@{/js/face-min.js}"></script>
</head>
<body>
<div>
    <p align="center">人臉圖片檢測</p>
</div>
<div class="demo-frame">
    <div class="demo-container">
        <div id="face1">
            <video id="video" width="640" height="480"></video>
            <canvas id="canvas" width="640" height="480"></canvas>
        </div>
    </div>
</div>
<script type="text/javascript" th:inline="javascript">
    window.onload = function () {
        var video = document.getElementById('video');
        var canvas = document.getElementById('canvas');
        var context = canvas.getContext('2d');

        // Create the face tracker and tune detection parameters.
        var tracker = new tracking.ObjectTracker('face');
        tracker.setInitialScale(4);
        tracker.setStepSize(2);
        tracker.setEdgesDensity(0.1);

        // Start the camera stream into <video> and begin face detection.
        // Bug fix: keep the task returned HERE so Shoot() can stop it.
        // The original called tracking.track(...) again inside Shoot() and
        // stopped only that second task, so the camera task kept running.
        var trackerTask = tracking.track('#video', tracker, { camera: true });

        // Bug fix: the original throttled captures with a busy-wait sleep()
        // that froze the whole UI thread for a minute per detection.
        // A timestamp check gives the same "at most one shot per interval"
        // behaviour without blocking.
        var lastShotAt = 0;
        var SHOT_INTERVAL_MS = 1000 * 60; // debug throttle; remove in production

        // Fired for every processed frame; event.data holds detected faces.
        tracker.on('track', function (event) {
            if (event.data.length === 0) {
                console.info('無人臉');
                context.clearRect(0, 0, canvas.width, canvas.height);
                return;
            }
            // One frame may contain several faces; draw each region.
            event.data.forEach(function (rect) {
                context.strokeStyle = '#FF0000';
                // Outline the detected face region.
                context.strokeRect(rect.x, rect.y, rect.width, rect.height);
                // Label the region with its coordinates.
                context.font = '11px Helvetica';
                context.fillStyle = "#fff";
                context.fillText('x: ' + rect.x + 'px', rect.x + rect.width + 5, rect.y + 11);
                context.fillText('y: ' + rect.y + 'px', rect.x + rect.width + 5, rect.y + 22);
                var now = Date.now();
                if (now - lastShotAt >= SHOT_INTERVAL_MS) {
                    lastShotAt = now;
                    Shoot(rect.x, rect.y, rect.width, rect.height);
                }
            });
        });

        /**
         * Crop the detected face region out of the current video frame,
         * encode it as a base64 PNG and post it to the backend.
         */
        function Shoot(x, y, width, height) {
            // Clear the red frame first so it is not captured in the
            // snapshot (in practice it did leak into captures otherwise).
            context.clearRect(0, 0, canvas.width, canvas.height);
            // Draw a slightly enlarged face region of the video onto the canvas.
            context.drawImage(video, x - 50, y - 50, width + 50, height + 100,
                              0, 0, width + 50, height + 100);
            // Bug fix: read the data URL from the canvas directly instead of
            // going through a detached <img> element and a fragile
            // `this.canvas` reference (this === window here).
            var dataUrl = canvas.toDataURL("image/png");
            // Log the base64 encoding of the captured image.
            console.log(dataUrl);
            // Strip the 22-character "data:image/png;base64," prefix.
            var b64 = dataUrl.substring(22);
            // Call the backend face-login endpoint.
            $.ajax({
                type: 'post',
                dataType: 'json',
                url: "/faceLogin",
                data: {
                    imgdata: b64,
                    identityImgBase64: "身份證圖片base64編碼",
                    name: "姓名"
                },
                success: function (result) {
                    if (result.code == '0') {
                        // Recognition succeeded: stop face tracking.
                        trackerTask.stop();
                        // other code ....
                    } else {
                        // Business decision: whether to keep tracking on failure.
                        trackerTask.stop();
                        // other code ....
                    }
                }
            });
        }
    };
</script>
</body>
</html>
相關 css
demo.css
/* Global reset and base font. */
* {
    margin: 0;
    padding: 0;
    font-family: Helvetica, Arial, sans-serif;
}

/* Fixed header bar. */
.demo-title {
    position: absolute;
    width: 100%;
    background: #2e2f33;
    z-index: 2;
    padding: .7em 0;
}

.demo-title a {
    color: #fff;
    border-bottom: 1px dotted #a64ceb;
    text-decoration: none;
}

.demo-title p {
    color: #fff;
    text-align: center;
    text-transform: lowercase;
    font-size: 15px;
}

/* Centered fixed frame holding the demo. */
.demo-frame {
    width: 854px;
    height: 658px;
    position: fixed;
    top: 50%;
    left: 50%;
    margin: -329px 0 0 -429px;
    padding: 95px 20px 45px 34px;
    overflow: hidden;
    -webkit-box-sizing: border-box;
    -moz-box-sizing: border-box;
    -ms-box-sizing: border-box;
    box-sizing: border-box;
}

.demo-container {
    width: 100%;
    height: 530px;
    position: relative;
    background: #eee;
    overflow: hidden;
    border-bottom-right-radius: 10px;
    border-bottom-left-radius: 10px;
}

/* dat.GUI controls overlay. */
.dg.ac {
    z-index: 100 !important;
    top: 50px !important;
}

#face1 {
    width: 1200px;
    height: 900px;
}

/*#face2{ margin-left: 65%; width: 550px; height: 300px; background-color: black; }*/

/* Video and canvas are stacked at the same position so the canvas
   can draw detection frames over the video. */
#video {
    margin-left: 80px;
    margin-top: 25px;
    position: absolute;
}

#canvas {
    margin-left: 80px;
    margin-top: 25px;
    position: absolute;
}

#canvas1 {
    margin-left: 72%;
    margin-top: 200px;
    width: 480px;
    height: 360px;
    /* background-color:#A64CEB;*/
}

p {
    font-size: 50px;
    font-family: "仿宋";
    margin-top: 50px;
}
2. java 后端獲取: 使用 openimaj 從攝像頭視頻中獲取人臉 --- 此方式好像沒啥用,自己寫着玩的
代碼如下:
A: 引入maven依賴
<!-- Webcam capture + OpenIMAJ face detection. Docs: http://openimaj.org -->
<dependency>
    <groupId>com.github.sarxos</groupId>
    <artifactId>webcam-capture</artifactId>
    <version>0.3.12</version>
</dependency>
<dependency>
    <groupId>org.openimaj</groupId>
    <artifactId>faces</artifactId>
    <version>1.3.10</version>
    <scope>compile</scope>
</dependency>
B: demo代碼如下,運行main即可
import cn.hutool.core.collection.CollectionUtil; import lombok.Data; import lombok.extern.slf4j.Slf4j; import org.openimaj.image.*; import org.openimaj.image.colour.RGBColour; import org.openimaj.image.colour.Transforms; import org.openimaj.image.processing.face.detection.DetectedFace; import org.openimaj.image.processing.face.detection.FaceDetector; import org.openimaj.image.processing.face.detection.HaarCascadeDetector; import org.openimaj.video.VideoDisplay; import org.openimaj.video.VideoDisplayListener; import org.openimaj.video.capture.VideoCapture; import org.openimaj.video.capture.VideoCaptureException; import javax.imageio.ImageIO; import javax.swing.*; import java.awt.image.BufferedImage; import java.io.File; import java.io.IOException; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Date; import java.util.LinkedList; import java.util.List; /** * 攝像頭拍照工具類 : 調用本機攝像頭抓取人臉拍照 * @author qxl * @date 2020/10/12 */ @Slf4j @Data public class WebcamCaptureUtil { // 視頻捕獲對象 private VideoCapture vc; // 視頻顯示 JFrame 窗口對象 private JFrame windows; // 視頻顯示對象 private VideoDisplay<MBFImage> vd; //捕獲人臉圖片存放集合 private LinkedList<BufferedImage> faceImages = new LinkedList<>(); /** * 打開攝像頭捕獲人臉,數據存入 faceImages */ public void faceCapture() throws VideoCaptureException{ // 創建視頻捕獲對象 vc = new VideoCapture(320,240); //創建 JFrame 窗口,用於顯示視頻 windows = DisplayUtilities.makeFrame("攝像頭人臉檢測中..."); windows.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); windows.setVisible(true); //創建視頻顯示對象 vd = VideoDisplay.createVideoDisplay(vc,windows); // 監聽視頻 vd.addVideoListener( //視頻顯示的監聽--針對每一幀的圖片 new VideoDisplayListener<MBFImage>(){ public void beforeUpdate(MBFImage frame){ FaceDetector<DetectedFace,FImage> fd = new HaarCascadeDetector(40); List<DetectedFace> faces = fd.detectFaces(Transforms.calculateIntensity(frame)); for(DetectedFace face : faces ) { frame.drawShape(face.getBounds(), RGBColour.RED); BufferedImage image = 
ImageUtilities.createBufferedImageForDisplay(face.getFacePatch()); faceImages.addLast(image); } } public void afterUpdate(VideoDisplay<MBFImage> display){ } }); } /** * 保存人臉圖片 * @param image 要保存的image * @param savePath 保存的路徑 * @param imageName 圖片名稱 */ public void saveImage(BufferedImage image,String savePath,String imageName) throws IOException { File path = new File(savePath); if (!path.exists()) {//如果文件不存在,則創建該目錄 path.mkdirs(); } File file = new File(savePath + "/" + imageName + ".png"); ImageIO.write(image,"png",file); } /** * 關閉攝像頭及人臉捕獲 */ public void closeWebcam(){ if(vc != null){ vc.stopCapture(); vc.close(); } if(vd != null){ vd.close(); } if(windows != null){ // 關閉 jFrame 窗口 windows.removeNotify(); } } /** * 清理緩存圖片 */ public void clearFaceImages(){ faceImages.clear(); } public static void main(String[] args) { WebcamCaptureUtil webcamCaptureUtil = new WebcamCaptureUtil(); try { //開始人臉捕獲 webcamCaptureUtil.faceCapture(); //等待捕獲人臉 LinkedList<BufferedImage> faceImages = webcamCaptureUtil.getFaceImages(); String filePath = "D:\\" + "/picture/" + new SimpleDateFormat("yyyy-MM-dd").format(new Date()); //假設獲取10張圖片之后,人臉比對成功 int count = 0; while (count<10){ if (CollectionUtil.isNotEmpty(faceImages)){ String time = new SimpleDateFormat("yyyMMdd_HHmmss").format(new Date()); BufferedImage image = faceImages.pollFirst(); if (image != null){ webcamCaptureUtil.saveImage(image,filePath,time); } Thread.sleep(1000L); count++; } } //關閉攝像頭 webcamCaptureUtil.closeWebcam(); //打印集合元素個數 System.out.println("未清空前"+faceImages.size()); webcamCaptureUtil.clearFaceImages(); System.out.println("清空后"+ faceImages.size()); } catch (VideoCaptureException e) { e.printStackTrace(); } catch (InterruptedException e) { e.printStackTrace(); } catch (IOException e) { e.printStackTrace(); } } }
以上就是前后端從攝像頭獲取人臉的兩種方式,如果想要實現人臉對比,可參考我的另一篇博客: 虹軟SDK離線人臉比對
原創作品,轉載注明出處: https://www.cnblogs.com/huaixiaonian/p/13822115.html ,謝謝!