java 调用本机摄像头获取人脸图片的实现

话不多说,直接提供两种获取摄像头人脸图片的方式

1. 使用 tracking.js 前端获取 --  实际web项目中最常用的

      tracking.js 文档地址: https://trackingjs.com/

      获取步骤:

            A:  引入 tracking.js 和  face.js

            B: 调用 tracking 获取摄像头视频,放到 <video> 中,设置人脸监测

            C: 调用人脸检测方法,针对视频每一帧图片进行监测,当监测到人脸区域时,使用 canvas 绘制人脸区域

            D: 然后针对存在人脸的图片进行截图,截图区域为监测到的人脸区域,可设置参数调整截图范围,这样就从当前图片(该图片为视频中存在人脸的某一帧)中截取到了人脸

      代码如下:

         html 代码   

<!DOCTYPE html>
<html lang="zh" xmlns:th="http://www.thymeleaf.org">
<head>
  <meta charset="utf-8">
  <title>人脸识别</title>
  <link rel="stylesheet" th:href="@{/css/layer.css}">
  <link rel="stylesheet" th:href="@{/css/demo.css}">
  <script type="text/javascript" th:src="@{/js/jquery-1.9.1.js}"></script>
  <script type="text/javascript" th:src="@{/js/layer.js}"></script>
  <script th:src="@{/js/tracking-min.js}"></script>
  <script th:src="@{/js/face-min.js}"></script>
</head>
<body>
    <div>
        <p align="center">人脸图片检测</p>
    </div>
  <div class="demo-frame">
    <div class="demo-container">
        <div id="face1">
             <video id="video" width="640" height="480"></video>
             <canvas id="canvas" width="640" height="480"></canvas>
        </div>
      </div>
  </div>
  <script type="text/javascript" th:inline="javascript">
    window.onload = function() {
      var video = document.getElementById('video');
      var canvas = document.getElementById('canvas');
      var context = canvas.getContext('2d');
      // Minimum interval between two captures (ms).
      // BUGFIX: the original rate-limited with a busy-wait sleep() that froze
      // the browser UI thread for a full minute; a timestamp throttle gives
      // the same one-shot-per-interval behaviour without blocking.
      var CAPTURE_INTERVAL_MS = 1000 * 60 * 1;
      var lastCaptureAt = 0;
      // Create and configure the face tracker.
      var tracker = new tracking.ObjectTracker('face');
      tracker.setInitialScale(4);
      tracker.setStepSize(2);
      tracker.setEdgesDensity(0.1);
      // Stream the camera into <video> and keep the task handle so tracking
      // can be stopped later.
      // BUGFIX: the original discarded this handle and started a SECOND,
      // redundant tracking task inside Shoot() just to obtain one.
      var trackerTask = tracking.track('#video', tracker, { camera: true });
      // Fired for every analysed frame; event.data holds detected face rects.
      tracker.on('track', function(event) {
        if (event.data.length === 0) {
          console.info('无人脸');
          context.clearRect(0, 0, canvas.width, canvas.height);
        } else {
          // One frame may contain several faces — handle each rect.
          event.data.forEach(function(rect) {
            // Draw a red box around the detected face region.
            context.strokeStyle = '#FF0000';
            context.strokeRect(rect.x, rect.y, rect.width, rect.height);
            // Print the face coordinates next to the box.
            context.font = '11px Helvetica';
            context.fillStyle = "#fff";
            context.fillText('x: ' + rect.x + 'px', rect.x + rect.width + 5, rect.y + 11);
            context.fillText('y: ' + rect.y + 'px', rect.x + rect.width + 5, rect.y + 22);
            // Capture/upload at most one shot per interval (non-blocking).
            if (Date.now() - lastCaptureAt >= CAPTURE_INTERVAL_MS) {
              lastCaptureAt = Date.now();
              Shoot(rect.x, rect.y, rect.width, rect.height);
            }
          });
        }
      });

      // Grab the face region from the current video frame and POST it
      // (base64-encoded PNG) to the backend face-login endpoint.
      function Shoot(x, y, width, height) {
        // Clear the overlay first so the red box is not baked into the shot.
        context.clearRect(0, 0, canvas.width, canvas.height);
        // Copy a slightly enlarged face region from the <video> element.
        context.drawImage(video, x - 50, y - 50, width + 50, height + 100, 0, 0, width + 50, height + 100);
        var img = document.createElement("img");
        // BUGFIX: was `this.canvas` — `this` is the global object here, and
        // the code only worked because elements with an id leak onto window.
        img.src = canvas.toDataURL("image/png");
        // Log the captured image's base64 data URL.
        console.log(img.src);
        // Strip the "data:image/png;base64," header. Splitting on the first
        // comma is robust to any data-URL header length, unlike the
        // original hard-coded substring(22).
        var b64 = img.src.split(',')[1];
        // Call the backend face-login endpoint.
        var path = "/faceLogin";
        $.ajax({
          type: 'post',
          dataType: 'json',
          url: path,
          data: {
            imgdata: b64,
            identityImgBase64: "身份证图片base64编码",
            name: "姓名"
          },
          success: function(result) {
            if (result.code == '0') {
              // Login succeeded — stop face tracking.
              trackerTask.stop();
              // other code ...
            } else {
              // Decide per business rules whether to keep tracking.
              trackerTask.stop();
              // other code ...
            }
          }
        });
      }
    };
</script>
</body>
</html>

 

     相关 css

          demo.css

/* Global reset: strip default margins/padding, set the base font. */
* {
  margin: 0;
  padding: 0;
  font-family: Helvetica, Arial, sans-serif;
}

/* Dark title bar pinned across the top, above the demo content. */
.demo-title {
  position: absolute;
  width: 100%;
  background: #2e2f33;
  z-index: 2;
  padding: .7em 0;
}

/* Links inside the title bar: white with a dotted purple underline. */
.demo-title a {
  color: #fff;
  border-bottom: 1px dotted #a64ceb;
  text-decoration: none;
}

/* Title text: centered, lowercased, small. */
.demo-title p {
  color: #fff;
  text-align: center;
  text-transform: lowercase;
  font-size: 15px;
}

/* Outer frame centered in the viewport via the negative-margin trick
   (fixed at 50%/50%, pulled back by half its own size). */
.demo-frame {
  width: 854px;
  height: 658px;
  position: fixed;
  top: 50%;
  left: 50%;
  margin: -329px 0 0 -429px;
  padding: 95px 20px 45px 34px;
  overflow: hidden;
  -webkit-box-sizing: border-box;
  -moz-box-sizing: border-box;
  -ms-box-sizing: border-box;
  box-sizing: border-box;
}

/* Inner container that holds the video/canvas; rounded bottom corners. */
.demo-container {
  width: 100%;
  height: 530px;
  position: relative;
  background: #eee;
  overflow: hidden;
  border-bottom-right-radius: 10px;
  border-bottom-left-radius: 10px;
}

/* dat.GUI panel (used by tracking.js demos): keep it above the frame. */
.dg.ac {
  z-index: 100 !important;
  top: 50px !important;
}

/* Wrapper around the #video and #canvas elements. */
#face1{
    width: 1200px;
    height: 900px;
}

/*#face2{
    margin-left: 65%;
    width: 550px;
    height: 300px;
    background-color: black;
}*/
/* Video and canvas are absolutely positioned with identical offsets so the
   canvas overlays the video exactly (face boxes drawn on top of the feed). */
#video{
  margin-left: 80px;
  margin-top: 25px;
  position: absolute; 
}
#canvas {
  margin-left: 80px;
  margin-top: 25px;
  position: absolute;
 }
/* Secondary canvas (unused in the current markup). */
#canvas1 {
    margin-left: 72%;
    margin-top: 200px;
    width:480px; 
    height:360px; 
 /* background-color:#A64CEB;*/
}
/* Page heading style (FangSong typeface). */
p{
    font-size: 50px;
    font-family: "仿宋";
    margin-top: 50px;
    }

2. java 后端获取: 使用 openimaj 从摄像头视频中获取到人脸  --- 此方式好像没啥用,自己写着玩的

     代码如下:

         A: 引入maven依赖 

        <!-- Face-capture dependencies:
             webcam-capture (sarxos) grabs frames from the local camera;
             openimaj `faces` provides the face detection. Docs: http://openimaj.org -->
        <dependency>
            <groupId>com.github.sarxos</groupId>
            <artifactId>webcam-capture</artifactId>
            <version>0.3.12</version>
        </dependency>
        <dependency>
            <artifactId>faces</artifactId>
            <groupId>org.openimaj</groupId>
            <version>1.3.10</version>
            <scope>compile</scope>
        </dependency>

 

     B: demo代码如下,运行main即可

        

import cn.hutool.core.collection.CollectionUtil;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.openimaj.image.*;
import org.openimaj.image.colour.RGBColour;
import org.openimaj.image.colour.Transforms;
import org.openimaj.image.processing.face.detection.DetectedFace;
import org.openimaj.image.processing.face.detection.FaceDetector;
import org.openimaj.image.processing.face.detection.HaarCascadeDetector;
import org.openimaj.video.VideoDisplay;
import org.openimaj.video.VideoDisplayListener;
import org.openimaj.video.capture.VideoCapture;
import org.openimaj.video.capture.VideoCaptureException;

import javax.imageio.ImageIO;
import javax.swing.*;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.LinkedList;
import java.util.List;

/**
 * Webcam capture utility: opens the local webcam, detects faces in the live
 * video stream with an OpenIMAJ Haar cascade, and stores the cropped face
 * patches in {@link #faceImages}.
 *
 * NOTE(review): {@code faceImages} is appended to on the video-display
 * callback thread and polled from the caller's thread without any
 * synchronization — confirm whether a concurrent deque is needed before
 * using this beyond a demo.
 *
 * @author qxl
 * @date 2020/10/12
 */
@Slf4j
@Data
public class WebcamCaptureUtil {

    // Video capture source (the camera).
    private VideoCapture vc;
    // Swing window that shows the live video.
    private JFrame windows;
    // Video display driving the per-frame callbacks.
    private VideoDisplay<MBFImage> vd;
    // Face patches captured so far, oldest first.
    private LinkedList<BufferedImage> faceImages = new LinkedList<>();

    /**
     * Opens the webcam, shows the video in a window, and registers a listener
     * that runs face detection on every frame; each detected face patch is
     * appended to {@link #faceImages}.
     *
     * @throws VideoCaptureException if the camera cannot be opened
     */
    public void faceCapture() throws VideoCaptureException {
        vc = new VideoCapture(320, 240);
        windows = DisplayUtilities.makeFrame("摄像头人脸检测中...");
        windows.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        windows.setVisible(true);
        vd = VideoDisplay.createVideoDisplay(vc, windows);
        // PERF: build the (expensive) Haar cascade detector ONCE — the
        // original re-created it inside beforeUpdate() for every frame.
        final FaceDetector<DetectedFace, FImage> fd = new HaarCascadeDetector(40);
        vd.addVideoListener(
            // Per-frame listener: detect, outline and collect faces.
            new VideoDisplayListener<MBFImage>() {
                public void beforeUpdate(MBFImage frame) {
                    List<DetectedFace> faces = fd.detectFaces(Transforms.calculateIntensity(frame));
                    for (DetectedFace face : faces) {
                        // Outline the face in the displayed frame.
                        frame.drawShape(face.getBounds(), RGBColour.RED);
                        // Keep the cropped face patch for later saving.
                        BufferedImage image = ImageUtilities.createBufferedImageForDisplay(face.getFacePatch());
                        faceImages.addLast(image);
                    }
                }

                public void afterUpdate(VideoDisplay<MBFImage> display) {
                    // Nothing to do after the frame is rendered.
                }
        });
    }

    /**
     * Saves a face image as a PNG file under {@code savePath}.
     *
     * @param image     the image to save
     * @param savePath  target directory, created if it does not exist
     * @param imageName file name without extension
     * @throws IOException if writing the file fails
     */
    public void saveImage(BufferedImage image, String savePath, String imageName) throws IOException {
        File path = new File(savePath);
        if (!path.exists()) {
            path.mkdirs();
        }
        // Use the two-argument File constructor instead of hand-joining
        // the path with "/".
        File file = new File(path, imageName + ".png");
        ImageIO.write(image, "png", file);
    }


    /**
     * Stops capturing and releases the camera, the display and the window.
     */
    public void closeWebcam() {
        if (vc != null) {
            vc.stopCapture();
            vc.close();
        }
        if (vd != null) {
            vd.close();
        }
        if (windows != null) {
            // Dispose the Swing frame's native resources.
            windows.removeNotify();
        }
    }

    /**
     * Discards all buffered face images.
     */
    public void clearFaceImages() {
        faceImages.clear();
    }


    /**
     * Demo entry point: captures faces until 10 images have been saved,
     * then shuts the camera down.
     */
    public static void main(String[] args) {
        WebcamCaptureUtil webcamCaptureUtil = new WebcamCaptureUtil();
        try {
            // Start the asynchronous face capture.
            webcamCaptureUtil.faceCapture();
            LinkedList<BufferedImage> faceImages = webcamCaptureUtil.getFaceImages();
            // BUGFIX: was "D:\\" + "/picture/" — mixed separators.
            String filePath = "D:/picture/" + new SimpleDateFormat("yyyy-MM-dd").format(new Date());
            // Assume face comparison succeeds after 10 saved images.
            int count = 0;
            while (count < 10) {
                if (CollectionUtil.isNotEmpty(faceImages)) {
                    // BUGFIX: was "yyyMMdd" (three y's) in the original.
                    String time = new SimpleDateFormat("yyyyMMdd_HHmmss").format(new Date());
                    BufferedImage image = faceImages.pollFirst();
                    if (image != null) {
                        webcamCaptureUtil.saveImage(image, filePath, time);
                        // Only count images that were actually saved.
                        count++;
                    }
                }
                // BUGFIX: the original slept only on the non-empty branch, so
                // an empty queue made this loop busy-spin at 100% CPU.
                Thread.sleep(1000L);
            }
            // Shut the camera down.
            webcamCaptureUtil.closeWebcam();
            // Show the buffer size before and after clearing.
            System.out.println("未清空前" + faceImages.size());
            webcamCaptureUtil.clearFaceImages();
            System.out.println("清空后" + faceImages.size());
        } catch (VideoCaptureException e) {
            e.printStackTrace();
        } catch (InterruptedException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

}

 

以上就是前后端从摄像头获取人脸的两种方式,如果想要实现人脸对比,可参考我的另一篇博客: 虹软SDK离线人脸比对

 原创作品,转载注明出处: https://www.cnblogs.com/huaixiaonian/p/13822115.html ,谢谢!

posted @ 2020-10-15 18:13  fy_qxl  阅读(2289)  评论(5编辑  收藏  举报