团队项目——延续之前的修改。
这次将人脸录入部分的界面嵌入到人员管理页面中。
以下是页面代码:
<!-- Enrollment page: stats bar, folder/query controls, live camera stream and import controls. -->
<template>
  <div class="import-container">
    <div class="simple-card">
      <h2>人员管理</h2>
      <!-- Counters refreshed by updateFaceCount() -->
      <div class="stats-bar">
        <div class="stat-item">
          <span class="stat-label">已保存人脸总数:</span>
          <span class="stat-value">{{ totalFacesCount }}</span>
        </div>
        <div class="stat-item" v-if="currentFaceDir">
          <span class="stat-label">当前人脸已保存:</span>
          <span class="stat-value">{{ currentFacePhotosCount }}张</span>
        </div>
      </div>
      <div class="button-group">
        <!-- Name input + data-management actions -->
        <div class="input-group">
          <input v-model="inputName" type="text" placeholder="请输入姓名"> <!-- disabled attribute removed -->
          <button @click="clearData" class="import-btn">
            <span class="icon">🗑️</span>
            删除全部数据
          </button>
          <!-- Creating a folder requires: a name, no folder yet, camera running -->
          <button @click="createFaceFolder" :disabled="!inputName || currentFaceDir || !isCameraActive" class="import-btn">
            <span class="icon">📁</span>
            创建人脸文件夹
          </button>
          <button @click="queryFaceData" class="import-btn">
            <span class="icon">🔍</span>
            查询人脸数据
          </button>
        </div>
        <div class="camera-section">
          <!-- MJPEG stream served by the Flask backend while the camera is active -->
          <div class="camera-container">
            <img v-if="isCameraActive" src="http://localhost:5000/face-video-feed" alt="Camera Feed" class="camera-preview">
            <div v-else class="camera-placeholder">
              <i class="fa fa-video-camera fa-5x"></i>
              <p>点击"录入人脸数据"启动摄像头</p>
            </div>
          </div>
          <div class="controls-container">
            <button @click="toggleCamera" class="import-btn" :class="{ 'recording': isCameraActive }">
              <span class="icon" v-if="isCameraActive">⏹️</span>
              <span class="icon" v-else>📷</span>
              {{ isCameraActive ? '结束录入人脸数据' : '录入人脸数据' }}
            </button>
            <button @click="saveCurrentFace" :disabled="!isCameraActive || !currentFaceDir" class="import-btn">
              <span class="icon">💾</span>
              保存当前人脸
            </button>
            <button @click="convertFormat" class="import-btn">
              <span class="icon">🔄</span>
              格式转换
            </button>
            <div class="lesson-selector">
              <select v-model="lessonId">
                <option value="">请选择课程</option>
                <option v-for="lesson in lessons" :key="lesson.id" :value="lesson.id">
                  {{ lesson.className }}
                </option>
              </select>
            </div>
            <button @click="importFaceData" class="import-btn">
              <span class="icon">📥</span>
              导入信息
            </button>
          </div>
        </div>
      </div>
      <!-- Status banner: processing / success / error -->
      <div v-if="processStatus" class="status-box">
        <div :class="['status-item', processStatus.type]">
          {{ processStatus.message }}
        </div>
      </div>
      <!-- Import outcome summary with per-line error details -->
      <div v-if="importResult" class="result-box">
        <div class="success-text">
          ✅ 成功导入 {{ importResult.success }} 条数据
        </div>
        <div v-if="importResult.errors.length > 0" class="error-list">
          <div class="error-header">❌ 失败 {{ importResult.errors.length }} 条:</div>
          <div v-for="(error, index) in importResult.errors" :key="index" class="error-item">
            第{{ error.line }}行: {{ error.reason }}
          </div>
        </div>
      </div>
      <!-- Query-result dialog -->
      <div v-if="isQueryDialogVisible" class="query-dialog">
        <div class="dialog-content">
          <h3>已保存的人脸数据名字</h3>
          <ul>
            <li v-for="name in queriedFaceNames" :key="name">{{ name }}</li>
          </ul>
          <button @click="isQueryDialogVisible = false">关闭</button>
        </div>
      </div>
    </div>
  </div>
</template>
<script setup>
import { ref, onMounted, onUnmounted, watch } from 'vue';
import axios from "axios";
// --- Reactive state ---
const lessonId = ref('');                 // selected course id for the import call
const lessons = ref([]);                  // course options fetched from /api/lessons
const processStatus = ref(null);          // { type, message } banner under the controls
const importResult = ref(null);           // response body of the import endpoint
const inputName = ref('');                // name typed for the person being enrolled
const isCameraActive = ref(false);        // whether the backend camera stream is running
const currentFaceDir = ref('');           // backend folder path of the current person
const totalFacesCount = ref(0);           // total registered faces reported by backend
const currentFacePhotosCount = ref(0);    // photos saved for the current person
const isQueryDialogVisible = ref(false);  // controls visibility of the query-result dialog
const queriedFaceNames = ref([]);         // names returned by the query action
// Load the current teacher's course list for the lesson selector.
// Fix: guard against a missing `user` entry in localStorage (JSON.parse(null)
// returns null and `user.name` used to throw), and let axios URL-encode the
// name via `params` instead of raw string interpolation.
const fetchLessons = async () => {
  try {
    const raw = localStorage.getItem('user');
    const user = raw ? JSON.parse(raw) : null;
    if (!user || !user.name) {
      processStatus.value = { type: 'error', message: '加载课程失败' };
      return;
    }
    const response = await axios.get('/api/lessons', { params: { name: user.name } });
    lessons.value = response.data;
  } catch (error) {
    processStatus.value = { type: 'error', message: '加载课程失败' };
  }
};
// Refresh both counters from the backend; failures are only logged because
// the counters are informational.
const updateFaceCount = async () => {
  try {
    const { data } = await axios.post('http://localhost:5000/face-registration', { action: 'check_count' });
    totalFacesCount.value = data.total_faces;
    currentFacePhotosCount.value = data.current_face_photos;
  } catch (err) {
    console.error("更新人脸计数失败:", err);
  }
};
// Load courses and counters once the component is mounted.
onMounted(() => {
  fetchLessons();
  updateFaceCount();
});
// Release the backend camera when navigating away from the page.
onUnmounted(() => {
  if (isCameraActive.value) {
    stopCamera();
  }
});
// Selecting/creating a person folder changes the per-person counter.
watch(currentFaceDir, () => {
  updateFaceCount();
});
// Delete every saved face on the backend and reset the local enrollment state.
const clearData = async () => {
  processStatus.value = { type: 'processing', message: '正在删除全部数据...' };
  try {
    const { data } = await axios.post('http://localhost:5000/face-registration', { action: 'clear' });
    currentFaceDir.value = '';
    inputName.value = ''; // clear the name field as well
    processStatus.value = { type: 'success', message: data.message };
    updateFaceCount();
  } catch (err) {
    processStatus.value = { type: 'error', message: '删除数据失败' };
  }
};
// Ask the backend to create a `person_<name>` folder for the typed name.
const createFaceFolder = async () => {
  if (!inputName.value) {
    processStatus.value = { type: 'error', message: '请输入姓名' };
    return;
  }
  processStatus.value = { type: 'processing', message: '正在创建人脸文件夹...' };
  try {
    const payload = { action: 'create_folder', name: inputName.value };
    const { data } = await axios.post('http://localhost:5000/face-registration', payload);
    if (data.status !== 'success') {
      processStatus.value = { type: 'error', message: data.message };
      return;
    }
    currentFaceDir.value = data.folder_path;
    processStatus.value = { type: 'success', message: `成功创建文件夹: ${inputName.value}` };
    updateFaceCount();
  } catch (err) {
    processStatus.value = { type: 'error', message: '创建文件夹失败' };
  }
};
// Open the backend webcam; on success the <img> MJPEG stream becomes available.
const startCamera = async () => {
  processStatus.value = { type: 'processing', message: '正在启动摄像头...' };
  try {
    const { data } = await axios.post('http://localhost:5000/face-registration', { action: 'start_camera' });
    if (data.status !== 'success') {
      processStatus.value = { type: 'error', message: data.message };
      return;
    }
    isCameraActive.value = true;
    totalFacesCount.value = data.total_faces;
    processStatus.value = { type: 'success', message: '摄像头已启动,请输入姓名并创建文件夹' };
  } catch (err) {
    processStatus.value = { type: 'error', message: '启动摄像头失败' };
  }
};
// Release the backend webcam and reset the current-person folder.
const stopCamera = async () => {
  processStatus.value = { type: 'processing', message: '正在关闭摄像头...' };
  try {
    await axios.post('http://localhost:5000/face-registration', { action: 'stop_camera' });
    isCameraActive.value = false;
    // Reset the folder but keep the typed name so several people can be
    // enrolled back to back.
    currentFaceDir.value = '';
    processStatus.value = { type: 'success', message: '摄像头已关闭' };
  } catch (err) {
    processStatus.value = { type: 'error', message: '关闭摄像头失败' };
  }
};
// Single toggle bound to the record button: stop when active, start otherwise.
const toggleCamera = () => (isCameraActive.value ? stopCamera() : startCamera());
// Capture one face photo from the current camera frame into the person folder.
const saveCurrentFace = async () => {
  if (!isCameraActive.value) {
    processStatus.value = { type: 'error', message: '请先启动摄像头' };
    return;
  }
  if (!currentFaceDir.value) {
    processStatus.value = { type: 'error', message: '请先创建人脸文件夹' };
    return;
  }
  processStatus.value = { type: 'processing', message: '正在保存人脸照片...' };
  try {
    const { data } = await axios.post('http://localhost:5000/face-registration', { action: 'save_face' });
    if (data.status === 'success') {
      currentFacePhotosCount.value = data.current_count;
      processStatus.value = { type: 'success', message: data.message };
    } else {
      processStatus.value = { type: 'error', message: data.message };
    }
  } catch (err) {
    processStatus.value = { type: 'error', message: '保存人脸照片失败' };
  }
};
// Import the enrolled face data into the selected lesson on the Java backend.
// Fix: lessonId is now URL-encoded by axios via `params` instead of raw string
// concatenation, and a stale importResult is cleared before the new request so
// the result box never shows the previous run's outcome next to a fresh status.
const importFaceData = async () => {
  if (!lessonId.value) {
    processStatus.value = { type: 'error', message: '请先选择课程' };
    return;
  }
  try {
    processStatus.value = { type: 'processing', message: '正在导入人脸数据...' };
    importResult.value = null; // drop the previous run's result while this one runs
    const response = await axios.post(
      'http://localhost:8080/api/students/import-face-data',
      null,
      { params: { lessonId: lessonId.value } }
    );
    importResult.value = response.data;
    processStatus.value = { type: 'success', message: `成功导入数据` };
  } catch (error) {
    const errorMessage = error.response?.data?.errors?.[0]?.reason || error.message;
    processStatus.value = { type: 'error', message: `导入失败: ${errorMessage}` };
  }
};
// Trigger server-side feature extraction ("format conversion") and report status.
const convertFormat = async () => {
  processStatus.value = { type: 'processing', message: '正在转换格式...' };
  try {
    await axios.post('http://localhost:5000/start-face-recognition');
    processStatus.value = { type: 'success', message: '格式转换完成' };
  } catch (err) {
    processStatus.value = { type: 'error', message: '格式转换失败' };
  }
};
// Query the names of all saved faces and show them in a dialog.
const queryFaceData = async () => {
  processStatus.value = { type: 'processing', message: '正在查询人脸数据...' };
  try {
    const { data } = await axios.post('http://localhost:5000/face-registration', { action: 'query_faces' });
    if (data.status !== 'success') {
      processStatus.value = { type: 'error', message: data.message };
      return;
    }
    queriedFaceNames.value = data.names;
    isQueryDialogVisible.value = true;
    processStatus.value = { type: 'success', message: '查询成功' };
  } catch (err) {
    processStatus.value = { type: 'error', message: '查询失败' };
  }
};
</script>
<style scoped>
/* --- Page layout --- */
.import-container {
  min-height: 100vh;
  background: #f8fafc;
  display: flex;
  justify-content: center;
  align-items: center;
  padding: 2rem;
}
.simple-card {
  background: white;
  border-radius: 12px;
  box-shadow: 0 4px 16px rgba(0, 0, 0, 0.08);
  padding: 2.5rem;
  width: 100%;
  max-width: 1200px;
}
h2 {
  color: #2d3748;
  font-size: 1.8rem;
  text-align: center;
  margin-bottom: 2rem;
  font-weight: 600;
}
/* --- Counters bar --- */
.stats-bar {
  display: flex;
  gap: 2rem;
  margin-bottom: 1.5rem;
  padding: 1rem;
  background: #f8fafc;
  border-radius: 8px;
}
.stat-item {
  display: flex;
  align-items: center;
  gap: 0.5rem;
}
.stat-label {
  color: #718096;
  font-weight: 500;
}
.stat-value {
  color: #2d3748;
  font-weight: 600;
  font-size: 1.1rem;
}
/* --- Controls layout --- */
.button-group {
  display: flex;
  flex-direction: column;
  gap: 2rem;
}
.input-group {
  display: grid;
  grid-template-columns: 2fr 1fr 1fr 1fr; /* extra column added for the query button */
  gap: 1.5rem;
}
.camera-section {
  display: grid;
  grid-template-columns: 3fr 2fr;
  gap: 2rem;
}
/* --- Camera preview --- */
.camera-container {
  background: #f8fafc;
  border-radius: 12px;
  overflow: hidden;
  box-shadow: 0 2px 8px rgba(0, 0, 0, 0.05);
  height: 400px;
  display: flex;
  align-items: center;
  justify-content: center;
}
.camera-preview {
  max-width: 100%;
  max-height: 100%;
  object-fit: contain;
}
.camera-placeholder {
  text-align: center;
  padding: 2rem;
}
.camera-placeholder i {
  color: #cbd5e0;
  margin-bottom: 1rem;
}
.camera-placeholder p {
  color: #718096;
}
.controls-container {
  display: grid;
  grid-template-columns: 1fr;
  gap: 1.5rem;
}
/* --- Buttons --- */
.import-btn {
  padding: 1rem;
  background: #f8fafc;
  border: 2px solid #e2e8f0;
  border-radius: 8px;
  color: #2d3748;
  font-size: 1rem;
  cursor: pointer;
  transition: all 0.2s ease;
  display: flex;
  align-items: center;
  justify-content: center;
  gap: 0.8rem;
}
.import-btn:hover {
  background: #4299e1;
  color: white;
  border-color: #4299e1;
  transform: translateY(-2px);
}
/* Red state while the camera is recording */
.import-btn.recording {
  background: #e53e3e;
  color: white;
  border-color: #e53e3e;
}
.import-btn.recording:hover {
  background: #c53030;
  border-color: #c53030;
}
.import-btn:disabled {
  opacity: 0.6;
  cursor: not-allowed;
  transform: none;
}
/* --- Lesson selector --- */
.lesson-selector select {
  width: 100%;
  padding: 0.75rem;
  border: 1px solid #e2e8f0;
  border-radius: 8px;
  background: white;
  color: #4a5568;
  font-size: 1rem;
  appearance: none;
  background-image: url("data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='%234a5568'%3e%3cpath d='M7 10l5 5 5-5z'/%3e%3c/svg%3e");
  background-repeat: no-repeat;
  background-position: right 1rem center;
}
/* --- Status banner --- */
.status-box {
  margin-top: 1.5rem;
}
.status-item {
  padding: 1rem;
  border-radius: 8px;
  text-align: center;
}
.status-item.processing {
  background: #ebf8ff;
  color: #3182ce;
}
.status-item.success {
  background: #f0fff4;
  color: #38a169;
}
.status-item.error {
  background: #fff5f5;
  color: #c53030;
}
/* --- Import result box --- */
.result-box {
  margin-top: 2rem;
  padding: 1.5rem;
  background: #f8fafc;
  border-radius: 8px;
}
.success-text {
  color: #38a169;
  margin-bottom: 1rem;
}
.error-header {
  color: #c53030;
  font-weight: 500;
  margin-bottom: 0.5rem;
}
.error-item {
  color: #718096;
  font-size: 0.9rem;
  padding: 0.3rem 0;
}
/* --- Query-result dialog --- */
.query-dialog {
  position: fixed;
  top: 0;
  left: 0;
  width: 100%;
  height: 100%;
  background: rgba(0, 0, 0, 0.5);
  display: flex;
  justify-content: center;
  align-items: center;
}
.dialog-content {
  background: white;
  padding: 2rem;
  border-radius: 8px;
  box-shadow: 0 4px 16px rgba(0, 0, 0, 0.1);
}
.dialog-content h3 {
  margin-bottom: 1rem;
}
.dialog-content ul {
  list-style-type: none;
  padding: 0;
  margin-bottom: 1rem;
}
.dialog-content button {
  padding: 0.5rem 1rem;
  background: #f8fafc;
  border: 2px solid #e2e8f0;
  border-radius: 8px;
  color: #2d3748;
  font-size: 1rem;
  cursor: pointer;
  transition: all 0.2s ease;
}
.dialog-content button:hover {
  background: #4299e1;
  color: white;
  border-color: #4299e1;
  transform: translateY(-2px);
}
/* --- Responsive: stack grids on narrow screens --- */
@media (max-width: 992px) {
  .camera-section {
    grid-template-columns: 1fr;
  }
  .input-group {
    grid-template-columns: 1fr;
  }
  .stats-bar {
    flex-direction: column;
    gap: 0.5rem;
  }
}
</style>
后端接口(Flask 路由)以及相关函数:
from flask import Flask, jsonify, request, Response
from flask_cors import CORS
import threading
import subprocess
import pandas as pd
from datetime import datetime
from face_reco_from_camera_ot import Face_Recognizer
from threading import Lock
import os
import shutil
import dlib
import cv2
import numpy as np
import logging
app = Flask(__name__)
CORS(app, resources={r"/*": {"origins": "*"}})  # allow cross-origin calls from the Vue dev server
# -------------------- Global state and locks --------------------
face_recognizer = None   # sign-in recognizer instance (None until /start-recognition)
current_csv = ""         # reserved; currently always ""
camera_lock = Lock()     # serialises open/release of the sign-in camera
face_camera = None       # cv2.VideoCapture used by the enrollment routes
current_face_dir = ""    # folder of the person currently being enrolled
ss_cnt = 0               # number of shots saved for the current person
registered_names = []    # names parsed from person_<name> folders on disk
path_photos_from_camera = "D:\\github\\face\\"  # NOTE(review): hard-coded Windows path — consider config
# Dlib frontal face detector shared by the enrollment routes
detector = dlib.get_frontal_face_detector()
# -------------------- 人脸录入相关函数 --------------------
# Initialise the working directory for captured face photos.
def pre_work_mkdir():
    """Ensure the root directory for captured face photos exists."""
    # exist_ok=True removes the race between the old isdir() check and makedirs().
    os.makedirs(path_photos_from_camera, exist_ok=True)
# Rebuild the registered-name cache from the folders on disk.
def check_existing_faces():
    """Refresh `registered_names` from `person_<name>` folders; return the count.

    Fixes over the original: `split('_')[1]` truncated names that themselves
    contain an underscore and raised IndexError on any stray folder without one;
    the directory was also listed twice.
    """
    global registered_names
    registered_names.clear()
    for person in os.listdir(path_photos_from_camera):
        # Split only on the first '_' so 'person_li_na' yields 'li_na';
        # silently skip entries that don't follow the naming scheme.
        parts = person.split('_', 1)
        if len(parts) == 2:
            registered_names.append(parts[1])
    print(f"已注册人脸: {len(registered_names)}")
    return len(registered_names)
# Count the photos already stored for one person.
def get_face_photos_count(face_dir):
    """Return the number of entries in `face_dir`; 0 for an empty/missing path."""
    if not face_dir:
        return 0
    if not os.path.isdir(face_dir):
        return 0
    return len(os.listdir(face_dir))
# Create the per-person photo folder and reset the shot counter.
def create_face_folder(input_name_char):
    """Create (or reuse) `person_<name>` under the photo root; returns its path.

    Fix: `os.makedirs` without exist_ok raised FileExistsError (HTTP 500) when a
    leftover folder from a previous run was still on disk.
    """
    global current_face_dir, ss_cnt
    current_face_dir = os.path.join(path_photos_from_camera, f"person_{input_name_char}")
    os.makedirs(current_face_dir, exist_ok=True)
    logging.info(f"新建的人脸文件夹: {current_face_dir}")
    ss_cnt = 0  # restart photo numbering for this person
    return current_face_dir
# Crop the detected face out of the frame and write it to disk.
def save_face_image(frame, face_rect, current_face_dir, ss_cnt):
    """Save the face crop as img_face_<ss_cnt>.jpg in `current_face_dir`.

    Fix: dlib can report rectangles whose top/left lie outside the frame
    (negative values), which silently produced a wrong/empty slice; clamp the
    start coordinates to 0 before slicing.
    """
    x, y, w, h = face_rect
    y0, x0 = max(y, 0), max(x, 0)
    face_image = frame[y0:y + h, x0:x + w]
    img_path = os.path.join(current_face_dir, f"img_face_{ss_cnt}.jpg")
    cv2.imwrite(img_path, face_image)
    logging.info(f"写入本地: {img_path}")
    return img_path
# Open the enrollment camera (idempotent).
def start_face_camera():
    """Open the default webcam if not already open; return True when usable.

    Fix: a failed open used to leave a dead VideoCapture cached in the global,
    so every retry returned False forever. Release and clear it instead.
    """
    global face_camera
    if face_camera is None:
        cap = cv2.VideoCapture(0)
        if not cap.isOpened():
            cap.release()
            return False
        face_camera = cap
    return face_camera.isOpened()
# Release the enrollment camera, if any.
def release_face_camera():
    """Close the enrollment webcam and clear the global handle."""
    global face_camera
    if face_camera is None:
        return
    face_camera.release()
    face_camera = None
# MJPEG generator for the enrollment preview stream.
def generate_face_frames():
    """Yield multipart JPEG frames with face boxes and a saved-photo counter.

    Fix: the on-frame counter label was Chinese text, but cv2.putText's Hershey
    fonts render ASCII only — it displayed as '??'. Use an ASCII label instead.
    """
    global face_camera, current_face_dir, ss_cnt
    if face_camera is None:
        # Emit one placeholder part so the <img> request terminates cleanly.
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n'
               b'<h1>No camera</h1>\r\n')
        return
    while True:
        success, frame = face_camera.read()
        if not success:
            break
        # Grayscale copy for the dlib detector
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = detector(gray)
        # Draw a box around every detected face
        for face in faces:
            x, y, w, h = face.left(), face.top(), face.width(), face.height()
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # Overlay how many photos the current person already has
        if current_face_dir:
            count_text = f"Saved: {get_face_photos_count(current_face_dir)}"
            cv2.putText(frame, count_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
        ret, buffer = cv2.imencode('.jpg', frame)
        frame = buffer.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
# -------------------- Sign-in (recognition) routes --------------------
@app.route('/start-recognition', methods=['POST'])
def start_recognition():
    """(Re)start the sign-in face recognizer, releasing any previous camera first."""
    global face_recognizer, current_csv
    try:
        with camera_lock:
            # NOTE(review): calling __del__ directly is unusual — it is the only
            # release hook Face_Recognizer exposes; its cap.release() is guarded
            # by isOpened(), so the later GC-triggered __del__ is harmless.
            if face_recognizer is not None:
                face_recognizer.__del__()
            # A fresh instance re-opens the camera and reloads the feature CSV.
            face_recognizer = Face_Recognizer()
            current_csv = ""  # removed reference to a non-existent attribute
        return jsonify({
            "status": "started",
            "message": "人脸识别已启动"
        })
    except Exception as e:
        return jsonify({
            "status": "error",
            "message": str(e)
        }), 500
@app.route('/video_feed')
def video_feed():
    """Stream the sign-in recognizer's annotated frames as MJPEG (503 if not started)."""
    global face_recognizer
    if face_recognizer is None:
        return Response("人脸识别未初始化", status=503)
    frames = face_recognizer.generate_frames()
    return Response(frames, mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/sign-in-status', methods=['GET'])
def get_sign_in_status():
    """Return the names recorded as signed in so far (empty before start)."""
    global face_recognizer
    try:
        names = [] if face_recognizer is None else face_recognizer.signed_in_names
        return jsonify({"signed": names})
    except Exception as e:
        return jsonify({"signed": [], "error": str(e)}), 500
# NOTE(review): reset_lock is defined but never used in this file — confirm before removing.
reset_lock = Lock()
@app.route('/release-camera', methods=['POST'])
def release_camera():
    """Release the sign-in recognizer and its camera."""
    global face_recognizer
    with camera_lock:
        if face_recognizer is not None:
            # __del__ is the only release hook the class exposes
            face_recognizer.__del__()
            face_recognizer = None
    return jsonify({"status": "success"})
# -------------------- Enrollment routes --------------------
@app.route('/face-registration', methods=['POST'])
def face_registration():
    """Dispatch endpoint for all face-enrollment actions.

    Expects a JSON body {"action": <str>, "name": <str, create_folder only>}
    and always answers JSON containing at least {"status": ...}.

    Fixes over the original:
    - request.get_json(silent=True) so a missing/invalid JSON body becomes {}
      instead of an exception/400;
    - 'clear' resets current_face_dir and ss_cnt, so a later 'save_face' cannot
      write into the just-deleted folder;
    - 'save_face' guards against face_camera being None (previously raised
      AttributeError -> HTTP 500).
    """
    global current_face_dir, ss_cnt
    data = request.get_json(silent=True) or {}
    action = data.get('action')
    input_name = data.get('name')
    if action == 'check_count':
        # Report total registered people and shots for the current person.
        return jsonify({
            "status": "success",
            "total_faces": check_existing_faces(),
            "current_face_photos": get_face_photos_count(current_face_dir)
        })
    elif action == 'clear':
        # Wipe every saved face and recreate an empty root directory.
        if os.path.isdir(path_photos_from_camera):
            shutil.rmtree(path_photos_from_camera)
        pre_work_mkdir()
        check_existing_faces()
        # The selected folder was just deleted — reset per-person state.
        current_face_dir = ""
        ss_cnt = 0
        return jsonify({"status": "success", "message": "已清除所有数据"})
    elif action == 'create_folder':
        if not input_name:
            return jsonify({"status": "error", "message": "请输入姓名"})
        if input_name in registered_names:
            return jsonify({"status": "error", "message": "此名字已被录入,请输入新的名字"})
        folder_path = create_face_folder(input_name)
        check_existing_faces()  # refresh the registered-name cache
        return jsonify({
            "status": "success",
            "message": f"创建文件夹成功: {folder_path}",
            "folder_path": folder_path
        })
    elif action == 'start_camera':
        if start_face_camera():
            return jsonify({
                "status": "success",
                "message": "摄像头已启动",
                "total_faces": check_existing_faces()
            })
        return jsonify({"status": "error", "message": "无法启动摄像头"})
    elif action == 'stop_camera':
        release_face_camera()
        return jsonify({"status": "success", "message": "摄像头已关闭"})
    elif action == 'save_face':
        if not current_face_dir:
            return jsonify({"status": "error", "message": "请先创建人脸文件夹"})
        if face_camera is None:
            return jsonify({"status": "error", "message": "请先启动摄像头"})
        # Grab the current frame and detect exactly one face in it.
        success, frame = face_camera.read()
        if not success:
            return jsonify({"status": "error", "message": "无法获取当前帧"})
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = detector(gray)
        if len(faces) == 0:
            return jsonify({"status": "error", "message": "未检测到人脸"})
        elif len(faces) > 1:
            return jsonify({"status": "error", "message": "检测到多个人脸,请确保画面中只有一个人"})
        face = faces[0]
        face_rect = (face.left(), face.top(), face.width(), face.height())
        save_face_image(frame, face_rect, current_face_dir, ss_cnt)
        ss_cnt += 1
        return jsonify({
            "status": "success",
            "message": f"已保存第 {ss_cnt} 张人脸照片",
            "current_count": ss_cnt
        })
    elif action == 'query_faces':
        # List the names of everyone saved on disk.
        check_existing_faces()
        return jsonify({
            "status": "success",
            "names": registered_names
        })
    return jsonify({"status": "error", "message": "未知操作"})
# Enrollment video-stream route.
@app.route('/face-video-feed')
def face_video_feed():
    """MJPEG stream of the enrollment camera with detection overlay."""
    stream = generate_face_frames()
    return Response(stream, mimetype='multipart/x-mixed-replace; boundary=frame')
# -------------------- Feature-extraction routes --------------------
def run_face_reco_standalone():
    """Launch the feature-extraction script as a child process.

    Fix: use the interpreter running this server (sys.executable) instead of a
    bare 'python', which may resolve to a different/nonexistent interpreter.
    """
    import sys  # local import keeps the module's import block untouched
    subprocess.Popen([sys.executable, 'features_extraction_to_csv.py'])
@app.route('/start-face-recognition', methods=['POST'])
def start_face_recognition():
    """Kick off feature extraction in a background thread and return immediately."""
    # daemon=True: don't block interpreter shutdown on this short-lived launcher
    thread = threading.Thread(target=run_face_reco_standalone, daemon=True)
    thread.start()
    return jsonify({
        'status': 'started',
        'message': '人脸特征正在导入数据库'
    })
if __name__ == '__main__':
    # Prepare the photo root and the registered-name cache before serving.
    pre_work_mkdir()
    check_existing_faces()
    # threaded=True: the MJPEG streaming endpoints hold a connection open per client
    app.run(port=5000, debug=True, threaded=True)
import dlib
import numpy as np
import cv2
import pandas as pd
import os
import time
from datetime import datetime
class Face_Recognizer:
    """Webcam sign-in recognizer.

    Loads 128-D face descriptors from a CSV, streams annotated webcam frames as
    MJPEG, and records the first time each matched person is seen.
    """
    def __init__(self):
        # Dlib models: frontal detector, 68-landmark predictor, ResNet descriptor
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
        self.face_rec = dlib.face_recognition_model_v1("dlib_face_recognition_resnet_model_v1.dat")
        # Known-face feature database (filled by load_face_database)
        self.features_known_list = []
        self.name_known_list = []
        self.load_face_database()
        # Default webcam
        self.cap = cv2.VideoCapture(0)
        # Sign-in bookkeeping: names seen, and first-seen timestamp per name
        self.signed_in_names = []
        self.sign_in_time = {}
        # Euclidean distance below this counts as a match
        self.threshold = 0.45
        # Detection throttling
        self.frame_count = 0
        self.detection_interval = 5  # detect faces only every 5 frames
    def load_face_database(self):
        """Load one 128-D feature row per person from features_all.csv, if present."""
        if os.path.exists(r"D:\github\facecsv\features_all.csv"):
            csv_rd = pd.read_csv(r"D:\github\facecsv\features_all.csv", header=None)
            for i in range(csv_rd.shape[0]):
                features_someone_arr = []
                for j in range(0, 128):
                    # NOTE(review): pandas parses missing cells as float NaN, which
                    # never equals the string 'nan' — this branch likely never fires;
                    # consider pd.isna(csv_rd.iloc[i][j]) instead.
                    if csv_rd.iloc[i][j] == 'nan':
                        features_someone_arr.append(0)
                    else:
                        features_someone_arr.append(csv_rd.iloc[i][j])
                self.features_known_list.append(features_someone_arr)
                # NOTE(review): names are synthesized as person_<row+1>, not read from
                # the CSV — verify this matches the enrollment folder naming.
                self.name_known_list.append("person_" + str(i + 1))
            print("Faces in Database:", len(self.features_known_list))
    def get_face_features(self, frame, face_rect):
        """Return the 128-D descriptor for one detected face (as a plain list)."""
        shape = self.predictor(frame, face_rect)
        face_descriptor = self.face_rec.compute_face_descriptor(frame, shape)
        return list(face_descriptor)
    def compare_faces(self, features_unknown, features_known_list):
        """Return (index, distance) of the closest known face by Euclidean distance."""
        distances = []
        for features_known in features_known_list:
            distance = np.linalg.norm(np.array(features_unknown) - np.array(features_known))
            distances.append(distance)
        min_distance = min(distances)
        min_distance_index = distances.index(min_distance)
        return min_distance_index, min_distance
    def generate_frames(self):
        """Yield multipart JPEG frames; records sign-ins as a side effect."""
        while True:
            ret, frame = self.cap.read()
            if not ret:
                break
            # dlib expects RGB; OpenCV captures BGR
            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # Throttle detection: pass most frames through without processing
            self.frame_count += 1
            if self.frame_count % self.detection_interval != 0:
                ret, buffer = cv2.imencode('.jpg', frame)
                frame_bytes = buffer.tobytes()
                yield (b'--frame\r\n'
                       b'Content-Type: image/jpeg\r\n\r\n' + frame_bytes + b'\r\n')
                continue
            # Detect and annotate faces on this frame
            faces = self.detector(rgb_frame, 0)
            for face in faces:
                x1 = face.left()
                y1 = face.top()
                x2 = face.right()
                y2 = face.bottom()
                features_unknown = self.get_face_features(rgb_frame, face)
                if len(self.features_known_list) > 0:
                    min_distance_index, min_distance = self.compare_faces(features_unknown, self.features_known_list)
                    cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                    if min_distance < self.threshold:
                        person_name = self.name_known_list[min_distance_index]
                        cv2.putText(frame, person_name, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
                        # Record only the first sighting of each person
                        if person_name not in self.signed_in_names:
                            self.signed_in_names.append(person_name)
                            self.sign_in_time[person_name] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                            print(f"{person_name} 签到成功,时间:{self.sign_in_time[person_name]}")
                    else:
                        # No known face within the threshold
                        cv2.putText(frame, "Unknown", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
            ret, buffer = cv2.imencode('.jpg', frame)
            frame_bytes = buffer.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame_bytes + b'\r\n')
    def __del__(self):
        """Release the webcam when the recognizer is destroyed."""
        if hasattr(self, 'cap') and self.cap.isOpened():
            self.cap.release()