Without further ado, here is the code:
import time
import numpy as np
import face_recognition
import cv2
# import mediapipe as mp
video_capture = cv2.VideoCapture(0)
video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
haveface = 0
face_loc_hog = []
face_loc_cnn = []
dnn_proto_text = 'models/deploy.prototxt'
dnn_model = 'models/res10_300x300_ssd_iter_140000.caffemodel'
face_net = cv2.dnn.readNetFromCaffe(dnn_proto_text, dnn_model)
face_detection = cv2.CascadeClassifier('models/haarcascade_frontalface_default.xml')
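# The prototxt/caffemodel pair above is OpenCV's ResNet-10 SSD face detector
# (found under samples/dnn/face_detector in the OpenCV repo); the paths assume
# the files were copied into a local models/ folder.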
# mp_face_mesh = mp.solutions.face_mesh
# model = mp_face_mesh.FaceMesh(
#     static_image_mode=True,        # True: static images / False: live camera stream
#     refine_landmarks=True,         # use the Attention Mesh model
#     max_num_faces=40,
#     min_detection_confidence=0.2,  # detection confidence threshold; closer to 1 is stricter
#     min_tracking_confidence=0.5,   # tracking threshold
# )
# # Import the visualization helpers and drawing styles
# mp_drawing = mp.solutions.drawing_utils
# # mp_drawing_styles = mp.solutions.drawing_styles
# draw_spec = mp_drawing.DrawingSpec(thickness=2, circle_radius=1, color=[223, 155, 6])
# mpFace_dec = mp.solutions.face_detection
# mpdraw = mp.solutions.drawing_utils
# face_dec = mpFace_dec.FaceDetection(0.75)
while True:
    ret, frame = video_capture.read()
    if not ret:  # camera read failed
        break
    frameClone = frame.copy()  # work on a copy of the frame
    haveface = 0

    time_start = time.time()
    # Haar cascade classifier
    gray = cv2.cvtColor(frameClone, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    face_haar = face_detection.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=3, minSize=(72, 72),
                                                flags=cv2.CASCADE_DO_CANNY_PRUNING)
    for (x, y, w_box, h_box) in face_haar:  # detectMultiScale returns (x, y, w, h)
        cv2.rectangle(frame, (x, y), (x + w_box, y + h_box), (255, 0, 255), 1)
        cv2.putText(frame, "haar", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 255), 1)
        haveface += 1
    time_end = time.time()
    print('haar: ' + str(round((time_end - time_start), 3)) + ' s')

    time_start = time.time()
    # HOG detector (face_recognition expects an RGB image)
    rgb_frame = cv2.cvtColor(frameClone, cv2.COLOR_BGR2RGB)
    face_loc_hog = face_recognition.face_locations(rgb_frame, model="hog")
    for face_hog in face_loc_hog:
        # draw the detected face box
        top, right, bottom, left = face_hog
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 1)
        cv2.putText(frame, "hog", (left, top), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1)
        haveface += 1
    time_end = time.time()
    print('Hog: ' + str(round((time_end - time_start), 3)) + ' s')

    time_start = time.time()
    # CNN-based detection; whether it uses the GPU depends on the local dlib build
    face_loc_cnn = face_recognition.face_locations(rgb_frame, number_of_times_to_upsample=1, model="cnn")
    for face_cnn in face_loc_cnn:
        # draw the detected face box
        top, right, bottom, left = face_cnn
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 1)
        cv2.putText(frame, "cnn", (left, top), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1)
        haveface += 1
    time_end = time.time()
    print('cnn: ' + str(round((time_end - time_start), 3)) + ' s')

    time_start = time.time()
    # Caffe SSD model via OpenCV DNN
    face_net.setInput(cv2.dnn.blobFromImage(frameClone, 1.0, (300, 300), (104.0, 177.0, 123.0), False, False))
    detections = face_net.forward()
    (h, w) = frameClone.shape[:2]
    for i in range(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > 0.5:
            print(confidence)
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            y = startY - 10 if startY - 10 > 10 else startY + 10
            cv2.rectangle(frame, (startX, startY), (endX, endY), (255, 0, 0), 1)
            cv2.putText(frame, "caffe", (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1)
            haveface += 1
    time_end = time.time()
    print('caffe: ' + str(round((time_end - time_start), 3)) + ' s')

    # time_start = time.time()
    # # MediaPipe-based detection
    # img_rgb = cv2.cvtColor(frameClone, code=cv2.COLOR_BGR2RGB)
    # Results = face_dec.process(img_rgb)
    # if Results.detections:
    #     for id, detection in enumerate(Results.detections):
    #         bboxC = detection.location_data.relative_bounding_box
    #         h, w, c = frameClone.shape
    #         bbox = int(bboxC.xmin * w), int(bboxC.ymin * h), int(bboxC.width * w), int(bboxC.height * h)
    #         cv2.rectangle(frame, bbox, (0, 255, 255), 1)
    #         cv2.putText(frame, 'media', (bbox[0], bbox[1] - 20), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 255), 1)
    # time_end = time.time()
    # print('mp: ' + str(round((time_end - time_start), 3)) + ' s')

    if haveface > 0:
        cv2.imwrite('testimg1/' + str(time.time()) + '.jpg', frame)
    frame1 = frame.copy()
    cv2.imshow('Video', frame1)
    # Hit 'q' on the keyboard to quit (waitKey also lets the window refresh)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    time.sleep(0.5)
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
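One thing worth noting before looking at the timings: the "cnn" model in face_recognition is dlib's CNN face detector, and it only runs on the GPU if dlib itself was compiled with CUDA; otherwise it silently falls back to the CPU. A minimal check (this only relies on dlib's DLIB_USE_CUDA flag; nothing here is specific to the script above):

import dlib

# True only if dlib was built with CUDA support; if False, the "cnn" model
# in face_recognition runs on the CPU and is very slow per frame.
print(dlib.DLIB_USE_CUDA)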
Finally, the timing log from a run:
haar: 0.375 s
Hog: 0.36 s
cnn: 20.713 s
caffe: 0.074 s
haar: 0.017 s
Hog: 0.207 s
cnn: 20.613 s
caffe: 0.042 s
haar: 0.009 s
Hog: 0.162 s
cnn: 20.798 s
0.987467
caffe: 0.019 s
haar: 0.009 s
Hog: 0.162 s
cnn: 20.813 s
caffe: 0.022 s
haar: 0.009 s
Hog: 0.164 s
cnn: 20.604 s
caffe: 0.018 s
haar: 0.009 s
Hog: 0.162 s
cnn: 20.561 s
caffe: 0.018 s
haar: 0.011 s
Hog: 0.161 s
cnn: 20.53 s
caffe: 0.02 s
haar: 0.009 s
Hog: 0.161 s
cnn: 21.342 s
caffe: 0.018 s
haar: 0.009 s
Hog: 0.165 s
cnn: 21.341 s
caffe: 0.024 s
haar: 0.01 s
Hog: 0.163 s
cnn: 20.695 s
caffe: 0.021 s
haar: 0.012 s
Hog: 0.162 s
cnn: 20.837 s
0.99937433
caffe: 0.021 s
haar: 0.011 s
Hog: 0.167 s
cnn: 21.428 s
0.9820749
caffe: 0.02 s
haar: 0.012 s
Hog: 0.163 s
cnn: 20.721 s
0.98545885
caffe: 0.02 s
haar: 0.012 s
Hog: 0.161 s
cnn: 21.175 s
0.9658015
caffe: 0.02 s
haar: 0.097 s
Hog: 0.16 s
cnn: 21.006 s
0.9999292
caffe: 0.021 s
haar: 0.009 s
Hog: 0.264 s
cnn: 20.752 s
0.9957392
caffe: 0.02 s
haar: 0.01 s
Hog: 0.165 s
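As the log shows, the dlib CNN detector needs roughly 20 s per frame here (which is what a CPU-only dlib build typically looks like), while Haar, HOG and the Caffe SSD detector all stay well under a second; the stray decimals such as 0.99937433 are the Caffe confidences emitted by print(confidence). If HOG or CNN detection has to run on a CPU, a common workaround (not part of the script above) is to detect on a downscaled copy of the frame and map the boxes back up. A rough sketch, reusing frameClone from the loop above and assuming a 1/4 scale factor:

# Detect on a quarter-size copy, then map the boxes back to full resolution.
small = cv2.resize(frameClone, (0, 0), fx=0.25, fy=0.25)
small_rgb = cv2.cvtColor(small, cv2.COLOR_BGR2RGB)
locations = face_recognition.face_locations(small_rgb, model="hog")
locations = [(top * 4, right * 4, bottom * 4, left * 4)
             for (top, right, bottom, left) in locations]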