import cv2
import face_recognition
import numpy as np
from PIL import ImageFont, ImageDraw, Image
class FaceRecognition(object):
    """Webcam face recognition with a blink-based liveness check.

    Known faces are registered with ``gel_konw_face_encodings`` (method name
    kept, typo and all, for backward compatibility with existing callers);
    ``test`` then runs a live recognition loop against the default camera.
    """

    # Eye aspect ratio (EAR) below this value => eye considered closed.
    EAR_THRESH = 0.15
    # A blink is counted when the eye stayed closed for a consecutive
    # frame count within [EAR_CONSEC_FRAMES_MIN, EAR_CONSEC_FRAMES_MAX].
    EAR_CONSEC_FRAMES_MIN = 1
    EAR_CONSEC_FRAMES_MAX = 3

    def __init__(self):
        # Per-instance mutable state. Previously these were class attributes,
        # which would have been shared across all instances.
        self.known_face_encodings = []  # 128-d encodings of registered faces
        self.known_face_names = []      # names parallel to the encodings
        self.blink_counter = 0          # current streak of "eye closed" frames
        self.blink_total = 0            # total blinks observed so far

    def gel_konw_face_encodings(self):
        """
        Load the reference photo and register its 128-d face encoding.

        :raises ValueError: if no face is detected in the reference image.
        :return: None
        """
        face_locations, face_encodings = self.get_face_info(file_path="小明.png")
        if not face_encodings:
            raise ValueError("未检测到人脸")
        self.known_face_encodings = [face_encodings[0]]
        self.known_face_names = ["小明"]

    def get_face_info(self, img_ndarry=None, file_path="", only_first_face=True):
        """
        Detect faces in an image and compute their 128-d encodings.

        :param img_ndarry: BGR image array (e.g. a cv2.VideoCapture frame);
            ignored when ``file_path`` is given.
        :param file_path: if non-empty, load the image from this path
            (bug fix: the path was previously hard-coded).
        :param only_first_face: keep only the first detected face.
        :return: (face_locations, face_encodings) with locations scaled back
            to original-image coordinates, or ([], []) when no face is found.
        """
        if file_path:
            # load_image_file already yields an RGB array.
            rgb_image = face_recognition.load_image_file(file_path)
        else:
            # cv2 frames are BGR, but face_recognition expects RGB.
            rgb_image = cv2.cvtColor(img_ndarry, cv2.COLOR_BGR2RGB)
        # Shrink to a quarter of the original size to speed up detection.
        small_frame = cv2.resize(rgb_image, (0, 0), fx=0.25, fy=0.25)
        small_face_locations = face_recognition.face_locations(small_frame)
        if not small_face_locations:
            return [], []
        if only_first_face:
            small_face_locations = small_face_locations[:1]
        # Scale detected boxes back up to original-image coordinates.
        face_locations = [
            (top * 4, right * 4, bottom * 4, left * 4)
            for (top, right, bottom, left) in small_face_locations
        ]
        face_encodings = face_recognition.face_encodings(small_frame, small_face_locations)
        return face_locations, face_encodings

    def compare_faces(self, known_face_encodings, unkonwn_face_encoding):
        """
        Compare one unknown encoding against the known encodings.

        :return: the matching known name, or "unknown" when nothing matches.
        """
        results = face_recognition.compare_faces(
            known_face_encodings, unkonwn_face_encoding, tolerance=0.4)
        for index, matched in enumerate(results):
            if matched:
                return self.known_face_names[index]
        return "unknown"  # fixed typo: was "unkonwn"

    def cv2_img_add_text(self, img, text, left, top, textColor=(0, 255, 0), textSize=20):
        """
        Draw text (incl. CJK) on an OpenCV image via PIL, since cv2.putText
        cannot render Chinese, then convert back to OpenCV BGR format.

        :param img: OpenCV BGR image (np.ndarray) or PIL image.
        :param text: text to draw.
        :param left: x coordinate of the text origin.
        :param top: y coordinate of the text origin.
        :param textColor: RGB text color.
        :param textSize: font size in points.
        :return: OpenCV BGR image with the text rendered.
        """
        if isinstance(img, np.ndarray):  # OpenCV image -> PIL image
            img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        draw = ImageDraw.Draw(img)
        # Font must provide CJK glyphs; path is relative to the working dir.
        fontStyle = ImageFont.truetype(
            "font/Alimama_ShuHeiTi_Bold.ttf", textSize, encoding="utf-8")
        draw.text((left, top), text, textColor, font=fontStyle)
        # Convert back to OpenCV BGR format.
        return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)

    def eye_aspect_ratio(self, eye):
        """
        Compute the eye aspect ratio (EAR) from 6 landmark points.

        EAR = (|e1-e5| + |e2-e4|) / (2 * |e0-e3|)

        :param eye: array of 6 (x, y) landmark points.
        :return: the EAR as a float.
        """
        A = np.linalg.norm(eye[1] - eye[5])
        B = np.linalg.norm(eye[2] - eye[4])
        C = np.linalg.norm(eye[0] - eye[3])
        return (A + B) / (2.0 * C)

    def biopsy_by_blink(self, img_ndarry, face_location, check_eye=True):
        """
        Liveness check: update blink counters from the eye aspect ratio.

        :param img_ndarry: BGR frame containing the face.
        :param face_location: (top, right, bottom, left) in frame coordinates.
        :param check_eye: perform the EAR blink check when True.
        :return: None (state is accumulated in blink_counter / blink_total)
        """
        # cv2 frames are BGR; convert for face_recognition (expects RGB).
        rgb_frame = cv2.cvtColor(img_ndarry, cv2.COLOR_BGR2RGB)
        landmarks_list = face_recognition.face_landmarks(rgb_frame, [face_location])
        if not landmarks_list:
            # No landmarks found; previously this raised IndexError.
            return
        landmarks = landmarks_list[0]
        if not check_eye:
            return
        left_eye = np.array(landmarks['left_eye'])
        right_eye = np.array(landmarks['right_eye'])
        # Average the EAR of both eyes.
        ear = (self.eye_aspect_ratio(left_eye) + self.eye_aspect_ratio(right_eye)) / 2.0
        if ear < self.EAR_THRESH:
            # EAR below threshold: possibly mid-blink, extend the closed streak.
            self.blink_counter += 1
        else:
            # EAR back above threshold: a closed streak of a plausible length
            # counts as one blink.
            if self.EAR_CONSEC_FRAMES_MIN <= self.blink_counter <= self.EAR_CONSEC_FRAMES_MAX:
                self.blink_total += 1
            self.blink_counter = 0

    def test(self):
        """
        Run the live recognition loop on the default camera; press 'q' to quit.

        :return: None
        """
        capture = cv2.VideoCapture(0)
        try:
            while True:
                ret, frame = capture.read()
                if not ret:
                    print("未检测到摄像头")
                    break  # was exit(0), which skipped releasing the capture
                face_locations, face_encodings = self.get_face_info(img_ndarry=frame)
                if face_locations:
                    # Process every detected face.
                    for index, face_encoding in enumerate(face_encodings):
                        # Match against known faces to get a display name.
                        name = self.compare_faces(self.known_face_encodings, face_encoding)
                        # Liveness check: accumulate blinks.
                        self.biopsy_by_blink(frame, face_locations[index])
                        if self.blink_total > 1:
                            name += "检测到眨眼"
                        # Draw the face bounding box.
                        top, right, bottom, left = face_locations[index]
                        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                        # Draw the person's name (supports Chinese).
                        frame = self.cv2_img_add_text(frame, name, left + 6, bottom - 6)
                else:
                    print("未检测到人脸")
                # Show/poll on every frame; previously both were inside the
                # face branch, so the window froze and 'q' was ignored
                # whenever no face was visible.
                cv2.imshow('Video', frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
        finally:
            capture.release()
            cv2.destroyAllWindows()  # was capture.destroyAllWindows() (AttributeError)
if __name__ == "__main__":
    # Register the known face, then start the live camera loop.
    recognizer = FaceRecognition()
    recognizer.gel_konw_face_encodings()
    recognizer.test()