The code of 人脸身份实时训练识别.ipynb (real-time face identity training and recognition) is as follows:

1. Collect face data

2. Train the model

3. Recognize face identities in real time

1. Collect face data for training. If you do not want to train, you can run step 3 (real-time face identity recognition from the camera) directly.

import os
import servo_control  # camera reset (pan/tilt servo helper)

servo_control.servo_move(0, 90, 1, 60)  # the 60 here is the camera tilt angle, 0 (top) to 180 (bottom); change this last parameter to adjust the angle
os.system('./killmjpg.sh')  # kill the mjpg process that interferes with the camera
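Before opening the camera in the next cell, it can help to confirm that the camera node actually exists. This is an optional sketch, assuming the USB camera registers as /dev/video0 (matching capture_device=0 used below); adjust the path if your device index differs.

import os

# Assumed device path: capture_device=0 normally corresponds to /dev/video0.
if not os.path.exists('/dev/video0'):
    print('No camera found at /dev/video0 - check the USB connection')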

Connect the camera

import traitlets
import ipywidgets.widgets as widgets
from IPython.display import display
#from jetbot import Camera, bgr8_to_jpeg
from jetcam.usb_camera import USBCamera
from jetcam.utils import bgr8_to_jpeg

#camera = Camera.instance(width=224, height=224)
camera = USBCamera(capture_device=0,width=320, height=240)
camera.running=True
print("camera created")

image = widgets.Image(format='jpeg', width=320, height=240)  # this width and height do not have to match the camera's

camera_link = traitlets.dlink((camera, 'value'), (image, 'value'), transform=bgr8_to_jpeg)

display(image)

Create the data-collection button and bind the photo-capture function to it (one photo per person is enough, and file names must not repeat). Note: make the photos as clear as possible, otherwise the recognition rate will be very low (an optional sanity-check sketch follows the capture code below).

Click the add image button to collect one face image; to collect the next one, run the code cell below again.

button_layout = widgets.Layout(width='128px', height='64px')
add_button = widgets.Button(description='add image', button_style='success', layout=button_layout)

img_name = input('Enter your English name: ')

display(image)
display(add_button)

def save_img():
    if img_name:
        image_path = os.path.join('./known', img_name + '.jpg')
        with open(image_path, 'wb') as f:
            f.write(image.value)

add_button.on_click(lambda x:save_img())
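As noted above, blurry or off-center photos drag the recognition rate down. The following optional sanity check (a sketch that reuses img_name from the cell above and the face_recognition library from step 2) verifies that exactly one face is detectable in the photo you just saved:

import os
import face_recognition

# Load the image written by the add image button and count detectable faces.
check_path = os.path.join('./known', img_name + '.jpg')
img = face_recognition.load_image_file(check_path)
faces = face_recognition.face_locations(img)
if len(faces) == 1:
    print('OK: one face detected in', check_path)
else:
    print('Warning:', len(faces), 'faces detected in', check_path, '- retake the photo')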

2. Train the face recognition model (using the face_recognition library)

Generate the trained model

import face_recognition
import cv2
import os
import pickle
print(cv2.__version__)

Encodings=[]
Names=[]
image_dir='./known'
for root,dirs,files in os.walk(image_dir):
    print(files)
    for file in files:
        path = os.path.join(root,file)
        print(path)
        # the photo's file name (without extension) is used as the person's name
        name=os.path.splitext(file)[0]
        print(name)
        person=face_recognition.load_image_file(path)
        # take the first face found in the photo (see the no-face-safe variant below)
        encoding=face_recognition.face_encodings(person)[0]
        Encodings.append(encoding)
        Names.append(name)

with open('./trains/train.pkl1','wb') as f:
    pickle.dump(Names,f)
    pickle.dump(Encodings,f)

print('train success')
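face_encodings returns an empty list when it cannot find a face in a photo, so the [0] index in the loop above raises IndexError for a bad image. A hedged variant of the same loop that skips such files instead (run it in place of the loop above, before the pickle.dump calls):

# Sketch: reset the lists, then skip photos with no detectable face.
Encodings=[]
Names=[]
for root,dirs,files in os.walk(image_dir):
    for file in files:
        path = os.path.join(root,file)
        person = face_recognition.load_image_file(path)
        encodings = face_recognition.face_encodings(person)
        if not encodings:
            print('no face found in', path, '- skipped')
            continue
        Encodings.append(encodings[0])
        Names.append(os.path.splitext(file)[0])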

3. Recognize face identities through the camera in real time

import face_recognition
import cv2
import os
import pickle
import time
print(cv2.__version__)
fpsReport=0
scaleFactor=.25
# create the display widget
from jetcam.utils import bgr8_to_jpeg
import traitlets
import ipywidgets.widgets as widgets
from IPython.display import display
face_imge5 = widgets.Image(format='jpeg')#, width=320, height=240)
#display(face_imge5)
Encodings=[]
Names=[]

with open('./trains/train.pkl1','rb') as f:
    Names=pickle.load(f)
    Encodings=pickle.load(f)

font=cv2.FONT_HERSHEY_SIMPLEX
'''
dispW=640
dispH=480
flip=4
camSet='nvarguscamerasrc !  video/x-raw(memory:NVMM), width=3264, height=2464, format=NV12, framerate=21/1 ! nvvidconv flip-method='+str(flip)+' ! video/x-raw, width='+str(dispW)+', height='+str(dispH)+', format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink'
cam= cv2.VideoCapture(camSet)  
'''
# Note: opening the camera here may fail because it was already opened in step 1 and cannot be opened twice; if that happens, restart the kernel and run only step 3.
cam = cv2.VideoCapture(0)

timeStamp=time.time()

Note: the stream may stall briefly when the first face is recognized.

display(face_imge5)

try:
    while True:
        ret,frame = cam.read()
        frameSmall=cv2.resize(frame,(0,0),fx=scaleFactor,fy=scaleFactor)
        frameRGB=cv2.cvtColor(frameSmall,cv2.COLOR_BGR2RGB)
        facePositions=face_recognition.face_locations(frameRGB,model='cnn')

        allEncodings=face_recognition.face_encodings(frameRGB,facePositions)

        for (top,right,bottom,left),face_encoding in zip(facePositions,allEncodings):
            name='Unknown Person'
            matches=face_recognition.compare_faces(Encodings,face_encoding)
            if True in matches:
                first_match_index=matches.index(True)
                name=Names[first_match_index]

            top=int(top/scaleFactor)
            right=int(right/scaleFactor)
            bottom=int(bottom/scaleFactor)
            left=int(left/scaleFactor)

            cv2.rectangle(frame,(left,top),(right,bottom),(0,0,255),2)
            cv2.putText(frame,name,(left,top-6),font,.75,(0,0,255),2)

        dt = time.time()-timeStamp
        fps=1/dt
        fpsReport=.90*fpsReport + .1*fps
        #print('fps is:',round(fpsReport))
        timeStamp=time.time()
        cv2.rectangle(frame,(0,0),(100,40),(0,0,255),-1)
        cv2.putText(frame,str(round(fpsReport,1)) + 'fps',(0,25),font,.75,(0,255,255),2)
        face_imge5.value = bgr8_to_jpeg(frame)
except KeyboardInterrupt:
    cam.release()
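compare_faces above takes the first encoding that matches, which can mislabel people who look alike once ./known contains several faces. A sketch of choosing the closest known face instead, using face_recognition.face_distance (0.6 is the library's usual default tolerance; the helper name best_match is made up here):

import numpy as np

def best_match(known_encodings, known_names, face_encoding, tolerance=0.6):
    # distance to every known face; smaller means more similar
    distances = face_recognition.face_distance(known_encodings, face_encoding)
    if len(distances) == 0:
        return 'Unknown Person'
    best = int(np.argmin(distances))
    return known_names[best] if distances[best] <= tolerance else 'Unknown Person'

# inside the recognition loop: name = best_match(Encodings, Names, face_encoding)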

servo_control.py (the camera pan/tilt helper imported in step 1):

import time
import Adafruit_PCA9685

pwm = Adafruit_PCA9685.PCA9685()
pwm.set_pwm_freq(50)

def servo_move(basechannel,basevalue,tiltchannel,tiltvalue):
    # map an angle of 0-180 to a pulse width of roughly 500-2480 us,
    # then to 12-bit ticks over the 20000 us (50 Hz) period: ticks = 4096 * pulse / 20000
    basevalue=4096*((basevalue*11)+500)/20000
    pwm.set_pwm(basechannel,0,int(basevalue))
    tiltvalue=4096*((tiltvalue*11)+500)/20000
    pwm.set_pwm(tiltchannel,0,int(tiltvalue))

pan = 90
tilt = 90

servo_move(0, pan, 1, tilt)

'''
if __name__ == '__main__':
    servo_move(0,30,1,30)
    time.sleep(3)
    for angle in range(180, 0, -5):
        servo_move(0, angle, 1, angle)
        time.sleep(0.5)

    servo_move(0, pan, 1, tilt)
'''
#print('end')
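As a rough check of the conversion inside servo_move (assuming a servo that expects roughly 500-2500 us pulses over a 20 ms period), the numbers work out as follows:

# Worked example of the angle-to-tick formula used above.
for angle in (0, 90, 180):
    pulse_us = angle * 11 + 500            # 500 us at 0 deg, 2480 us at 180 deg
    ticks = int(4096 * pulse_us / 20000)   # 12-bit ticks over a 20 ms (50 Hz) period
    print(angle, pulse_us, ticks)          # 0 500 102, 90 1490 305, 180 2480 507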

killmjpg.sh (kills the mjpg process that interferes with the camera; the command is run twice so that the first two matching processes are killed):

#!/bin/bash

sudo kill -9 `ps -elf|grep mjpg |awk '{print $4}'|awk 'NR==1'`

sudo kill -9 `ps -elf|grep mjpg |awk '{print $4}'|awk 'NR==1'`

Note: the collected face images are stored in the ./known folder, with the English name you entered as the file name;

the trained model (train.pkl1 in the code above) is stored in the ./trains folder.
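Both the capture cell and the training cell assume those two folders already exist; a minimal sketch to create them if they are missing:

import os

# Create the data and model folders used above if they do not exist yet.
for d in ('./known', './trains'):
    os.makedirs(d, exist_ok=True)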

posted on 2021-09-25 20:39 by 始终不够啊