Contents:

  1. Opening the camera

  2. Color detection

  3. QR code recognition

  4. Face recognition

  5. Pedestrian detection

  6. Car detection

  7. License plate detection

  8. Real-time object localization by color with OpenCV

  9. OpenCV single-line following

  10. Ultrasonic obstacle avoidance

  11. Real-time object recognition with audio playback

  12. Human body tracking

  13. Face identity recognition

  14. Detecting objects of interest in the camera feed

  15. Color following

  16. Face following

 

 

1. Opening the camera

import cv2

dispW=640
dispH=480
flip=4

# CSI camera (GStreamer pipeline)
camSet='nvarguscamerasrc !  video/x-raw(memory:NVMM), width=3264, height=2464, format=NV12, framerate=21/1 ! nvvidconv flip-method='+str(flip)+' ! video/x-raw, width='+str(dispW)+', height='+str(dispH)+', format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink'
cam = cv2.VideoCapture(camSet)

while True:
    ret, frame = cam.read()
    cv2.imshow('frame', frame)
    if cv2.waitKey(5) & 0xFF == 27:
        break

cam.release()
cv2.destroyAllWindows()
Opening the CSI camera
import cv2

# Stop the mjpg service first (ps -ef | grep mjpg); the camera device nodes live under /dev/video*
cap = cv2.VideoCapture(1)  # open the camera (max resolution 640x480)

cap.set(3,320)  # set frame width
cap.set(4,240)  # set frame height

while 1: # loop forever
    ret,frame = cap.read() # grab a frame; ret is True when a frame was read successfully
    # flip the image if needed
    #frame = cv2.flip(frame, -1)

    cv2.imshow('usb frame',frame) # show the result
    if cv2.waitKey(5) & 0xFF == 27: # exit on ESC (use any key you like)
        break

cap.release()
cv2.destroyAllWindows()
Opening the USB camera
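The GStreamer pipeline string above is long and easy to mistype. It can be built from named parameters instead; the helper below is a sketch, not part of the original scripts, with defaults that mirror the values used above, and the cv2.CAP_GSTREAMER hint simply makes the backend explicit.

import cv2

def gstreamer_pipeline(capture_width=3264, capture_height=2464,
                       display_width=640, display_height=480,
                       framerate=21, flip_method=4):
    # Builds the same nvarguscamerasrc pipeline used above from named parameters.
    return (
        'nvarguscamerasrc ! video/x-raw(memory:NVMM), '
        f'width={capture_width}, height={capture_height}, format=NV12, framerate={framerate}/1 ! '
        f'nvvidconv flip-method={flip_method} ! '
        f'video/x-raw, width={display_width}, height={display_height}, format=BGRx ! '
        'videoconvert ! video/x-raw, format=BGR ! appsink'
    )

cam = cv2.VideoCapture(gstreamer_pipeline(), cv2.CAP_GSTREAMER)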

2. Color detection

import cv2   # import libraries
import numpy as np
import time
import threading
import os
import servo_control
from pygame_test import play

# For background on HSV color ranges, see:
# https://baike.baidu.com/item/HSV/547122?fr=aladdin
# https://blog.csdn.net/leo_888/article/details/88284251

# run a script that kills processes which would block the camera
os.system('./killmjpg.sh')

# dictionary of colors to detect
color_dist = {
             'red':   {'Lower': np.array([156, 60, 60]), 'Upper': np.array([180, 255, 255])},
             'green': {'Lower': np.array([35, 43, 35]), 'Upper': np.array([90, 255, 255])},
             'blue':  {'Lower': np.array([110, 80, 80]), 'Upper': np.array([128, 255, 255])},
#             'gray':  {'Lower': np.array([0,0,30]),'Upper':np.array([255,40,80]) }
             }

color_count = [0,0,0]

flag = -1
#flag_bak = -1
running = True
# Before use, stop the mjpg process: ps -elf | grep mjpg to find the PID, then sudo kill -9 <PID>
# camera device nodes live under /dev/video*

img_w = 320
img_h = 240

cap = cv2.VideoCapture(0)  # open the camera (max resolution 640x480)
#cap = cv2.VideoCapture(1)
cap.set(3,img_w)  # set frame width
cap.set(4,img_h)  # set frame height

def music(flag):
    global running
    running = False
    if flag == 0:
        play('./audios/0016.wav')
        print('red')
    elif flag == 1:
        play('./audios/0017.wav')
        print('green')
    elif flag == 2:
        play('./audios/0018.wav')
        print('blue')
    running = True

#eEvent = threading.Event()
#th1 = threading.Thread(target=music,args=())


lastTime = time.time();

time.sleep(1)
# main loop
while 1: # loop forever

    # grab a frame from the camera
    ret,frame = cap.read()
    if running:
        for i in color_dist:
            # 1 - Gaussian blur to smooth the image
            frame = cv2.GaussianBlur(frame,(5,5),0)

            # 2 - convert to HSV for thresholding
            hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)

            # 3 - threshold with the range from the color dictionary
            mask = cv2.inRange(hsv, color_dist[i]['Lower'], color_dist[i]['Upper'])

            # 4 - erode to remove noise
            mask = cv2.erode(mask,None,iterations=2)

            # Gaussian blur the mask
            mask = cv2.GaussianBlur(mask,(3,3),0)

            # apply the mask to the frame
            res = cv2.bitwise_and(frame,frame,mask=mask)
            # 6 - contour detection
            cnts = cv2.findContours(mask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2]

            if len(cnts) >0 : # use the contours to locate the detected object

                cnt = max(cnts,key=cv2.contourArea)
                rect = cv2.minAreaRect(cnt)
                # get the 4 corners of the minimum-area bounding rect
                box = cv2.boxPoints(rect)

                # get center, size, and angle
                c_x, c_y = rect[0]
                c_h, c_w = rect[1]
                c_angle = rect[2]
                #print(c_x,c_y,c_h,c_w)
                # check the blob is block-sized and inside the target region
                if 25 < c_h < 75 and 25 < c_w < 75  and 50 < c_x < 270 and 60 < c_y < 180:
                    #print('---',c_x,c_y,c_h,c_w)
                    # draw the contour
                    cv2.drawContours(frame, [np.int0(box)], -1, (0, 255, 255), 2)

                    #print(time.time(), 'x=', int(c_x), 'y=', int(c_y), 'c_h=', int(c_h), 'c_w=', int(c_w), 'angle=', int(c_angle))
                    if(time.time() - lastTime > 0.1) :
                        lastTime = time.time()
                        index = -1
                        if i=='red':
                            index = 0
                        elif i=='green':
                            index = 1
                        elif i=='blue':
                            index = 2
#                       elif i == 'gray':
#                           index = 3

                        flag = index
                        if index >= 0:
                            color_count[index] += 1
                        if color_count[index] > 4:
                            color_count[0] = 0
                            color_count[1] = 0
                            color_count[2] = 0

                            th1 = threading.Thread(target=music,args=(flag,))
                            th1.daemon = True
                            th1.start()
            else:
                pass


    cv2.imshow('frame',frame) # show the result
    #cv2.imshow('mask',mask)
    #cv2.imshow('res',res)
    if cv2.waitKey(5) & 0xFF == 27: # exit on ESC
        break

cap.release()
cv2.destroyAllWindows() # standard cleanup whenever the camera is used
Color detection (local version)
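When adding a new entry to color_dist, the quickest way to pick the Lower/Upper bounds is to convert a sampled BGR color to HSV with the same cv2.cvtColor call the script already uses. A minimal sketch (illustration only, not part of the original scripts):

import cv2
import numpy as np

# A pure blue pixel in BGR; swap in any BGR value sampled from your camera.
bgr_sample = np.uint8([[[255, 0, 0]]])
hsv_sample = cv2.cvtColor(bgr_sample, cv2.COLOR_BGR2HSV)
print(hsv_sample)  # e.g. [[[120 255 255]]] -> center the Lower/Upper bounds around H=120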
import cv2   # import libraries
import numpy as np
import time
import threading
import os
import servo_control
from zhinengjiaohu import play

# create the display widget
from jetcam.utils import bgr8_to_jpeg
import traitlets
import ipywidgets.widgets as widgets
from IPython.display import display

disp = widgets.Image(format='jpeg', width=320, height=240)
display(disp)


# For background on HSV color ranges, see:
# https://baike.baidu.com/item/HSV/547122?fr=aladdin
# https://blog.csdn.net/leo_888/article/details/88284251

# run a script that kills processes which would block the camera
os.system('./killmjpg.sh')

# dictionary of colors to detect
color_dist = {
             'red':   {'Lower': np.array([156, 60, 60]), 'Upper': np.array([180, 255, 255])},
             'green': {'Lower': np.array([35, 43, 35]), 'Upper': np.array([90, 255, 255])},
             'blue':  {'Lower': np.array([110, 80, 80]), 'Upper': np.array([128, 255, 255])},
#             'gray':  {'Lower': np.array([0,0,30]),'Upper':np.array([255,40,80]) }
             }

color_count = [0,0,0]

flag = -1
#flag_bak = -1
running = True
# Before use, stop the mjpg process: ps -elf | grep mjpg to find the PID, then sudo kill -9 <PID>
# camera device nodes live under /dev/video*

img_w = 320
img_h = 240

cap = cv2.VideoCapture(0)  # open the camera (max resolution 640x480)
#cap = cv2.VideoCapture(1)
cap.set(3,img_w)  # set frame width
cap.set(4,img_h)  # set frame height

def music(flag):
    global running
    running = False
    if flag == 0:
        play('./audios/0016.wav')
        print('red')
    elif flag == 1:
        play('./audios/0017.wav')
        print('green')
    elif flag == 2:
        play('./audios/0018.wav')
        print('blue')
    running = True

#eEvent = threading.Event()
#th1 = threading.Thread(target=music,args=())


lastTime = time.time();

time.sleep(1)
# main loop
while 1: # loop forever

    # grab a frame from the camera
    ret,frame = cap.read()
    if running:
        for i in color_dist:
            # 1 - Gaussian blur to smooth the image
            frame = cv2.GaussianBlur(frame,(5,5),0)

            # 2 - convert to HSV for thresholding
            hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)

            # 3 - threshold with the range from the color dictionary
            mask = cv2.inRange(hsv, color_dist[i]['Lower'], color_dist[i]['Upper'])

            # 4 - erode to remove noise
            mask = cv2.erode(mask,None,iterations=2)

            # Gaussian blur the mask
            mask = cv2.GaussianBlur(mask,(3,3),0)

            # apply the mask to the frame
            res = cv2.bitwise_and(frame,frame,mask=mask)
            # 6 - contour detection
            cnts = cv2.findContours(mask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2]

            if len(cnts) >0 : # use the contours to locate the detected object

                cnt = max(cnts,key=cv2.contourArea)
                rect = cv2.minAreaRect(cnt)
                # get the 4 corners of the minimum-area bounding rect
                box = cv2.boxPoints(rect)

                # get center, size, and angle
                c_x, c_y = rect[0]
                c_h, c_w = rect[1]
                c_angle = rect[2]
                #print(c_x,c_y,c_h,c_w)
                # check the blob is block-sized and inside the target region
                if 25 < c_h < 75 and 25 < c_w < 75  and 50 < c_x < 270 and 60 < c_y < 180:
                    #print('---',c_x,c_y,c_h,c_w)
                    # draw the contour
                    cv2.drawContours(frame, [np.int0(box)], -1, (0, 255, 255), 2)

                    #print(time.time(), 'x=', int(c_x), 'y=', int(c_y), 'c_h=', int(c_h), 'c_w=', int(c_w), 'angle=', int(c_angle))
                    if(time.time() - lastTime > 0.1) :
                        lastTime = time.time()
                        index = -1
                        if i=='red':
                            index = 0
                        elif i=='green':
                            index = 1
                        elif i=='blue':
                            index = 2
#                       elif i == 'gray':
#                           index = 3

                        flag = index
                        if index >= 0:
                            color_count[index] += 1
                        if color_count[index] > 4:
                            color_count[0] = 0
                            color_count[1] = 0
                            color_count[2] = 0

                            th1 = threading.Thread(target=music,args=(flag,))
                            th1.daemon = True
                            th1.start()
            else:
                pass

    disp.value = bgr8_to_jpeg(frame)
    #cv2.imshow('frame',frame) # show the result
    #if cv2.waitKey(5) & 0xFF == 27: # exit on ESC
        #break

cap.release()
#cv2.destroyAllWindows() # standard cleanup whenever the camera is used
Color detection (Jupyter version)
import pygame
import time

pygame.mixer.init()

def play(filename):
    t1 = time.time()
    pygame.mixer.music.load(filename)
    pygame.mixer.music.play()

    while pygame.mixer.music.get_busy():  # wait while audio is playing
        pass
    #time.sleep(5)
    t2 = time.time()
    t = t2 - t1
    print('t', t)
    print('end')

if __name__ == '__main__':
    play('./audios/0018.wav')
pygame_test.py

3. QR code recognition

# import the required libraries
import os
import cv2
import numpy as np
import sys
import time
import servo_control

# run a script that kills processes which would block the camera
os.system('./killmjpg.sh')
time.sleep(0.1)

data_old = ''
data_new = ''

cam = cv2.VideoCapture(0)
#cam = cv2.VideoCapture(1)

# real-time QR code recognition from the camera
def Video_display():
    global data_old,data_new
    qrCodeDetector = cv2.QRCodeDetector()
    while 1:
        ret, frame = cam.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 
        t = time.time()
        data, points, _ = qrCodeDetector.detectAndDecode(gray)
        #print("Time Taken for Detect and Decode : {:.3f} seconds".format(time.time() - t))     
        if points is not None:
            #print('p', tuple(points[0][0]))
            nrOfPoints = len(points)
            for i in range(nrOfPoints):
                nextPointIndex = (i+1) % nrOfPoints
                cv2.line(frame, tuple(points[i][0]), tuple(points[nextPointIndex][0]), (255,0,0), 5)
        if data:
            data_new = data
            if data_old != data_new:
                print('data: ', data)
                data_old = data_new
            cv2.putText(frame, data, tuple(points[0][0]), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,0), 2) 
        cv2.imshow('frame: ', frame)
        if cv2.waitKey(5) & 0xFF == 27:
            break

if __name__ == '__main__':
    try:
        Video_display()
    except KeyboardInterrupt:
        cam.release()
        cv2.destroyAllWindows()
QR code recognition (local version)
# import the required libraries
import os
import cv2
import numpy as np
import sys
import time
import servo_control

# create the display widget
from jetcam.utils import bgr8_to_jpeg
import traitlets
import ipywidgets.widgets as widgets
from IPython.display import display

disp = widgets.Image(format='jpeg', width=320, height=240)
display(disp)

# run a script that kills processes which would block the camera
os.system('./killmjpg.sh')
time.sleep(0.1)

data_old = ''
data_new = ''

cam = cv2.VideoCapture(0)
#cam = cv2.VideoCapture(1)

# real-time QR code recognition from the camera
def Video_display():
    global data_old,data_new
    qrCodeDetector = cv2.QRCodeDetector()
    while 1:
        ret, frame = cam.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 
        t = time.time()
        data, points, _ = qrCodeDetector.detectAndDecode(gray)
        #print("Time Taken for Detect and Decode : {:.3f} seconds".format(time.time() - t))     
        if points is not None:
            #print('p', tuple(points[0][0]))
            nrOfPoints = len(points)
            for i in range(nrOfPoints):
                nextPointIndex = (i+1) % nrOfPoints
                cv2.line(frame, tuple(points[i][0]), tuple(points[nextPointIndex][0]), (255,0,0), 7)
        if data:
            data_new = data
            if data_old != data_new:
                print('data: ', data)
                data_old = data_new
            cv2.putText(frame, data, tuple(points[0][0]), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,0), 2) 
        #cv2.imshow('frame: ', frame)
        disp.value = bgr8_to_jpeg(frame)
        #if cv2.waitKey(5) & 0xFF == 27:
            #break

            
if __name__ == '__main__':
    try:
        Video_display()
    except KeyboardInterrupt:
        cam.release()
QR code recognition (Jupyter version)
# import the required libraries
import cv2
import numpy as np
import sys
import time
from baiduaip_test import text2audio,voice
import threading

text = ''
text_old = ''


cam = cv2.VideoCapture(0)

def music(text):
    global text_old
    if text_old != text:
        text_old = text
        print('data', text)
        filepath = text2audio('二维码内容为' + text)  # speak "the QR code content is <text>"
        voice(filepath)


# real-time QR code recognition from the camera
def Video_display():
    qrCodeDetector = cv2.QRCodeDetector()
    global text, text_old
    while 1:
        ret, frame = cam.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 
        #t = time.time()
        data, points, _ = qrCodeDetector.detectAndDecode(gray)
        #print("Time Taken for Detect and Decode : {:.3f} seconds".format(time.time() - t))     
        if points is not None:
            #print('p', tuple(points[0][0]))
            nrOfPoints = len(points)
            for i in range(nrOfPoints):
                nextPointIndex = (i+1) % nrOfPoints
                cv2.line(frame, tuple(points[i][0]), tuple(points[nextPointIndex][0]), (255,0,0), 5)
        if data:
            cv2.putText(frame, data, tuple(points[0][0]), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,0), 2) 
            t = threading.Thread(target=music, args=(data,))
            t.daemon = True  # daemon thread (setDaemon is deprecated)
            t.start()

        cv2.imshow('frame: ', frame)
        if cv2.waitKey(5) & 0xFF == 27:
            break

if __name__ == '__main__':
    try:
        Video_display()
    except KeyboardInterrupt:
        cam.release()
        cv2.destroyAllWindows()
jetson_qr_play.py (announces the QR code content with Baidu's speech service)
import os
import time
import pygame
from aip import AipSpeech
import pyaudio_test

""" Your APP_ID, API key, and secret key """
APP_ID = '24705618'
API_KEY = 'D9nixLbfFdTp4B7rT378Y67K'
SECRET_KEY = '2Rm19eciqEa9sCUHNyls50QUNtU34qeu'

client = AipSpeech(APP_ID, API_KEY, SECRET_KEY)

# read the audio file (converting it to 16 kHz mono PCM first)
def get_file_content(filePath):
    os.system("ffmpeg -y -i {} -acodec pcm_s16le -f s16le -ac 1 -ar 16000 {}.pcm".format(filePath,filePath))
    with open("{}.pcm".format(filePath), 'rb') as fp:
        return fp.read()

# speech recognition
def audio2text(filepath):
    # recognize a local file
    res = client.asr(get_file_content(filepath), 'pcm', 16000, {'dev_pid': 1537,})
    print('res', res)
    return res.get("result")[0] if res.get('result') else 'error'

# speech synthesis
def text2audio(text):
    #filename = "{}.mp3".format(int(time.time()))
    filename = '1.mp3'
    path = '/tmp/'
    filepath = os.path.join(path, filename)
    result = client.synthesis(text, 'zh', 1, {'vol': 7,"spd": 4,"pit": 7,"per": 4})
    # on success, synthesis returns the audio bytes; on error it returns a dict (see Baidu's error codes)
    if not isinstance(result, dict):
        #print('start write')
        with open(filepath, 'wb') as f:
            f.write(result)
    return filepath

# audio playback
def voice(filepath):
    print('voice start')
    pygame.mixer.init()
    pygame.mixer.music.load(filepath)
    pygame.mixer.music.play()
    while pygame.mixer.music.get_busy():  # wait while audio is playing
        pass
    print('voice end')


if __name__ == '__main__':
    filepath = pyaudio_test.rec('1.wav')
    text = audio2text(filepath)
    #print('text', text)
    filename = text2audio(text)
    voice(filename)
baiduaip_test.py
import cv2
import numpy as np
import pyzbar.pyzbar as pyzbar
import time

'''
# CSI camera
dispW=320
dispH=240
flip=4
camSet='nvarguscamerasrc !  video/x-raw(memory:NVMM), width=3264, height=2464, format=NV12, framerate=21/1 ! nvvidconv flip-method='+str(flip)+' ! video/x-raw, width='+str(dispW)+', height='+str(dispH)+', format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink'
cam = cv2.VideoCapture(camSet)
'''
# USB camera
cam = cv2.VideoCapture(1)


width = cam.get(cv2.CAP_PROP_FRAME_WIDTH)
height = cam.get(cv2.CAP_PROP_FRAME_HEIGHT)
print('width:', width, 'height:', height)

while True:
    # read the current frame
    ret, frame = cam.read()
    # convert to grayscale
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    barcodes = pyzbar.decode(gray) # decode any barcodes in the image
    for barcode in barcodes:
        # extract the barcode's bounding box
        # and draw it on the image
        (x, y, w, h) = barcode.rect
        cv2.rectangle(gray, (x, y), (x + w, y + h), (0, 0, 255), 2)

        # barcode data is a bytes object, so convert it
        # to a string before drawing it on the image
        barcodeData = barcode.data.decode("utf-8")
        barcodeType = barcode.type

        # draw the barcode data and type on the image
        text = "{} ({})".format(barcodeData, barcodeType)
        cv2.putText(gray, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                    .5, (0, 0, 125), 2)

        # print the barcode data and type to the terminal
        print(time.time(), "[INFO] Found {} barcode: {}".format(barcodeType, barcodeData))
    cv2.imshow("camera", gray)
    
    if cv2.waitKey(5) & 0xFF == 27: # exit on ESC
        break
# release the camera
cam.release()
# close the window
cv2.destroyAllWindows()
QR/barcode detection with pyzbar
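To test either decoder without printing a physical code, a QR image can be generated and shown on screen. A minimal sketch assuming the third-party qrcode package (pip3 install qrcode[pil]), which is not used in the original scripts:

import qrcode

# Generate a test QR code and save it; point the camera at the screen or a printout.
img = qrcode.make('hello zhongling')
img.save('test_qr.png')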

4. Face recognition

# import the required libraries
import os
import numpy as np
import cv2
import servo_control

# load the Haar models
#face_cascade = cv2.CascadeClassifier('./trains/haarcascade_frontalface_default.xml')
#eye_cascade = cv2.CascadeClassifier('./trains/haarcascade_eye.xml')

# run a script that kills processes which would block the camera
os.system('./killmjpg.sh')

face_cascade = cv2.CascadeClassifier('./trains/face.xml')

eye_cascade = cv2.CascadeClassifier('./trains/eye.xml')
# csi camera
'''
dispW=480
dispH=320
flip=4
#Uncomment These next Two Line for Pi Camera
camSet='nvarguscamerasrc !  video/x-raw(memory:NVMM), width=3264, height=2464, format=NV12, framerate=21/1 ! nvvidconv flip-method='+str(flip)+' ! video/x-raw, width='+str(dispW)+', height='+str(dispH)+', format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink'
cap= cv2.VideoCapture(camSet)
'''
# usb camera
cap = cv2.VideoCapture(0)
#cap = cv2.VideoCapture(1)

dis_w = 320
dis_h = 240

cap.set(3, dis_w)
cap.set(4, dis_h)

def Video_display():
    while 1:      
        ret, img = cap.read()
        img = cv2.flip(img,1)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x,y,w,h) in faces:
            
            cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
            roi_gray = gray[y:y+h, x:x+w]
            roi_color = img[y:y+h, x:x+w]
            print(int(x+w/2), int(y+h/2))
            eyes = eye_cascade.detectMultiScale(roi_gray)
            for (ex,ey,ew,eh) in eyes:
                cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
        cv2.imshow('img ', img)
        if cv2.waitKey(5) & 0xFF ==27:
            break

if __name__ == '__main__':
    try:
        Video_display()
    except KeyboardInterrupt:
        cap.release()
        cv2.destroyAllWindows()
Face recognition (local version)
# import the required libraries
import os
import numpy as np
import cv2
import servo_control

# load the Haar models
#face_cascade = cv2.CascadeClassifier('./trains/haarcascade_frontalface_default.xml')
#eye_cascade = cv2.CascadeClassifier('./trains/haarcascade_eye.xml')

# create the display widget
from jetcam.utils import bgr8_to_jpeg
import traitlets
import ipywidgets.widgets as widgets
from IPython.display import display

disp = widgets.Image(format='jpeg', width=320, height=240)

display(disp)

# run a script that kills processes which would block the camera
os.system('./killmjpg.sh')

face_cascade = cv2.CascadeClassifier('./trains/face.xml')

eye_cascade = cv2.CascadeClassifier('./trains/eye.xml')
# csi camera
'''
dispW=480
dispH=320
flip=4
#Uncomment These next Two Line for Pi Camera
camSet='nvarguscamerasrc !  video/x-raw(memory:NVMM), width=3264, height=2464, format=NV12, framerate=21/1 ! nvvidconv flip-method='+str(flip)+' ! video/x-raw, width='+str(dispW)+', height='+str(dispH)+', format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink'
cap= cv2.VideoCapture(camSet)
'''
# usb camera
cap = cv2.VideoCapture(0)
#cap = cv2.VideoCapture(1)

dis_w = 320
dis_h = 240

cap.set(3, dis_w)
cap.set(4, dis_h)

def Video_display():
    while 1:      
        ret, img = cap.read()
        img = cv2.flip(img,1)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x,y,w,h) in faces:
            
            cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
            roi_gray = gray[y:y+h, x:x+w]
            roi_color = img[y:y+h, x:x+w]
            print(int(x+w/2), int(y+h/2))
            eyes = eye_cascade.detectMultiScale(roi_gray)
            for (ex,ey,ew,eh) in eyes:
                cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
        #cv2.imshow('img ', img)
        disp.value = bgr8_to_jpeg(img)
        #if cv2.waitKey(5) & 0xFF ==27:
            #break

if __name__ == '__main__':
    try:
        Video_display()
    except KeyboardInterrupt:
        cap.release()
Face recognition (Jupyter version)

5. Pedestrian detection

import cv2
import numpy as np

# create our body classifier
body_classifier = cv2.CascadeClassifier('./images/haarcascade_fullbody.xml')

# start video capture from a video file
cap = cv2.VideoCapture('./images/walking.avi')

# loop over the video once it has loaded
def Video_display():
    while cap.isOpened():
        # read the next frame
        ret, frame = cap.read()
        if not ret:  # stop at the end of the video
            break
        frame = cv2.resize(frame, None,fx=0.5, fy=0.5, interpolation = cv2.INTER_LINEAR)

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # pass the frame to our body classifier
        bodies = body_classifier.detectMultiScale(gray, 1.2, 3)

        # extract bounding boxes for any bodies identified
        for (x,y,w,h) in bodies:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 255), 2)
        cv2.imshow('frame', frame)
        if cv2.waitKey(5) & 0xFF == 27:
            break
    cap.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    Video_display()
Pedestrian detection with OpenCV

6. Car detection

# import the required libraries
import os
import cv2
import time
import numpy as np
import servo_control

# run a script that kills processes which would block the camera
os.system('./killmjpg.sh')
time.sleep(0.1)

# load the Haar classifier
car_classifier = cv2.CascadeClassifier('./trains/haarcascade_car.xml')

cap = cv2.VideoCapture(0)
#cap = cv2.VideoCapture(1)

def car_detect():
    while 1:
        time.sleep(.05)
        # Read first frame
        ret, frame = cap.read()
        
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        
        # Pass frame to our car classifier
        cars = car_classifier.detectMultiScale(gray, 1.4, 2)       
        # Extract bounding boxes for any bodies identified
        for (x,y,w,h) in cars:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 255), 2)
        cv2.imshow('frame', frame)
        if cv2.waitKey(5) & 0xFF == 27:
            break

if __name__ == '__main__':
    try:
        car_detect()
    except KeyboardInterrupt:
        cap.release()
        cv2.destroyAllWindows()
Car detection (local version)
# import the required libraries
import os
import cv2
import time
import numpy as np
import servo_control

# create the display widget
from jetcam.utils import bgr8_to_jpeg
import traitlets
import ipywidgets.widgets as widgets
from IPython.display import display

disp = widgets.Image(format='jpeg', width=320, height=240)
display(disp)

# run a script that kills processes which would block the camera
os.system('./killmjpg.sh')
time.sleep(0.1)

# load the Haar classifier
car_classifier = cv2.CascadeClassifier('./trains/haarcascade_car.xml')

cap = cv2.VideoCapture(0)
#cap = cv2.VideoCapture(1)

def car_detect():
    try:
        while 1:
            time.sleep(.05)
            # Read first frame
            ret, frame = cap.read()

            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            # Pass frame to our car classifier
            cars = car_classifier.detectMultiScale(gray, 1.4, 2)       
            # Extract bounding boxes for any bodies identified
            for (x,y,w,h) in cars:
                cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 255), 2)
            disp.value = bgr8_to_jpeg(frame)
            #cv2.imshow('frame', frame)
            #if cv2.waitKey(5) & 0xFF == 27:
                #break
    except KeyboardInterrupt:
        cap.release()
    
car_detect()
Car detection (Jupyter version)

7. License plate detection

import cv2
import imutils
import numpy as np
import pytesseract

img  = cv2.imread('./images/makecar.jpg',cv2.IMREAD_COLOR)
img = cv2.resize(img, (600,400) )
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 
gray = cv2.bilateralFilter(gray, 13, 15, 15) 

edged = cv2.Canny(gray, 30, 200) 
contours = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(contours)
contours = sorted(contours, key = cv2.contourArea, reverse = True)[:10]
screenCnt = None


for c in contours:
    
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.018 * peri, True)
 
    if len(approx) == 4:
        screenCnt = approx
        break

if screenCnt is None:
    detected = 0
    print ("No contour detected")
else:
     detected = 1

if detected == 1:
    cv2.drawContours(img, [screenCnt], -1, (0, 0, 255), 3)

mask = np.zeros(gray.shape,np.uint8)
new_image = cv2.drawContours(mask,[screenCnt],0,255,-1,)
new_image = cv2.bitwise_and(img,img,mask=mask)

(x, y) = np.where(mask == 255)
(topx, topy) = (np.min(x), np.min(y))
(bottomx, bottomy) = (np.max(x), np.max(y))
Cropped = gray[topx:bottomx+1, topy:bottomy+1]

text = pytesseract.image_to_string(Cropped, config='--psm 11')
print("programming_fever's License Plate Recognition\n")
print("Detected license plate Number is:",text)
img = cv2.resize(img,(500,300))
Cropped = cv2.resize(Cropped,(400,200))
# show the images
try:
    cv2.imshow('img', img)
    cv2.imshow('cropped', Cropped)
    cv2.waitKey(0)  # wait for a key press before closing the windows
finally:
    cv2.destroyAllWindows()
License plate detection with OpenCV
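pytesseract is only a wrapper around the Tesseract binary, so Tesseract itself must be installed separately (for example sudo apt-get install tesseract-ocr). A quick sanity check, offered as a sketch rather than part of the original script:

import pytesseract

# Raises an error if the tesseract binary is missing from PATH.
print(pytesseract.get_tesseract_version())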

8. Real-time object localization by color with OpenCV

from __future__ import print_function
from imutils.video import VideoStream
import imutils
import time
import cv2
import os
import RPi.GPIO as GPIO

# LED initialization
redLed = 17
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(redLed, GPIO.OUT)

# print the object's real-time position
def mapObjectPosition (x, y):
    print ("[INFO] Object Center coordinates at X0 = {0} and Y0 =  {1}".format(x, y))

'''
dispW=500
dispH=350
flip=4
# CSI camera pipeline parameters
camSet='nvarguscamerasrc !  video/x-raw(memory:NVMM), width=3264, height=2464, format=NV12, framerate=21/1 ! nvvidconv flip-method='+str(flip)+' ! video/x-raw, width='+str(dispW)+', height='+str(dispH)+', format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink'
# initialize the video stream and let the camera sensor warm up
print("[INFO] waiting for camera to warmup...")
vs = cv2.VideoCapture(camSet) #VideoStream(camSet).start()
'''
vs = cv2.VideoCapture(1)

time.sleep(2.0)

# define the lower and upper bounds of the
# target color, tracked in HSV space
colorLower = (9,135,231)
colorUpper = (31,255,255)


# turn the LED off
GPIO.output(redLed, GPIO.LOW)
ledOn = False

def Video_display():
    global ledOn
    # loop over frames from the video stream
    while True:
        # grab the next frame from the video stream, resize it,
        # and convert it to the HSV color space
        ret, frame = vs.read()
        frame = imutils.resize(frame, width=500)
        frame = imutils.rotate(frame, angle=0)
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # construct a mask for the target color, then perform
        # a series of dilations and erosions to remove any small
        # blobs left in the mask
        mask = cv2.inRange(hsv, colorLower, colorUpper)
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)

        # find contours in the mask and initialize the
        # (x, y) center of the object
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2]
        #cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        center = None

        # only proceed if at least one contour was found
        if len(cnts) > 0:
            # find the largest contour in the mask, then use it
            # to compute the minimum enclosing circle and
            # centroid
            c = max(cnts, key=cv2.contourArea)
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            M = cv2.moments(c)
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

            # only proceed if the radius meets a minimum size
            if radius > 10:
                # draw the circle and centroid on the frame,
                # then update the list of tracked points
                cv2.circle(frame, (int(x), int(y)), int(radius),(0, 255, 255), 2)
                cv2.circle(frame, center, 5, (0, 0, 255), -1)

                # position the servo at the circle center
                mapObjectPosition(int(x), int(y))

                # if the LED is not already on, turn it on
                if not ledOn:
                    GPIO.output(redLed, GPIO.HIGH)
                    ledOn = True
        # if no object was detected, turn the LED off
        elif ledOn:
            GPIO.output(redLed, GPIO.LOW)
            ledOn = False

        # show the frame on screen
        cv2.imshow('frame', frame)
        if cv2.waitKey(5) & 0xFF == 27:
            break
    # clean up
    print("\n [INFO] Exiting Program and cleanup stuff \n")
    GPIO.cleanup()
    vs.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    Video_display()
Locating the object's real-time position

9. OpenCV single-line following

'''
Before using image recognition: if a camera was already connected
when the board booted, stop the mjpg process first.

Use image recognition under stable lighting and against a solid-color background.
'''

import cv2  # import libraries
import numpy as np
import time
import z_uart as myUart


cap = cv2.VideoCapture(0)  # open the camera
cap.set(3,160)
cap.set(4,120)  # set the window size

dis_w = 160

# globals
#systick_ms_bak = 0

myUart.setup_uart(115200)


def Tracing(cx):
    print('cx', cx)
    if cx <= 40:  # veering right (line is on the left of the frame)
        l_pwm = 1500 + int(600*(cx-40)/40)
        car_run(-10, 200, l_pwm)
    elif 40 < cx < 120:
        car_run(200, 200)
    elif cx >= 120:  # veering left (line is on the right of the frame)
        r_pwm = 1500 + int(600*(cx-120)/40)
        car_run(200, -10, r_pwm)

def car_run(left_speed, right_speed, pwm_value=1500):
    testStr = "{#006P%04dT0000!#007P%04dT0000!#000P%04dT0000!}" % (1500+left_speed, 1500-right_speed, pwm_value)
    print(testStr)
    myUart.uart_send_str(testStr)

def camera_xunji():
    while True: # loop forever
        # Capture the frames: ret is True while frames are being read
        # (False at end of stream); frame is the BGR image as a 3-D array
        ret, frame = cap.read()
        # flip the image if needed
        #frame = cv2.flip(frame, -1)
        t1 = time.time()
        # crop the region of interest: rows 60:120, all 160 columns
        crop_img =frame[60:120, 0:160]
        #crop_img =frame(0,0,640,480)
        # convert to grayscale
        gray = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
        # Gaussian blur to smooth the image
        blur = cv2.GaussianBlur(gray,(5,5),0)
        # binarize (inverted threshold: the dark line becomes white)
        ret,thresh1 = cv2.threshold(blur,60,255,cv2.THRESH_BINARY_INV)
        # erode to remove small noise
        mask = cv2.erode(thresh1, None, iterations=2)
        # dilate to restore the line's width
        mask = cv2.dilate(mask, None, iterations=2)

        # find contours
        contours = cv2.findContours(mask.copy(), 1, cv2.CHAIN_APPROX_NONE)[-2]
        # Find the biggest contour (if detected)
        if len(contours) > 0:
            c = max(contours, key=cv2.contourArea)
            M = cv2.moments(c)

            cx = int(M['m10']/M['m00'])
            cy = int(M['m01']/M['m00'])
            # draw blue crosshair lines at the centroid to visualize the offset
            cv2.line(crop_img,(cx,0),(cx,720),(255,0,0),1)
            cv2.line(crop_img,(0,cy),(1280,cy),(255,0,0),1)
            # draw the contours in green
            cv2.drawContours(crop_img, contours, -1, (0,255,0), 1)
            #print(time.time(), int(cx),int(cy
        
            Tracing(cx)
        else:
            print("I don't see the line" )

        cv2.imshow('frame', crop_img) # show the result
        t2=time.time()
        fps = 1.0/(t2-t1)
        print('fps',fps)
        #cv2.imshow('mask',mask)
        #cv2.imshow('res',res)
        if cv2.waitKey(5) & 0xFF == 27: # exit on ESC
            break

if __name__ == '__main__':
    try:
        camera_xunji()
    except KeyboardInterrupt:
        car_run(0,0,)
        cap.release()
        cv2.destroyAllWindows() # standard cleanup whenever the camera is used
jetson_car_opencv_xunji.py
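The steering in Tracing() is a linear map: inside the 40 < cx < 120 dead band both wheels run equally, and outside it the turn PWM moves away from the 1500 neutral pulse by up to 600 in proportion to the error. A quick check of the left-edge endpoints using the same formula (illustration only, not part of the robot code):

# Same linear map as Tracing(): cx=0 -> 900 (full deflection), cx=40 -> 1500 (neutral).
for cx in (0, 20, 40):
    print(cx, 1500 + int(600 * (cx - 40) / 40))
# 0 900
# 20 1200
# 40 1500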
# imports
import serial
import time
import threading
import z_led as myLed
import z_beep as myBeep

# globals
ser = ''
uart_baud = 115200
uart_get_ok = 0
uart_receive_buf = ""
uart_receive_buf_index = 0

# send a string over the serial port
def uart_send_str(string):
    global ser
    ser.write(string.encode("GB2312"))
    #time.sleep(0.01)
    #ser.flushInput()

# receive-thread function: handles the three frame formats $...!, #...!, and {...}; payload length is unrestricted
def serialEvent():
    global ser,uart_receive_buf_index,uart_receive_buf,uart_get_ok
    mode = 0
    try:
        while True:
            if uart_get_ok == 0:
                uart_receive_buf_index = ser.inWaiting()
                if uart_receive_buf_index > 0:
                    uart_receive_buf = uart_receive_buf+ser.read(uart_receive_buf_index).decode()
                    #print('get1:',uart_receive_buf, " len:", len(uart_receive_buf), " mode:", mode)
                    if mode == 0:
                        if uart_receive_buf.find('{') >= 0:
                            mode = 1
                            #print('mode1 start')
                        elif uart_receive_buf.find('$') >= 0:
                            mode = 2
                            #print('mode2 start')
                        elif uart_receive_buf.find('#') >= 0:
                            mode = 3
                            #print('mode3 start')
                    
                    if mode == 1:
                        if uart_receive_buf.find('}') >= 0:
                            uart_get_ok = 1
                            mode = 0
                            ser.flushInput()
                            #print('{}:',uart_receive_buf, " len:", len(uart_receive_buf))
                            #print('mode1 end')
                    elif mode == 2:
                        if uart_receive_buf.find('!') >= 0:
                            uart_get_ok = 2
                            mode = 0
                            ser.flushInput()
                            #print('$!:',uart_receive_buf, " len:", len(uart_receive_buf))
                            #print('mode2 end')
                    elif mode == 3:
                        if uart_receive_buf.find('!') >= 0:
                            uart_get_ok = 3
                            mode = 0
                            ser.flushInput()
                            #print('#!:', uart_receive_buf, " len:", len(uart_receive_buf))
                            #print('mode3 end')
                    
                    #print('get2:',uart_receive_buf, " len:", len(uart_receive_buf), " mode:", mode, " getok:", uart_get_ok)
    
    except IOError:
        pass

# serial receive thread
uart_thread = threading.Thread(target=serialEvent)

# serial port initialization
def setup_uart(baud):
    global ser,uart_thread,uart_receive_buf
    uart_baud = baud
    ser = serial.Serial("/dev/ttyTHS1", uart_baud, timeout=2)
    ser.flushInput()
    uart_thread.start()
    uart_send_str("uart init ok!\r\n")
    uart_receive_buf = ''
        
# serial polling loop
def loop_uart():
    global uart_get_ok, uart_receive_buf
    if(uart_get_ok):
        print(int(time.time()*1000))
        uart_send_str(uart_receive_buf)
        
        if uart_receive_buf == '$LEDON!':
            myLed.on(1)
        elif uart_receive_buf == '$LEDOFF!':
            myLed.off(1)
        elif uart_receive_buf == '$BEEPON!':
            myBeep.on()
        elif uart_receive_buf == '$BEEPOFF!':
            myBeep.off()
        
        uart_receive_buf = ''
        uart_get_ok = 0

# main loop
if __name__ == '__main__':
    setup_uart(115200)
    myLed.setup_led()
    myBeep.setup_beep()
    # three beeps as the startup sound
    myBeep.beep(0.1)
    myBeep.beep(0.1)
    myBeep.beep(0.1)
    try:
        while True:
            loop_uart()
  
    except KeyboardInterrupt:
        if ser != None:
            ser.close()
        myLed.off(1)
        myLed.off(2)
        myBeep.off()
z_uart.py

10. Ultrasonic obstacle avoidance

# imports
import time
import RPi.GPIO as GPIO

import z_uart as myUart

# GPIO pin-numbering mode
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)

# pin definitions
BEEP_PIN = 21
LED_PIN = 4
KEY1_PIN = 24
KEY2_PIN = 25

# ultrasonic sensor pins
TRIG = 23
ECHO = 22

# globals
systick_ms_bak = 0
dis = 100
# initialize the GPIO pins
def setup_gpio():
    GPIO.setup(LED_PIN, GPIO.OUT,initial=1)     
    GPIO.setup(BEEP_PIN, GPIO.OUT, initial=0)   
    GPIO.setup(KEY1_PIN, GPIO.IN, pull_up_down = GPIO.PUD_UP)  
    GPIO.setup(KEY2_PIN, GPIO.IN, pull_up_down = GPIO.PUD_UP)
# initialize the ultrasonic sensor
def setup_csb():
    GPIO.setup(TRIG, GPIO.OUT,initial = 0)
    GPIO.setup(ECHO, GPIO.IN,pull_up_down = GPIO.PUD_UP)
    
# heartbeat LED in the main loop
def loop_led():
    global systick_ms_bak
    if(int((time.time() * 1000))- systick_ms_bak > 500):
        systick_ms_bak = int((time.time() * 1000))
        GPIO.output(LED_PIN, not GPIO.input(LED_PIN))  # toggle the LED pin
# beep once; x is the on/off duration in seconds
def beep(x):
    GPIO.output(BEEP_PIN,1)  # buzzer on
    time.sleep(x)
    GPIO.output(BEEP_PIN,0)  # buzzer off
    time.sleep(x)
# startup indication
def setup_start():
    for i in range(3):
        beep(0.1)

# ultrasonic distance measurement
def distance():
    GPIO.output(TRIG, 0)
    time.sleep(0.000002)

    GPIO.output(TRIG, 1)
    time.sleep(0.00001)
    GPIO.output(TRIG, 0)
   
    while GPIO.input(ECHO) == 0:
        a = 0
    time1 = time.time()
    while GPIO.input(ECHO) == 1:
        a = 1
    time2 = time.time()

    during = time2 - time1
    return during * 340 / 2 * 100

# obstacle avoidance
def car_move():
    global systick_ms_bak
    if(int((time.time() * 1000))- systick_ms_bak > 100):
        systick_ms_bak = int((time.time() * 1000))
        dis = distance()
        if int(dis) >= 100:
            car_go_back(400)
        elif 50 <= int(dis) < 100:
            car_go_back(300)
        elif 20 <= int(dis) < 50:
            car_go_back(200)
        else:
            car_right_turn(500)

'''
Send a motor drive command over the serial port.
Range: -1000 to +1000
'''
def car_run(left_speed, right_speed, pwm_value=1500):
    textStr = '#006P{:0>4d}T0000!#007P{:0>4d}T0000!#000P{:0>4d}T0000!'.format(1500+left_speed, 1500-right_speed, pwm_value)
    print(textStr)
    myUart.uart_send_str(textStr)

'''
Drive the car forward or backward:
positive values go forward, negative backward.
Range: -1000 to +1000
'''
def car_go_back(speed, pwm_value=1500):
    car_run(speed, speed)

'''
Turn the car right (positive values).
Range: 0 to 1000
'''
def car_right_turn(speed, pwm_value=1500):
    speedl = speed
    speedr = speed*2//3
    car_run(speedl, speedr, 2100)

'''
Stop the car.
'''
def car_stop():
    myUart.uart_send_str('#006P1500T1000!#007P1500T1000!#000P1500T1000!')
# cleanup
def destory():
    GPIO.output(LED_PIN, 1)
    GPIO.output(BEEP_PIN, 0)
    GPIO.cleanup()
    car_stop()
# main loop
if __name__ == '__main__':
    
    setup_gpio()             # init GPIO
    setup_start()            # startup indication: three beeps
    setup_csb()              # init the ultrasonic sensor
    myUart.setup_uart(115200) # init the serial port
    try:
        while True:
            loop_led()
            car_move()
    except KeyboardInterrupt:
        destory() 
jetson_ziyoubizhang.py
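distance() converts the echo pulse width into centimeters: sound travels at about 340 m/s, the pulse covers the distance twice (out and back), and the final *100 turns meters into centimeters, so a 10 ms echo is roughly 170 cm. The two while loops, however, block forever if the echo never arrives; the variant below is a sketch, reusing TRIG, ECHO, and the GPIO setup from the listing above, that simply adds a deadline.

def distance_safe(timeout=0.04):
    # Trigger a 10 us pulse, exactly as in distance() above.
    GPIO.output(TRIG, 0)
    time.sleep(0.000002)
    GPIO.output(TRIG, 1)
    time.sleep(0.00001)
    GPIO.output(TRIG, 0)

    deadline = time.time() + timeout
    while GPIO.input(ECHO) == 0:        # wait for the echo to start
        if time.time() > deadline:
            return -1                   # no echo: report failure
    time1 = time.time()
    while GPIO.input(ECHO) == 1:        # wait for the echo to end
        if time.time() > deadline:
            return -1
    time2 = time.time()
    return (time2 - time1) * 340 / 2 * 100   # pulse width -> cm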

11. Real-time object recognition with audio playback

# announce recognized objects in real time
import cv2
import jetson.inference
import jetson.utils
import time
import numpy as np
import threading
from baiduaip_test import voice


speak = True
item = '欢迎使用众灵AI智能语音识别系统!!'  # greeting: "Welcome to the Zhongling AI speech recognition system!!"
confidence = 0
itemOld=''

'''
import pygame
from aip import AipSpeech

#fill in your own app ID and keys here
APP_ID='24705618'
API_KEY='D9nixLbfFdTp4B7rT378Y67K'
SECRET_KEY='2Rm19eciqEa9sCUHNyls50QUNtU34qeu'
aipSpeech=AipSpeech(APP_ID,API_KEY,SECRET_KEY)
'''


def sayItem():
    global speak
    global item
    while True:
        if speak == True:
            voice('./audios/zl.mp3')
            speak=False

            '''
            result = aipSpeech.synthesis(text = item,options={'spd':5,'vol':9,'per':0,})
            if not isinstance(result,dict):
                with open('./audios/zl.mp3','wb') as f:
                    f.write(result)
                    
            else:print(result)
            #use the pygame that ships with the Jetson Nano
            pygame.mixer.init()
            pygame.mixer.music.load('./audios/zl.mp3')
            pygame.mixer.music.play()
            speak=False
            '''
            
x = threading.Thread(target=sayItem,daemon=True)
x.start()

# csi camera
'''
dispW=1280
dispH=720
flip=4
        
# Gstreamer code for improvded Raspberry Pi Camera Quality
camSet='nvarguscamerasrc wbmode=3 tnr-mode=2 tnr-strength=1 ee-mode=2 ee-strength=1 ! video/x-raw(memory:NVMM), width=3264, height=2464, format=NV12, framerate=21/1 ! nvvidconv flip-method='+str(flip)+' ! video/x-raw, width='+str(dispW)+', height='+str(dispH)+', format=BGRx ! videoconvert ! video/x-raw, format=BGR ! videobalance contrast=1.5 brightness=-.2 saturation=1.2 ! appsink'
cam=cv2.VideoCapture(camSet)
'''
# usb camera
cam=cv2.VideoCapture(0)
cam.set(3,640)
cam.set(4,480)
dispW = 640
dispH = 480
net=jetson.inference.imageNet('googlenet')
font=cv2.FONT_HERSHEY_SIMPLEX
timeMark= time.time()
fpsFiltered=0

def shiwu():
    global speak, item, itemOld, timeMark, fpsFiltered, confidence
    while True:
        ret,frame = cam.read()
        img = cv2.cvtColor(frame,cv2.COLOR_BGR2RGBA).astype(np.float32)
        img = jetson.utils.cudaFromNumpy(img)
    
        if speak == False:
            classID,confidence = net.Classify(img,dispW,dispH)
            if confidence>=.5:
                item=net.GetClassDesc(classID)
                if item!=itemOld:
                    speak = True
            if confidence < .5:
                item=''
            itemOld = item
        dt = time.time() - timeMark
        timeMark = time.time()
        fps = 1/dt
        fpsFiltered=.95*fpsFiltered + .05*fps
        cv2.putText(frame,str(round(fpsFiltered,1)) + ' fps '+item+str(round(confidence,2)),(0,30),font,1,(0,0,255),2)
        cv2.imshow('frame', frame)
        if cv2.waitKey(5) & 0xFF == 27:
            break

if __name__ == '__main__':
    try:
        shiwu()
    except KeyboardInterrupt:
        cam.release()
        cv2.destroyAllWindows()
        
jetson_zhinengshiwu.py
baiduaip_test.py (the same helper module listed in section 3)

12. Human body tracking

# import the required libraries
import os
import time
import cv2
import numpy as np
import z_uart as myUart
import servo_control

# run a script that kills processes which would block the camera
os.system('./killmjpg.sh')

time.sleep(0.1)

servo_control.servo_move(0, 90, 1, 60)
myUart.setup_uart(115200)
# create our body classifier
body_classifier = cv2.CascadeClassifier('./trains/haarcascade_fullbody.xml')

# start video capture (camera instead of a video file)
#cap = cv2.VideoCapture('./videos/walking.avi')
cap = cv2.VideoCapture(0)
#cap = cv2.VideoCapture(1)
dis_w = 640
dis_h = 480
cap.set(3,dis_w)
cap.set(4,dis_h)
axis_x = dis_w//2//2

def tracing(area, pwm_value):
    print('area ', area)
    print('pwm ', pwm_value)
    if pwm_value < 500:
        pwm_value = 500
    if pwm_value >2500:
        pwm_value = 2500
    if 12000 < area:
        car_run(-300, -300, pwm_value=pwm_value)
    elif area <= 12000:
        car_run(300, 300, pwm_value=pwm_value)

def angle(x,y,w,h):
    print('---',x,y,w,h)
    center_x = x + w
    print('cen x ',center_x)
    if center_x <= axis_x:  
        pwm_value = 1500 - 800*(axis_x-center_x)//axis_x
    elif axis_x < center_x < axis_x*2:
        pwm_value = 1500
    elif center_x >=axis_x:
        pwm_value = 1500 - 800*(axis_x - center_x)//axis_x
        print('pwm_cd', pwm_value)
    return pwm_value


def car_run(left_speed, right_speed, pwm_value=1500):
    textStr = '{#006P%04dT0000!#007P%04dT0000!#000P%04dT0000!}' % (1500+left_speed, 1500-right_speed, pwm_value)
    print(textStr)
    myUart.uart_send_str(textStr)

def Video_display():
    while 1:
        # Read first frame
        ret, frame = cap.read()
        frame = cv2.resize(frame, None,fx=0.5, fy=0.5, interpolation = cv2.INTER_LINEAR)
        #print('frame shape ',frame.shape)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Pass frame to our body classifier
        bodies = body_classifier.detectMultiScale(gray, 1.2, 3)
        
        # Extract bounding boxes for any bodies identified
        for (x,y,w,h) in bodies:
            print(x,y,w,h)
            area = int(w*h)
            if area > 4000: 
                cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 255), 2)
                pwm_value = angle(x,y,w,h)
                tracing(area, pwm_value)
        cv2.imshow('frame', frame)
        if cv2.waitKey(5) & 0xFF == 27:
            break


if __name__ == '__main__':
    try:
        Video_display()
    except KeyboardInterrupt:
        car_run(0, 0)
        cap.release()
        cv2.destroyAllWindows()
    finally:
        car_run(0, 0)
Pedestrian tracking (local version)
# import the required libraries
import os
import time
import cv2
import numpy as np
import z_uart as myUart
import servo_control

# create the display widget
from jetcam.utils import bgr8_to_jpeg
import traitlets
import ipywidgets.widgets as widgets
from IPython.display import display

disp = widgets.Image(format='jpeg', width=640, height=480)
display(disp)

# run a script that kills processes which would block the camera
os.system('./killmjpg.sh')

time.sleep(0.1)

servo_control.servo_move(0, 90, 1, 60)
myUart.setup_uart(115200)
# create our body classifier
body_classifier = cv2.CascadeClassifier('./trains/haarcascade_fullbody.xml')

# start video capture (camera instead of a video file)
#cap = cv2.VideoCapture('./videos/walking.avi')
cap = cv2.VideoCapture(0)
#cap = cv2.VideoCapture(1)
dis_w = 640
dis_h = 480
cap.set(3,dis_w)
cap.set(4,dis_h)
axis_x = dis_w//2//2

def tracing(area, pwm_value):
    print('area ', area)
    print('pwm ', pwm_value)
    if pwm_value < 500:
        pwm_value = 500
    if pwm_value >2500:
        pwm_value = 2500
    if 12000 < area:
        car_run(-150, -150, pwm_value=pwm_value)
    elif area <= 12000:
        car_run(150, 150, pwm_value=pwm_value)

def angle(x,y,w,h):
    print('---',x,y,w,h)
    center_x = x + w
    print('cen x ',center_x)
    if center_x <= axis_x:  
        pwm_value = 1500 - 800*(axis_x-center_x)//axis_x
    elif axis_x < center_x < axis_x*2:
        pwm_value = 1500
    elif center_x >=axis_x:
        pwm_value = 1500 - 800*(axis_x - center_x)//axis_x
        print('pwm_cd', pwm_value)
    return pwm_value


def car_run(left_speed, right_speed, pwm_value=1500):
    textStr = '{#006P%04dT0000!#007P%04dT0000!#000P%04dT0000!}' % (1500+left_speed, 1500-right_speed, pwm_value)
    print(textStr)
    myUart.uart_send_str(textStr)

def Video_display():
    while 1:
        # Read first frame
        ret, frame = cap.read()
        frame = cv2.resize(frame, None,fx=0.5, fy=0.5, interpolation = cv2.INTER_LINEAR)
        #print('frame shape ',frame.shape)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Pass frame to our body classifier
        bodies = body_classifier.detectMultiScale(gray, 1.2, 3)
        
        # Extract bounding boxes for any bodies identified
        for (x,y,w,h) in bodies:
            print(x,y,w,h)
            area = int(w*h)
            if area > 4000: 
                cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 255), 2)
                pwm_value = angle(x,y,w,h)
                tracing(area, pwm_value)
        disp.value = bgr8_to_jpeg(frame)
        #cv2.imshow('frame', frame)
        #if cv2.waitKey(5) & 0xFF == 27:
            #break


if __name__ == '__main__':
    try:
        Video_display()
    except KeyboardInterrupt:
        car_run(0, 0)
        cap.release()
        #cv2.destroyAllWindows()
    finally:
        car_run(0, 0)
Pedestrian tracking (Jupyter version)

13. Face identity recognition

Face identity recognition with dlib

Installing the face recognition libraries
1. Install the dependencies:
sudo apt-get update
sudo apt-get install cmake libopenblas-dev liblapack-dev libjpeg-dev
2. The Nano has only 4 GB of RAM, so compiling dlib needs a swap file:
git clone http://github.com/JetsonHacksNano/installSwapfile
cd installSwapfile
./installSwapfile.sh
3. Download and unpack the dlib source; one line of the source must be commented out:
wget http://dlib.net/files/dlib-19.17.tar.bz2
tar jxvf dlib-19.17.tar.bz2
cd dlib-19.17
4. Edit dlib/cuda/cudnn_dlibapi.cpp so that it works on the Jetson Nano.
Find /dlib-19.17/dlib/cuda/cudnn_dlibapi.cpp,
open it (for example in JupyterLab), and delete or comment out this line:
forward_algo = forward_best_algo;

5. Build and install dlib-19.17:
cd ..
cd dlib-19.17
sudo python3 setup.py install
The build takes quite a while; be patient.

6. Install the face_recognition library:
sudo pip3 install face_recognition
Installing dlib and face_recognition
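Once the build finishes, a quick import check confirms that both libraries installed correctly (a sketch; the printed version will vary with your build):

import dlib
import face_recognition

print('dlib', dlib.__version__)            # e.g. 19.17.0
print('CUDA enabled:', dlib.DLIB_USE_CUDA) # True if the CUDA build succeeded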
# Train a face recognition model for use with OpenCV
# (generate the stored encodings)
import face_recognition
import cv2
import os
import time
import pickle

def train(filename='./trains/train.pkl'):
    Encodings=[]
    Names=[]
    j=0

    image_dir='./known'
    for root,dirs,files in os.walk(image_dir):
        print(files)
        for file in files:
            path = os.path.join(root,file)
            print(path)
            name=os.path.splitext(file)[0]
            print(name)
            person=face_recognition.load_image_file(path)
            encoding=face_recognition.face_encodings(person)[0]
            Encodings.append(encoding)
            Names.append(name)
    print(Names)

    with open(filename,'wb') as f:
        pickle.dump(Names,f)
        pickle.dump(Encodings,f)
    return filename

cam = cv2.VideoCapture(0)

# face identity recognition through the camera
def face_detect(filename='./trains/train.pkl'):
    fpsReport=0
    scaleFactor=.25

    Encodings=[]
    Names=[]

    with open(filename,'rb') as f:
        Names=pickle.load(f)
        Encodings=pickle.load(f)

    timeStamp = time.time()

    while True:
        ret,frame = cam.read()
        frameSmall=cv2.resize(frame,(0,0),fx=scaleFactor,fy=scaleFactor)
        frameRGB=cv2.cvtColor(frameSmall,cv2.COLOR_BGR2RGB)
        facePositions=face_recognition.face_locations(frameRGB,model='cnn')
        allEncodings=face_recognition.face_encodings(frameRGB,facePositions)
        for (top,right,bottom,left),face_encoding in zip(facePositions,allEncodings):
            name='Unknown Person'
            matches=face_recognition.compare_faces(Encodings,face_encoding)
            if True in matches:
                first_match_index=matches.index(True)
                name=Names[first_match_index]
            
            top=int(top/scaleFactor)
            right=int(right/scaleFactor)
            bottom=int(bottom/scaleFactor)
            left=int(left/scaleFactor)
        
            cv2.rectangle(frame,(left,top),(right,bottom),(0,0,255),2)
            cv2.putText(frame,name,(left,top-6),cv2.FONT_HERSHEY_SIMPLEX,.75,(0,0,255),2)
        
        dt = time.time()-timeStamp
        fps=1/dt
        fpsReport=.90*fpsReport + .1*fps
        #print('fps is:',round(fpsReport))
        timeStamp=time.time()
        
        cv2.rectangle(frame,(0,0),(100,40),(0,0,255),-1)
        cv2.putText(frame,str(round(fpsReport,1)) + 'fps',(0,25),cv2.FONT_HERSHEY_SIMPLEX,.75,(0,255,255),2)
        cv2.imshow('frame', frame)
        if cv2.waitKey(5) & 0xFF == 27:
            break

if __name__ == '__main__':
    try:
        #filename = train()
        face_detect()
    except KeyboardInterrupt:
        cam.release()
        cv2.destroyAllWindows()
jetson_ident_detect.py
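The same face_recognition calls used above also work on still images, which is a faster way to sanity-check the pipeline than the live camera. A minimal sketch assuming two hypothetical files, known.jpg and probe.jpg, each containing exactly one face:

import face_recognition

# Encode one face from each image (assumes each image contains exactly one face).
known = face_recognition.face_encodings(face_recognition.load_image_file('known.jpg'))[0]
probe = face_recognition.face_encodings(face_recognition.load_image_file('probe.jpg'))[0]

# compare_faces returns a list of booleans, one per known encoding.
print(face_recognition.compare_faces([known], probe))  # [True] if the same person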

14. Detecting objects of interest in the camera feed

import cv2
import jetson.inference
import jetson.utils
import time
import numpy as np

timeStamp = time.time()
fpsFiltered = 0

obj = 'dog'  # interesting thing
net = jetson.inference.detectNet('ssd-mobilenet-v2',threshold=.5)
font = cv2.FONT_HERSHEY_SIMPLEX

cam = cv2.VideoCapture(0)

def interest_detect():
    global timeStamp, fpsFiltered
    while True:
        _,img = cam.read()
        height = img.shape[0]
        width = img.shape[1]
    
        frame = cv2.cvtColor(img,cv2.COLOR_BGR2RGBA).astype(np.float32)
        frame = jetson.utils.cudaFromNumpy(frame)
        detections = net.Detect(frame,width,height)
    
        for detect in detections:
            #print(detect)
            ID=detect.ClassID
            top=int(detect.Top)
            left=int(detect.Left)
            bottom=int(detect.Bottom)
            right=int(detect.Right)
            item=net.GetClassDesc(ID)

            if item == obj: # only react to the object of interest
                cv2.rectangle(img,(left,top),(right,bottom),(0,255,0),2)
                cv2.putText(img,item,(left,top+20),font,1,(0,0,255),2)
        
            #print(item,top,left,bottom,right)
    
        #print(str(round(fps,1))+' fps ')
        dt=time.time()-timeStamp
        timeStamp=time.time()
        fps=1/dt
        fpsFiltered=.9*fpsFiltered + .1*fps
        cv2.putText(img,str(round(fpsFiltered,1)) + ' fps ',(0,30),font,1,(0,0,255),2)
        cv2.imshow('img', img)
        if cv2.waitKey(5) & 0xFF == 27:
            break

if __name__ == '__main__':
    try:
        interest_detect()
    except KeyboardInterrupt:
        cam.release()
        cv2.destroyAllWindows()
jetson_interest_detect.py

15. Color following

import cv2   # import libraries
import numpy as np
import time
import os
from servo_control import *

# For background on HSV color ranges, see:
# https://baike.baidu.com/item/HSV/547122?fr=aladdin
# https://blog.csdn.net/leo_888/article/details/88284251

# run a script that kills processes which would block the camera
os.system('./killmjpg.sh')
time.sleep(0.1)

# dictionary of colors to detect
color_dist = {
             'red':   {'Lower': np.array([156, 60, 60]), 'Upper': np.array([180, 255, 255])},
             'green': {'Lower': np.array([35, 43, 35]), 'Upper': np.array([90, 255, 255])},
             'blue':  {'Lower': np.array([110, 80, 80]), 'Upper': np.array([128, 255, 255])},
#             'gray':  {'Lower': np.array([0,0,30]),'Upper':np.array([255,40,80]) }
             }

flag = -1
flag_bak = -1

img_w = 320
img_h = 240

cap = cv2.VideoCapture(0)  # open the camera (max resolution 640x480)
cap.set(3,img_w)  # set frame width
cap.set(4,img_h)  # set frame height


lastTime = time.time()

time.sleep(1)

while 1: #进入无线循环
    global pan,tilt
    #将摄像头拍摄到的画面作为frame的值
    ret,frame = cap.read()
    for i in color_dist:
        #1-高斯滤波GaussianBlur() 让图片模糊
        frame = cv2.GaussianBlur(frame,(5,5),0)
    
        #2-转换HSV的样式 以便检测
        hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV) 
        
        #3-查找字典
        mask = cv2.inRange(hsv, color_dist[i]['Lower'], color_dist[i]['Upper'])
        
        #4-腐蚀图像
        mask = cv2.erode(mask,None,iterations=2)
        
        #高斯模糊
        mask = cv2.GaussianBlur(mask,(3,3),0)
        
        #图像合并
        res = cv2.bitwise_and(frame,frame,mask=mask)       
        #6-边缘检测
        cnts = cv2.findContours(mask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2] 
         
        if len(cnts) > 0:  # use the largest contour to locate the detected object

            cnt = max(cnts, key=cv2.contourArea)
            rect = cv2.minAreaRect(cnt)
            # the 4 corners of the minimum-area bounding rectangle
            box = cv2.boxPoints(rect)

            # center, size and angle of that rectangle
            c_x, c_y = rect[0]
            c_w, c_h = rect[1]
            c_angle = rect[2]
            #print(c_x,c_y,c_h,c_w)
            # accept only rectangles about the size of the target block
            if 25 < c_h < 85 and 25 < c_w < 85: #  and 50 < c_x < 270 and 60 < c_y < 180:
                #print('---',c_x,c_y,c_h,c_w)
                # draw the bounding rectangle
                cv2.drawContours(frame, [np.int0(box)], -1, (0, 255, 255), 2)

                #print(time.time(), 'x=', int(c_x), 'y=', int(c_y), 'c_h=', int(c_h), 'c_w=', int(c_w), 'angle=', int(c_angle))

                # proportional control: offset of the object center from the frame center
                errorPan = c_x - img_w/2
                errorTilt = c_y - img_h/2
                #print('errorpan',errorPan)
                if abs(errorPan) > 15:   # 15-pixel dead zone avoids jitter
                    pan = pan - errorPan/75
                if abs(errorTilt) > 15:
                    tilt = tilt - errorTilt/50
                #print('pan',pan)
                # clamp angles to the servo's 0-180 degree range
                if pan > 180:
                    pan = 180
                    print("Pan out of range")
                if pan < 0:
                    pan = 0
                    print("Pan out of range")
                if tilt > 180:
                    tilt = 180
                    print("Tilt out of range")
                if tilt < 0:
                    tilt = 0
                    print("Tilt out of range")

                #servo_move(0,180-pan,1,tilt)
                servo_move(0, pan, 1, 180-tilt)
                
            else:
                pass
    
    
    cv2.imshow('frame', frame)  # show the annotated frame
    #cv2.imshow('mask',mask)
    #cv2.imshow('res',res)
    if cv2.waitKey(5) & 0xFF == 27:  # press ESC to quit
        break

cap.release()
cv2.destroyAllWindows()  # standard cleanup whenever the camera is used
Color following (local version)
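
The tracking math above (reused unchanged in the Jupyter version below) is a simple proportional rule: the pixel offset between the object center and the frame center is divided by a gain (75 for pan, 50 for tilt) and subtracted from the current angle, with a 15-pixel dead zone so the servos do not jitter around an already-centered target. A standalone sketch of that update (the function name and defaults are illustrative, not part of the original scripts):

def track_step(pan, tilt, c_x, c_y, img_w=320, img_h=240,
               dead_zone=15, pan_gain=75, tilt_gain=50):
    """One proportional tracking update; returns new clamped angles."""
    error_pan = c_x - img_w/2    # positive: object is right of center
    error_tilt = c_y - img_h/2   # positive: object is below center
    if abs(error_pan) > dead_zone:
        pan -= error_pan / pan_gain
    if abs(error_tilt) > dead_zone:
        tilt -= error_tilt / tilt_gain
    # clamp to the servo's mechanical 0-180 degree range
    pan = max(0, min(180, pan))
    tilt = max(0, min(180, tilt))
    return pan, tilt

Larger gains (the divisors) give smaller steps per frame, which converge more slowly but overshoot less.
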
import cv2   # import libraries
import numpy as np
import time
import os
from servo_control import *

# create the Jupyter display widget
from jetcam.utils import bgr8_to_jpeg
import traitlets
import ipywidgets.widgets as widgets
from IPython.display import display

disp = widgets.Image(format='jpeg', width=320, height=240)
display(disp)

# For HSV color ranges, see:
# https://baike.baidu.com/item/HSV/547122?fr=aladdin
# https://blog.csdn.net/leo_888/article/details/88284251

# run the helper script that kills processes holding the camera
os.system('./killmjpg.sh')
time.sleep(0.1)

# dictionary of HSV ranges for the colors to detect
color_dist = {
             'red':   {'Lower': np.array([156, 60, 60]), 'Upper': np.array([180, 255, 255])},
             'green': {'Lower': np.array([35, 43, 35]), 'Upper': np.array([90, 255, 255])},
             'blue':  {'Lower': np.array([110, 80, 80]), 'Upper': np.array([128, 255, 255])},
#             'gray':  {'Lower': np.array([0,0,30]),'Upper':np.array([255,40,80]) }
             }

flag = -1
flag_bak = -1

img_w = 320
img_h = 240

cap = cv2.VideoCapture(0)  # open the camera (maximum resolution 640x480)
cap.set(3, img_w)  # frame width
cap.set(4, img_h)  # frame height


lastTime = time.time()

time.sleep(1)

try:
    while 1:  # infinite loop
        # pan and tilt are module-level names from servo_control (star import),
        # so no global statement is needed here
        ret, frame = cap.read()  # grab a frame from the camera
        for i in color_dist:
            # 1 - Gaussian blur to reduce noise
            frame = cv2.GaussianBlur(frame, (5, 5), 0)

            # 2 - convert to HSV for color thresholding
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

            # 3 - threshold against the range from the color dictionary
            mask = cv2.inRange(hsv, color_dist[i]['Lower'], color_dist[i]['Upper'])

            # 4 - erode to remove small speckles
            mask = cv2.erode(mask, None, iterations=2)

            # 5 - blur the mask
            mask = cv2.GaussianBlur(mask, (3, 3), 0)

            # keep only the masked region of the frame
            res = cv2.bitwise_and(frame, frame, mask=mask)
            # 6 - contour detection ([-2] keeps this compatible across OpenCV versions)
            cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]

            if len(cnts) > 0:  # use the largest contour to locate the detected object

                cnt = max(cnts, key=cv2.contourArea)
                rect = cv2.minAreaRect(cnt)
                # the 4 corners of the minimum-area bounding rectangle
                box = cv2.boxPoints(rect)

                # center, size and angle of that rectangle
                c_x, c_y = rect[0]
                c_w, c_h = rect[1]
                c_angle = rect[2]
                #print(c_x,c_y,c_h,c_w)
                # accept only rectangles about the size of the target block
                if 25 < c_h < 85 and 25 < c_w < 85: #  and 50 < c_x < 270 and 60 < c_y < 180:
                    #print('---',c_x,c_y,c_h,c_w)
                    # draw the bounding rectangle
                    cv2.drawContours(frame, [np.int0(box)], -1, (0, 255, 255), 2)

                    #print(time.time(), 'x=', int(c_x), 'y=', int(c_y), 'c_h=', int(c_h), 'c_w=', int(c_w), 'angle=', int(c_angle))

                    # proportional control: offset of the object center from the frame center
                    errorPan = c_x - img_w/2
                    errorTilt = c_y - img_h/2
                    #print('errorpan',errorPan)
                    if abs(errorPan) > 15:   # 15-pixel dead zone avoids jitter
                        pan = pan - errorPan/75
                    if abs(errorTilt) > 15:
                        tilt = tilt - errorTilt/50
                    #print('pan',pan)
                    # clamp angles to the servo's 0-180 degree range
                    if pan > 180:
                        pan = 180
                        print("Pan out of range")
                    if pan < 0:
                        pan = 0
                        print("Pan out of range")
                    if tilt > 180:
                        tilt = 180
                        print("Tilt out of range")
                    if tilt < 0:
                        tilt = 0
                        print("Tilt out of range")

                    #servo_move(0,180-pan,1,tilt)
                    servo_move(0, pan, 1, 180-tilt)

                else:
                    pass

        disp.value = bgr8_to_jpeg(frame)  # push the frame to the Jupyter widget
        #cv2.imshow('frame',frame)  # local display, disabled in Jupyter
        #cv2.imshow('mask',mask)
        #cv2.imshow('res',res)
        #if cv2.waitKey(5) & 0xFF == 27:
            #break
except KeyboardInterrupt:
    cap.release()
    #cv2.destroyAllWindows()  # not needed here: no OpenCV windows are opened
Color following (Jupyter version)
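
The only structural change from the local version is the output path: frames are JPEG-encoded and pushed into an ipywidgets Image widget via jetcam's bgr8_to_jpeg instead of being shown with cv2.imshow. If jetcam is not installed, an equivalent helper can be sketched with OpenCV's own encoder (this is an assumption about what the helper does, not jetcam's exact code):

import cv2

def bgr8_to_jpeg(frame):
    # encode a BGR frame as JPEG bytes for an ipywidgets Image widget
    ok, buf = cv2.imencode('.jpg', frame)
    if not ok:
        raise RuntimeError('JPEG encoding failed')
    return bytes(buf)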

 16.Face Following

# OpenCV pan-tilt (gimbal) face tracking
import os
import cv2
import numpy as np
#import Adafruit_PCA9685
import time
from servo_control import *

# run the helper script that kills processes holding the camera
os.system('./killmjpg.sh')
time.sleep(0.1)

servo_move(0, 90, 1, 90)  # center the gimbal
cam = cv2.VideoCapture(0)
dis_w = 320
dis_h = 240
cam.set(3, dis_w)
cam.set(4, dis_h)

# load the Haar cascade models for faces and eyes
face_cascade = cv2.CascadeClassifier('./trains/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('./trains/haarcascade_eye.xml')

def Video_display():
    global pan, tilt
    while True: 
        ret,frame = cam.read()
        gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)  # scaleFactor=1.3, minNeighbors=5

        for(x,y,w,h) in faces:
            cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),2)
            Xcent = x + w/2
            Ycent = y + h/2
            errorPan  = Xcent - dis_w/2 
            errorTilt = Ycent - dis_h/2
            if abs(errorPan)>15:
                pan=pan-errorPan/50
            if abs(errorTilt)>15:
                tilt=tilt-errorTilt/50
            # clamp angles to the servo's 0-180 degree range
            if pan > 180:
                pan = 180
                print("Pan out of range")
            if pan < 0:
                pan = 0
                print("Pan out of range")
            if tilt > 180:
                tilt = 180
                print("Tilt out of range")
            if tilt < 0:
                tilt = 0
                print("Tilt out of range")

            servo_move(0, pan, 1, 180-tilt)

            # search for eyes only inside the detected face region
            roi_gray = gray[y:y+h, x:x+w]
            roi_color = frame[y:y+h, x:x+w]
            eyes = eye_cascade.detectMultiScale(roi_gray)
            for (ex,ey,ew,eh) in eyes:
                cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)          
        cv2.imshow('frame', frame)
        if cv2.waitKey(5) & 0xFF == 27:
            break

if __name__ == '__main__':
    try:
        Video_display()
    except KeyboardInterrupt:
        cam.release()
        cv2.destroyAllWindows()
Face following (local version)
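
The cascade XML files are loaded from a local ./trains directory. If those copies are missing, opencv-python bundles the same files and exposes their folder as cv2.data.haarcascades, so a fallback like this sketch should work:

import cv2

# fall back to the cascades bundled with opencv-python
base = cv2.data.haarcascades
face_cascade = cv2.CascadeClassifier(base + 'haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier(base + 'haarcascade_eye.xml')
assert not face_cascade.empty() and not eye_cascade.empty(), 'cascade load failed'

In detectMultiScale(gray, 1.3, 5), the second argument is the scale step between image pyramid levels and the third is minNeighbors; raising minNeighbors rejects more false positives at the cost of missing some faces.
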
# OpenCV pan-tilt (gimbal) face tracking
import os
import cv2
import numpy as np
#import Adafruit_PCA9685
import time
from servo_control import *

# create the Jupyter display widget
from jetcam.utils import bgr8_to_jpeg
import traitlets
import ipywidgets.widgets as widgets
from IPython.display import display

disp = widgets.Image(format='jpeg', width=320, height=240)
display(disp)

# run the helper script that kills processes holding the camera
os.system('./killmjpg.sh')
time.sleep(0.1)

servo_move(0, 90, 1, 90)  # center the gimbal
cam = cv2.VideoCapture(0)
dis_w = 320
dis_h = 240
cam.set(3, dis_w)
cam.set(4, dis_h)

# load the Haar cascade models for faces and eyes
face_cascade = cv2.CascadeClassifier('./trains/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('./trains/haarcascade_eye.xml')

def Video_display():
    global pan, tilt
    while True: 
        ret,frame = cam.read()
        gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)  # scaleFactor=1.3, minNeighbors=5

        for(x,y,w,h) in faces:
            cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),2)
            Xcent = x + w/2
            Ycent = y + h/2
            errorPan  = Xcent - dis_w/2 
            errorTilt = Ycent - dis_h/2
            if abs(errorPan)>15:
                pan=pan-errorPan/50
            if abs(errorTilt)>15:
                tilt=tilt-errorTilt/50
            # clamp angles to the servo's 0-180 degree range
            if pan > 180:
                pan = 180
                print("Pan out of range")
            if pan < 0:
                pan = 0
                print("Pan out of range")
            if tilt > 180:
                tilt = 180
                print("Tilt out of range")
            if tilt < 0:
                tilt = 0
                print("Tilt out of range")

            servo_move(0, pan, 1, 180-tilt)

            # search for eyes only inside the detected face region
            roi_gray = gray[y:y+h, x:x+w]
            roi_color = frame[y:y+h, x:x+w]
            eyes = eye_cascade.detectMultiScale(roi_gray)
            for (ex,ey,ew,eh) in eyes:
                cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)          
        disp.value = bgr8_to_jpeg(frame)  # push the frame to the Jupyter widget
        #cv2.imshow('frame', frame)
        #if cv2.waitKey(5) & 0xFF == 27:
            #break

            
if __name__ == '__main__':
    try:
        Video_display()
    except KeyboardInterrupt:
        cam.release()
Face following (Jupyter version)
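
As written, the while-loop blocks the notebook cell until a kernel interrupt raises the KeyboardInterrupt handled above. If the cell needs to stay interactive, the loop can run in a daemon thread instead; a minimal sketch reusing the cam, disp and bgr8_to_jpeg objects defined above (the stop flag is illustrative):

import threading

stop_flag = False

def display_loop():
    # capture frames and push them to the widget; the detection/servo code
    # from Video_display above can be inlined here in the same way
    while not stop_flag:
        ret, frame = cam.read()
        if not ret:
            continue
        disp.value = bgr8_to_jpeg(frame)

worker = threading.Thread(target=display_loop, daemon=True)
worker.start()
# later, in another cell: stop_flag = True, then cam.release()
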
import time
import Adafruit_PCA9685

pwm = Adafruit_PCA9685.PCA9685()
pwm.set_pwm_freq(50)  # standard 50 Hz servo refresh rate

def servo_move(basechannel, basevalue, tiltchannel, tiltvalue):
    # angle (0-180) -> pulse width 500-2480 us -> 12-bit counts over a 20 ms period
    basevalue = 4096*((basevalue*11)+500)/20000
    pwm.set_pwm(basechannel, 0, int(basevalue))
    tiltvalue = 4096*((tiltvalue*11)+500)/20000
    pwm.set_pwm(tiltchannel, 0, int(tiltvalue))

# initial gimbal pose: both servos centered
pan = 90
tilt = 90

servo_move(0, pan, 1, tilt)

'''
if __name__ == '__main__':
    servo_move(0,30,1,30)
    time.sleep(3)
    for angle in range(180, 0, -5):
        servo_move(0, angle, 1, angle)
        time.sleep(0.5)

    servo_move(0, pan, 1, tilt)
'''
#print('end')
servo_control.py (pan-tilt servo control code)
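
The conversion in servo_move packs two steps into one expression: an angle of 0-180 degrees maps linearly to a pulse width of 500-2480 microseconds (500 + 11*angle), and that pulse is then expressed in the PCA9685's 4096 counts per 20 ms period (50 Hz). For example, 90 degrees gives 500 + 11*90 = 1490 us, i.e. 4096*1490/20000, about 305 counts. A sketch with the steps named (the helper name is illustrative):

def angle_to_ticks(angle, min_us=500, us_per_degree=11,
                   period_us=20000, resolution=4096):
    """Convert a servo angle (0-180) to PCA9685 counts at 50 Hz."""
    pulse_us = min_us + us_per_degree * angle   # 500-2480 us pulse
    return int(resolution * pulse_us / period_us)

print(angle_to_ticks(0), angle_to_ticks(90), angle_to_ticks(180))
# -> 102, 305, 507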

 
