Running the YOLO infer project in Docker

docker pull nvcr.io/nvidia/tensorrt:22.12-py3
docker run -it --privileged --network host --gpus all \
-v /tmp/.X11-unix:/tmp/.X11-unix \
-e DISPLAY \
--device=/dev/bus/usb \
-v /etc/localtime:/etc/localtime:ro \
-v ~/workspace:/workspace \
--name yolo_infer  \
--workdir=/workspace \
nvcr.io/nvidia/tensorrt:22.12-py3 /bin/bash

This docker run command starts a container with GPU access, USB devices, host networking, and GUI support. The options break down as follows:

  1. -it: interactive mode; -i keeps STDIN open and -t allocates a pseudo-terminal, so you can run an interactive shell inside the container.

  2. --privileged: gives the container elevated privileges, allowing access to all devices and resources on the host. Useful for applications that need to talk directly to hardware or system resources.

  3. --network host: uses host networking, so the container shares the host's network stack and can use its network interfaces directly.

  4. --gpus all: exposes all of the host's GPUs to the container; required for GPU-accelerated workloads such as deep learning inference.

  5. -v /tmp/.X11-unix:/tmp/.X11-unix: mounts the host's X11 Unix socket into the container so GUI applications inside it can display through the host's X server. Usually paired with the DISPLAY environment variable.

  6. -e DISPLAY: passes the DISPLAY environment variable through, directing the container's GUI output to the host's screen.

  7. --device=/dev/bus/usb: maps the host's USB bus into the container, allowing direct communication with USB devices such as cameras and sensors.

  8. -v /etc/localtime:/etc/localtime:ro: mounts the host's local time and timezone configuration so the container's clock matches the host. Mounted read-only so the container cannot modify the host's time settings.

  9. -v ~/workspace:/workspace: mounts the host's ~/workspace directory at /workspace inside the container, so the container can read and write the host workspace.

  10. --name yolo_infer: names the container yolo_infer for easier management later.

  11. --workdir=/workspace: sets the initial working directory inside the container to /workspace.

  12. nvcr.io/nvidia/tensorrt:22.12-py3: the Docker image to run, NVIDIA's TensorRT image at tag 22.12-py3.

  13. /bin/bash: the command executed on start, giving an interactive Bash shell.

Altogether, this launches a privileged container with GPU and USB access that can also run GUI applications, which suits a workload like YOLO inference that needs hardware acceleration and external devices.
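A quick sanity check before going further (a minimal sketch; it assumes the NVIDIA Container Toolkit is installed on the host, otherwise --gpus all fails):

# On the host: allow local containers to use the X server for GUI windows
xhost +local:root

# Inside the container: confirm the GPU and the TensorRT tooling are visible
nvidia-smi
trtexec --help | head -n 5

# After exiting, the same container can be re-entered later with
docker start -ai yolo_infer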

git clone https://github.com/shouxieai/infer.git
cd infer/
Modify the Makefile:
# Compilers
cc        := g++
nvcc      := /usr/local/cuda/bin/nvcc
 
# Source and object files
cpp_srcs  := $(shell find src -name "*.cpp")
cpp_objs  := $(cpp_srcs:.cpp=.cpp.o)
cpp_objs  := $(cpp_objs:src/%=objs/%)
cpp_mk := $(cpp_objs:.cpp.o=.cpp.mk)
 
cu_srcs	  := $(shell find src -name "*.cu")
cu_objs   := $(cu_srcs:.cu=.cu.o)
cu_objs	  := $(cu_objs:src/%=objs/%)
cu_mk	  := $(cu_objs:.cu.o=.cu.mk)
 
# Header search paths
include_paths := src \
	/usr/include/opencv4 \
	/usr/local/cuda/include \
	/usr/include/x86_64-linux-gnu/TensorRT


# Library search paths
library_paths := /usr/local/cuda/lib64 \
    /usr/local/cuda/lib64/stubs \
    /usr/lib/x86_64-linux-gnu \
    /usr/lib/x86_64-linux-gnu/TensorRT
 
 
link_librarys := opencv_core opencv_imgproc opencv_videoio opencv_imgcodecs \
				 nvinfer nvinfer_plugin nvonnxparser \
				 cuda cublas cudart cudnn \
				 stdc++ dl
 
# Export path and runtime rpath settings
empty		  :=
export_path   := $(subst $(empty) $(empty),:,$(library_paths))
 
run_paths     := $(foreach item,$(library_paths),-Wl,-rpath=$(item))
 
# Compile flags
include_paths := $(foreach item,$(include_paths),-I$(item))
library_paths := $(foreach item,$(library_paths),-L$(item))
link_librarys := $(foreach item,$(link_librarys),-l$(item))
 
cpp_compile_flags := -std=c++11 -fPIC -w -g -pthread -fopenmp -O0
cu_compile_flags  := -std=c++11 -g -w -O0 -Xcompiler "$(cpp_compile_flags)"
link_flags        := -pthread -fopenmp -Wl,-rpath='$$ORIGIN'
 
cpp_compile_flags += $(include_paths)
cu_compile_flags  += $(include_paths)
link_flags        += $(library_paths) $(link_librarys) $(run_paths)
 
# Pull in auto-generated header dependency files (skipped when cleaning)
ifneq ($(MAKECMDGOALS), clean)
-include $(cpp_mk) $(cu_mk)
endif
 
# Build the C++/CUDA program
pro	   := workspace/pro
expath := library_path.txt
 
library_path.txt : 
	@echo LD_LIBRARY_PATH=$(export_path):"$$"LD_LIBRARY_PATH > $@
 
workspace/pro : $(cpp_objs) $(cu_objs)
	@echo Link $@
	@mkdir -p $(dir $@)
	@$(cc) $^ -o $@ $(link_flags)
 
objs/%.cpp.o : src/%.cpp
	@echo Compile CXX $<
	@mkdir -p $(dir $@)
	@$(cc) -c $< -o $@ $(cpp_compile_flags)
 
objs/%.cu.o : src/%.cu
	@echo Compile CUDA $<
	@mkdir -p $(dir $@)
	@$(nvcc) -c $< -o $@ $(cu_compile_flags)
 
objs/%.cpp.mk : src/%.cpp
	@echo Compile depends CXX $<
	@mkdir -p $(dir $@)
	@$(cc) -M $< -MF $@ -MT $(@:.cpp.mk=.cpp.o) $(cpp_compile_flags)
 
objs/%.cu.mk : src/%.cu
	@echo Compile depends CUDA $<
	@mkdir -p $(dir $@)
	@$(nvcc) -M $< -MF $@ -MT $(@:.cu.mk=.cu.o) $(cu_compile_flags)
 
run : workspace/pro
	@cd workspace && ./pro
 
# Misc targets
debug :
	@echo $(include_paths)
	@echo $(library_paths)
	@echo $(link_librarys)
 
clean : 
	@rm -rf library_path.txt
	@rm -rf objs workspace/pro
	@rm -rf workspace/Result.jpg
 
.PHONY : debug clean run
 
export LD_LIBRARY_PATH:=$(export_path):$(LD_LIBRARY_PATH)
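With the Makefile in place, make debug (the target defined above) prints the resolved include paths, library paths, and link libraries, which helps catch path problems before a full build:

make debug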
Next, download the pretrained ONNX model, build a TensorRT engine with trtexec, and run:

wget -O workspace/yolov5s.onnx https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.onnx --no-check-certificate
trtexec --onnx=workspace/yolov5s.onnx --saveEngine=workspace/yolov5s.engine
make run

The first build fails with: fatal error: opencv2/opencv.hpp: No such file or directory. Install the OpenCV development package, check that the headers are in place, and rebuild:

apt update
apt install libopencv-dev
ls /usr/include/opencv4
make run
Output:
Compile depends CXX src/main.cpp
Compile CXX src/main.cpp
Compile CUDA src/yolo.cu
Compile CUDA src/infer.cu
Link workspace/pro
[infer.cu:393]: Infer 0x7f2b88000d70 [StaticShape]
[infer.cu:405]: Inputs: 1
[infer.cu:409]: 	0.images : shape {1x3x640x640}
[infer.cu:412]: Outputs: 1
[infer.cu:416]: 	0.output0 : shape {1x25200x85}
[yolo.cu:574]: When using static shape model, number of images[16] must be less than or equal to the maximum batch[1].
[BATCH16]: 0.05018 ms
[yolo.cu:574]: When using static shape model, number of images[16] must be less than or equal to the maximum batch[1].
[BATCH16]: 0.12086 ms
[yolo.cu:574]: When using static shape model, number of images[16] must be less than or equal to the maximum batch[1].
[BATCH16]: 0.10445 ms
[yolo.cu:574]: When using static shape model, number of images[16] must be less than or equal to the maximum batch[1].
[BATCH16]: 0.10317 ms
[yolo.cu:574]: When using static shape model, number of images[16] must be less than or equal to the maximum batch[1].
[BATCH16]: 0.02646 ms
[BATCH1]: 190.41962 ms
[BATCH1]: 4.47782 ms
[BATCH1]: 4.47869 ms
[BATCH1]: 4.36294 ms
[BATCH1]: 4.35104 ms
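The [yolo.cu:574] messages are expected: the official yolov5s.onnx is exported with a static batch size of 1, so the engine rejects the 16-image batches and the BATCH16 timings above are not meaningful (the first BATCH1 run is slow because it includes warm-up). Benchmarking real batches needs a model with a dynamic batch axis. A sketch of that workflow, assuming the ONNX is re-exported from the ultralytics/yolov5 repo:

# In the yolov5 repo: export ONNX with a dynamic batch dimension
python export.py --weights yolov5s.pt --include onnx --dynamic

# Build an engine that accepts 1 to 16 images per batch
trtexec --onnx=yolov5s.onnx --saveEngine=workspace/yolov5s.engine \
        --minShapes=images:1x3x640x640 \
        --optShapes=images:16x3x640x640 \
        --maxShapes=images:16x3x640x640

Independently of batching, adding --fp16 to the trtexec command builds a half-precision engine, which is usually noticeably faster at a small accuracy cost.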
As an alternative to the Makefile, the project can also be built with CMake. CMakeLists.txt:
cmake_minimum_required(VERSION 3.10)
project(MyProject)

# C++ standard
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED True)

# Locate CUDA and OpenCV
find_package(CUDA REQUIRED)
find_package(OpenCV REQUIRED)
# find_package(TensorRT REQUIRED)

# Source files
file(GLOB_RECURSE CPP_SRCS src/*.cpp)
file(GLOB_RECURSE CU_SRCS src/*.cu)

# Include paths
include_directories(
    src
    ${OpenCV_INCLUDE_DIRS}
    ${CUDA_INCLUDE_DIRS}
)

# Library search paths
link_directories(
    /usr/local/cuda/lib64
    /usr/lib/x86_64-linux-gnu
)

# Libraries to link
set(LINK_LIBS
    ${OpenCV_LIBS}
    nvinfer nvinfer_plugin nvonnxparser
    cuda cublas cudart cudnn
    stdc++ dl
)

# Compile flags
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -std=c++11 -O0 -g")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -fPIC -pthread -fopenmp -O0")

# Build target (cuda_add_executable compiles the .cu files with nvcc)
cuda_add_executable(${PROJECT_NAME} ${CPP_SRCS} ${CU_SRCS})

# Link libraries
target_link_libraries(${PROJECT_NAME} ${LINK_LIBS})

# RPATH settings
set_target_properties(${PROJECT_NAME} PROPERTIES INSTALL_RPATH_USE_LINK_PATH TRUE)

# Convenience 'run' target
add_custom_target(run
    COMMAND ${PROJECT_NAME}
    DEPENDS ${PROJECT_NAME}
)
chmod +x ./workspace/build.sh
./workspace/build.sh

cd workspace
cmake ..
make run

Running cmake from inside workspace puts the generated build files and the MyProject binary next to yolov5s.engine, so the run target (which executes in the build directory) can find the engine at startup.
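To rebuild from scratch, remove the generated CMake files from workspace (a sketch; the exact file set depends on your CMake generator):

cd workspace
rm -rf CMakeCache.txt CMakeFiles cmake_install.cmake Makefile MyProject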