Darknet_ROS

双系统ubuntu20.04复现目标检测与跟踪过程,分享给大家:

1.环境配置

首先配置darknet_ros,它包含了Darknet 框架,YOLO算法以及ROS接口。

点击查看代码
cd ~/catkin_ws/src
git clone https://github.com/leggedrobotics/darknet_ros.git --recursive
catkin build darknet_ros --cmake-args -DCMAKE_BUILD_TYPE=Release

启动PX4室外场景仿真以及YOLO算法:

点击查看代码
# 启动yolo 
roslaunch darknet_ros darknet_ros.launch
# 室外仿真
cd ~/PX4_Firmware
roslaunch px4 outdoor1.launch

但是YOLO此时并没有收到图像,并一直是 Waiting for image。执行 rostopic list 命令后发现并没有 typhoon_h480_0/cgo3_camera/image_raw 话题(来自typhoon_h480.sdf里的cgo3_camera_controller),只有darknet_ros默认订阅的/camera/rgb/image_raw,因此并不会弹出目标识别的窗口。
检查后发现原因是gazebo_ros相机插件缺失。执行以下命令即可重新找回 typhoon_h480_0/cgo3_camera/image_raw 话题,并修改~/catkin_ws/src/darknet_ros/darknet_ros/launch/darknet_ros.launch中订阅的图像话题:

点击查看代码
sudo apt install ros-noetic-gazebo-ros-pkgs ros-noetic-gazebo-ros-control
# ~/catkin_ws/src/darknet_ros/darknet_ros/launch/darknet_ros.launch
# 将<arg name="image" default="/camera/rgb/image_raw" />改为
<arg name="image" default="/typhoon_h480_0/cgo3_camera/image_raw" />
然后重启YOLO,目标识别的窗口出现了:

image

在进行通讯时又碰到了无法定位的问题,解决思路和上一节相同。在~/XTDrone/sensing/slam/laser_slam/script文件夹下面新建laser_transfer_copy.py(注意里面包含了以前小节的内容,可以有取舍,另外大家碰到的问题可能不一致,最好从话题入手):

点击查看代码
import math
import sys

import rospy
from gazebo_msgs.msg import ModelStates
from geometry_msgs.msg import PoseStamped
from nav_msgs.msg import Odometry
from sensor_msgs.msg import LaserScan
from tf2_ros import TransformListener, Buffer

vehicle_type = sys.argv[1]
vehicle_id = sys.argv[2]
laser_slam_type = sys.argv[3]
rospy.init_node(vehicle_type+vehicle_id+'_'+laser_slam_type+'_laser_transfer')
pose_pub = rospy.Publisher(vehicle_type+'_'+ vehicle_id+"/mavros/vision_pose/pose", PoseStamped, queue_size=1)
tfBuffer = Buffer()
tflistener = TransformListener(tfBuffer)
local_pose = PoseStamped()
local_pose.header.frame_id = 'map'
hector = PoseStamped()
height = 0

# 新增:用于 faster_lio 的回调
def odom_callback(odom_msg):
    pose_msg = PoseStamped()
    pose_msg.header = odom_msg.header
    pose_msg.pose = odom_msg.pose.pose
    pose_pub.publish(pose_msg)

def hector_callback(data):
    global hector
    hector = data

def height_distance_callback(msg):
    global height
    height = msg.ranges[0]
    if(height==float("inf")):
        height = 0
    
def hector_slam():
    global local_pose, height
    pose2d_sub = rospy.Subscriber(vehicle_type+'_'+ vehicle_id+"/pose", PoseStamped, hector_callback,queue_size=1)
    rate = rospy.Rate(100)
    while True:
        local_pose = hector
        local_pose.pose.position.z = height
        pose_pub.publish(local_pose)
        rate.sleep()
        
def cartographer():
    global local_pose, height
    rate = rospy.Rate(30)
    while not rospy.is_shutdown():
        try:
            tfstamped = tfBuffer.lookup_transform('map', 'base_link', rospy.Time(0))
        except:
            continue
        local_pose.header.stamp = rospy.Time().now()
        local_pose.pose.position = tfstamped.transform.translation
        local_pose.pose.position.z = height
        local_pose.pose.orientation = tfstamped.transform.rotation
        pose_pub.publish(local_pose)
        rate.sleep()
        
def aloam():
    global local_pose
    rate = rospy.Rate(30)
    while not rospy.is_shutdown():
        try:
            tfstamped = tfBuffer.lookup_transform('camera_init', 'aft_mapped', rospy.Time(0))
        except:
            continue
        local_pose.header.stamp = rospy.Time().now()
        local_pose.pose.position = tfstamped.transform.translation
        local_pose.pose.orientation = tfstamped.transform.rotation
        pose_pub.publish(local_pose)
        rate.sleep()

# 新增:faster_lio 转换函数(直接订阅 /Odometry)
def faster_lio():
    rospy.loginfo("Starting faster_lio converter, subscribing to /Odometry")
    rospy.Subscriber("/Odometry", Odometry, odom_callback)
    rospy.spin()  # 保持节点运行
    
# 新增:用于 Gazebo 真值(Ground Truth)的回调
def gazebo_gt_callback(model_states_msg):
    # 根据无人机名称提取位姿
    if vehicle_type+'_'+vehicle_id not in model_states_msg.name:
        return
    idx = model_states_msg.name.index(vehicle_type+'_'+vehicle_id)
    pose_msg = PoseStamped()
    # 手动设置时间戳和坐标系
    pose_msg.header.stamp = rospy.Time.now()
    pose_msg.header.frame_id = "map"   # 可以使用 "world" 或 "odom"
    pose_msg.pose = model_states_msg.pose[idx]
    pose_pub.publish(pose_msg)

def publish_gazebo_gt():
    rospy.loginfo("Publishing Gazebo ground truth for %s_%s to vision_pose/pose", vehicle_type, vehicle_id)
    rospy.Subscriber("/gazebo/model_states", ModelStates, gazebo_gt_callback)
    rospy.spin()

if __name__ == '__main__':
    if laser_slam_type == 'hector':
        height_distance_sub = rospy.Subscriber(vehicle_type+'_'+ vehicle_id+"/distance", LaserScan, height_distance_callback,queue_size=1)
        hector_slam()
    elif laser_slam_type == 'cartographer':
        height_distance_sub = rospy.Subscriber(vehicle_type+'_'+ vehicle_id+"/distance", LaserScan, height_distance_callback,queue_size=1)
        cartographer()
    elif laser_slam_type == 'aloam':
        aloam()
    elif laser_slam_type == 'faster_lio':
        faster_lio()
    elif laser_slam_type == 'ground_truth':   # 新增分支
        publish_gazebo_gt()
    else:
        print('input error')

2.成功实现移动目标检测

重新执行下面代码:

点击查看代码
# 终端1
cd ~/PX4_Firmware 
roslaunch px4 outdoor1.launch
# 终端2
roslaunch darknet_ros darknet_ros.launch
# 终端3
cd ~/XTDrone/sensing/slam/laser_slam/script
python3 laser_transfer_copy.py typhoon_h480 0 ground_truth
# 终端4
cd ~/XTDrone/communication
python3 multirotor_communication.py typhoon_h480 0
# 终端5
cd ~/XTDrone/control/keyboard
python3 multirotor_keyboard_control.py typhoon_h480 1 vel

由于我们使用的是轻量版的YOLO模型,其检测精度一般(各目标的检测置信度可在终端2的输出中查看)。

image

posted @ 2026-04-15 19:47  九钟  阅读(12)  评论(0)    收藏  举报