14 Lectures on Visual SLAM — Solving Camera Motion from the Epipolar Constraint (2D-2D)

Main Content

1. The Epipolar Constraint

  Geometrically, the two camera centers O1, O2 and the space point P are coplanar; the epipolar constraint encodes both the translation and the rotation.

  Fundamental matrix: F = K^{-T} t^ R K^{-1}

  Essential matrix: E = t^ R

  Epipolar constraint: x_2^T E x_1 = 0, equivalently p_2^T F p_1 = 0

    where x_1, x_2 are the coordinates of the matched feature point on the normalized planes of the two camera frames (p_1, p_2 are the corresponding pixel coordinates), and t^ denotes the skew-symmetric matrix of t.
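
  For reference, a short standard derivation of the constraint (s_1, s_2 are the depths of P in the two cameras; this is the textbook derivation, not specific to this post):

    s_2 x_2 = R (s_1 x_1) + t
    \Rightarrow\; s_2 \, t^{\wedge} x_2 = s_1 \, t^{\wedge} R x_1   % left-multiply by t^{\wedge}, which annihilates t
    \Rightarrow\; x_2^T t^{\wedge} R x_1 = 0                        % left-multiply by x_2^T; note x_2^T t^{\wedge} x_2 = 0
    \text{i.e.}\; x_2^T E x_1 = 0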

2. Properties of the Essential Matrix (3×3)

  1) E is equivalent under different scales: multiplying it by any nonzero constant does not change the constraint.

  2) Intrinsic property: its singular values are necessarily of the form [σ, σ, 0]^T.

  3) Translation plus rotation give 6 degrees of freedom, but because of the scale equivalence E actually has 5 degrees of freedom.

3. Solving for the Essential Matrix — the Eight-Point Algorithm

  1) The intrinsic property of E is nonlinear and causes trouble when solving linear equations, so 5 point pairs are not used for the solve.

  2) There are 9 unknowns, and only the scale equivalence is exploited, so 8 pairs of 2D-2D points are used and E is solved by a direct linear method.

  3) After solving for the matrix, apply singular value decomposition (SVD) to obtain the motion.

  4) The decomposition yields 4 possible cases; take one point, check its depth in both cameras, and keep the solution for which both depths are positive.

  5) The solution of the linear equations may not satisfy the intrinsic property of E, so the solved matrix is projected onto the manifold where E lives, as the sketch below illustrates.
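
  A minimal sketch of this projection step (not from the original post; matrix names are illustrative): given a linearly estimated E, cv::SVD can force the singular values into the (σ, σ, 0) form.

    Mat w, u, vt;
    SVD::compute ( E, w, u, vt );                  // E = u * diag(w) * vt
    double sigma = ( w.at<double> ( 0 ) + w.at<double> ( 1 ) ) / 2.0;
    Mat S = ( Mat_<double> ( 3,3 ) << sigma, 0, 0,
                                      0, sigma, 0,
                                      0,     0, 0 );
    Mat E_proj = u * S * vt;                       // closest essential matrix in the Frobenius norm
    // because of the scale equivalence, forcing sigma to 1 is equally valid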

4. The Homography Matrix H

  

  Its definition involves the rotation, the translation, and the parameters of the plane. For feature points on a plane n^T P + d = 0:

    H = K ( R - t n^T / d ) K^{-1},   so that   p_2 = H p_1

  1) The homography has 8 degrees of freedom and can be computed from 4 pairs of matched points (note: no three of these feature points may be collinear).

  2) Solution method: the direct linear transform; H is then decomposed to obtain the motion, with either numerical or analytical methods (see the sketch after this list).

    This gives 4 candidate solutions (the reasonable one is selected using the computed point depths and the plane normal information).

  3) Significance

    3.1) H describes the mapping between two planes; if all feature points lie on the same plane, the motion can be estimated through H.

        Typical applications are the downward-looking camera carried by a drone, or the ceiling-facing camera of a robot vacuum.

    3.2) When the feature points are coplanar or the camera undergoes pure rotation, the fundamental matrix degenerates (its effective degrees of freedom drop), whereas H can still recover the motion of a pure rotation.
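
  As a hedged illustration of the decomposition in point 2): OpenCV 3.0+ provides cv::decomposeHomographyMat, which returns the up-to-four candidate solutions. This is an aside about available tooling, not the route taken by the code below (which recovers motion from E); K and homography_matrix are as computed in pose_estimation_2d2d.

    vector<Mat> Rs, ts, normals;
    int n_solutions = decomposeHomographyMat ( homography_matrix, K, Rs, ts, normals );
    for ( int i = 0; i < n_solutions; i++ )
    {
        // a reasonable solution is then picked using point depths and the plane normal
        cout << "candidate " << i << ": R=" << endl << Rs[i] << endl
             << "t=" << ts[i].t() << ", n=" << normals[i].t() << endl;
    }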

5. In practice, to avoid the effects of such degeneracies, both F and H are estimated at the same time, and the one with the smaller reprojection error is chosen as the final motion estimate; a sketch of this model selection follows.
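
  A minimal sketch of that model selection (function and variable names are assumptions, not from the original post): score H by the mean transfer error of the matched points and F by the mean point-to-epipolar-line distance, then keep the model with the smaller score.

    // mean reprojection (transfer) error of H over the matched points
    double scoreH ( const vector<Point2f>& p1, const vector<Point2f>& p2, const Mat& H )
    {
        vector<Point2f> proj;
        perspectiveTransform ( p1, proj, H );      // map image-1 points through H
        double err = 0;
        for ( size_t i = 0; i < p1.size(); i++ )
        {
            double dx = proj[i].x - p2[i].x, dy = proj[i].y - p2[i].y;
            err += sqrt ( dx*dx + dy*dy );
        }
        return err / p1.size();
    }

    // mean distance of image-2 points to the epipolar lines induced by F
    double scoreF ( const vector<Point2f>& p1, const vector<Point2f>& p2, const Mat& F )
    {
        vector<Vec3f> lines;
        computeCorrespondEpilines ( p1, 1, F, lines );   // epipolar lines in image 2
        double err = 0;
        for ( size_t i = 0; i < p1.size(); i++ )
            err += fabs ( lines[i][0]*p2[i].x + lines[i][1]*p2[i].y + lines[i][2] )
                   / sqrt ( lines[i][0]*lines[i][0] + lines[i][1]*lines[i][1] );
        return err / p1.size();
    }

    // choose: if scoreH(points1, points2, H) < scoreF(points1, points2, F), use H; otherwise use F/E.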

6. Characteristics of 2D-2D Epipolar Geometry

  1) Scale ambiguity

    Normalizing t amounts to fixing the scale; this serves as the initialization of monocular SLAM, and the subsequent trajectory and map are computed in this unit.

      Initialization is unavoidable. Once it is complete, the spatial positions of the feature points can be computed (see the triangulation sketch below), and camera motion can then be estimated with 3D-2D methods.
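
  A minimal sketch of that triangulation step, assuming pts_1 and pts_2 (hypothetical names) hold the matched points converted to normalized camera coordinates with pixel2cam from the code below, and R, t come from recoverPose:

    Mat T1 = ( Mat_<float> ( 3,4 ) << 1, 0, 0, 0,
                                      0, 1, 0, 0,
                                      0, 0, 1, 0 );    // first camera: [I | 0]
    Mat T2 = ( Mat_<float> ( 3,4 ) <<
        R.at<double> ( 0,0 ), R.at<double> ( 0,1 ), R.at<double> ( 0,2 ), t.at<double> ( 0,0 ),
        R.at<double> ( 1,0 ), R.at<double> ( 1,1 ), R.at<double> ( 1,2 ), t.at<double> ( 1,0 ),
        R.at<double> ( 2,0 ), R.at<double> ( 2,1 ), R.at<double> ( 2,2 ), t.at<double> ( 2,0 ) );  // second camera: [R | t]

    Mat pts_4d;
    triangulatePoints ( T1, T2, pts_1, pts_2, pts_4d );  // homogeneous 4xN result
    for ( int i = 0; i < pts_4d.cols; i++ )
    {
        Mat x = pts_4d.col ( i );
        x /= x.at<float> ( 3,0 );                        // dehomogenize; depths are in units of ||t|| = 1
        Point3d p ( x.at<float> ( 0,0 ), x.at<float> ( 1,0 ), x.at<float> ( 2,0 ) );
    }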

  2) The pure-rotation problem at initialization

           pure rotation → t = 0 → E = 0 → R cannot be solved,

    or:     R can be solved from H, but t = 0 makes it impossible to recover the feature points' positions by triangulation.

      Hence monocular SLAM initialization cannot consist of pure rotation only; a certain amount of translation is required.

  3) More than 8 point pairs

    Solve for the matrix E by least squares.

    When mismatches may be present, Random Sample Consensus (RANSAC) is preferred, since it can cope with data containing wrong matches; a minimal sketch follows.
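
  A minimal sketch of the RANSAC variant (compare findFundamentalMat with CV_FM_8POINT in the code below); the inlier mask marks which matches survived:

    vector<uchar> inlier_mask;
    Mat F_ransac = findFundamentalMat ( points1, points2, CV_FM_RANSAC,
                                        1.0,           // max distance (pixels) to the epipolar line
                                        0.99,          // confidence
                                        inlier_mask );
    int inliers = 0;
    for ( uchar ok : inlier_mask ) if ( ok ) inliers++;
    cout << "RANSAC kept " << inliers << " of " << points1.size() << " matches" << endl;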

7. Points to note in the code:

  1) Operations on the Mat type in C++: assignment and initialization, multiplication, transposition, etc.

  2) Computing the three matrices (F, E, H)

  3) Obtaining the pixel coordinates in the corresponding images from the match information

  4) Substituting the recovered R and t back into the epipolar constraint for verification, and checking the resulting accuracy. (A short illustration of the Mat idioms from points 1 and 3 follows.)
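
  A short illustration of those Mat idioms (the values here are placeholders):

    Mat A = ( Mat_<double> ( 3,3 ) << 1, 2, 3,
                                      4, 5, 6,
                                      7, 8, 9 );   // comma-style initialization
    Mat At = A.t();                                // transpose
    Mat B  = A * At;                               // matrix multiplication
    double a01 = A.at<double> ( 0,1 );             // element access (row, col)

    // pixel coordinates of a match m in the two images:
    // Point2f p1 = keypoints_1[ m.queryIdx ].pt;  // queryIdx -> first image
    // Point2f p2 = keypoints_2[ m.trainIdx ].pt;  // trainIdx -> second image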

 

Reference links

Random sample consensus (RANSAC)

Notes on using cv::findFundamentalMat

The findHomography() function explained

What Mat_<double>(3,3) means

A summary of OpenCV matrix operations


Code

#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
// #include "extra.h" // use this if in OpenCV2 
using namespace std;
using namespace cv;

/****************************************************
 * This program demonstrates how to estimate camera
 * motion from 2D-2D feature matches
 * **************************************************/

void find_feature_matches (
    const Mat& img_1, const Mat& img_2,
    std::vector<KeyPoint>& keypoints_1,
    std::vector<KeyPoint>& keypoints_2,
    std::vector< DMatch >& matches );

void pose_estimation_2d2d (
    std::vector<KeyPoint> keypoints_1,
    std::vector<KeyPoint> keypoints_2,
    std::vector< DMatch > matches,
    Mat& R, Mat& t );

// convert pixel coordinates to normalized camera coordinates
Point2d pixel2cam ( const Point2d& p, const Mat& K );

int main ( int argc, char** argv )
{
    if ( argc != 3 )
    {
        cout<<"usage: pose_estimation_2d2d img1 img2"<<endl;
        return 1;
    }
    //-- load the two images
    Mat img_1 = imread ( argv[1], CV_LOAD_IMAGE_COLOR );
    Mat img_2 = imread ( argv[2], CV_LOAD_IMAGE_COLOR );

    vector<KeyPoint> keypoints_1, keypoints_2;
    vector<DMatch> matches;
    find_feature_matches ( img_1, img_2, keypoints_1, keypoints_2, matches );
    cout<<"一共找到了"<<matches.size() <<"组匹配点"<<endl;

    //-- estimate the motion between the two images
    Mat R,t;
    pose_estimation_2d2d ( keypoints_1, keypoints_2, matches, R, t );

    //-- verify E = t^R * scale
    Mat t_x = ( Mat_<double> ( 3,3 ) <<
                0,                      -t.at<double> ( 2,0 ),     t.at<double> ( 1,0 ),
                t.at<double> ( 2,0 ),      0,                      -t.at<double> ( 0,0 ),
                -t.at<double> ( 1,0 ),     t.at<double> ( 0,0 ),      0 );

    cout<<"t^R="<<endl<<t_x*R<<endl;

    //-- verify the epipolar constraint
    Mat K = ( Mat_<double> ( 3,3 ) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1 );
    for ( DMatch m: matches )
    {
        Point2d pt1 = pixel2cam ( keypoints_1[ m.queryIdx ].pt, K );
        Mat y1 = ( Mat_<double> ( 3,1 ) << pt1.x, pt1.y, 1 );
        Point2d pt2 = pixel2cam ( keypoints_2[ m.trainIdx ].pt, K );
        Mat y2 = ( Mat_<double> ( 3,1 ) << pt2.x, pt2.y, 1 );
        Mat d = y2.t() * t_x * R * y1;
        cout << "epipolar constraint = " << d << endl;
    }
    return 0;
}

void find_feature_matches ( const Mat& img_1, const Mat& img_2,
                            std::vector<KeyPoint>& keypoints_1,
                            std::vector<KeyPoint>& keypoints_2,
                            std::vector< DMatch >& matches )
{
    //-- initialization
    Mat descriptors_1, descriptors_2;
    // used in OpenCV3 
    Ptr<FeatureDetector> detector = ORB::create();
    Ptr<DescriptorExtractor> descriptor = ORB::create();
    // use this if you are in OpenCV2 
    // Ptr<FeatureDetector> detector = FeatureDetector::create ( "ORB" );
    // Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create ( "ORB" );
    Ptr<DescriptorMatcher> matcher  = DescriptorMatcher::create ( "BruteForce-Hamming" );
    //-- step 1: detect Oriented FAST corner locations
    detector->detect ( img_1,keypoints_1 );
    detector->detect ( img_2,keypoints_2 );

    //-- step 2: compute BRIEF descriptors at the detected corners
    descriptor->compute ( img_1, keypoints_1, descriptors_1 );
    descriptor->compute ( img_2, keypoints_2, descriptors_2 );

    //-- step 3: match the BRIEF descriptors of the two images, using the Hamming distance
    vector<DMatch> match;
    //BFMatcher matcher ( NORM_HAMMING );
    matcher->match ( descriptors_1, descriptors_2, match );

    //-- step 4: filter the matched point pairs
    double min_dist=10000, max_dist=0;

    // find the minimum and maximum distance over all matches, i.e. the distance between the most similar and the least similar pairs
    for ( int i = 0; i < descriptors_1.rows; i++ )
    {
        double dist = match[i].distance;
        if ( dist < min_dist ) min_dist = dist;
        if ( dist > max_dist ) max_dist = dist;
    }

    printf ( "-- Max dist : %f \n", max_dist );
    printf ( "-- Min dist : %f \n", min_dist );

    // a match is considered wrong when the descriptor distance exceeds twice the minimum; since the minimum can be very small, an empirical floor of 30 is used.
    for ( int i = 0; i < descriptors_1.rows; i++ )
    {
        if ( match[i].distance <= max ( 2*min_dist, 30.0 ) )
        {
            matches.push_back ( match[i] );
        }
    }
}


Point2d pixel2cam ( const Point2d& p, const Mat& K )
{
    return Point2d
           (
               ( p.x - K.at<double> ( 0,2 ) ) / K.at<double> ( 0,0 ),
               ( p.y - K.at<double> ( 1,2 ) ) / K.at<double> ( 1,1 )
           );
}


void pose_estimation_2d2d ( std::vector<KeyPoint> keypoints_1,
                            std::vector<KeyPoint> keypoints_2,
                            std::vector< DMatch > matches,
                            Mat& R, Mat& t )
{
    // camera intrinsics, TUM Freiburg2
    Mat K = ( Mat_<double> ( 3,3 ) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1 );

    //-- convert the matched points into vector<Point2f>
    vector<Point2f> points1;
    vector<Point2f> points2;

    for ( int i = 0; i < ( int ) matches.size(); i++ )
    {
        points1.push_back ( keypoints_1[matches[i].queryIdx].pt );
        points2.push_back ( keypoints_2[matches[i].trainIdx].pt );
    }

    //-- compute the fundamental matrix
    Mat fundamental_matrix;
    fundamental_matrix = findFundamentalMat ( points1, points2, CV_FM_8POINT );
    cout<<"fundamental_matrix is "<<endl<< fundamental_matrix<<endl;

    //-- compute the essential matrix
    Point2d principal_point ( 325.1, 249.7 );    // principal point (optical center), TUM dataset calibration
    double focal_length = 521;            // focal length, TUM dataset calibration
    Mat essential_matrix;
    essential_matrix = findEssentialMat ( points1, points2, focal_length, principal_point );
    cout<<"essential_matrix is "<<endl<< essential_matrix<<endl;

    //-- compute the homography matrix
    Mat homography_matrix;
    homography_matrix = findHomography ( points1, points2, CV_RANSAC, 3 );
    cout<<"homography_matrix is "<<endl<<homography_matrix<<endl;

    //-- recover the rotation and translation from the essential matrix.
    recoverPose ( essential_matrix, points1, points2, R, t, focal_length, principal_point );
    cout<<"R is "<<endl<<R<<endl;
    cout<<"t is "<<endl<<t<<endl;
    
}

Results and Analysis

Starting: /home/qifarui/code/slambook-master/ch7/build/pose_estimation_2d2d 1.png 2.png
-- Max dist : 94.000000 
-- Min dist : 4.000000 
Found 79 matched point pairs in total
fundamental_matrix is 
[4.544437503937326e-06, 0.0001333855576988952, -0.01798499246457619;
 -0.0001275657012959839, 2.266794804637672e-05, -0.01416678429258694;
 0.01814994639952877, 0.004146055871509035, 1]
essential_matrix is 
[0.01097677479889588, 0.2483720528328748, 0.03167429208264108;
 -0.2088833206116968, 0.02908423961947315, -0.674465883831914;
 0.008286777626839029, 0.66140416240827, 0.01676523772760232]
homography_matrix is 
[0.9261214281395963, -0.1445322024422802, 33.26921085290552;
 0.04535424466077615, 0.9386696693994352, 8.570979963061975;
 -1.00619755759245e-05, -3.0081402779533e-05, 1]
R is 
[0.9969387384756405, -0.0515557418857258, 0.05878058527448649;
 0.05000441581116598, 0.9983685317362444, 0.02756507279509838;
 -0.06010582439317147, -0.02454140007064545, 0.9978902793176159]
t is 
[-0.9350802885396324;
 -0.03514646277098749;
 0.352689070059345]
t^R=
[-0.01552350379194682, -0.3512511256306389, -0.04479421344178829;
 0.2954056249626309, -0.04113132612112196, 0.9538388002732133;
 -0.01171927330817152, -0.9353667366876339, -0.02370962657084997]
epipolar constraint = [-0.0005415187894598494]
epipolar constraint = [-0.002158321996443405]
epipolar constraint = [0.0003344241034267809]
epipolar constraint = [-8.762783075558223e-06]
epipolar constraint = [0.0002121321904275941]
epipolar constraint = [0.0001088766988978362]
epipolar constraint = [0.0004246621761395189]
epipolar constraint = [-0.003173128213173212]
epipolar constraint = [-3.716645900272639e-05]
epipolar constraint = [-0.001451851842095554]
epipolar constraint = [-0.0009607717987651782]
epipolar constraint = [-0.0008051993509270439]
epipolar constraint = [-0.001424813547085908]
epipolar constraint = [-0.0004339424745618652]
epipolar constraint = [-0.0004786109668010741]
epipolar constraint = [-0.0001965692841337796]
epipolar constraint = [0.001542309775822037]
epipolar constraint = [0.003107154827523384]
epipolar constraint = [0.0006774648880640624]
epipolar constraint = [-0.001176167495898923]
epipolar constraint = [-0.003987986032579591]
epipolar constraint = [-0.001255263863970228]
epipolar constraint = [-0.001553941958799671]
epipolar constraint = [0.002009914868860929]
epipolar constraint = [-0.0006068096317080607]
epipolar constraint = [-2.769881084775661e-05]
epipolar constraint = [0.001274573010445104]
epipolar constraint = [-0.004169986054276958]
epipolar constraint = [-0.001108104734518528]
epipolar constraint = [-0.0005154295528846109]
epipolar constraint = [-0.001776993209361748]
epipolar constraint = [6.67735429456251e-07]
epipolar constraint = [-0.001853977733712375]
epipolar constraint = [-0.0004823070343765445]
epipolar constraint = [0.000474090447079023]
epipolar constraint = [-0.00296104174875747]
epipolar constraint = [-0.003347655248199201]
epipolar constraint = [-0.000197568098110501]
epipolar constraint = [-0.002824643387693802]
epipolar constraint = [-0.0004311798178931181]
epipolar constraint = [0.001181203683149307]
epipolar constraint = [1.756253096285576e-07]
epipolar constraint = [-0.002137829860220063]
epipolar constraint = [0.001153415526361834]
epipolar constraint = [-0.002120357729634967]
epipolar constraint = [2.741528250471692e-06]
epipolar constraint = [0.0009044582009310256]
epipolar constraint = [-0.002100484436687942]
epipolar constraint = [-0.0003517789230311139]
epipolar constraint = [-2.72046495066805e-05]
epipolar constraint = [-0.003784823354081965]
epipolar constraint = [-0.001588562609431521]
epipolar constraint = [-0.002928516012703969]
epipolar constraint = [-0.001016592267328753]
epipolar constraint = [0.0001241874570146462]
epipolar constraint = [-0.0009797104639974082]
epipolar constraint = [0.001457875224074562]
epipolar constraint = [-1.153738168938045e-05]
epipolar constraint = [-0.002733247428635111]
epipolar constraint = [0.001415721256946334]
epipolar constraint = [0.001790255871245483]
epipolar constraint = [-0.002547929672679236]
epipolar constraint = [-0.006257078862295637]
epipolar constraint = [-0.001874877416209744]
epipolar constraint = [-0.002104542913954163]
epipolar constraint = [1.392530300986516e-06]
epipolar constraint = [-0.004013502582045692]
epipolar constraint = [-0.004726241072653375]
epipolar constraint = [-0.001328682673556458]
epipolar constraint = [3.99592575944796e-07]
epipolar constraint = [-0.00508051172487578]
epipolar constraint = [-0.001977141851277564]
epipolar constraint = [-0.001661334265550506]
epipolar constraint = [0.004285762869538863]
epipolar constraint = [0.004087325193214478]
epipolar constraint = [0.0001482534277866508]
epipolar constraint = [-0.000962530114253686]
epipolar constraint = [-0.00234107601156941]
epipolar constraint = [-0.006138005036133154]
*** Exited normally ***

 

  As the output shows, substituting the recovered R and t back into the epipolar constraint yields residuals on the order of 0.00x, i.e. around 1e-3, close to the ideal value of zero.

 
