Hand-Written Bundle Adjustment (III): Solving PnP with Gauss-Newton BA

PnP (Perspective-n-Point) is a method for estimating motion from 3D-to-2D point correspondences. Because PnP can produce a good motion estimate from very few matched points, it is one of the most important pose estimation methods.

The PnP problem has many solutions. Common algorithms include P3P, EPnP, DLT, UPnP, and MRE. Among these, P3P, EPnP, DLT, and UPnP solve via linear transformations, while MRE is the least-squares method: it formulates a nonlinear least-squares problem and solves it iteratively, which is exactly Bundle Adjustment.

In this post we turn the Gauss-Newton curve-fitting program from the previous post into a Gauss-Newton PnP solver. In this PnP problem we optimize only the camera pose, so it can be viewed as a Bundle Adjustment of the simplest possible structure. We formulate PnP as a nonlinear least-squares problem, defined on the Lie algebra, that minimizes the reprojection error. The pose is parameterized by the Lie algebra because this yields an unconstrained optimization problem; the reprojection error is the difference between the observed pixel coordinates and the position obtained by projecting the 3D point with the currently estimated pose.

\xi^{*} = \arg\min_{\xi} \frac{1}{2} \sum_{i=1}^{n} \left\| u_{i} - \frac{1}{s_{i}} K \exp(\xi^{\wedge}) P_{i} \right\|_{2}^{2}

1. Constructing the Problem

The knowns in a PnP problem are:

  1. the coordinates of the 3D points in the world frame;
  2. the pixel coordinates of those points' projections in the image;
  3. the camera intrinsics.

What we want to recover is the transform from the world frame to the camera frame, i.e. the camera pose.
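
These knowns and the unknown pose are tied together by the pinhole projection model. With u_i written in homogeneous coordinates and s_i the depth of the point in the camera frame, a world point P_i projects as

s_{i} u_{i} = K \exp(\xi^{\wedge}) P_{i}

and it is exactly the residuals of this relation that the objective above minimizes.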

We obtain the corresponding 3D points and their projected pixel coordinates through feature matching. Using a pair of RGB-D frames, we perform feature point extraction and matching between the two images. For each matched feature in the first frame, the depth image gives its spatial coordinates in the first camera's frame; treating the first camera frame as the world frame, these are the world-frame 3D points. The pixel coordinates of the matched features in the second frame are the projections of those 3D points in the image. The frame-to-frame transform we solve for is then exactly the world-to-camera transform of the PnP problem, i.e. the camera pose.

First, we obtain the matched 3D point coordinates and pixel coordinates from the first frame's RGB and depth images and the second frame's RGB image.

//-- Read the two images
    Mat img_1 = imread ( "./data/1.png", CV_LOAD_IMAGE_COLOR );
    Mat img_2 = imread ( "./data/2.png", CV_LOAD_IMAGE_COLOR );

    // Initialization
    vector<KeyPoint> keypoints_1;
    vector<KeyPoint> keypoints_2;
    vector< DMatch > matches;
    Mat descriptors_1, descriptors_2;
    // used in OpenCV3
    Ptr<FeatureDetector> detector = ORB::create();
    Ptr<DescriptorExtractor> descriptor = ORB::create();
    // use this if you are in OpenCV2
    // Ptr<FeatureDetector> detector = FeatureDetector::create ( "ORB" );
    // Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create ( "ORB" );
    Ptr<DescriptorMatcher> matcher  = DescriptorMatcher::create ( "BruteForce-Hamming" );

    // Detect Oriented FAST corner locations
    detector->detect ( img_1,keypoints_1 );
    detector->detect ( img_2,keypoints_2 );

    // Compute BRIEF descriptors at the detected corners
    descriptor->compute ( img_1, keypoints_1, descriptors_1 );
    descriptor->compute ( img_2, keypoints_2, descriptors_2 );

    // Match the BRIEF descriptors of the two images using Hamming distance
    vector<DMatch> match;
    // BFMatcher matcher ( NORM_HAMMING );
    matcher->match ( descriptors_1, descriptors_2, match );

    // Filter the matched pairs
    double min_dist=10000, max_dist=0;

    // Find the minimum and maximum distance over all matches, i.e. the
    // distances between the most similar and least similar pairs
    for ( int i = 0; i < descriptors_1.rows; i++ )
    {
        double dist = match[i].distance;
        if ( dist < min_dist ) min_dist = dist;
        if ( dist > max_dist ) max_dist = dist;
    }

    printf ( "-- Max dist : %f \n", max_dist );
    printf ( "-- Min dist : %f \n", min_dist );

    // Reject a match when its descriptor distance exceeds twice the minimum
    // distance. Because the minimum distance can be very small, an empirical
    // value of 30 is used as a lower bound.
    for ( int i = 0; i < descriptors_1.rows; i++ )
    {
        if ( match[i].distance <= max ( 2*min_dist, 30.0 ) )
        {
            matches.push_back ( match[i] );
        }
    }
    cout<<"Found "<<matches.size() <<" matched point pairs"<<endl;
    Mat d1 = imread ( "./data/1_depth.png", CV_LOAD_IMAGE_UNCHANGED );
    for ( DMatch m:matches )
    {
        ushort d = d1.ptr<unsigned short> (int ( keypoints_1[m.queryIdx].pt.y )) [ int ( keypoints_1[m.queryIdx].pt.x ) ];
        if ( d == 0 )   // bad depth
            continue;
        float z = d/5000.0;   // depth scale: TUM-style depth images store depth x 5000
        float x = ( keypoints_1[m.queryIdx].pt.x - cx ) * z / fx;
        float y = ( keypoints_1[m.queryIdx].pt.y - cy ) * z / fy;
        p3d.push_back ( Vector3d( x, y, z) );
        p2d.push_back ( Vector2d( keypoints_2[m.trainIdx].pt.x, keypoints_2[m.trainIdx].pt.y) );
    }
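
The snippet above, as well as the solver in the next section, relies on a few declarations that are not shown. A minimal sketch of what they could look like is given below; the intrinsic values are placeholders that must be replaced with the calibration of the camera that captured the data, and the Sophus header assumes the older non-templated Sophus that provides Sophus::SE3:

#include <cmath>
#include <vector>
#include <opencv2/opencv.hpp>
#include <Eigen/Dense>
#include <sophus/se3.h>   // older non-templated Sophus, provides Sophus::SE3

using namespace std;
using namespace cv;
using namespace Eigen;

typedef Matrix<double, 6, 1> Vector6d;   // se(3) increment used by the solver

// Camera intrinsics: placeholder values, replace with your own calibration
double fx = 520.9, fy = 521.0, cx = 325.1, cy = 249.7;

// Matched world-frame 3D points and their pixel projections in the second frame
vector<Vector3d> p3d;
vector<Vector2d> p2d;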

 

2. Gauss-Newton Iteration

The error for a single projected point is:

e_{i} = u_{i} - \frac{1}{s_{i}} K \exp(\xi^{\wedge}) P_{i}

The Jacobian matrix is:

J = -\begin{bmatrix} \frac{f_{x}}{Z'} & 0 & -\frac{f_{x}X'}{Z'^{2}} & -\frac{f_{x}X'Y'}{Z'^{2}} & f_{x}+\frac{f_{x}X'^{2}}{Z'^{2}} & -\frac{f_{x}Y'}{Z'} \\ 0 & \frac{f_{y}}{Z'} & -\frac{f_{y}Y'}{Z'^{2}} & -f_{y}-\frac{f_{y}Y'^{2}}{Z'^{2}} & \frac{f_{y}X'Y'}{Z'^{2}} & \frac{f_{y}X'}{Z'} \end{bmatrix}
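
This Jacobian follows from the chain rule. Writing P' = \exp(\xi^{\wedge})P = [X', Y', Z']^{T} for the point in the camera frame, the error depends on the pose only through P':

\frac{\partial e}{\partial \delta\xi} = -\frac{\partial u'}{\partial P'} \frac{\partial P'}{\partial \delta\xi}

where the projection part and the perturbation part are

\frac{\partial u'}{\partial P'} = \begin{bmatrix} \frac{f_{x}}{Z'} & 0 & -\frac{f_{x}X'}{Z'^{2}} \\ 0 & \frac{f_{y}}{Z'} & -\frac{f_{y}Y'}{Z'^{2}} \end{bmatrix}, \qquad \frac{\partial P'}{\partial \delta\xi} = \begin{bmatrix} I & -P'^{\wedge} \end{bmatrix}

(with the translation components first in \delta\xi, matching the column order used in the code below). Multiplying the two factors and negating reproduces J entry by entry.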

The pose estimate is updated with the left-multiplication model on the Lie algebra:

T_{k+1} = \exp(\Delta\xi^{\wedge}) T_{k}
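
At each iteration, every point contributes a 2x6 Jacobian block J_i and a 2x1 error e_i; Gauss-Newton accumulates them into the normal equations that the code below solves for the update \Delta\xi:

H \Delta\xi = b, \qquad H = \sum_{i=1}^{n} J_{i}^{T} J_{i}, \qquad b = -\sum_{i=1}^{n} J_{i}^{T} e_{i}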

// Gauss-Newton iteration
    int iterations = 100;
    double cost = 0, lastCost = 0;
    int nPoints = p3d.size();
    cout << "points: " << nPoints << endl;

    Sophus::SE3 T_esti; // estimated pose (camera R,t; updated via its Lie algebra)

    for (int iter = 0; iter < iterations; iter++) {
        Matrix<double, 6, 6> H = Matrix<double, 6, 6>::Zero();
        Vector6d b = Vector6d::Zero();

        cost = 0;
        // compute cost
        for (int i = 0; i < nPoints; i++) {
            Vector2d p2 = p2d[i];
            Vector3d p3 = p3d[i];  // the i-th data point

            Vector3d P = T_esti * p3;
            double x = P[0];
            double y = P[1];
            double z = P[2];

            Vector2d p2_ = { fx * ( x/z ) + cx, fy * ( y/z ) + cy };
            Vector2d e = p2 - p2_;    // error = observation - estimate
            cost += (e[0]*e[0]+e[1]*e[1]);

            // compute jacobian
            Matrix<double, 2, 6> J;
            J(0,0) = -(fx/z);
            J(0,1) = 0;
            J(0,2) = (fx*x/(z*z));
            J(0,3) = (fx*x*y/(z*z));
            J(0,4) = -(fx*x*x/(z*z)+fx);
            J(0,5) = (fx*y/z);
            J(1,0) = 0;
            J(1,1) = -(fy/z);
            J(1,2) = (fy*y/(z*z));
            J(1,3) = (fy*y*y/(z*z)+fy);
            J(1,4) = -(fy*x*y/(z*z));
            J(1,5) = -(fy*x/z);

            H += J.transpose() * J;
            b += -J.transpose() * e;
        }

        // solve dx
        Vector6d dx;
        dx = H.ldlt().solve(b);

        if (isnan(dx[0])) {
            cout << "result is nan!" << endl;
            break;
        }

        if (iter > 0 && cost >= lastCost) {
            // the cost increased, so the local approximation is no longer good
            cout << "cost: " << cost << ", last cost: " << lastCost << endl;
            break;
        }

        // update the pose estimate
        T_esti = Sophus::SE3::exp(dx)*T_esti;

        lastCost = cost;

        cout << "iteration " << iter << " cost=" << cout.precision(12) << cost << endl;
    }

    cout << "estimated pose: \n" << T_esti.matrix() << endl;
    return 0;

3. Running Results

The complete code is available at https://github.com/YCJin9/sparse_BA

Reposted from blog.csdn.net/qq_41814939/article/details/82501664