Handwritten VIO from Scratch: Chapter 3 Homework


1
# given x, y point coordinates; plotting calls added to make the truncated snippet runnable
import matplotlib.pyplot as plt
x,y=[0,1,2,3,4,5,6,7,8,9,10,11],[0.001,699.051,1864.14,1242.76,414.252,138.084,46.028,15.3427,5.11423,1.70474,0.568247,0.378832]
plt.plot(x, y, 'o-')
plt.show()

(figure: plot of the given data points)

2

The modified code is as follows.

For the function $y = ax^2 + bx + c$, both the residual and the Jacobian are very simple:

$$e = ax^2 + bx + c - y$$

(here $a$, $b$, $c$ are the variables being optimized, $y$ is the measurement, and $x$ is the data point)

$$J = \begin{bmatrix} x^2 & x & 1 \end{bmatrix}$$

// compute the curve-model residual
    virtual void ComputeResidual() override
    {
        Vec3 abc = verticies_[0]->Parameters();  // current parameter estimate

        residual_(0) = abc(0)*x_*x_ + abc(1)*x_ + abc(2) - y_;  // residual e = a*x^2 + b*x + c - y
    }

    // compute the Jacobian of the residual w.r.t. the variables
    virtual void ComputeJacobians() override
    {
        Vec3 abc = verticies_[0]->Parameters();

        Eigen::Matrix<double, 1, 3> jaco_abc;  // the residual is 1-D and there are 3 states, so the Jacobian is 1x3
        jaco_abc << x_ * x_, x_, 1;
        jacobians_[0] = jaco_abc;
    }
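For context, here is a minimal sketch of the edge class these two overrides live in. The Edge base-class constructor signature (residual dimension, number of vertices, vertex type names) follows the course framework and is an assumption here; it is not shown in the original post:

class CurveFittingEdge : public Edge {
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW
    // 1-dimensional residual, one connected vertex holding (a, b, c)
    CurveFittingEdge(double x, double y)
        : Edge(1, 1, std::vector<std::string>{"abc"}) { x_ = x; y_ = y; }

    virtual void ComputeResidual() override;   // as shown above
    virtual void ComputeJacobians() override;  // as shown above

    double x_, y_;  // data point x and its measurement y
};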

Don't forget to also change the code in the main function that generates the observations!

 // construct N observations
    for (int i = 0; i < N; ++i) {
        double x = i / 100.;
        double n = noise(generator);
        // observation y for the quadratic model
        double y = a*x*x + b*x + c + n;
//        double y = std::exp( a*x*x + b*x + c );  // original exponential model

        // one residual edge per observation
        shared_ptr<CurveFittingEdge> edge(new CurveFittingEdge(x, y));
        std::vector<std::shared_ptr<Vertex>> edge_vertex;
        edge_vertex.push_back(vertex);
        edge->SetVertex(edge_vertex);

        // add this residual to the least-squares problem
        problem.AddEdge(edge);
    }
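The loop above assumes `vertex` and `problem` were set up beforehand. A sketch of that setup, following the course framework's CurveFitting example (the exact calls, in particular SetParameters and Solve, are assumptions based on that framework):

    // build the least-squares problem and the parameter vertex (a, b, c)
    Problem problem(Problem::ProblemType::GENERIC_PROBLEM);
    shared_ptr<CurveFittingVertex> vertex(new CurveFittingVertex());
    vertex->SetParameters(Eigen::Vector3d(0., 0., 0.));  // initial guess
    problem.AddVertex(vertex);

    // ... the observation loop above ...

    problem.Solve(30);  // run up to 30 LM iterations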

The output is:

Test CurveFitting start...
iter: 0 , chi= 719.475 , Lambda= 0.001
iter: 1 , chi= 91.395 , Lambda= 0.000333333
problem solve cost: 0.502665 ms
   makeHessian cost: 0.324319 ms
-------After optimization, we got these parameters :
 1.61039  1.61853 0.995178
-------ground truth: 
1.0,  2.0,  1.0

3

Other damping-factor strategies

(figure: the three damping-factor update strategies)

As the figure shows, the original program uses the third strategy. Below is an implementation of the first one.

void Problem::ComputeLambdaInitLM() {
    ni_ = 2.;
    //currentLambda_ = -1.;   // original sentinel initial value for lambda
    currentLambda_ = 1e-3;    // initial lambda: smaller value
    //currentLambda_ = 1e3;   // initial lambda: larger value
    currentChi_ = 0.0;
    // TODO:: robust cost chi2
    for (auto edge: edges_) {
        currentChi_ += edge.second->Chi2();
    }
    if (err_prior_.rows() > 0)
        currentChi_ += err_prior_.norm();

    stopThresholdLM_ = 1e-6 * currentChi_;  // stop once the error has dropped by a factor of 1e-6

    double maxDiagonal = 0;
    ulong size = Hessian_.cols();
    assert(Hessian_.rows() == Hessian_.cols() && "Hessian is not square");
    for (ulong i = 0; i < size; ++i) {
        maxDiagonal = std::max(fabs(Hessian_(i, i)), maxDiagonal);
    }
    double tau = 1e-5;
    currentLambda_ = tau * maxDiagonal;  // note: this overwrites the hand-set value above
}
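The last two lines implement the common LM initialization (as in Nielsen's variant of the algorithm): the initial damping is scaled to the largest diagonal entry of the Hessian,

$$\lambda_0 = \tau \cdot \max_i H_{ii}, \qquad \tau = 10^{-5}.$$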

// add the damping factor to the Hessian
void Problem::AddLambdatoHessianLM() {
    ulong size = Hessian_.cols();
    assert(Hessian_.rows() == Hessian_.cols() && "Hessian is not square");
    for (ulong i = 0; i < size; ++i) {
        Hessian_(i, i) += currentLambda_ * Hessian_(i, i);  // H_ii := (1 + lambda) * H_ii
    }
}
// remove the damping factor from the Hessian
void Problem::RemoveLambdaHessianLM() {
    ulong size = Hessian_.cols();
    assert(Hessian_.rows() == Hessian_.cols() && "Hessian is not square");
    // TODO:: repeatedly adding and then dividing out lambda can hurt numerical
    // precision; better to save the diagonal before damping and restore it here directly
    for (ulong i = 0; i < size; ++i) {
        Hessian_(i, i) /= 1.0 + currentLambda_;  // undo H_ii := (1 + lambda) * H_ii
    }
}
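The TODO can be addressed exactly as it suggests. A minimal sketch, assuming a new member Hessian_diag_backup_ (a hypothetical VecX introduced here for illustration) that saves the undamped diagonal:

// sketch: Hessian_diag_backup_ is a hypothetical VecX member of Problem
void Problem::AddLambdatoHessianLM() {
    Hessian_diag_backup_ = Hessian_.diagonal();  // save before damping
    ulong size = Hessian_.cols();
    for (ulong i = 0; i < size; ++i) {
        Hessian_(i, i) += currentLambda_ * Hessian_(i, i);
    }
}

void Problem::RemoveLambdaHessianLM() {
    // restore the saved diagonal exactly: no repeated add/divide,
    // so no accumulated floating-point drift
    Hessian_.diagonal() = Hessian_diag_backup_;
}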
// decide whether the current step is acceptable and how to update lambda
// (this implements the first damping-factor strategy from the homework)
bool Problem::IsGoodStepInLM() {
    /*double scale = 0;
    scale = delta_x_.transpose() * (currentLambda_ * delta_x_ + b_);
    scale += 1e-3;    // make sure it's non-zero :)*/

    // recompute all residuals after the state update and sum them up
    double tempChi = 0.0;
    for (auto edge: edges_) {
        edge.second->ComputeResidual();  // recompute the residual
        tempChi += edge.second->Chi2();  // accumulate the total error
    }

    // compute rho
    assert(Hessian_.rows() == Hessian_.cols() && "Hessian is not square");
    ulong size = Hessian_.cols();
    MatXX diag_Hessian(MatXX::Zero(size, size));  // D = diag(H)
    for (ulong i = 0; i < size; ++i) {
        diag_Hessian(i, i) = Hessian_(i, i);  // copy the diagonal of the Hessian
    }
    double scale = delta_x_.transpose() * (currentLambda_ * diag_Hessian * delta_x_ + b_);
    double rho = (currentChi_ - tempChi) / scale;

    double epsilon = 0.75;
    double L_down = 9.0;
    double L_up = 11.0;
    if (rho > epsilon && isfinite(tempChi)) {  // last step was good: the error decreased
        currentLambda_ = std::max(currentLambda_ / L_down, 1e-7);
        currentChi_ = tempChi;  // accept the new total error
        return true;
    } else {
        currentLambda_ = std::min(currentLambda_ * L_up, 1e7);
        return false;
    }
}
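In formulas, the gain ratio and the lambda update implemented above are

$$\rho = \frac{F(\mathbf{x}) - F(\mathbf{x} + \Delta\mathbf{x})}{\Delta\mathbf{x}^{T}\left(\lambda \,\mathrm{diag}(\mathbf{H})\, \Delta\mathbf{x} + \mathbf{b}\right)}, \qquad
\lambda \leftarrow \begin{cases} \max(\lambda / 9,\; 10^{-7}), & \rho > 0.75 \\ \min(11\lambda,\; 10^{7}), & \text{otherwise.} \end{cases}$$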

With the initial lambda set to 1e-3, the output is:

Test CurveFitting start...
iter: 0 , chi= 1.12702e+06 , Lambda= 0.001
iter: 1 , chi= 1.03183e+06 , Lambda= 17.8946
iter: 2 , chi= 85556.2 , Lambda= 1.98828
iter: 3 , chi= 9439.41 , Lambda= 0.22092
iter: 4 , chi= 437.923 , Lambda= 0.0245467
iter: 5 , chi= 311.211 , Lambda= 0.00272741
iter: 6 , chi= 216.851 , Lambda= 0.000303046
iter: 7 , chi= 161.003 , Lambda= 3.36718e-05
iter: 8 , chi= 96.7359 , Lambda= 3.74131e-06
iter: 9 , chi= 90.9707 , Lambda= 4.15701e-07
iter: 10 , chi= 90.9502 , Lambda= 1e-07
delat_x.squareNorm()= 2.92411e-08
problem solve cost: 0.875806 ms
   makeHessian cost: 0.6009 ms
-------After optimization, we got these parameters :
 1.95119  3.07802 0.970343
-------ground truth: 
2.0,  3.0,  1.0

With the initial lambda set to 1e3, the output is:

Test CurveFitting start...
iter: 0 , chi= 1.12702e+06 , Lambda= 0.001
iter: 1 , chi= 1.03183e+06 , Lambda= 17.8946
iter: 2 , chi= 85556.2 , Lambda= 1.98828
iter: 3 , chi= 9439.41 , Lambda= 0.22092
iter: 4 , chi= 437.923 , Lambda= 0.0245467
iter: 5 , chi= 311.211 , Lambda= 0.00272741
iter: 6 , chi= 216.851 , Lambda= 0.000303046
iter: 7 , chi= 161.003 , Lambda= 3.36718e-05
iter: 8 , chi= 96.7359 , Lambda= 3.74131e-06
iter: 9 , chi= 90.9707 , Lambda= 4.15701e-07
iter: 10 , chi= 90.9502 , Lambda= 1e-07
delat_x.squareNorm()= 2.92411e-08
problem solve cost: 2.54703 ms
   makeHessian cost: 1.74085 ms
-------After optimization, we got these parameters :
 1.95119  3.07802 0.970343
-------ground truth: 
2.0,  3.0,  1.0

From the results above, the two different initial values produce exactly the same result. In principle they should differ; the reason is in ComputeLambdaInitLM() itself: its last line unconditionally overwrites currentLambda_ with tau * maxDiagonal, so the hand-set initial value (1e-3 or 1e3) never actually reaches the solver.
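A minimal fix, reusing the -1 sentinel that is commented out at the top of ComputeLambdaInitLM(): only fall back to the tau * maxDiagonal rule when no initial value was set by hand.

    // at the end of ComputeLambdaInitLM(): keep a hand-set initial lambda,
    // and only apply the tau * maxDiagonal rule when the -1 sentinel is still set
    double tau = 1e-5;
    if (currentLambda_ < 0)
        currentLambda_ = tau * maxDiagonal;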

2 Formula derivation

(figures: handwritten formula derivation)

3

(figure: handwritten answer)


Reposted from blog.csdn.net/joun772/article/details/110246965