Nonlinear Optimization: Fitting a Curve (ceres/g2o)


  Only by combining the code with the underlying theory can one really see how the theory is implemented. Following the theory-plus-practice approach of Gao Bo's "Fourteen Lectures on Visual SLAM" and trying it by hand, the formulas and the code finally fit together. Even if this produces nothing new, it at least builds familiarity and understanding.

The curve-fitting problem:

  Consider the curve $y = \exp (a{x^2} + bx + c) + w$, where a, b, c are the parameters of the curve and w is Gaussian noise with $w \sim N(0,{\sigma ^2})$. Given N observed data points $(x_i, y_i)$, we want to estimate the curve parameters from these points, which leads to the (noise) least-squares problem:

\[\mathop {\min }\limits_{a,b,c} \frac{1}{2}\sum\limits_{i = 1}^N {{{\left\| {{y_i} - \exp (ax_i^2 + b{x_i} + c)} \right\|}^2}} \]

  The quantities to estimate are the three coefficients a, b, c. The approach is: first generate ground-truth x, y values from the model, then add Gaussian noise to the true values, and finally fit the parameter model to the noisy data with the Gauss-Newton method. Define the error as ${e_i} = {y_i} - \exp (ax_i^2 + b{x_i} + c)$ and compute its Jacobian with respect to the parameters:

\[{J_i} = \left[ {\begin{array}{*{20}{c}}
{\frac{{\partial {e_i}}}{{\partial a}}}\\
{\frac{{\partial {e_i}}}{{\partial b}}}\\
{\frac{{\partial {e_i}}}{{\partial c}}}
\end{array}} \right] = \left[ {\begin{array}{*{20}{c}}
{ - x_i^2\exp (ax_i^2 + b{x_i} + c)}\\
{ - {x_i}\exp (ax_i^2 + b{x_i} + c)}\\
{ - \exp (ax_i^2 + b{x_i} + c)}
\end{array}} \right]\]
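As a quick sanity check on this derivation (my own addition, not part of the original post), the analytic derivatives can be compared against central finite differences; the test point $(a,b,c,x,y) = (1,2,1,0.5,0)$ and the step $h$ below are arbitrary illustrative choices.

#include <cmath>
#include <cstdio>

int main() {
    double a = 1.0, b = 2.0, c = 1.0, x = 0.5, y = 0.0, h = 1e-6;
    // error of a single observation: e = y - exp(a*x^2 + b*x + c)
    auto e = [&](double a_, double b_, double c_) {
        return y - std::exp(a_ * x * x + b_ * x + c_);
    };
    double f = std::exp(a * x * x + b * x + c);
    double Ja = -x * x * f, Jb = -x * f, Jc = -f;             // analytic de/da, de/db, de/dc
    double Na = (e(a + h, b, c) - e(a - h, b, c)) / (2 * h);  // central differences
    double Nb = (e(a, b + h, c) - e(a, b - h, c)) / (2 * h);
    double Nc = (e(a, b, c + h) - e(a, b, c - h)) / (2 * h);
    printf("de/da: analytic %f vs numeric %f\n", Ja, Na);
    printf("de/db: analytic %f vs numeric %f\n", Jb, Nb);
    printf("de/dc: analytic %f vs numeric %f\n", Jc, Nc);
    return 0;
}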

The increment (normal) equations of the Gauss-Newton method are:

\[\left( {\sum\limits_{i = 1}^N {{J_i}{{({\sigma ^2})}^{ - 1}}{J_i}^T} } \right)\Delta {x_k} = \sum\limits_{i = 1}^N { - {J_i}{{({\sigma ^2})}^{ - 1}}{e_i}} \]
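Spelled out one step further (a restatement for clarity, matching the code below): each iteration solves this linear system for the increment $\Delta {x_k}$ (the code uses an LDLT decomposition), applies it additively, and stops once the cost no longer decreases:

\[{(a,b,c)_{k + 1}} = {(a,b,c)_k} + \Delta {x_k}\]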

#include <iostream>
#include <chrono>
#include <cmath>
#include <opencv2/opencv.hpp>
#include <Eigen/Core>
#include <Eigen/Dense>
using namespace cv;
using namespace std;
using namespace Eigen;

int main(int argc, char **argv) {
    double ar = 1.0, br = 2.0, cr = 1.0;   // ground-truth parameter values
    double ae = 2.0, be = -1.0, ce = 5.0;  // estimated parameters: start from an initial guess and iterate on the increment
    int N = 100;                           // number of data points
    double w_sigma = 1.0;                  // noise sigma
    double inv_sigma = 1.0 / w_sigma;      // 1/sigma; squared later to weight by 1/sigma^2
    cv::RNG rng;                           // OpenCV random number generator

    vector<double> x_data, y_data;         // observations used for the fit
    for (int i = 0; i < N; i++) {
        double x = i / 100.0;
        x_data.push_back(x);
        y_data.push_back(exp(ar * x * x + br * x + cr) + rng.gaussian(w_sigma));  // rng.gaussian takes the standard deviation
    }

    // start Gauss-Newton iterations
    int iterations = 100;           // maximum number of iterations
    double cost = 0, lastCost = 0;  // least-squares cost of this and the previous iteration; comparing them decides when to stop

    chrono::steady_clock::time_point t1 = chrono::steady_clock::now();  // for timing the optimization
    for (int iter = 0; iter < iterations; iter++) {

        Matrix3d H = Matrix3d::Zero();  // Gauss-Newton Hessian approximation, H = sum sigma^{-2} J J^T
        Vector3d b = Vector3d::Zero();  // right-hand side
        cost = 0;

        for (int i = 0; i < N; i++) {
            double xi = x_data[i], yi = y_data[i];  // the i-th data point
            double error = yi - exp(ae * xi * xi + be * xi + ce);
            Vector3d J;  // Jacobian of the error w.r.t. (a, b, c)
            J[0] = -xi * xi * exp(ae * xi * xi + be * xi + ce);  // de/da
            J[1] = -xi * exp(ae * xi * xi + be * xi + ce);       // de/db
            J[2] = -exp(ae * xi * xi + be * xi + ce);            // de/dc

            H += inv_sigma * inv_sigma * J * J.transpose();      // accumulate the system H dx = b
            b += -inv_sigma * inv_sigma * error * J;

            cost += error * error;  // accumulate the least-squares cost to check whether it decreases
        }

        // solve the linear system H dx = b
        Vector3d dx = H.ldlt().solve(b);
        if (isnan(dx[0])) {
            cout << "result is nan!" << endl;
            break;
        }

        if (iter > 0 && cost >= lastCost) {
            cout << "cost: " << cost << ">= last cost: " << lastCost << ", break." << endl;
            break;
        }

        ae += dx[0];  // apply the increment and keep iterating toward the optimum
        be += dx[1];
        ce += dx[2];

        lastCost = cost;

        cout << "total cost: " << cost << ", \t\tupdate: " << dx.transpose() <<
            "\t\testimated params: " << ae << "," << be << "," << ce << endl;
    }

    chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
    chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);  // report the optimization time
    cout << "solve time cost = " << time_used.count() << " seconds. " << endl;

    cout << "estimated abc = " << ae << ", " << be << ", " << ce << endl;
    return 0;
}

 

  •  The same problem can be fit with the ceres library; the iteration and solving steps are then handled through ceres' template machinery.
#include <iostream>
#include <opencv2/core/core.hpp>
#include <ceres/ceres.h>
#include <chrono>

using namespace std;

// computational model of the cost function
struct CURVE_FITTING_COST
{
    CURVE_FITTING_COST ( double x, double y ) : _x ( x ), _y ( y ) {}
    // computation of the residual
    template <typename T>
    bool operator() (
        const T* const abc,     // the model parameters: a 3-dimensional parameter block to be optimized
        T* residual ) const     // the residual
    {
        residual[0] = T ( _y ) - ceres::exp ( abc[0]*T ( _x ) *T ( _x ) + abc[1]*T ( _x ) + abc[2] ); // y-exp(ax^2+bx+c)
        return true;
    }
    const double _x, _y;    // x, y data
};

int main ( int argc, char** argv )
{
    double a=1.0, b=2.0, c=1.0;         // ground-truth parameter values
    int N=100;                          // number of data points
    double w_sigma=1.0;                 // noise sigma
    cv::RNG rng;                        // OpenCV random number generator
    double abc[3] = {0,0,0};            // estimate of the abc parameters, initialized to 0 here

    vector<double> x_data, y_data;      // the observed data

    cout<<"generating data: "<<endl;
    for ( int i=0; i<N; i++ )
    {
        double x = i/100.0;
        x_data.push_back ( x );
        y_data.push_back (
            exp ( a*x*x + b*x + c ) + rng.gaussian ( w_sigma )
        );
        cout<<x_data[i]<<" "<<y_data[i]<<endl;  // print the generated observations
    }

    // build the least-squares problem
    ceres::Problem problem;
    for ( int i=0; i<N; i++ )
    {
        problem.AddResidualBlock (     // add a residual block to the problem
        // automatic differentiation; template parameters: cost type, residual dimension, parameter dimension
        // (the dimensions must match the struct above)
            new ceres::AutoDiffCostFunction<CURVE_FITTING_COST, 1, 3> (
                new CURVE_FITTING_COST ( x_data[i], y_data[i] )  // the observation
            ),
            nullptr,            // robust kernel; not used here, so null
            abc                 // parameters to estimate
        );
    }

    // configure the solver
    ceres::Solver::Options options;     // there are many options that can be set here
    options.linear_solver_type = ceres::DENSE_QR;  // how to solve the increment equations
    options.minimizer_progress_to_stdout = true;   // print progress to cout

    ceres::Solver::Summary summary;                // optimization report
    chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
    ceres::Solve ( options, &problem, &summary );  // run the optimization
    chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
    chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>( t2-t1 );
    cout<<"solve time cost = "<<time_used.count()<<" seconds. "<<endl;

    // output the results
    cout<<summary.BriefReport() <<endl;
    cout<<"estimated a,b,c = ";
    for ( auto a:abc ) cout<<a<<" ";
    cout<<endl;

    return 0;
}
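As a variation (a minimal sketch of my own, not part of the original post), the hand-derived Jacobian from the Gauss-Newton section can also be given to Ceres directly through ceres::SizedCostFunction instead of relying on automatic differentiation. CurveFittingAnalyticCost is a hypothetical name; it is meant as a drop-in alternative to CURVE_FITTING_COST in the program above.

#include <ceres/ceres.h>
#include <cmath>

class CurveFittingAnalyticCost : public ceres::SizedCostFunction<1, 3> {
public:
    CurveFittingAnalyticCost(double x, double y) : _x(x), _y(y) {}
    virtual bool Evaluate(double const* const* parameters,
                          double* residuals,
                          double** jacobians) const {
        const double* abc = parameters[0];
        double f = std::exp(abc[0] * _x * _x + abc[1] * _x + abc[2]);
        residuals[0] = _y - f;               // e = y - exp(ax^2 + bx + c)
        if (jacobians != nullptr && jacobians[0] != nullptr) {
            jacobians[0][0] = -_x * _x * f;  // de/da
            jacobians[0][1] = -_x * f;       // de/db
            jacobians[0][2] = -f;            // de/dc
        }
        return true;
    }
private:
    const double _x, _y;
};

// usage inside the loop that builds the problem:
//   problem.AddResidualBlock(new CurveFittingAnalyticCost(x_data[i], y_data[i]), nullptr, abc);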

 

  •  Curve fitting with graph optimization (g2o). Refer to the graph-optimization series on g2o and go through the intermediate steps carefully, more than once if needed, to see how everything fits together.
#include <iostream>
#include <g2o/core/base_vertex.h>
#include <g2o/core/base_unary_edge.h>
#include <g2o/core/block_solver.h>
#include <g2o/core/optimization_algorithm_levenberg.h>
#include <g2o/core/optimization_algorithm_gauss_newton.h>
#include <g2o/core/optimization_algorithm_dogleg.h>
#include <g2o/solvers/dense/linear_solver_dense.h>
#include <Eigen/Core>
#include <opencv2/core/core.hpp>
#include <cmath>
#include <chrono>
using namespace std;

// vertex of the curve model; template parameters: dimension and type of the optimization variable
class CurveFittingVertex: public g2o::BaseVertex<3, Eigen::Vector3d>
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW
    virtual void setToOriginImpl() // reset
    {
        _estimate << 0,0,0;
    }

    virtual void oplusImpl( const double* update ) // update
    {
        _estimate += Eigen::Vector3d(update);
    }
    // read and write to disk: not needed here
    virtual bool read( istream& in ) { return true; }
    virtual bool write( ostream& out ) const { return true; }
};

// error model; template parameters: measurement dimension, measurement type, connected vertex type
class CurveFittingEdge: public g2o::BaseUnaryEdge<1,double,CurveFittingVertex>
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW
    CurveFittingEdge( double x ): BaseUnaryEdge(), _x(x) {}
    // compute the curve-model error
    void computeError()
    {
        const CurveFittingVertex* v = static_cast<const CurveFittingVertex*> (_vertices[0]);
        const Eigen::Vector3d abc = v->estimate();
        _error(0,0) = _measurement - std::exp( abc(0,0)*_x*_x + abc(1,0)*_x + abc(2,0) ) ;
    }
    virtual bool read( istream& in ) { return true; }
    virtual bool write( ostream& out ) const { return true; }
public:
    double _x;  // the x value; the y value is stored in _measurement
};

int main( int argc, char** argv )
{
    double a=1.0, b=2.0, c=1.0;         // ground-truth parameter values
    int N=100;                          // number of data points
    double w_sigma=1.0;                 // noise sigma
    cv::RNG rng;                        // OpenCV random number generator

    vector<double> x_data, y_data;      // the data

    cout<<"generating data: "<<endl;
    for ( int i=0; i<N; i++ )
    {
        double x = i/100.0;
        x_data.push_back ( x );
        y_data.push_back (
            exp ( a*x*x + b*x + c ) + rng.gaussian ( w_sigma )
        );
        cout<<x_data[i]<<" "<<y_data[i]<<endl;
    }

    // build the graph optimization; configure g2o first
    typedef g2o::BlockSolver< g2o::BlockSolverTraits<3,1> > Block;  // each error term has a 3-D optimization variable and a 1-D error
    Block::LinearSolverType* linearSolver = new g2o::LinearSolverDense<Block::PoseMatrixType>(); // linear equation solver
    Block* solver_ptr = new Block( linearSolver );      // block matrix solver
    // gradient-descent method; choose from GN, LM, DogLeg
    g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg( solver_ptr );
    // g2o::OptimizationAlgorithmGaussNewton* solver = new g2o::OptimizationAlgorithmGaussNewton( solver_ptr );
    // g2o::OptimizationAlgorithmDogleg* solver = new g2o::OptimizationAlgorithmDogleg( solver_ptr );
    g2o::SparseOptimizer optimizer;     // the graph model
    optimizer.setAlgorithm( solver );   // set the solver
    optimizer.setVerbose( true );       // enable verbose output

    // add the vertex to the graph
    CurveFittingVertex* v = new CurveFittingVertex();
    v->setEstimate( Eigen::Vector3d(0,0,0) );   // initial estimate of (a, b, c)
    v->setId(0);
    optimizer.addVertex( v );

    // add the edges to the graph
    for ( int i=0; i<N; i++ )
    {
        CurveFittingEdge* edge = new CurveFittingEdge( x_data[i] );
        edge->setId(i);
        edge->setVertex( 0, v );                // connect the edge to the vertex
        edge->setMeasurement( y_data[i] );      // the measurement
        edge->setInformation( Eigen::Matrix<double,1,1>::Identity()*1/(w_sigma*w_sigma) ); // information matrix: inverse of the covariance
        optimizer.addEdge( edge );
    }

    // run the optimization
    cout<<"start optimization"<<endl;
    chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
    optimizer.initializeOptimization();
    optimizer.optimize(100);
    chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
    chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>( t2-t1 );
    cout<<"solve time cost = "<<time_used.count()<<" seconds. "<<endl;

    // output the optimized values
    Eigen::Vector3d abc_estimate = v->estimate();
    cout<<"estimated model: "<<abc_estimate.transpose()<<endl;

    return 0;
}
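One possible extension (a minimal sketch under my own assumptions, not in the original post): as written, g2o differentiates the edge numerically; the analytic Jacobian derived earlier can be supplied by overriding linearizeOplus() inside CurveFittingEdge, for example:

    // add inside CurveFittingEdge, next to computeError()
    virtual void linearizeOplus()
    {
        const CurveFittingVertex* v = static_cast<const CurveFittingVertex*> (_vertices[0]);
        const Eigen::Vector3d abc = v->estimate();
        double f = std::exp( abc(0,0)*_x*_x + abc(1,0)*_x + abc(2,0) );  // exp(ax^2+bx+c) at the current estimate
        _jacobianOplusXi[0] = -_x*_x*f;  // de/da
        _jacobianOplusXi[1] = -_x*f;     // de/db
        _jacobianOplusXi[2] = -f;        // de/dc
    }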

 

 

