LM Algorithm: Derivation and C++ Implementation
- 1. Algorithm Derivation
- 2. C++ Implementation
- References
1. Algorithm Derivation
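In outline, the method works as follows (a minimal sketch of the standard Levenberg-Marquardt formulation that the code in the next section implements). With residual vector $\mathbf{f}(\mathbf{x})$ and Jacobian $\mathbf{J}$, the cost and its ingredients are

$$F(\mathbf{x}) = \tfrac{1}{2}\,\mathbf{f}(\mathbf{x})^{T}\mathbf{f}(\mathbf{x}), \qquad \mathbf{g} = \mathbf{J}^{T}\mathbf{f}, \qquad \mathbf{A} = \mathbf{J}^{T}\mathbf{J}.$$

Each iteration solves the damped normal equations

$$(\mathbf{A} + \mu\mathbf{I})\,\mathbf{h}_{lm} = -\mathbf{g}, \qquad \mu > 0,$$

so a large $\mu$ gives a short, gradient-descent-like step while a small $\mu$ gives a Gauss-Newton step. The step is judged by the gain ratio between the actual reduction of $F$ and the reduction predicted by the linear model $L(\mathbf{h}) = F(\mathbf{x}) + \mathbf{h}^{T}\mathbf{J}^{T}\mathbf{f} + \tfrac{1}{2}\mathbf{h}^{T}\mathbf{J}^{T}\mathbf{J}\mathbf{h}$:

$$\varrho = \frac{F(\mathbf{x}) - F(\mathbf{x} + \mathbf{h}_{lm})}{L(\mathbf{0}) - L(\mathbf{h}_{lm})}, \qquad L(\mathbf{0}) - L(\mathbf{h}_{lm}) = \tfrac{1}{2}\,\mathbf{h}_{lm}^{T}(\mu\,\mathbf{h}_{lm} - \mathbf{g}).$$

If $\varrho > 0$ the step is accepted and the damping is relaxed, $\mu \leftarrow \mu \cdot \max\!\left(\tfrac{1}{3},\, 1 - (2\varrho - 1)^{3}\right)$, $\nu \leftarrow 2$; otherwise the step is rejected and $\mu \leftarrow \mu\nu$, $\nu \leftarrow 2\nu$. The damping starts at $\mu_0 = \tau \cdot \max_i A_{ii}$ (the code uses $\tau = 10^{-3}$), and iteration stops when $\|\mathbf{g}\| \le \varepsilon_1$ or $\|\mathbf{h}_{lm}\| \le \varepsilon_2(\|\mathbf{x}\| + \varepsilon_2)$.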
2. C++ Implementation
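The full listing follows. It fits the model $y = A\sin(Bx) + C\cos(Dx)$ to 100 noisy samples generated from the ground-truth parameters $A=5$, $B=1$, $C=10$, $D=2$, starting from the initial guess $(3.6, 1.3, 7.2, 1.7)$. The Jacobian is computed numerically by central differences with step $10^{-5}$, so the same scaffolding can be reused for any residual function.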
```cpp
#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <iostream>
#include <iomanip>
#include <cmath>

using namespace std;
using namespace Eigen;

const double DERIV_STEP = 1e-5;
const int MAX_ITER = 100;

#define max(a,b) (((a)>(b))?(a):(b))

// Residual of observation objIndex for the model obj = A*sin(B*x) + C*cos(D*x) - F.
double func(const VectorXd& input, const VectorXd& output, const VectorXd& params, int objIndex)
{
    double x1 = params(0);
    double x2 = params(1);
    double x3 = params(2);
    double x4 = params(3);

    double t = input(objIndex);
    double f = output(objIndex);

    return x1 * sin(x2 * t) + x3 * cos(x4 * t) - f;
}

// Return the residual vector made up of func() elements.
VectorXd objF(const VectorXd& input, const VectorXd& output, const VectorXd& params)
{
    VectorXd obj(input.rows());
    for (int i = 0; i < input.rows(); i++)
        obj(i) = func(input, output, params, i);
    return obj;
}

// F = (f^T * f) / 2: half the sum of squared residuals.
double Func(const VectorXd& obj)
{
    return obj.squaredNorm() / 2;
}

// Central-difference derivative of residual objIndex w.r.t. parameter paraIndex.
double Deriv(const VectorXd& input, const VectorXd& output, int objIndex,
             const VectorXd& params, int paraIndex)
{
    VectorXd para1 = params;
    VectorXd para2 = params;
    para1(paraIndex) -= DERIV_STEP;
    para2(paraIndex) += DERIV_STEP;

    double obj1 = func(input, output, para1, objIndex);
    double obj2 = func(input, output, para2, objIndex);

    return (obj2 - obj1) / (2 * DERIV_STEP);
}

// Numerical Jacobian of the residual vector.
MatrixXd Jacobin(const VectorXd& input, const VectorXd& output, const VectorXd& params)
{
    int rowNum = input.rows();
    int colNum = params.rows();

    MatrixXd Jac(rowNum, colNum);
    for (int i = 0; i < rowNum; i++)
        for (int j = 0; j < colNum; j++)
            Jac(i, j) = Deriv(input, output, i, params, j);
    return Jac;
}

// Largest diagonal entry of J^T*J, used to initialize the damping factor u.
double maxMatrixDiagonale(const MatrixXd& Hessian)
{
    double maxValue = 0;
    for (int i = 0; i < Hessian.rows(); i++)
    {
        if (Hessian(i, i) > maxValue)
            maxValue = Hessian(i, i);
    }
    return maxValue;
}

// L(h) = F(x) + h^T*J^T*f + h^T*J^T*J*h/2
// Predicted reduction of the linear model: L(0) - L(h) = h^T * (u*h - g) / 2
double linerDeltaL(const VectorXd& step, const VectorXd& gradient, const double u)
{
    double L = step.transpose() * (u * step - gradient);
    return L / 2;
}

void levenMar(const VectorXd& input, const VectorXd& output, VectorXd& params)
{
    int paraNum = params.rows();  // number of parameters

    VectorXd obj = objF(input, output, params);     // residual vector
    MatrixXd Jac = Jacobin(input, output, params);  // Jacobian
    MatrixXd A = Jac.transpose() * Jac;             // Gauss-Newton Hessian approximation (4x4 here)
    VectorXd gradient = Jac.transpose() * obj;      // gradient

    // Initialize tao, v, epsilon1, epsilon2.
    double tao = 1e-3;
    long long v = 2;
    double eps1 = 1e-12, eps2 = 1e-12;
    double u = tao * maxMatrixDiagonale(A);  // initial damping: tao times the largest diagonal of A
    bool found = gradient.norm() <= eps1;    // gradient already below threshold: nothing to do
    if (found) return;

    int iterCnt = 0;
    while (iterCnt < MAX_ITER)
    {
        VectorXd obj = objF(input, output, params);
        MatrixXd Jac = Jacobin(input, output, params);
        MatrixXd A = Jac.transpose() * Jac;
        VectorXd gradient = Jac.transpose() * obj;

        if (gradient.norm() <= eps1)
        {
            cout << "stop: g(x) = 0 for a local minimizer" << endl;
            break;
        }

        cout << "A: " << endl << A << endl;

        // Solve (A + u*I) * step = g; the LM step is h_lm = -step.
        VectorXd step = (A + u * MatrixXd::Identity(paraNum, paraNum)).inverse() * gradient;
        cout << "step: " << endl << step << endl;

        if (step.norm() <= eps2 * (params.norm() + eps2))
        {
            cout << "stop: change in x is small" << endl;
            break;
        }

        VectorXd paramsNew(params.rows());
        paramsNew = params - step;  // h_lm = -step

        // Compute f(x) and f(x_new).
        obj = objF(input, output, params);
        VectorXd obj_new = objF(input, output, paramsNew);

        double deltaF = Func(obj) - Func(obj_new);           // actual reduction
        double deltaL = linerDeltaL(-1 * step, gradient, u); // predicted reduction

        double roi = deltaF / deltaL;  // gain ratio
        cout << "roi is : " << roi << endl;

        if (roi > 0)
        {
            // Good step: accept it and relax the damping (Nielsen's update).
            params = paramsNew;
            u *= max(1.0 / 3.0, 1 - pow(2 * roi - 1, 3));
            v = 2;
        }
        else
        {
            // Bad step: reject it and increase the damping.
            u = u * v;
            v = v * 2;
        }

        cout << "u = " << u << " v = " << v << endl;

        iterCnt++;
        cout << "Iteration " << iterCnt << ", result is:" << endl << endl;
    }
}

int main(int argc, char* argv[])
{
    // Model: obj = A * sin(B*x) + C * cos(D*x) - F
    // Four parameters to estimate: A, B, C, D.
    int num_params = 4;

    // Generate noisy observations from the ground-truth parameters.
    int total_data = 100;
    VectorXd input(total_data);
    VectorXd output(total_data);

    double A = 5, B = 1, C = 10, D = 2;
    for (int i = 0; i < total_data; i++)
    {
        // Random x in [-10, 10], noise deltaY in [0, 2].
        double x = 20.0 * ((rand() % 1000) / 1000.0) - 10.0;
        double deltaY = 2.0 * (rand() % 1000) / 1000.0;
        double y = A * sin(B * x) + C * cos(D * x) + deltaY;
        input(i) = x;
        output(i) = y;
    }

    // Initial guess for the parameters.
    VectorXd params_gaussNewton(num_params);
    params_gaussNewton << 3.6, 1.3, 7.2, 1.7;

    VectorXd params_levenMar = params_gaussNewton;
    levenMar(input, output, params_levenMar);
    cout << "Levenberg-Marquardt parameter: " << endl << params_levenMar << endl << endl << endl;
}
```
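A note on the linear solve: the listing inverts the 4x4 damped normal matrix directly, which is acceptable at this size. For larger parameter vectors a factorization is the usual choice; the line below is a possible drop-in replacement for the `step` computation (a sketch using Eigen's LDLT solver, not part of the original listing):

```cpp
// Solve (A + u*I) * step = gradient with a robust Cholesky (LDLT)
// factorization instead of forming the explicit inverse.
VectorXd step = (A + u * MatrixXd::Identity(paraNum, paraNum)).ldlt().solve(gradient);
```

To build the program only the Eigen headers are required, e.g. `g++ -I /usr/include/eigen3 lm.cpp -o lm` (the include path is an assumption; adjust it to wherever Eigen is installed).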
References

1. https://zhuanlan.zhihu.com/p/136143299
2. https://blog.csdn.net/stihy/article/details/52737723
3. M. Lourakis, "A Brief Description of the Levenberg-Marquardt Algorithm Implemented by levmar"