Solving ICP with Ceres -- Exercise from Chapter 7 of 14 Lectures on Visual SLAM

by jie 2018.8.10
Reference: How to do BA optimization between two frames with Ceres


Approach

Previously I used Ceres to solve the PnP problem, where the 3D-2D cost function was built from the reprojection error. What about 3D-3D?
Reprojection error works here too. The idea: transform the 3D points from the first frame's camera coordinate system into the second frame with the rotation and translation being estimated, then project them into pixel coordinates with the camera intrinsics. The matched 3D points observed in the second frame are projected the same way, and the residual is the difference between the two pixel coordinates.
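In symbols, the residual implemented by the cost functor below is

r = \pi(R\,p_1 + t) - \pi(p_2), \qquad \pi\big([X,Y,Z]^\top\big) = \Big(f_x\,\tfrac{X}{Z} + c_x,\; f_y\,\tfrac{Y}{Z} + c_y\Big)

where (R, t) is the pose being optimized and (f_x, f_y, c_x, c_y) are the camera intrinsics.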

First, the complete code:

#include <iostream>
#include <opencv2/core/core.hpp>
#include <ceres/ceres.h>
#include <chrono>

#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <Eigen/Core>
#include <Eigen/Geometry>
#include <Eigen/SVD>

#include "common/rotation.h"
using namespace std;
using namespace cv;

void find_feature_matches (
    const Mat& img_1, const Mat& img_2,
    std::vector<KeyPoint>& keypoints_1,
    std::vector<KeyPoint>& keypoints_2,
    std::vector< DMatch >& matches );

// convert pixel coordinates to normalized camera coordinates
Point2d pixel2cam ( const Point2d& p, const Mat& K );


void pose_estimation_3d3d (
    const vector<Point3f>& pts1,
    const vector<Point3f>& pts2,
    Mat& R, Mat& t
);


struct cost_function_define
{
  cost_function_define(Point3f p1,Point3f p2):_p1(p1),_p2(p2){}
  template<typename T>
  bool operator()(const T* const cere_r,const T* const cere_t,T* residual)const
  {
    T p_1[3];
    T p_2[3];
    p_1[0]=T(_p1.x);
    p_1[1]=T(_p1.y);
    p_1[2]=T(_p1.z);
    AngleAxisRotatePoint(cere_r,p_1,p_2);
    p_2[0]=p_2[0]+cere_t[0];
    p_2[1]=p_2[1]+cere_t[1];
    p_2[2]=p_2[2]+cere_t[2];
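    // project the transformed point to pixel coordinates with the camera intrinsics
    // (hard-coded here; the values match K defined in main())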
    const T x=p_2[0]/p_2[2];
    const T y=p_2[1]/p_2[2];
    const T u=x*520.9+325.1;
    const T v=y*521.0+249.7;
    T p_3[3];
    p_3[0]=T(_p2.x);
    p_3[1]=T(_p2.y);
    p_3[2]=T(_p2.z);
    const T x1=p_3[0]/p_3[2];
    const T y1=p_3[1]/p_3[2];
    const T u1=x1*520.9+325.1;
    const T v1=y1*521.0+249.7;
    residual[0]=u-u1;
    residual[1]=v-v1;
    return true;
  }
   Point3f _p1,_p2;
};
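// When this functor is wrapped in ceres::AutoDiffCostFunction<cost_function_define,2,3,3>
// below, the template arguments mean: 2 residual dimensions (u and v), a 3-dimensional
// parameter block for the angle-axis rotation, and a 3-dimensional block for the translation.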




int main ( int argc, char** argv )
{
    if ( argc != 5 )
    {
        cout<<"usage: pose_estimation_3d3d img1 img2 depth1 depth2"<<endl;
        return 1;
    }
    //-- read the images
    Mat img_1 = imread ( argv[1], CV_LOAD_IMAGE_COLOR );   // on OpenCV 3/4, cv::IMREAD_COLOR is the non-deprecated flag
    Mat img_2 = imread ( argv[2], CV_LOAD_IMAGE_COLOR );

    vector<KeyPoint> keypoints_1, keypoints_2;
    vector<DMatch> matches;
    find_feature_matches ( img_1, img_2, keypoints_1, keypoints_2, matches );
    cout<<"一共找到了"<<matches.size() <<"組匹配點"<<endl;

    // build the 3D points
    Mat depth1 = imread ( argv[3], CV_LOAD_IMAGE_UNCHANGED );       // the depth maps are 16-bit unsigned, single-channel images
    Mat depth2 = imread ( argv[4], CV_LOAD_IMAGE_UNCHANGED );
    Mat K = ( Mat_<double> ( 3,3 ) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1 );
    vector<Point3f> pts1, pts2;

    for ( DMatch m:matches )
    {
        ushort d1 = depth1.ptr<unsigned short> ( int ( keypoints_1[m.queryIdx].pt.y ) ) [ int ( keypoints_1[m.queryIdx].pt.x ) ];
        ushort d2 = depth2.ptr<unsigned short> ( int ( keypoints_2[m.trainIdx].pt.y ) ) [ int ( keypoints_2[m.trainIdx].pt.x ) ];
        if ( d1==0 || d2==0 )   // bad depth
            continue;
        Point2d p1 = pixel2cam ( keypoints_1[m.queryIdx].pt, K );
        Point2d p2 = pixel2cam ( keypoints_2[m.trainIdx].pt, K );
        float dd1 = float ( d1 ) /1000.0;
        float dd2 = float ( d2 ) /1000.0;
        pts1.push_back ( Point3f ( p1.x*dd1, p1.y*dd1, dd1 ) );
        pts2.push_back ( Point3f ( p2.x*dd2, p2.y*dd2, dd2 ) );
    }

    cout<<"3d-3d pairs: "<<pts1.size() <<endl;
    Mat R, t;
    pose_estimation_3d3d ( pts1, pts2, R, t );
    cout<<"ICP via SVD results: "<<endl;
    cout<<"R = "<<R<<endl;
    cout<<"t = "<<t<<endl;
    cout<<"R_inv = "<<R.t() <<endl;
    cout<<"t_inv = "<<-R.t() *t<<endl;

  for ( int i=0; i<5; i++ )
    {
        cout<<"p1 = "<<pts1[i]<<endl;
        cout<<"p2 = "<<pts2[i]<<endl;
        cout<<"(R*p2+t) = "<< 
            R * (Mat_<double>(3,1)<<pts2[i].x, pts2[i].y, pts2[i].z) + t
            <<endl;
        cout<<endl;
    }

    cout<<"----------------------------------"<<endl;
    cout<<"calling bundle adjustment"<<endl;

    
     double cere_rot[3],cere_tranf[3];
     cere_rot[0]=0;
     cere_rot[1]=0;
     cere_rot[2]=0;
     cere_tranf[0]=t.at<double>(0,0);
     cere_tranf[1]=t.at<double>(1,0);
     cere_tranf[2]=t.at<double>(2,0);
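     // Initial guess: zero rotation (angle-axis) plus the translation from the SVD
     // solution above. Seeding the rotation from R as well (via cv::Rodrigues) would
     // give the solver an even better starting point.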

  //  bundleAdjustment( pts1, pts2, R, t );
    ceres::Problem problem;
  for ( size_t i=0; i<pts1.size(); i++ )
  {
    ceres::CostFunction* costfunction=new ceres::AutoDiffCostFunction<cost_function_define,2,3,3>(new cost_function_define(pts1[i],pts2[i]));
    problem.AddResidualBlock(costfunction,NULL,cere_rot,cere_tranf);// note: the parameter blocks must be plain double arrays, not cv::Mat
  }

  
  ceres::Solver::Options option;
  option.linear_solver_type=ceres::DENSE_SCHUR;
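  // Note: DENSE_SCHUR is aimed at BA problems with many landmark parameter blocks;
  // for this small problem with only two parameter blocks, a dense solver such as
  // ceres::DENSE_QR would work just as well.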
  // print iteration progress to stdout
  option.minimizer_progress_to_stdout=true;
  ceres::Solver::Summary summary;
  // solve
  ceres::Solve(option,&problem,&summary);
  // print a brief report of the optimization
  cout<<summary.BriefReport()<<endl;

    // verify p2 = R*p1 + t (the Ceres result maps frame 1 into frame 2)

  cout<<"-----------after optimization---------------"<<endl;
  
  Mat cam_3d = ( Mat_<double> ( 3,1 )<<cere_rot[0],cere_rot[1],cere_rot[2]);
  Mat cam_9d;
  cv::Rodrigues ( cam_3d, cam_9d ); // cam_3d is an angle-axis rotation vector; Rodrigues converts it to a rotation matrix

  cout<<"cam_9d:"<<endl<<cam_9d<<endl;
  cout<<"cam_t:"<<cere_tranf[0]<<"  "<<cere_tranf[1]<<"  "<<cere_tranf[2]<<endl;
  Mat tranf_3d = ( Mat_<double> ( 3,1 )<<cere_tranf[0],cere_tranf[1],cere_tranf[2]);


  for ( int i=0; i<5; i++ )
    {
        cout<<"p1 = "<<pts1[i]<<endl;
        cout<<"p2 = "<<pts2[i]<<endl;
        cout<<"(R*p1+t) = "<< 
            cam_9d * (Mat_<double>(3,1)<<pts1[i].x, pts1[i].y, pts1[i].z) + tranf_3d
            <<endl;
        cout<<endl;
    }
  
    
}

void find_feature_matches ( const Mat& img_1, const Mat& img_2,
                            std::vector<KeyPoint>& keypoints_1,
                            std::vector<KeyPoint>& keypoints_2,
                            std::vector< DMatch >& matches )
{
    //-- initialization
    Mat descriptors_1, descriptors_2;
    // used in OpenCV3 
    Ptr<FeatureDetector> detector = ORB::create();
    Ptr<DescriptorExtractor> descriptor = ORB::create();
    // use this if you are in OpenCV2 
    // Ptr<FeatureDetector> detector = FeatureDetector::create ( "ORB" );
    // Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create ( "ORB" );
    Ptr<DescriptorMatcher> matcher  = DescriptorMatcher::create("BruteForce-Hamming");
    //-- step 1: detect Oriented FAST corner positions
    detector->detect ( img_1,keypoints_1 );
    detector->detect ( img_2,keypoints_2 );

    //-- step 2: compute BRIEF descriptors from the detected corners
    descriptor->compute ( img_1, keypoints_1, descriptors_1 );
    descriptor->compute ( img_2, keypoints_2, descriptors_2 );

    //-- step 3: match the BRIEF descriptors between the two images using Hamming distance
    vector<DMatch> match;
   // BFMatcher matcher ( NORM_HAMMING );
    matcher->match ( descriptors_1, descriptors_2, match );

    //-- step 4: filter the matched pairs
    double min_dist=10000, max_dist=0;

    // find the min and max distance over all matches, i.e. between the most similar and the least similar pairs
    for ( int i = 0; i < descriptors_1.rows; i++ )
    {
        double dist = match[i].distance;
        if ( dist < min_dist ) min_dist = dist;
        if ( dist > max_dist ) max_dist = dist;
    }

    printf ( "-- Max dist : %f \n", max_dist );
    printf ( "-- Min dist : %f \n", min_dist );

    // a match is considered wrong when the descriptor distance exceeds twice the minimum distance;
    // since the minimum can be very small, an empirical value of 30 is used as a lower bound
    for ( int i = 0; i < descriptors_1.rows; i++ )
    {
        if ( match[i].distance <= max ( 2*min_dist, 30.0 ) )
        {
            matches.push_back ( match[i] );
        }
    }
}

Point2d pixel2cam ( const Point2d& p, const Mat& K )
{
    return Point2d
           (
               ( p.x - K.at<double> ( 0,2 ) ) / K.at<double> ( 0,0 ),
               ( p.y - K.at<double> ( 1,2 ) ) / K.at<double> ( 1,1 )
           );
}

void pose_estimation_3d3d (
    const vector<Point3f>& pts1,
    const vector<Point3f>& pts2,
    Mat& R, Mat& t
)
{
    Point3f p1, p2;     // center of mass
    int N = pts1.size();
    for ( int i=0; i<N; i++ )
    {
        p1 += pts1[i];
        p2 += pts2[i];
    }
    p1 = Point3f( Vec3f(p1) /  N);
    p2 = Point3f( Vec3f(p2) / N);
    vector<Point3f>     q1 ( N ), q2 ( N ); // remove the center
    for ( int i=0; i<N; i++ )
    {
        q1[i] = pts1[i] - p1;
        q2[i] = pts2[i] - p2;
    }

    // compute q1*q2^T
    Eigen::Matrix3d W = Eigen::Matrix3d::Zero();
    for ( int i=0; i<N; i++ )
    {
        W += Eigen::Vector3d ( q1[i].x, q1[i].y, q1[i].z ) * Eigen::Vector3d ( q2[i].x, q2[i].y, q2[i].z ).transpose();
    }
    cout<<"W="<<W<<endl;

    // SVD on W
    Eigen::JacobiSVD<Eigen::Matrix3d> svd ( W, Eigen::ComputeFullU|Eigen::ComputeFullV );
    Eigen::Matrix3d U = svd.matrixU();
    Eigen::Matrix3d V = svd.matrixV();
    cout<<"U="<<U<<endl;
    cout<<"V="<<V<<endl;

    Eigen::Matrix3d R_ = U* ( V.transpose() );
    if ( R_.determinant() < 0 )   // if the SVD returns a reflection (det = -1), flip the sign to get a proper rotation
        R_ = -R_;
    Eigen::Vector3d t_ = Eigen::Vector3d ( p1.x, p1.y, p1.z ) - R_ * Eigen::Vector3d ( p2.x, p2.y, p2.z );

    // convert to cv::Mat
    R = ( Mat_<double> ( 3,3 ) <<
          R_ ( 0,0 ), R_ ( 0,1 ), R_ ( 0,2 ),
          R_ ( 1,0 ), R_ ( 1,1 ), R_ ( 1,2 ),
          R_ ( 2,0 ), R_ ( 2,1 ), R_ ( 2,2 )
        );
    t = ( Mat_<double> ( 3,1 ) << t_ ( 0,0 ), t_ ( 1,0 ), t_ ( 2,0 ) );
}

Code analysis

The first part of the code solves ICP with SVD.
The second part solves it with Ceres.
A few points to note:

  • The rotation here is represented neither as a rotation matrix nor as a quaternion, but as an angle-axis (rotation vector).
    AngleAxisRotatePoint(cere_r,p_1,p_2) rotates a 3D point by it, which is equivalent to left-multiplying by the corresponding rotation matrix (a small check of this equivalence is sketched after this list).
  • The observations are the matched 3D points seen in the two frames; the optimization variables are the camera extrinsics.
  • The book solves for the transform from the second frame to the first, whereas here I solve for the transform from the first frame to the second, so the two results are inverses of each other.
  • For everything else, see the previous article:
    Solving PnP with Ceres -- Exercise from Chapter 7 of 14 Lectures on Visual SLAM
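As a quick sanity check of the angle-axis convention (a minimal standalone sketch, not part of the program above), rotating a point with AngleAxisRotatePoint should agree with left-multiplying by the matrix that cv::Rodrigues produces from the same rotation vector:

#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <ceres/rotation.h>

int main()
{
    double r[3] = {0.1, -0.2, 0.3};   // an arbitrary angle-axis rotation vector
    double p[3] = {1.0, 2.0, 3.0};    // a point to rotate
    double p_ceres[3];
    ceres::AngleAxisRotatePoint ( r, p, p_ceres );   // rotate with ceres

    cv::Mat rvec = ( cv::Mat_<double> ( 3,1 ) << r[0], r[1], r[2] );
    cv::Mat R;
    cv::Rodrigues ( rvec, R );        // same vector converted to a 3x3 rotation matrix
    cv::Mat p_cv = R * ( cv::Mat_<double> ( 3,1 ) << p[0], p[1], p[2] );

    // both outputs should agree to machine precision
    std::cout << "ceres:  " << p_ceres[0] << " " << p_ceres[1] << " " << p_ceres[2] << std::endl;
    std::cout << "opencv: " << p_cv.t() << std::endl;
    return 0;
}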