SRBA: Sparser Relative Bundle Adjustment
srba/impl/compute_minus_gradient.h
/* +---------------------------------------------------------------------------+
   |                     Mobile Robot Programming Toolkit (MRPT)               |
   |                          http://www.mrpt.org/                             |
   |                                                                           |
   | Copyright (c) 2005-2015, Individual contributors, see AUTHORS file        |
   | See: http://www.mrpt.org/Authors - All rights reserved.                   |
   | Released under BSD License. See details in http://www.mrpt.org/License    |
   +---------------------------------------------------------------------------+ */

#pragma once

namespace srba {

/*******************************************
      compute_minus_gradient

        grad = J^t * (h(x)-z)
 *******************************************/
template <class KF2KF_POSE_TYPE,class LM_TYPE,class OBS_TYPE,class RBA_OPTIONS>
void RbaEngine<KF2KF_POSE_TYPE,LM_TYPE,OBS_TYPE,RBA_OPTIONS>::compute_minus_gradient(
    Eigen::VectorXd & minus_grad,
    const std::vector<typename TSparseBlocksJacobians_dh_dAp::col_t*> & sparse_jacobs_Ap,
    const std::vector<typename TSparseBlocksJacobians_dh_df::col_t*> & sparse_jacobs_f,
    const vector_residuals_t  & residuals,
    const std::map<size_t,size_t> &obs_global_idx2residual_idx  // Maps global observation indices to entries in "residuals"
    ) const
{
    // Problem dimensions:
    const size_t POSE_DIMS = kf2kf_pose_t::REL_POSE_DIMS;
    const size_t LM_DIMS   = landmark_t::LM_DIMS;

    const size_t nUnknowns_k2k = sparse_jacobs_Ap.size();
    const size_t nUnknowns_k2f = sparse_jacobs_f.size();

    const size_t idx_start_f = POSE_DIMS*nUnknowns_k2k;
    const size_t nUnknowns_scalars = POSE_DIMS*nUnknowns_k2k + LM_DIMS*nUnknowns_k2f;

    if (static_cast<size_t>(minus_grad.size())!=nUnknowns_scalars)
        minus_grad.resize(nUnknowns_scalars);

    //size_t running_idx_obs=0; // for the precomputed "sequential_obs_indices"

    // grad_Ap: gradient blocks for the keyframe-to-keyframe (relative pose) unknowns
    for (size_t i=0;i<nUnknowns_k2k;i++)
    {
        const typename TSparseBlocksJacobians_dh_dAp::col_t & col_i = *sparse_jacobs_Ap[i];

        array_pose_t accum_g_i;
        accum_g_i.zeros();

        for (typename TSparseBlocksJacobians_dh_dAp::col_t::const_iterator itJ = col_i.begin();itJ != col_i.end();++itJ)
        {
            //const size_t resid_idx = sequential_obs_indices[running_idx_obs++];
            const size_t obs_idx = itJ->first;
            std::map<size_t,size_t>::const_iterator it_obs = obs_global_idx2residual_idx.find(obs_idx);
            ASSERT_(it_obs!=obs_global_idx2residual_idx.end())
            const size_t resid_idx = it_obs->second;

            // Accumulate sub-gradient: g += J^t * \Lambda * residual
            RBA_OPTIONS::obs_noise_matrix_t::template accum_Jtr(accum_g_i, itJ->second.num, residuals[ resid_idx ], obs_idx, this->parameters.obs_noise );
        }
        // Do scaling (if applicable):
        RBA_OPTIONS::obs_noise_matrix_t::template scale_Jtr(accum_g_i, this->parameters.obs_noise );

        minus_grad.block<POSE_DIMS,1>(i*POSE_DIMS,0) = accum_g_i;
    }
    // grad_Af: gradient blocks for the landmark (feature) unknowns
    for (size_t i=0;i<nUnknowns_k2f;i++)
    {
        const typename TSparseBlocksJacobians_dh_df::col_t & col_i = *sparse_jacobs_f[i];

        array_landmark_t accum_g_i;
        accum_g_i.zeros();

        for (typename TSparseBlocksJacobians_dh_df::col_t::const_iterator itJ = col_i.begin();itJ != col_i.end();++itJ)
        {
            //const size_t resid_idx = sequential_obs_indices[running_idx_obs++];
            const size_t obs_idx = itJ->first;
            std::map<size_t,size_t>::const_iterator it_obs = obs_global_idx2residual_idx.find(obs_idx);
            ASSERT_(it_obs!=obs_global_idx2residual_idx.end())
            const size_t resid_idx = it_obs->second;

            // Accumulate sub-gradient: g += J^t * \Lambda * residual
            RBA_OPTIONS::obs_noise_matrix_t::template accum_Jtr(accum_g_i, itJ->second.num, residuals[ resid_idx ], obs_idx, this->parameters.obs_noise );
        }
        // Do scaling (if applicable):
        RBA_OPTIONS::obs_noise_matrix_t::template scale_Jtr(accum_g_i, this->parameters.obs_noise );

        minus_grad.block<LM_DIMS,1>(idx_start_f+i*LM_DIMS,0) = accum_g_i;
    }
}

} // End of namespace srba
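For reference, each iteration of the two loops above builds one block of the stacked gradient vector: it accumulates the transposed Jacobian blocks times their (noise-weighted) residuals, g_i += J^t * \Lambda * r, and then copies the result into minus_grad at the offset of unknown i. The following is a minimal, self-contained sketch of that accumulation in plain Eigen, with hypothetical fixed dimensions and an identity noise matrix; it is not part of the SRBA API.

// Minimal sketch (assumptions: 6-DOF pose block, 2-D observations, Lambda = I).
// This is NOT the SRBA API; it only illustrates the block-wise accumulation
//   g_i = sum_k  J_{k,i}^T * Lambda * r_k
// performed by the loops in compute_minus_gradient().
#include <Eigen/Dense>
#include <iostream>

int main()
{
    const int POSE_DIMS = 6;  // dimension of one k2k unknown block (hypothetical)
    const int OBS_DIMS  = 2;  // dimension of one observation/residual (hypothetical)
    const int N_OBS     = 3;  // observations that involve this unknown

    Eigen::Matrix<double,POSE_DIMS,1> g = Eigen::Matrix<double,POSE_DIMS,1>::Zero();

    for (int k = 0; k < N_OBS; k++)
    {
        // Stand-ins for itJ->second.num (a Jacobian block) and residuals[resid_idx]:
        const Eigen::Matrix<double,OBS_DIMS,POSE_DIMS> J_k =
            Eigen::Matrix<double,OBS_DIMS,POSE_DIMS>::Random();
        const Eigen::Matrix<double,OBS_DIMS,1> r_k =
            Eigen::Matrix<double,OBS_DIMS,1>::Random();

        g += J_k.transpose() * r_k;  // the role of accum_Jtr() when Lambda = I
    }

    // compute_minus_gradient() would now copy this block into the stacked
    // vector: minus_grad.block<POSE_DIMS,1>(i*POSE_DIMS,0) = g;
    std::cout << "g = " << g.transpose() << std::endl;
    return 0;
}

Judging from the comments in the listing, a non-trivial observation noise model would insert the information matrix \Lambda between J^t and the residual inside accum_Jtr(), and scale_Jtr() would then apply an overall scaling to the accumulated block.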