SRBA: Sparser Relative Bundle Adjustment
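The header below implements the Schur complement trick used by SRBA's solver: the feature (landmark) unknowns are marginalized out of the sparse Gauss-Newton system so that only the pose-like unknowns "Ap" remain, and the feature increments are recovered afterwards by back-substitution. As a sketch of the algebra (my notation, mapping H_Ap, H_f, H_Apf to the code's HAp, Hf, HApf, g to the right-hand side stored in the minus-gradient vectors, and lambda to the damping added to the diagonal of each feature block):

    \bar{H}_{Ap} = H_{Ap} - H_{Ap,f} (H_f + \lambda I)^{-1} H_{Ap,f}^\top
    \bar{g}_{Ap} = g_{Ap} - H_{Ap,f} (H_f + \lambda I)^{-1} g_f
    \delta_f     = (H_f + \lambda I)^{-1} ( g_f - H_{Ap,f}^\top \delta_{Ap} )

where \delta_{Ap} solves the reduced system \bar{H}_{Ap} \delta_{Ap} = \bar{g}_{Ap}. The reduced solve itself is left to the caller; this class performs only the reduction (numeric_build_reduced_system) and the back-substitution (numeric_solve_for_features), block by block over the sparse Hessians.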
/* +---------------------------------------------------------------------------+
   |                 Mobile Robot Programming Toolkit (MRPT)                    |
   |                          http://www.mrpt.org/                              |
   |                                                                            |
   | Copyright (c) 2005-2015, Individual contributors, see AUTHORS file         |
   | See: http://www.mrpt.org/Authors - All rights reserved.                    |
   | Released under BSD License. See details in http://www.mrpt.org/License     |
   +---------------------------------------------------------------------------+ */

#pragma once

namespace srba {

/** Implements the Schur complement reduction of the sparse SRBA Hessian:
  * the feature (landmark) unknowns are marginalized out so that only the
  * pose-like unknowns remain, and are later recovered by back-substitution. */
template <class HESS_Ap, class HESS_f, class HESS_Apf>
class SchurComplement
{
public:

	/** Symbolic constructor: analyzes the sparsity of HAp, Hf and HApf and builds
	  * the lists of block operations needed by the numeric stages below. */
	SchurComplement(HESS_Ap &_HAp, HESS_f & _Hf, HESS_Apf & _HApf, double * _minus_grad_Ap, double * _minus_grad_f)
		: HAp(_HAp), Hf(_Hf), HApf(_HApf),
		  minus_grad_Ap(_minus_grad_Ap),
		  minus_grad_f(_minus_grad_f),
		  // Problem dims:
		  nUnknowns_Ap( HAp.getColCount() ),
		  nUnknowns_f( Hf.getColCount() ),
		  nHf_invertible_blocks(0)
	{
		if (!nUnknowns_f || !nUnknowns_Ap) return;

		// 0) Make a copy of the original numerical values of HAp:
		// -----------------------------------------------------------------
		HAp_original.copyNumericalValuesFrom( HAp );

		// 1) List of all diagonal blocks in Hf which have to be inverted
		// -----------------------------------------------------------------
		m_Hf_blocks_info.resize(nUnknowns_f);
		for (size_t i=0;i<nUnknowns_f;i++)
		{
			const typename HESS_f::col_t & col_i = Hf.getCol(i);
			ASSERT_(!col_i.empty() && col_i.rbegin()->first==i)

			m_Hf_blocks_info[i].sym_Hf_diag_blocks = &col_i.rbegin()->second.num;
		}

		// 2) Build instructions to reduce H_Ap and grad_Ap
		// ----------------------------------------------------
		m_sym_HAp_reduce.clear();
		m_sym_GradAp_reduce.resize(nUnknowns_Ap);

		for (size_t i=0;i<nUnknowns_Ap;i++)
		{	// Only upper-half triangle:
			typename HESS_Ap::col_t & HAp_col_i = HAp.getCol(i);

			// Go through all the "j" (row) indices j \in [0,i],
			// even if there is no entry in the column map "HAp_col_i" (yet).
			// If the block (i,j) has any feature in common, then we'll add
			// a block HAp_{i,j} right here and add the corresponding instructions for building the numeric Schur.
			for (size_t j=0;j<=i;j++)
			{
				// We are at the HAp block (i,j):
				// Make a list of all the:
				//   \Sum H_{pi,lk} * H_{lk}^{-1} * H_{pj,lk}^t
				//
				// This amounts to looking for the "intersecting set" of the two rows "i" & "j" in HApf:
				THApSymbolicEntry sym_ij;

				// (i==j) -> take advantage of repeated terms and build instructions for the gradient:
				TGradApSymbolicEntry *grad_entries = (i==j) ? &m_sym_GradAp_reduce[i] : NULL;

				// Get rows i & j from HAp_f:
				typename HESS_Apf::col_t & row_i = HApf.getCol(i);
				typename HESS_Apf::col_t & row_j = HApf.getCol(j);

				// Code below based on "std::set_intersection":
				typename HESS_Apf::col_t::const_iterator it_i = row_i.begin();
				typename HESS_Apf::col_t::const_iterator it_j = row_j.begin();
				const typename HESS_Apf::col_t::const_iterator it_i_end = row_i.end();
				const typename HESS_Apf::col_t::const_iterator it_j_end = row_j.end();

				while (it_i!=it_i_end && it_j!=it_j_end)
				{
					if      ( it_i->first < it_j->first ) ++it_i;
					else if ( it_j->first < it_i->first ) ++it_j;
					else
					{
						// Match between: it_i->first == it_j->first

						// Add a new triplet:
						//  * const typename HESS_Apf::matrix_t * Hpi_lk;
						//  * const typename HESS_f::matrix_t   * inv_Hf_lk;
						//  * const typename HESS_Apf::matrix_t * Hpj_lk;

						const size_t idx_feat = it_j->first;
						ASSERT_(idx_feat<nUnknowns_f)

						// Gradient (only if we're at i==j)
						typename HESS_Apf::matrix_t *out_temporary_result = NULL;
						if (grad_entries)
						{
							grad_entries->lst_terms_to_subtract.resize( grad_entries->lst_terms_to_subtract.size()+1 );
							typename TGradApSymbolicEntry::TEntry &ent = *grad_entries->lst_terms_to_subtract.rbegin();
							ent.feat_idx = idx_feat;
							out_temporary_result = &ent.Hpi_lk_times_inv_Hf_lk;
						}

						// Hessian:
#if 0
						std::cout << "SymSchur.HAp("<<i<<","<<j<< "): HApf_" << it_i->first << " * HApf_" << it_j->first << std::endl;
#endif
						sym_ij.lst_terms_to_add.push_back( typename THApSymbolicEntry::TEntry(
							&it_j->second.num,  //&it_i->second.num,
							&m_Hf_blocks_info[idx_feat],
							&it_i->second.num,  //&it_j->second.num,
							out_temporary_result ) );

						// Move:
						++it_i; ++it_j;
					}
				} // end while (find intersect)

				// Only append if not empty:
				if (!sym_ij.lst_terms_to_add.empty())
				{
					// There are common observations between Api & Apj:
					// 1) If there was already an HAp_ij block, take a reference to it.
					// 2) Otherwise, insert it now.
					typename HESS_Ap::col_t::iterator itExistingRowEntry;
					if ( ((i==j) || (i!=j && HAp_col_i.size()!=1))   // If size==1 don't even waste time: it's a diagonal block.
						&&
						(itExistingRowEntry=HAp_col_i.find(j))!= HAp_col_i.end())
					{
						// Add reference to target matrix for numeric Schur:
						sym_ij.HAp_ij = & itExistingRowEntry->second.num;
					}
					else
					{
						ASSERT_(i!=j)  // We should only reach here for off-diagonal blocks!
						// Create & add reference to target matrix for numeric Schur:
						sym_ij.HAp_ij = & HAp_col_i[j].num;
						// Clear initial contents of the Hessian to all zeros:
						sym_ij.HAp_ij->setZero();
					}

					// Fast move into the deque:
					m_sym_HAp_reduce.resize( m_sym_HAp_reduce.size()+1 );
					sym_ij.swap( *m_sym_HAp_reduce.rbegin() );
				}

			} // end for j (row in HAp)
		} // end for i (col in HAp)

	} // end of ctor.

	/** Must be invoked after the numerical values of HAp change, so that the
	  * internal copy used by numeric_build_reduced_system() stays in sync. */
	void realize_HAp_changed()
	{
		HAp_original.copyNumericalValuesFrom( HAp );
	}

	size_t getNumFeatures() const { return nUnknowns_f; }
	size_t getNumFeaturesFullRank() const { return nHf_invertible_blocks; }

	/** Replaces HAp and minus_grad_Ap in-place with the Schur-complement reduced
	  * system, adding the damping value \a lambda to the diagonal of each Hf block. */
	void numeric_build_reduced_system(const double lambda)
	{
		if (!nUnknowns_f || !nUnknowns_Ap) return;

		// 0) Restore the original state of the Hessian (since all changes are defined as sums/subtractions on it)
		//    and this operation can be invoked several times with the same original HAp but different lambda values
		//    (which forces a total recomputation due to the inv(Hf+\lambda*I) terms).
		// --------------------------------------------------------------------------------
		HAp.copyNumericalValuesFrom( HAp_original );

		// 1) Invert diagonal blocks in Hf:
		// ---------------------------------
		nHf_invertible_blocks=0;
		for (size_t i=0;i<nUnknowns_f;i++)
		{
			// LU decomposition is rank-revealing (unlike LLt):
			typename HESS_f::matrix_t Hfi = *m_Hf_blocks_info[i].sym_Hf_diag_blocks;
			for (int k=0;k<Hfi.cols();k++)
				Hfi.coeffRef(k,k)+=lambda;

			const Eigen::FullPivLU<typename HESS_f::matrix_t> lu( Hfi );

			// Badly conditioned matrix?
			if (true== (m_Hf_blocks_info[i].num_Hf_diag_blocks_invertible = lu.isInvertible() ))
			{
				nHf_invertible_blocks++;
				m_Hf_blocks_info[i].num_Hf_diag_blocks_inverses = lu.inverse();
			}
		}

		// 2) H_Ap of the reduced system:
		// ---------------------------------
		typename HESS_Apf::matrix_t aux_Hpi_lk_times_inv_Hf_lk;
		for (typename std::deque<THApSymbolicEntry>::const_iterator it=m_sym_HAp_reduce.begin();it!=m_sym_HAp_reduce.end();++it)
		{
			const THApSymbolicEntry &sym_entry = *it;

			typename HESS_Ap::matrix_t & HAp_ij = *sym_entry.HAp_ij;

			const size_t N = sym_entry.lst_terms_to_add.size();

			//std::cout << "before:\n" << HAp_ij << std::endl;
			for (size_t i=0;i<N;i++)
			{
				const typename THApSymbolicEntry::TEntry &entry = sym_entry.lst_terms_to_add[i];

				if (entry.inv_Hf_lk->num_Hf_diag_blocks_invertible)
				{
					//cout << "Hfinv:\n" << entry.inv_Hf_lk->num_Hf_diag_blocks_inverses << endl;
					// \bar{Hp} -= Hpi_lk * inv(Hf_lk) * Hpj_lk^t

					// Store this term for reuse with the gradient update, or use temporary local storage:
					typename HESS_Apf::matrix_t * Hpi_lk_times_inv_Hf_lk =
						entry.out_Hpi_lk_times_inv_Hf_lk!=NULL
						? entry.out_Hpi_lk_times_inv_Hf_lk : &aux_Hpi_lk_times_inv_Hf_lk;

					Hpi_lk_times_inv_Hf_lk->noalias() = (*entry.Hpi_lk) * entry.inv_Hf_lk->num_Hf_diag_blocks_inverses;

					//std::cout << "product #" << i << ":\nHpi_lk:\n" << (*entry.Hpi_lk) << "\nHfi^-1:\n" << entry.inv_Hf_lk->num_Hf_diag_blocks_inverses << "\nHpj_lk^t:\n" << entry.Hpj_lk->transpose() << "\n\n";
					HAp_ij.noalias() -= (*Hpi_lk_times_inv_Hf_lk) * (*entry.Hpj_lk).transpose();
				}
			}
			//std::cout << "after:\n" << HAp_ij << std::endl;
		}

		// 3) g_Ap of the reduced system:
		// ---------------------------------
		double * modified_grad_Ap = minus_grad_Ap;
		for (size_t i=0;i<nUnknowns_Ap;i++)
		{
			vector_Ap_t grad_Ap = vector_Ap_t(modified_grad_Ap);  // A map which wraps the pointer
			for (typename TGradApSymbolicEntry::lst_terms_t::const_iterator it=m_sym_GradAp_reduce[i].lst_terms_to_subtract.begin();it!=m_sym_GradAp_reduce[i].lst_terms_to_subtract.end();++it)
			{
				if (m_Hf_blocks_info[it->feat_idx].num_Hf_diag_blocks_invertible)
				{
					double *grad_df = this->minus_grad_f + it->feat_idx * HESS_f::matrix_t::RowsAtCompileTime;
					// g -= \Sum Hpi_lk * inv(Hf_lk) * grad_lk
					grad_Ap.noalias() -= it->Hpi_lk_times_inv_Hf_lk * vector_f_t(grad_df);
				}
			}

			// Advance to next part of gradient:
			modified_grad_Ap += HESS_Ap::matrix_t::RowsAtCompileTime;
		}

	} // end of numeric_build_reduced_system

	/** Back-substitution: given the increments of the pose-like unknowns, recovers
	  * the increments of the feature unknowns. */
	void numeric_solve_for_features(
		double *in_deltas_Ap,
		double *out_deltas_feats
		)
	{
		// 1/2: Go through all the Hessian blocks of HApf and subtract the corresponding
		//      term from the grad_feats:
		for (size_t idx_Ap=0;idx_Ap<nUnknowns_Ap;idx_Ap++)
		{
			// Get row i from HAp_f:
			typename HESS_Apf::col_t & row_i = HApf.getCol(idx_Ap);

			const vector_Ap_t delta_idx_Ap = vector_Ap_t(in_deltas_Ap + idx_Ap * HESS_Ap::matrix_t::RowsAtCompileTime );

			for (typename HESS_Apf::col_t::const_iterator itCol=row_i.begin();itCol!=row_i.end();++itCol)
			{
				const size_t idx_feat = itCol->first;
				if (!was_ith_feature_invertible(idx_feat))
					continue;

				double *grad_df = this->minus_grad_f + idx_feat * HESS_f::matrix_t::RowsAtCompileTime;

				// g_reduced = -g_l - \Sum H^t_pi_lk * delta_Ap_i
				vector_f_t(grad_df).noalias() -= itCol->second.num.transpose() * delta_idx_Ap;
			}
		}

		// 2/2: Solve each individual feature increment:
		for (size_t idx_feat=0;idx_feat<nUnknowns_f;idx_feat++)
		{
			if (!was_ith_feature_invertible(idx_feat))
				continue;

			vector_f_t delta_feat = vector_f_t( out_deltas_feats + idx_feat * HESS_f::matrix_t::RowsAtCompileTime );
			vector_f_t grad_df    = vector_f_t( this->minus_grad_f + idx_feat * HESS_f::matrix_t::RowsAtCompileTime );
			//std::cout << grad_df.transpose() << std::endl << m_Hf_blocks_info[idx_feat].num_Hf_diag_blocks_inverses << std::endl << std::endl;

			delta_feat = (m_Hf_blocks_info[idx_feat].num_Hf_diag_blocks_inverses * grad_df);
		}

	} // end of numeric_solve_for_features

	bool was_ith_feature_invertible(const size_t i) const { return m_Hf_blocks_info[i].num_Hf_diag_blocks_invertible; }

private:
	// ----------- Input data ------------------
	HESS_Ap    HAp_original;  // (Copy of the numerical values of HAp)
	HESS_Ap  & HAp;           // Column compressed
	HESS_f   & Hf;            // Column compressed
	HESS_Apf & HApf;          // *ROW* compressed
	double   * minus_grad_Ap;
	double   * minus_grad_f;  // Should be changed to "const" if using an Eigen "ConstMap"...

	const size_t nUnknowns_Ap;
	const size_t nUnknowns_f;
	size_t       nHf_invertible_blocks;
	// -----------------------------------------
	typedef typename Eigen::Map<Eigen::Matrix<double,HESS_Ap::matrix_t::RowsAtCompileTime,1> > vector_Ap_t;
	typedef typename Eigen::Map<Eigen::Matrix<double,HESS_f::matrix_t::RowsAtCompileTime,1> >  vector_f_t;

	struct TInfoPerHfBlock
	{
		const typename HESS_f::matrix_t * sym_Hf_diag_blocks;
		typename HESS_f::matrix_t         num_Hf_diag_blocks_inverses;
		bool                              num_Hf_diag_blocks_invertible;

		TInfoPerHfBlock() : sym_Hf_diag_blocks(NULL), num_Hf_diag_blocks_invertible(false) { }

		MRPT_MAKE_ALIGNED_OPERATOR_NEW
	};

	typedef typename mrpt::aligned_containers<TInfoPerHfBlock>::vector_t TInfoPerHfBlock_vector_t;

	TInfoPerHfBlock_vector_t m_Hf_blocks_info;

	struct THApSymbolicEntry
	{
		struct TEntry
		{
			TEntry(
				const typename HESS_Apf::matrix_t * _Hpi_lk,
				const TInfoPerHfBlock             * _inv_Hf_lk,
				const typename HESS_Apf::matrix_t * _Hpj_lk,
				typename HESS_Apf::matrix_t       * _out_Hpi_lk_times_inv_Hf_lk
				) :
				Hpi_lk(_Hpi_lk),
				inv_Hf_lk(_inv_Hf_lk),
				Hpj_lk(_Hpj_lk),
				out_Hpi_lk_times_inv_Hf_lk(_out_Hpi_lk_times_inv_Hf_lk)
			{
			}

			const typename HESS_Apf::matrix_t * Hpi_lk;
			const TInfoPerHfBlock             * inv_Hf_lk;
			const typename HESS_Apf::matrix_t * Hpj_lk;
			typename HESS_Apf::matrix_t       * out_Hpi_lk_times_inv_Hf_lk;
		};

		typename HESS_Ap::matrix_t * HAp_ij;
		std::deque<TEntry>           lst_terms_to_add;

		void swap(THApSymbolicEntry &o)
		{
			std::swap(HAp_ij,o.HAp_ij);
			lst_terms_to_add.swap(o.lst_terms_to_add);
		}
	};

	std::deque<THApSymbolicEntry> m_sym_HAp_reduce;

	struct TGradApSymbolicEntry
	{
		struct TEntry
		{
			size_t feat_idx;
			// Used as a temporary container for the product, but also because this term reappears in the gradient update:
			typename HESS_Apf::matrix_t Hpi_lk_times_inv_Hf_lk;

			MRPT_MAKE_ALIGNED_OPERATOR_NEW
		};

		// BUG ALERT in Eigen/Deque/MSVC 2008 32bit: http://eigen.tuxfamily.org/bz/show_bug.cgi?id=83
		// This workaround will lose performance but at least allows compiling. Remove this if someday a solution
		// to the bug above is found:
#if defined(_MSC_VER) && (_MSC_VER < 1600)  // if we use MSVC older than 2010:
		typedef typename mrpt::aligned_containers<TEntry>::list_t  lst_terms_t;  // slower than deque, but works...
#else
		// (Warning: Don't replace the STL container with "vector" or any other that invalidates
		//  pointers, which is assumed in the implementation. That's why I use "std::deque")
		typedef typename mrpt::aligned_containers<TEntry>::deque_t lst_terms_t;
#endif
		// The list itself:
		lst_terms_t lst_terms_to_subtract;
	};
	std::vector<TGradApSymbolicEntry> m_sym_GradAp_reduce;

}; // end of class SchurComplement

} // end of namespaces
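The following stand-alone toy program is not part of SRBA/MRPT; the matrices and values are made up for illustration and it uses plain dense Eigen instead of the block-sparse containers above. It shows the same three steps — reduce, solve the pose-only block, back-substitute the features — on a system with 2 "pose" unknowns and 3 "feature" unknowns:

#include <Eigen/Dense>
#include <iostream>

int main()
{
    // Toy Gauss-Newton system:  [ Hpp  Hpf ] [dp]   [gp]
    //                           [ Hpf' Hff ] [df] = [gf]
    Eigen::MatrixXd Hpp(2,2), Hpf(2,3), Hff(3,3);
    Eigen::VectorXd gp(2), gf(3);
    Hpp << 4,1,  1,3;
    Hpf << 1,0,2,  0,1,1;
    Hff  = 5.0*Eigen::MatrixXd::Identity(3,3);
    gp << 1,2;   gf << 3,4,5;

    // 1) Reduce out the feature unknowns (in SRBA, Hff is block-diagonal, so this inverse is cheap):
    const Eigen::MatrixXd Hff_inv = Hff.inverse();
    const Eigen::MatrixXd Hpp_red = Hpp - Hpf*Hff_inv*Hpf.transpose();   // Schur complement
    const Eigen::VectorXd gp_red  = gp  - Hpf*Hff_inv*gf;

    // 2) Solve the reduced, pose-only system:
    const Eigen::VectorXd dp = Hpp_red.ldlt().solve(gp_red);

    // 3) Back-substitute to recover the feature increments:
    const Eigen::VectorXd df = Hff_inv*(gf - Hpf.transpose()*dp);

    std::cout << "dp = " << dp.transpose() << "\ndf = " << df.transpose() << "\n";
    return 0;
}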