#ifndef EIGEN_SPARSE_LU_H
#define EIGEN_SPARSE_LU_H
template <typename _MatrixType, typename _OrderingType = COLAMDOrdering<typename _MatrixType::Index> >
class SparseLU;
template <typename MappedSparseMatrixType>
struct SparseLUMatrixLReturnType;
template <typename MatrixLType, typename MatrixUType>
struct SparseLUMatrixUReturnType;
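/** \class SparseLU
  *
  * Supernodal sparse LU factorization of a general square sparse matrix, with a
  * fill-reducing column ordering (COLAMD by default) and threshold partial pivoting.
  *
  * A minimal usage sketch; the names A, b, x and solver below are illustrative and
  * not part of this file:
  *
  * \code
  * SparseMatrix<double, ColMajor> A;   // matrix to factorize (fill it first)
  * VectorXd b, x;
  * SparseLU<SparseMatrix<double, ColMajor>, COLAMDOrdering<int> > solver;
  * solver.analyzePattern(A);   // symbolic step: ordering and elimination tree
  * solver.factorize(A);        // numerical factorization
  * x = solver.solve(b);        // solve A x = b
  * \endcode
  */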
template <typename _MatrixType, typename _OrderingType>
class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typename _MatrixType::Index>
    typedef _MatrixType MatrixType;
    typedef _OrderingType OrderingType;
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::RealScalar RealScalar;
    typedef typename MatrixType::Index Index;
    typedef internal::MappedSuperNodalMatrix<Scalar, Index> SCMatrix;
    typedef internal::SparseLUImpl<Scalar, Index> Base;
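    // Constructors: the default one only initializes the members; the one taking a
    // matrix is meant to compute the full decomposition (analyzePattern + factorize)
    // of that matrix right away.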
    SparseLU() : m_isInitialized(true), m_lastError(""), m_Ustore(0,0,0,0,0,0),
                 m_symmetricmode(false), m_diagpivotthresh(1.0), m_detPermR(1)
    SparseLU(const MatrixType& matrix) : m_isInitialized(true), m_lastError(""), m_Ustore(0,0,0,0,0,0),
                                         m_symmetricmode(false), m_diagpivotthresh(1.0), m_detPermR(1)
    void factorize(const MatrixType& matrix);
    void simplicialfactorize(const MatrixType& matrix);
    inline Index rows() const { return m_mat.rows(); }
    inline Index cols() const { return m_mat.cols(); }
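    /** Indicate that the pattern of the input matrix is symmetric; the flag set below
      * makes factorize() use the heap_relax_snode() heuristic instead of relax_snode(). */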
      m_symmetricmode = sym;
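    /** \returns an expression of the matrix L, internally stored as a supernodal matrix.
      * It is only meant to be used through solveInPlace(), e.g.
      * \code
      * y = b;                             // y: dense, column-major copy of the right-hand side
      * solver.matrixL().solveInPlace(y);  // 'solver' is an illustrative SparseLU object
      * \endcode
      */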
    SparseLUMatrixLReturnType<SCMatrix> matrixL() const
    { return SparseLUMatrixLReturnType<SCMatrix>(m_Lstore); }
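    /** \returns an expression of the matrix U. The diagonal blocks of U live inside the
      * supernodal storage of L, hence the expression keeps a reference to both m_Lstore
      * and m_Ustore; as for matrixL(), only a triangular solveInPlace() is provided. */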
    SparseLUMatrixUReturnType<SCMatrix, MappedSparseMatrix<Scalar,ColMajor,Index> > matrixU() const
    { return SparseLUMatrixUReturnType<SCMatrix, MappedSparseMatrix<Scalar,ColMajor,Index> >(m_Lstore, m_Ustore); }
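    /** Set the threshold used by pivotL() to accept a diagonal entry as pivot:
      * with a value of 1.0 this amounts to classical partial pivoting, while smaller
      * values favor keeping the diagonal entry (as in SuperLU's DiagPivotThresh). */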
      m_diagpivotthresh = thresh;
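    /** \returns the solution X of A X = B, as a lazily evaluated expression;
      * the factorization must have been computed beforehand (see the asserts below). */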
    template<typename Rhs>
      eigen_assert(m_factorizationIsOk && "SparseLU is not initialized.");
      eigen_assert(rows() == B.rows()
                   && "SparseLU::solve(): invalid number of rows of the right hand side matrix B");
      return internal::solve_retval<SparseLU, Rhs>(*this, B.derived());
    template<typename Rhs>
      eigen_assert(m_factorizationIsOk && "SparseLU is not initialized.");
      eigen_assert(rows() == B.rows()
                   && "SparseLU::solve(): invalid number of rows of the right hand side matrix B");
      return internal::sparse_solve_retval<SparseLU, Rhs>(*this, B.derived());
      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
    template<typename Rhs, typename Dest>
      Dest& X(_X.derived());
      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first");
                          THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
      X.resize(B.rows(), B.cols());
      for(Index j = 0; j < B.cols(); ++j)
      this->matrixL().solveInPlace(X);
      this->matrixU().solveInPlace(X);
      for (Index j = 0; j < B.cols(); ++j)
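    /** \returns the absolute value of the determinant, i.e. the product of the absolute
      * values of the diagonal entries of U (the diagonal of U is stored within the
      * supernodal structure m_Lstore traversed below). */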
      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
      Scalar det = Scalar(1.);
      for (Index j = 0; j < this->cols(); ++j)
        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
          if(it.row() < j) continue;
          det *= (std::abs)(it.value());
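    /** \returns the natural logarithm of the absolute value of the determinant:
      * summing log|u_jj| instead of multiplying the entries avoids overflow and
      * underflow for large matrices. */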
      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
      Scalar det = Scalar(0.);
      for (Index j = 0; j < this->cols(); ++j)
        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
          if(it.row() < j) continue;
          det += (std::log)((std::abs)(it.value()));
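    /** \returns the sign (+1 or -1) contributed by the row permutation: m_detPermR is
      * flipped in factorize() each time pivoting swaps a row (pivrow != jj). */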
      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
      return Scalar(m_detPermR);
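    // Set the default values of the performance-tuning parameters used by the
    // factorization kernels (panel size, maximal supernode size, fill factor, ...).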
    void initperfvalues()
      m_perfv.panel_size = 1;
      m_perfv.maxsuper = 128;
      m_perfv.fillfactor = 20;
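    // Internal state: status flags, last error message, factor storage, permutations
    // and tuning parameters.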
    bool m_isInitialized;
    bool m_factorizationIsOk;
    std::string m_lastError;
    MappedSparseMatrix<Scalar,ColMajor,Index> m_Ustore;
    PermutationType m_perm_c;
    PermutationType m_perm_r;
    typename Base::GlobalLU_t m_glu;
    bool m_symmetricmode;
    internal::perfvalues<Index> m_perfv;
    RealScalar m_diagpivotthresh;
    Index m_nnzL, m_nnzU;
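    // Copying a SparseLU object is disallowed: the copy constructor is only declared.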
    SparseLU(const SparseLU&);
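// Symbolic analysis: compute the fill-reducing column permutation with OrderingType,
// permute the input matrix accordingly, then build the column elimination tree and its
// postordering. Only the sparsity pattern is used at this stage.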
template <typename MatrixType, typename OrderingType>
  if (m_perm_c.size()) {
    const Index* outerIndexPtr;
    if (mat.isCompressed()) outerIndexPtr = mat.outerIndexPtr();
    Index* outerIndexPtr_t = new Index[mat.cols()+1];
    for(Index i = 0; i <= mat.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i];
    outerIndexPtr = outerIndexPtr_t;
    for (Index i = 0; i < mat.cols(); i++)
      m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
      m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
    if(!mat.isCompressed()) delete[] outerIndexPtr;
  if (!m_symmetricmode) {
    Index m = m_mat.cols();
    for (Index i = 0; i < m; ++i) iwork(post(i)) = post(m_etree(i));
    for (Index i = 0; i < m; i++)
      post_perm.indices()(i) = post(i);
    if(m_perm_c.size()) {
      m_perm_c = post_perm * m_perm_c;
  m_analysisIsOk = true;
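// Numerical factorization, following the left-looking supernodal SuperLU algorithm:
// for each panel, panel_dfs/column_dfs compute the symbolic structure, panel_bmod and
// column_bmod apply the numerical updates, copy_to_ucol fills U, pivotL performs the
// threshold pivoting, and pruneL prunes the symbolic structure for later columns.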
template <typename MatrixType, typename OrderingType>
  using internal::emptyIdxLU;
  eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");
  eigen_assert((matrix.rows() == matrix.cols()) && "Only for square matrices");
  typedef typename IndexVector::Scalar Index;
  const Index* outerIndexPtr;
  if (matrix.isCompressed()) outerIndexPtr = matrix.outerIndexPtr();
  Index* outerIndexPtr_t = new Index[matrix.cols()+1];
  for(Index i = 0; i <= matrix.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i];
  outerIndexPtr = outerIndexPtr_t;
  for (Index i = 0; i < matrix.cols(); i++)
    m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
    m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
  if(!matrix.isCompressed()) delete[] outerIndexPtr;
  m_perm_c.resize(matrix.cols());
  for(Index i = 0; i < matrix.cols(); ++i) m_perm_c.indices()(i) = i;
  Index m = m_mat.rows();
  Index n = m_mat.cols();
  Index nnz = m_mat.nonZeros();
  Index maxpanel = m_perfv.panel_size * m;
  Index info = Base::memInit(m, n, nnz, lwork, m_perfv.fillfactor, m_perfv.panel_size, m_glu);
  m_lastError = "UNABLE TO ALLOCATE WORKING MEMORY\n\n";
  m_factorizationIsOk = false;
  tempv.setZero(internal::LUnumTempV(m, m_perfv.panel_size, m_perfv.maxsuper, m));
  if (m_symmetricmode)
    Base::heap_relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);
    Base::relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);
  m_perm_r.indices().setConstant(-1);
  m_glu.supno(0) = emptyIdxLU; m_glu.xsup.setConstant(0);
  m_glu.xsup(0) = m_glu.xlsub(0) = m_glu.xusub(0) = m_glu.xlusup(0) = Index(0);
  for (jcol = 0; jcol < n; )
    Index panel_size = m_perfv.panel_size;
    for (k = jcol + 1; k < (std::min)(jcol+panel_size, n); k++)
      if (relax_end(k) != emptyIdxLU)
        panel_size = k - jcol;
    panel_size = n - jcol;
    Base::panel_dfs(m, panel_size, jcol, m_mat, m_perm_r.indices(), nseg1, dense, panel_lsub, segrep, repfnz, xprune, marker, parent, xplore, m_glu);
    Base::panel_bmod(m, panel_size, jcol, nseg1, dense, tempv, segrep, repfnz, m_glu);
    for (jj = jcol; jj < jcol + panel_size; jj++)
      info = Base::column_dfs(m, jj, m_perm_r.indices(), m_perfv.maxsuper, nseg, panel_lsubk, segrep, repfnz_k, xprune, marker, parent, xplore, m_glu);
        m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_DFS() ";
        m_factorizationIsOk = false;
      info = Base::column_bmod(jj, (nseg - nseg1), dense_k, tempv, segrep_k, repfnz_k, jcol, m_glu);
        m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_BMOD() ";
        m_factorizationIsOk = false;
      info = Base::copy_to_ucol(jj, nseg, segrep, repfnz_k, m_perm_r.indices(), dense_k, m_glu);
        m_lastError = "UNABLE TO EXPAND MEMORY IN COPY_TO_UCOL() ";
        m_factorizationIsOk = false;
      info = Base::pivotL(jj, m_diagpivotthresh, m_perm_r.indices(), iperm_c.indices(), pivrow, m_glu);
        m_lastError = "THE MATRIX IS STRUCTURALLY SINGULAR ... ZERO COLUMN AT ";
        std::ostringstream returnInfo;
        m_lastError += returnInfo.str();
        m_factorizationIsOk = false;
      if (pivrow != jj) m_detPermR *= -1;
      Base::pruneL(jj, m_perm_r.indices(), pivrow, nseg, segrep, repfnz_k, xprune, m_glu);
      for (i = 0; i < nseg; i++)
        repfnz_k(irep) = emptyIdxLU;
  Base::countnz(n, m_nnzL, m_nnzU, m_glu);
  Base::fixupL(n, m_perm_r.indices(), m_glu);
  m_Lstore.setInfos(m, n, m_glu.lusup, m_glu.xlusup, m_glu.lsub, m_glu.xlsub, m_glu.supno, m_glu.xsup);
  m_factorizationIsOk = true;
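// Lightweight wrapper returned by SparseLU::matrixL(): it merely forwards solveInPlace()
// to the underlying supernodal matrix.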
template<typename MappedSupernodalType>
struct SparseLUMatrixLReturnType : internal::no_assignment_operator
  typedef typename MappedSupernodalType::Index Index;
  typedef typename MappedSupernodalType::Scalar Scalar;
  SparseLUMatrixLReturnType(const MappedSupernodalType& mapL) : m_mapL(mapL)
  Index rows() { return m_mapL.rows(); }
  Index cols() { return m_mapL.cols(); }
  template<typename Dest>
  void solveInPlace(MatrixBase<Dest>& X) const
    m_mapL.solveInPlace(X);
  const MappedSupernodalType& m_mapL;
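// Wrapper returned by SparseLU::matrixU(). Its solveInPlace() performs the backward
// substitution supernode by supernode: each supernodal diagonal block (stored with L
// in m_mapL) is solved with a dense triangular solve, then the off-diagonal entries of
// U stored in m_mapU are used to update the rows above the current supernode.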
template<typename MatrixLType, typename MatrixUType>
struct SparseLUMatrixUReturnType : internal::no_assignment_operator
  typedef typename MatrixLType::Index Index;
  typedef typename MatrixLType::Scalar Scalar;
  SparseLUMatrixUReturnType(const MatrixLType& mapL, const MatrixUType& mapU)
    : m_mapL(mapL), m_mapU(mapU)
  Index rows() { return m_mapL.rows(); }
  Index cols() { return m_mapL.cols(); }
  template<typename Dest> void solveInPlace(MatrixBase<Dest>& X) const
    Index nrhs = X.cols();
    for (Index k = m_mapL.nsuper(); k >= 0; k--)
      Index fsupc = m_mapL.supToCol()[k];
      Index lda = m_mapL.colIndexPtr()[fsupc+1] - m_mapL.colIndexPtr()[fsupc];
      Index nsupc = m_mapL.supToCol()[k+1] - fsupc;
      Index luptr = m_mapL.colIndexPtr()[fsupc];
      for (Index j = 0; j < nrhs; j++)
        X(fsupc, j) /= m_mapL.valuePtr()[luptr];
      Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A(&(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda));
      Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > U(&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n));
      U = A.template triangularView<Upper>().solve(U);
      for (Index j = 0; j < nrhs; ++j)
        for (Index jcol = fsupc; jcol < fsupc + nsupc; jcol++)
          typename MatrixUType::InnerIterator it(m_mapU, jcol);
          Index irow = it.index();
          X(irow, j) -= X(jcol, j) * it.value();
  const MatrixLType& m_mapL;
  const MatrixUType& m_mapU;
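// internal glue: these solve_retval / sparse_solve_retval specializations let
// SparseLU::solve() return a lightweight expression that is evaluated into the
// destination (via _solve() or defaultEvalTo()) only when it is assigned.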
template<typename _MatrixType, typename Derived, typename Rhs>
struct solve_retval<SparseLU<_MatrixType,Derived>, Rhs>
  : solve_retval_base<SparseLU<_MatrixType,Derived>, Rhs>
  typedef SparseLU<_MatrixType,Derived> Dec;
  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
  template<typename Dest> void evalTo(Dest& dst) const
    dec()._solve(rhs(),dst);
template<typename _MatrixType, typename Derived, typename Rhs>
struct sparse_solve_retval<SparseLU<_MatrixType,Derived>, Rhs>
  : sparse_solve_retval_base<SparseLU<_MatrixType,Derived>, Rhs>
  typedef SparseLU<_MatrixType,Derived> Dec;
  EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
  template<typename Dest> void evalTo(Dest& dst) const
    this->defaultEvalTo(dst);