// Eigen 3.2.92
// SparseSelfAdjointView.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2009-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
11 #define EIGEN_SPARSE_SELFADJOINTVIEW_H
12 
13 namespace Eigen {
14 
namespace internal {

// A self-adjoint view exposes the same compile-time traits (scalar type,
// storage order, sizes) as the matrix it wraps.
template<typename MatrixType, unsigned int Mode>
struct traits<SparseSelfAdjointView<MatrixType,Mode> > : traits<MatrixType> {
};

// Copy a triangular self-adjoint representation into another triangular
// representation (possibly with a different storage order and/or stored
// triangle), optionally applying the permutation 'perm'. Defined below.
template<int SrcMode,int DstMode,typename MatrixType,int DestOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);

// Expand a triangular self-adjoint representation into a full sparse matrix
// (both triangles stored), optionally applying the permutation 'perm'.
template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);

}
42 
43 template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
44  : public EigenBase<SparseSelfAdjointView<MatrixType,_Mode> >
45 {
46  public:
47 
48  enum {
49  Mode = _Mode,
50  RowsAtCompileTime = internal::traits<SparseSelfAdjointView>::RowsAtCompileTime,
51  ColsAtCompileTime = internal::traits<SparseSelfAdjointView>::ColsAtCompileTime
52  };
53 
55  typedef typename MatrixType::Scalar Scalar;
56  typedef typename MatrixType::StorageIndex StorageIndex;
58  typedef typename MatrixType::Nested MatrixTypeNested;
59  typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
60 
61  explicit inline SparseSelfAdjointView(const MatrixType& matrix) : m_matrix(matrix)
62  {
63  eigen_assert(rows()==cols() && "SelfAdjointView is only for squared matrices");
64  }
65 
66  inline Index rows() const { return m_matrix.rows(); }
67  inline Index cols() const { return m_matrix.cols(); }
68 
70  const _MatrixTypeNested& matrix() const { return m_matrix; }
71  _MatrixTypeNested& matrix() { return m_matrix.const_cast_derived(); }
72 
78  template<typename OtherDerived>
81  {
83  }
84 
90  template<typename OtherDerived> friend
93  {
95  }
96 
98  template<typename OtherDerived>
101  {
102  return Product<SparseSelfAdjointView,OtherDerived>(*this, rhs.derived());
103  }
104 
106  template<typename OtherDerived> friend
109  {
110  return Product<OtherDerived,SparseSelfAdjointView>(lhs.derived(), rhs);
111  }
112 
121  template<typename DerivedU>
122  SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));
123 
125  // TODO implement twists in a more evaluator friendly fashion
126  SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) const
127  {
128  return SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode>(m_matrix, perm);
129  }
130 
131  template<typename SrcMatrixType,int SrcMode>
132  SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcMode>& permutedMatrix)
133  {
134  internal::call_assignment_no_alias_no_transpose(*this, permutedMatrix);
135  return *this;
136  }
137 
138  SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)
139  {
140  PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
141  return *this = src.twistedBy(pnull);
142  }
143 
144  template<typename SrcMatrixType,unsigned int SrcMode>
145  SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcMode>& src)
146  {
147  PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
148  return *this = src.twistedBy(pnull);
149  }
150 
151  void resize(Index rows, Index cols)
152  {
153  EIGEN_ONLY_USED_FOR_DEBUG(rows);
154  EIGEN_ONLY_USED_FOR_DEBUG(cols);
155  eigen_assert(rows == this->rows() && cols == this->cols()
156  && "SparseSelfadjointView::resize() does not actually allow to resize.");
157  }
158 
159  protected:
160 
161  typename MatrixType::Nested m_matrix;
162  //mutable VectorI m_countPerRow;
163  //mutable VectorI m_countPerCol;
164  private:
165  template<typename Dest> void evalTo(Dest &) const;
166 };
167 
168 /***************************************************************************
169 * Implementation of SparseMatrixBase methods
170 ***************************************************************************/
171 
// Const variant: returns a read-only self-adjoint view of the UpLo triangle
// of this sparse expression.
template<typename Derived>
template<unsigned int UpLo>
typename SparseMatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView() const
{
  return SparseSelfAdjointView<const Derived, UpLo>(derived());
}
178 
// Non-const variant: returns a writable self-adjoint view of the UpLo
// triangle of this sparse expression.
template<typename Derived>
template<unsigned int UpLo>
typename SparseMatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView()
{
  return SparseSelfAdjointView<Derived, UpLo>(derived());
}
185 
186 /***************************************************************************
187 * Implementation of SparseSelfAdjointView methods
188 ***************************************************************************/
189 
// Rank-K update: *this = *this + alpha * (u * u^*), keeping only the stored
// triangle (Mode) of the result in the nested matrix.
template<typename MatrixType, unsigned int Mode>
template<typename DerivedU>
SparseSelfAdjointView<MatrixType,Mode>&
SparseSelfAdjointView<MatrixType,Mode>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
{
  // Evaluate u*u^* into a temporary with the same storage order as the
  // nested matrix, so the triangular copy below is a cheap structural copy.
  SparseMatrix<Scalar,(MatrixType::Flags&RowMajorBit)?RowMajor:ColMajor> tmp = u * u.adjoint();
  if(alpha==Scalar(0))
    m_matrix.const_cast_derived() = tmp.template triangularView<Mode>();          // alpha==0: plain overwrite with u*u^*
  else
    m_matrix.const_cast_derived() += alpha * tmp.template triangularView<Mode>(); // accumulate scaled update

  return *this;
}
203 
204 namespace internal {
205 
// TODO currently a selfadjoint expression has the form SelfAdjointView<.,.>
// in the future selfadjoint-ness should be defined by the expression traits
// such that Transpose<SelfAdjointView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work)
// Evaluator traits: a self-adjoint view keeps the storage kind of its nested
// matrix but advertises the dedicated SparseSelfAdjointShape so that product
// and assignment dispatch below can pick the specialized implementations.
template<typename MatrixType, unsigned int Mode>
struct evaluator_traits<SparseSelfAdjointView<MatrixType,Mode> >
{
  typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
  typedef SparseSelfAdjointShape Shape;

  static const int AssumeAliasing = 0;
};
217 
// Dispatch tag: assigning a sparse self-adjoint view into a plain sparse
// matrix requires expanding both triangles (see Assignment below); the
// reverse direction is an ordinary sparse-to-sparse copy.
struct SparseSelfAdjoint2Sparse {};

template<> struct AssignmentKind<SparseShape,SparseSelfAdjointShape> { typedef SparseSelfAdjoint2Sparse Kind; };
template<> struct AssignmentKind<SparseSelfAdjointShape,SparseShape> { typedef Sparse2Sparse Kind; };
222 
// Assignment of a self-adjoint view into a plain sparse matrix: expand the
// stored triangle into a full symmetric matrix via permute_symm_to_fullsymm
// (with no permutation).
template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar>
struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse, Scalar>
{
  typedef typename DstXprType::StorageIndex StorageIndex;
  template<typename DestScalar,int StorageOrder>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/)
  {
    internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), dst);
  }

  // DynamicSparseMatrix overload: go through a compressed temporary since
  // permute_symm_to_fullsymm writes raw compressed-storage arrays.
  template<typename DestScalar>
  static void run(DynamicSparseMatrix<DestScalar,ColMajor,StorageIndex>& dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/)
  {
    // TODO directly evaluate into dst;
    SparseMatrix<DestScalar,ColMajor,StorageIndex> tmp(dst.rows(),dst.cols());
    internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), tmp);
    dst = tmp;
  }
};
242 
243 } // end namespace internal
244 
245 /***************************************************************************
246 * Implementation of sparse self-adjoint time dense matrix
247 ***************************************************************************/
248 
249 namespace internal {
250 
// Computes res += lhs * rhs where only one triangle (Mode) of the
// self-adjoint lhs is stored; the missing triangle is reconstructed on the
// fly as the conjugate transpose of the stored one.
template<int Mode, typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
  EIGEN_ONLY_USED_FOR_DEBUG(alpha);
  // TODO use alpha
  eigen_assert(alpha==AlphaType(1) && "alpha != 1 is not implemented yet, sorry");

  typedef evaluator<SparseLhsType> LhsEval;
  typedef typename evaluator<SparseLhsType>::InnerIterator LhsIterator;
  typedef typename SparseLhsType::Scalar LhsScalar;

  enum {
    LhsIsRowMajor = (LhsEval::Flags&RowMajorBit)==RowMajorBit,
    // "First half": the stored entries of an inner vector precede the
    // diagonal in iteration order (e.g. Upper triangle in column-major);
    // otherwise they follow it and we must skip up to the diagonal first.
    ProcessFirstHalf =
             ((Mode&(Upper|Lower))==(Upper|Lower))
          || ( (Mode&Upper) && !LhsIsRowMajor)
          || ( (Mode&Lower) && LhsIsRowMajor),
    ProcessSecondHalf = !ProcessFirstHalf
  };

  LhsEval lhsEval(lhs);

  for (Index j=0; j<lhs.outerSize(); ++j)
  {
    LhsIterator i(lhsEval,j);
    if (ProcessSecondHalf)
    {
      // Skip the strictly-other-triangle entries, then handle the diagonal
      // entry once (no mirroring needed for it).
      while (i && i.index()<j) ++i;
      if(i && i.index()==j)
      {
        res.row(j) += i.value() * rhs.row(j);
        ++i;
      }
    }
    // Off-diagonal entries: apply both the stored coefficient and its
    // conjugate mirror across the diagonal.
    for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
    {
      Index a = LhsIsRowMajor ? j : i.index();
      Index b = LhsIsRowMajor ? i.index() : j;
      LhsScalar v = i.value();
      res.row(a) += (v) * rhs.row(b);
      res.row(b) += numext::conj(v) * rhs.row(a);
    }
    // In the first-half case the diagonal entry (if present) is last.
    if (ProcessFirstHalf && i && (i.index()==j))
      res.row(j) += i.value() * rhs.row(j);
  }
}
297 
298 
299 template<typename LhsView, typename Rhs, int ProductType>
300 struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType>
301 {
302  template<typename Dest>
303  static void evalTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs)
304  {
305  typedef typename LhsView::_MatrixTypeNested Lhs;
306  typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
307  typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
308  LhsNested lhsNested(lhsView.matrix());
309  RhsNested rhsNested(rhs);
310 
311  dst.setZero();
312  internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, typename Dest::Scalar(1));
313  }
314 };
315 
316 template<typename Lhs, typename RhsView, int ProductType>
317 struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType>
318 {
319  template<typename Dest>
320  static void evalTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView)
321  {
322  typedef typename RhsView::_MatrixTypeNested Rhs;
323  typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
324  typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
325  LhsNested lhsNested(lhs);
326  RhsNested rhsNested(rhsView.matrix());
327 
328  dst.setZero();
329  // transpoe everything
330  Transpose<Dest> dstT(dst);
331  internal::sparse_selfadjoint_time_dense_product<RhsView::Mode>(rhsNested.transpose(), lhsNested.transpose(), dstT, typename Dest::Scalar(1));
332  }
333 };
334 
335 // NOTE: these two overloads are needed to evaluate the sparse selfadjoint view into a full sparse matrix
336 // TODO: maybe the copy could be handled by generic_product_impl so that these overloads would not be needed anymore
337 
// Evaluator for (self-adjoint view) * (sparse): the view is first expanded
// into a plain sparse matrix (m_lhs), then a regular sparse*sparse product
// is evaluated into m_result, which this evaluator wraps.
template<typename LhsView, typename Rhs, int ProductTag>
struct product_evaluator<Product<LhsView, Rhs, DefaultProduct>, ProductTag, SparseSelfAdjointShape, SparseShape>
  : public evaluator<typename Product<typename Rhs::PlainObject, Rhs, DefaultProduct>::PlainObject>
{
  typedef Product<LhsView, Rhs, DefaultProduct> XprType;
  typedef typename XprType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;

  product_evaluator(const XprType& xpr)
    : m_lhs(xpr.lhs()), m_result(xpr.rows(), xpr.cols())
  {
    // Re-seat the base evaluator on m_result once it has its final size.
    ::new (static_cast<Base*>(this)) Base(m_result);
    generic_product_impl<typename Rhs::PlainObject, Rhs, SparseShape, SparseShape, ProductTag>::evalTo(m_result, m_lhs, xpr.rhs());
  }

protected:
  typename Rhs::PlainObject m_lhs;  // plain-sparse copy of the self-adjoint lhs
  PlainObject m_result;
};
357 
// Evaluator for (sparse) * (self-adjoint view): mirror of the case above,
// expanding the rhs view into a plain sparse matrix (m_rhs) first.
template<typename Lhs, typename RhsView, int ProductTag>
struct product_evaluator<Product<Lhs, RhsView, DefaultProduct>, ProductTag, SparseShape, SparseSelfAdjointShape>
  : public evaluator<typename Product<Lhs, typename Lhs::PlainObject, DefaultProduct>::PlainObject>
{
  typedef Product<Lhs, RhsView, DefaultProduct> XprType;
  typedef typename XprType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;

  product_evaluator(const XprType& xpr)
    : m_rhs(xpr.rhs()), m_result(xpr.rows(), xpr.cols())
  {
    // Re-seat the base evaluator on m_result once it has its final size.
    ::new (static_cast<Base*>(this)) Base(m_result);
    generic_product_impl<Lhs, typename Lhs::PlainObject, SparseShape, SparseShape, ProductTag>::evalTo(m_result, xpr.lhs(), m_rhs);
  }

protected:
  typename Lhs::PlainObject m_rhs;  // plain-sparse copy of the self-adjoint rhs
  PlainObject m_result;
};
377 
378 } // namespace internal
379 
380 /***************************************************************************
381 * Implementation of symmetric copies and permutations
382 ***************************************************************************/
383 namespace internal {
384 
// Expand the stored triangle (Mode) of 'mat' into a full symmetric sparse
// matrix 'dest', applying the optional symmetric permutation 'perm'.
// Classic two-pass compressed-storage construction: pass 1 counts entries
// per destination outer vector, pass 2 scatters the values.
template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef typename MatrixType::Scalar Scalar;
  typedef SparseMatrix<Scalar,DestOrder,StorageIndex> Dest;
  typedef Matrix<StorageIndex,Dynamic,1> VectorI;

  Dest& dest(_dest.derived());
  enum {
    StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
  };

  Index size = mat.rows();
  VectorI count;
  count.resize(size);
  count.setZero();
  dest.resize(size,size);
  // Pass 1: count the number of entries each destination outer vector will
  // receive. Off-diagonal entries of the stored triangle contribute twice
  // (once per triangle of the full result).
  for(Index j = 0; j<size; ++j)
  {
    Index jp = perm ? perm[j] : j;
    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
    {
      Index i = it.index();
      Index r = it.row();
      Index c = it.col();
      Index ip = perm ? perm[i] : i;
      if(Mode==(Upper|Lower))
        count[StorageOrderMatch ? jp : ip]++;   // full input: copy as-is (transposed if orders differ)
      else if(r==c)
        count[ip]++;                            // diagonal: single entry
      else if(( Mode==Lower && r>c) || ( Mode==Upper && r<c))
      {
        count[ip]++;                            // stored entry...
        count[jp]++;                            // ...and its mirrored conjugate
      }
    }
  }
  Index nnz = count.sum();

  // reserve space
  dest.resizeNonZeros(nnz);
  dest.outerIndexPtr()[0] = 0;
  // Prefix-sum the counts into the outer index array, then reset 'count' to
  // the running write positions for pass 2.
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];

  // copy data (pass 2: scatter each entry — and its conjugate mirror for
  // strictly-triangular entries — to its destination slot)
  for(StorageIndex j = 0; j<size; ++j)
  {
    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
    {
      StorageIndex i = internal::convert_index<StorageIndex>(it.index());
      Index r = it.row();
      Index c = it.col();

      StorageIndex jp = perm ? perm[j] : j;
      StorageIndex ip = perm ? perm[i] : i;

      if(Mode==(Upper|Lower))
      {
        Index k = count[StorageOrderMatch ? jp : ip]++;
        dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
        dest.valuePtr()[k] = it.value();
      }
      else if(r==c)
      {
        Index k = count[ip]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
      }
      else if(( (Mode&Lower)==Lower && r>c) || ( (Mode&Upper)==Upper && r<c))
      {
        // When storage orders differ, writing the transposed coordinates
        // amounts to swapping (ip,jp).
        if(!StorageOrderMatch)
          std::swap(ip,jp);
        Index k = count[jp]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
        k = count[ip]++;
        dest.innerIndexPtr()[k] = jp;
        dest.valuePtr()[k] = numext::conj(it.value());  // mirrored entry is the conjugate
      }
    }
  }
}
471 
// Copy the stored triangle (_SrcMode) of 'mat' into the _DstMode triangle of
// 'dest' (possibly a different storage order), applying the optional
// symmetric permutation 'perm'. Same two-pass scheme as above, but only one
// triangle of the destination is filled.
template<int _SrcMode,int _DstMode,typename MatrixType,int DstOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef typename MatrixType::Scalar Scalar;
  SparseMatrix<Scalar,DstOrder,StorageIndex>& dest(_dest.derived());
  typedef Matrix<StorageIndex,Dynamic,1> VectorI;
  enum {
    SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
    StorageOrderMatch = int(SrcOrder) == int(DstOrder),
    // A row-major triangular storage of the Upper triangle enumerates the
    // same entries as a column-major storage of the Lower one, hence the
    // mode flips below.
    DstMode = DstOrder==RowMajor ? (_DstMode==Upper ? Lower : Upper) : _DstMode,
    SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
  };

  Index size = mat.rows();
  VectorI count(size);
  count.setZero();
  dest.resize(size,size);
  // Pass 1: count entries per destination outer vector. Entries outside the
  // source triangle are skipped; each kept entry lands in the outer vector
  // given by min/max of the permuted indices, depending on DstMode.
  for(StorageIndex j = 0; j<size; ++j)
  {
    StorageIndex jp = perm ? perm[j] : j;
    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
    {
      StorageIndex i = it.index();
      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
        continue;

      StorageIndex ip = perm ? perm[i] : i;
      count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
    }
  }
  // Prefix-sum into the outer index array, then reset 'count' to the write
  // positions for pass 2.
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  dest.resizeNonZeros(dest.outerIndexPtr()[size]);
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];

  // Pass 2: scatter values. An entry whose permuted coordinates fall in the
  // opposite destination triangle is stored conjugated (it represents the
  // mirrored coefficient of the self-adjoint matrix).
  for(StorageIndex j = 0; j<size; ++j)
  {

    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
    {
      StorageIndex i = it.index();
      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
        continue;

      StorageIndex jp = perm ? perm[j] : j;
      StorageIndex ip = perm? perm[i] : i;

      Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
      dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);

      if(!StorageOrderMatch) std::swap(ip,jp);
      if( ((int(DstMode)==int(Lower) && ip<jp) || (int(DstMode)==int(Upper) && ip>jp)))
        dest.valuePtr()[k] = numext::conj(it.value());
      else
        dest.valuePtr()[k] = it.value();
    }
  }
}
533 
534 }
535 
536 // TODO implement twists in a more evaluator friendly fashion
537 
538 namespace internal {
539 
// As for SparseSelfAdjointView: forward the nested matrix's traits.
template<typename MatrixType, int Mode>
struct traits<SparseSymmetricPermutationProduct<MatrixType,Mode> > : traits<MatrixType> {
};
543 
544 }
545 
// Expression of P * S * P^-1 where S is a self-adjoint matrix stored as the
// Mode triangle of 'mat' and P is the permutation 'perm'. It is evaluated by
// the Assignment specializations below (via permute_symm_to_fullsymm /
// permute_symm_to_symm).
template<typename MatrixType,int Mode>
class SparseSymmetricPermutationProduct
  : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,Mode> >
{
  public:
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::StorageIndex StorageIndex;
    enum {
      RowsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::RowsAtCompileTime,
      ColsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::ColsAtCompileTime
    };
  protected:
    typedef PermutationMatrix<Dynamic,Dynamic,StorageIndex> Perm;
  public:
    typedef Matrix<StorageIndex,Dynamic,1> VectorI;
    typedef typename MatrixType::Nested MatrixTypeNested;
    typedef typename internal::remove_all<MatrixTypeNested>::type NestedExpression;

    // NOTE(review): m_perm is held by reference, so the permutation must
    // outlive this expression — verify at call sites (twistedBy passes a
    // caller-owned permutation).
    SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
      : m_matrix(mat), m_perm(perm)
    {}

    inline Index rows() const { return m_matrix.rows(); }
    inline Index cols() const { return m_matrix.cols(); }

    /** \returns the nested (triangular) matrix expression */
    const NestedExpression& matrix() const { return m_matrix; }
    /** \returns the permutation applied on both sides */
    const Perm& perm() const { return m_perm; }

  protected:
    MatrixTypeNested m_matrix;
    const Perm& m_perm;

};
579 
580 namespace internal {
581 
// Evaluate a SparseSymmetricPermutationProduct either into a full sparse
// matrix (both triangles) or into another self-adjoint view (one triangle).
template<typename DstXprType, typename MatrixType, int Mode, typename Scalar>
struct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType,Mode>, internal::assign_op<Scalar>, Sparse2Sparse>
{
  typedef SparseSymmetricPermutationProduct<MatrixType,Mode> SrcXprType;
  typedef typename DstXprType::StorageIndex DstIndex;
  template<int Options>
  static void run(SparseMatrix<Scalar,Options,DstIndex> &dst, const SrcXprType &src, const internal::assign_op<Scalar> &)
  {
    // internal::permute_symm_to_fullsymm<Mode>(m_matrix,_dest,m_perm.indices().data());
    // Expand into a temporary of the opposite storage order, then let the
    // sparse assignment transpose it into dst.
    SparseMatrix<Scalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
    internal::permute_symm_to_fullsymm<Mode>(src.matrix(),tmp,src.perm().indices().data());
    dst = tmp;
  }

  // Destination is itself a self-adjoint view: only its stored triangle
  // (DestMode) needs to be produced.
  template<typename DestType,unsigned int DestMode>
  static void run(SparseSelfAdjointView<DestType,DestMode>& dst, const SrcXprType &src, const internal::assign_op<Scalar> &)
  {
    internal::permute_symm_to_symm<Mode,DestMode>(src.matrix(),dst.matrix(),src.perm().indices().data());
  }
};
602 
603 } // end namespace internal
604 
605 } // end namespace Eigen
606 
607 #endif // EIGEN_SPARSE_SELFADJOINTVIEW_H
/* Doxygen cross-reference residue from the generated documentation page
   (member signatures and "Definition:" anchors), preserved verbatim but
   commented out so it does not interfere with the header:

Product< SparseSelfAdjointView, OtherDerived > operator*(const SparseMatrixBase< OtherDerived > &rhs) const
Definition: SparseSelfAdjointView.h:80
Expression of the product of two arbitrary matrices or vectors.
Definition: Product.h:107
Definition: Constants.h:206
friend Product< OtherDerived, SparseSelfAdjointView > operator*(const SparseMatrixBase< OtherDerived > &lhs, const SparseSelfAdjointView &rhs)
Definition: SparseSelfAdjointView.h:92
friend Product< OtherDerived, SparseSelfAdjointView > operator*(const MatrixBase< OtherDerived > &lhs, const SparseSelfAdjointView &rhs)
Definition: SparseSelfAdjointView.h:108
Definition: Constants.h:320
Definition: LDLT.h:16
Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
Definition: SparseSelfAdjointView.h:43
Derived & derived()
Definition: EigenBase.h:44
Eigen::Index Index
The interface type of indices.
Definition: EigenBase.h:37
const unsigned int RowMajorBit
Definition: Constants.h:61
Definition: EigenBase.h:28
Base class of any sparse matrices or sparse expressions.
Definition: ForwardDeclarations.h:278
Product< SparseSelfAdjointView, OtherDerived > operator*(const MatrixBase< OtherDerived > &rhs) const
Definition: SparseSelfAdjointView.h:100
Definition: Constants.h:322
Definition: Constants.h:204
Definition: Eigen_Colamd.h:54
SparseSelfAdjointView & rankUpdate(const SparseMatrixBase< DerivedU > &u, const Scalar &alpha=Scalar(1))
SparseSymmetricPermutationProduct< _MatrixTypeNested, Mode > twistedBy(const PermutationMatrix< Dynamic, Dynamic, StorageIndex > &perm) const
Definition: SparseSelfAdjointView.h:126
Base class for all dense matrices, vectors, and expressions.
Definition: MatrixBase.h:48
*/