Eigen  3.3.8
SparseMatrix.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_SPARSEMATRIX_H
11 #define EIGEN_SPARSEMATRIX_H
12 
13 namespace Eigen {
14 
45 namespace internal {
46 template<typename _Scalar, int _Options, typename _StorageIndex>
47 struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
48 {
49  typedef _Scalar Scalar;
50  typedef _StorageIndex StorageIndex;
51  typedef Sparse StorageKind;
52  typedef MatrixXpr XprKind;
53  enum {
54  RowsAtCompileTime = Dynamic,
55  ColsAtCompileTime = Dynamic,
56  MaxRowsAtCompileTime = Dynamic,
57  MaxColsAtCompileTime = Dynamic,
58  Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
59  SupportedAccessPatterns = InnerRandomAccessPattern
60  };
61 };
62 
63 template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
64 struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
65 {
66  typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType;
67  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
68  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
69 
70  typedef _Scalar Scalar;
71  typedef Dense StorageKind;
72  typedef _StorageIndex StorageIndex;
73  typedef MatrixXpr XprKind;
74 
75  enum {
76  RowsAtCompileTime = Dynamic,
77  ColsAtCompileTime = 1,
78  MaxRowsAtCompileTime = Dynamic,
79  MaxColsAtCompileTime = 1,
80  Flags = LvalueBit
81  };
82 };
83 
84 template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
85 struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
86  : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
87 {
88  enum {
89  Flags = 0
90  };
91 };
92 
93 } // end namespace internal
94 
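// SparseMatrix implements the classic compressed row/column storage scheme:
//  - m_outerIndex: outerSize()+1 offsets delimiting each inner vector inside m_data,
//  - m_data:       the stored (value, inner index) pairs,
//  - m_innerNonZeros: optional per-vector counts; when it is allocated the matrix is in
//    "uncompressed" mode and each inner vector may keep free room after its entries.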
95 template<typename _Scalar, int _Options, typename _StorageIndex>
96 class SparseMatrix
97  : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
98 {
99  typedef SparseCompressedBase<SparseMatrix> Base;
100  using Base::convert_index;
101  friend class SparseVector<_Scalar,0,_StorageIndex>;
102  public:
103  using Base::isCompressed;
104  using Base::nonZeros;
105  EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
106  using Base::operator+=;
107  using Base::operator-=;
108 
112  typedef typename Base::InnerIterator InnerIterator;
113  typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
114 
115 
116  using Base::IsRowMajor;
117  typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
118  enum {
119  Options = _Options
120  };
121 
122  typedef typename Base::IndexVector IndexVector;
123  typedef typename Base::ScalarVector ScalarVector;
124  protected:
125  typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
126 
127  Index m_outerSize;
128  Index m_innerSize;
129  StorageIndex* m_outerIndex;
130  StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
131  Storage m_data;
132 
133  public:
134 
136  inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
138  inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
139 
141  inline Index innerSize() const { return m_innerSize; }
143  inline Index outerSize() const { return m_outerSize; }
144 
148  inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
152  inline Scalar* valuePtr() { return m_data.valuePtr(); }
153 
157  inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
161  inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
162 
166  inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
170  inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
171 
175  inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
179  inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
180 
182  inline Storage& data() { return m_data; }
184  inline const Storage& data() const { return m_data; }
185 
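  // Read-only access: binary search for 'inner' within the outer'th inner vector;
  // coefficients that are not explicitly stored are returned as zero by atInRange().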
188  inline Scalar coeff(Index row, Index col) const
189  {
190  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
191 
192  const Index outer = IsRowMajor ? row : col;
193  const Index inner = IsRowMajor ? col : row;
194  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
195  return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
196  }
197 
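  // Read/write access: sorted lookup that falls back to insert() when the coefficient
  // is not stored yet, which may be expensive on a compressed matrix.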
206  inline Scalar& coeffRef(Index row, Index col)
207  {
208  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
209 
210  const Index outer = IsRowMajor ? row : col;
211  const Index inner = IsRowMajor ? col : row;
212 
213  Index start = m_outerIndex[outer];
214  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
215  eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
216  if(end<=start)
217  return insert(row,col);
218  const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
219  if((p<end) && (m_data.index(p)==inner))
220  return m_data.value(p);
221  else
222  return insert(row,col);
223  }
224 
240  Scalar& insert(Index row, Index col);
241 
242  public:
243 
251  inline void setZero()
252  {
253  m_data.clear();
254  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
255  if(m_innerNonZeros)
256  memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
257  }
258 
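  // Preallocates room for reserveSize non-zeros; per the assertion below, this overload
  // is only meaningful in compressed mode.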
262  inline void reserve(Index reserveSize)
263  {
264  eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
265  m_data.reserve(reserveSize);
266  }
267 
268  #ifdef EIGEN_PARSED_BY_DOXYGEN
269 
281  template<class SizesType>
282  inline void reserve(const SizesType& reserveSizes);
283  #else
284  template<class SizesType>
285  inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
286  #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
287  typename
288  #endif
289  SizesType::value_type())
290  {
291  EIGEN_UNUSED_VARIABLE(enableif);
292  reserveInnerVectors(reserveSizes);
293  }
294  #endif // EIGEN_PARSED_BY_DOXYGEN
295  protected:
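  // Per-inner-vector reservation: compute new start offsets including the requested
  // slack, then move the existing entries right-to-left into place. In compressed mode
  // the matrix is switched to uncompressed storage first.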
296  template<class SizesType>
297  inline void reserveInnerVectors(const SizesType& reserveSizes)
298  {
299  if(isCompressed())
300  {
301  Index totalReserveSize = 0;
302  // turn the matrix into non-compressed mode
303  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
304  if (!m_innerNonZeros) internal::throw_std_bad_alloc();
305 
306  // temporarily use m_innerNonZeros to hold the new starting points.
307  StorageIndex* newOuterIndex = m_innerNonZeros;
308 
309  StorageIndex count = 0;
310  for(Index j=0; j<m_outerSize; ++j)
311  {
312  newOuterIndex[j] = count;
313  count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
314  totalReserveSize += reserveSizes[j];
315  }
316  m_data.reserve(totalReserveSize);
317  StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
318  for(Index j=m_outerSize-1; j>=0; --j)
319  {
320  StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
321  for(Index i=innerNNZ-1; i>=0; --i)
322  {
323  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
324  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
325  }
326  previousOuterIndex = m_outerIndex[j];
327  m_outerIndex[j] = newOuterIndex[j];
328  m_innerNonZeros[j] = innerNNZ;
329  }
330  if(m_outerSize>0)
331  m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
332 
333  m_data.resize(m_outerIndex[m_outerSize]);
334  }
335  else
336  {
337  StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
338  if (!newOuterIndex) internal::throw_std_bad_alloc();
339 
340  StorageIndex count = 0;
341  for(Index j=0; j<m_outerSize; ++j)
342  {
343  newOuterIndex[j] = count;
344  StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
345  StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
346  count += toReserve + m_innerNonZeros[j];
347  }
348  newOuterIndex[m_outerSize] = count;
349 
350  m_data.resize(count);
351  for(Index j=m_outerSize-1; j>=0; --j)
352  {
353  Index offset = newOuterIndex[j] - m_outerIndex[j];
354  if(offset>0)
355  {
356  StorageIndex innerNNZ = m_innerNonZeros[j];
357  for(Index i=innerNNZ-1; i>=0; --i)
358  {
359  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
360  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
361  }
362  }
363  }
364 
365  std::swap(m_outerIndex, newOuterIndex);
366  std::free(newOuterIndex);
367  }
368 
369  }
370  public:
371 
372  //--- low level purely coherent filling ---
373 
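  // The functions below form the low-level sequential filling API: inner vectors must be
  // filled in order, with strictly increasing inner indices. A typical pattern (an
  // illustrative sketch; sizes, indices and values are placeholders):
  //
  //   SparseMatrix<double> m(rows, cols);
  //   m.reserve(estimated_nnz);
  //   for(Index j = 0; j < m.outerSize(); ++j)
  //   {
  //     m.startVec(j);
  //     // for each inner index i of vector j, in increasing order:
  //     m.insertBackByOuterInner(j, i) = value;
  //   }
  //   m.finalize();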
384  inline Scalar& insertBack(Index row, Index col)
385  {
386  return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
387  }
388 
391  inline Scalar& insertBackByOuterInner(Index outer, Index inner)
392  {
393  eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
394  eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
395  Index p = m_outerIndex[outer+1];
396  ++m_outerIndex[outer+1];
397  m_data.append(Scalar(0), inner);
398  return m_data.value(p);
399  }
400 
403  inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
404  {
405  Index p = m_outerIndex[outer+1];
406  ++m_outerIndex[outer+1];
407  m_data.append(Scalar(0), inner);
408  return m_data.value(p);
409  }
410 
413  inline void startVec(Index outer)
414  {
415  eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
416  eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
417  m_outerIndex[outer+1] = m_outerIndex[outer];
418  }
419 
423  inline void finalize()
424  {
425  if(isCompressed())
426  {
427  StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
428  Index i = m_outerSize;
429  // find the last filled column
430  while (i>=0 && m_outerIndex[i]==0)
431  --i;
432  ++i;
433  while (i<=m_outerSize)
434  {
435  m_outerIndex[i] = size;
436  ++i;
437  }
438  }
439  }
440 
441  //---
442 
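  // High-level filling from a randomly ordered list of (row, col, value) entries;
  // duplicates are combined (summed by default, or merged with dup_func). Illustrative
  // usage, assuming <vector> and Eigen::Triplet, with hypothetical indices and sizes:
  //
  //   std::vector<Eigen::Triplet<double> > triplets;
  //   triplets.push_back(Eigen::Triplet<double>(i, j, v));
  //   SparseMatrix<double> m(rows, cols);
  //   m.setFromTriplets(triplets.begin(), triplets.end());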
443  template<typename InputIterators>
444  void setFromTriplets(const InputIterators& begin, const InputIterators& end);
445 
446  template<typename InputIterators,typename DupFunctor>
447  void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
448 
449  void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }
450 
451  template<typename DupFunctor>
452  void collapseDuplicates(DupFunctor dup_func = DupFunctor());
453 
454  //---
455 
458  Scalar& insertByOuterInner(Index j, Index i)
459  {
460  return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
461  }
462 
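  // Packs the inner vectors so that no free room remains between them, frees
  // m_innerNonZeros and trims m_data: the matrix is left in compressed mode.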
465  void makeCompressed()
466  {
467  if(isCompressed())
468  return;
469 
470  eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);
471 
472  Index oldStart = m_outerIndex[1];
473  m_outerIndex[1] = m_innerNonZeros[0];
474  for(Index j=1; j<m_outerSize; ++j)
475  {
476  Index nextOldStart = m_outerIndex[j+1];
477  Index offset = oldStart - m_outerIndex[j];
478  if(offset>0)
479  {
480  for(Index k=0; k<m_innerNonZeros[j]; ++k)
481  {
482  m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
483  m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
484  }
485  }
486  m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
487  oldStart = nextOldStart;
488  }
489  std::free(m_innerNonZeros);
490  m_innerNonZeros = 0;
491  m_data.resize(m_outerIndex[m_outerSize]);
492  m_data.squeeze();
493  }
494 
496  void uncompress()
497  {
498  if(m_innerNonZeros != 0)
499  return;
500  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
501  for (Index i = 0; i < m_outerSize; i++)
502  {
503  m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
504  }
505  }
506 
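  // prune() suppresses stored coefficients: the first overload drops entries that are
  // much smaller than 'reference' with respect to 'epsilon'; the generic overload keeps
  // only the entries for which keep(row, col, value) returns true. The result is compressed.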
508  void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
509  {
510  prune(default_prunning_func(reference,epsilon));
511  }
512 
520  template<typename KeepFunc>
521  void prune(const KeepFunc& keep = KeepFunc())
522  {
523  // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
524  makeCompressed();
525 
526  StorageIndex k = 0;
527  for(Index j=0; j<m_outerSize; ++j)
528  {
529  Index previousStart = m_outerIndex[j];
530  m_outerIndex[j] = k;
531  Index end = m_outerIndex[j+1];
532  for(Index i=previousStart; i<end; ++i)
533  {
534  if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
535  {
536  m_data.value(k) = m_data.value(i);
537  m_data.index(k) = m_data.index(i);
538  ++k;
539  }
540  }
541  }
542  m_outerIndex[m_outerSize] = k;
543  m_data.resize(k,0);
544  }
545 
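  // Resizes the matrix while preserving the existing non-zeros; entries whose inner
  // index falls outside the new inner size are dropped.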
554  void conservativeResize(Index rows, Index cols)
555  {
556  // No change
557  if (this->rows() == rows && this->cols() == cols) return;
558 
559  // If one dimension is null, then there is nothing to be preserved
560  if(rows==0 || cols==0) return resize(rows,cols);
561 
562  Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
563  Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
564  StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);
565 
566  // Deals with inner non zeros
567  if (m_innerNonZeros)
568  {
569  // Resize m_innerNonZeros
570  StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
571  if (!newInnerNonZeros) internal::throw_std_bad_alloc();
572  m_innerNonZeros = newInnerNonZeros;
573 
574  for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
575  m_innerNonZeros[i] = 0;
576  }
577  else if (innerChange < 0)
578  {
579  // Inner size decreased: allocate a new m_innerNonZeros
580  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));
581  if (!m_innerNonZeros) internal::throw_std_bad_alloc();
582  for(Index i = 0; i < m_outerSize; i++)
583  m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
584  }
585 
586  // Change the m_innerNonZeros in case of a decrease of inner size
587  if (m_innerNonZeros && innerChange < 0)
588  {
589  for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
590  {
591  StorageIndex &n = m_innerNonZeros[i];
592  StorageIndex start = m_outerIndex[i];
593  while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
594  }
595  }
596 
597  m_innerSize = newInnerSize;
598 
599  // Re-allocate outer index structure if necessary
600  if (outerChange == 0)
601  return;
602 
603  StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
604  if (!newOuterIndex) internal::throw_std_bad_alloc();
605  m_outerIndex = newOuterIndex;
606  if (outerChange > 0)
607  {
608  StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
609  for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
610  m_outerIndex[i] = last;
611  }
612  m_outerSize += outerChange;
613  }
614 
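  // Destructive resize: sets the dimensions to rows x cols and removes all non-zeros
  // (use conservativeResize() to keep them).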
622  void resize(Index rows, Index cols)
623  {
624  const Index outerSize = IsRowMajor ? rows : cols;
625  m_innerSize = IsRowMajor ? cols : rows;
626  m_data.clear();
627  if (m_outerSize != outerSize || m_outerSize==0)
628  {
629  std::free(m_outerIndex);
630  m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
631  if (!m_outerIndex) internal::throw_std_bad_alloc();
632 
633  m_outerSize = outerSize;
634  }
635  if(m_innerNonZeros)
636  {
637  std::free(m_innerNonZeros);
638  m_innerNonZeros = 0;
639  }
640  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
641  }
642 
645  void resizeNonZeros(Index size)
646  {
647  m_data.resize(size);
648  }
649 
652 
658 
660  inline SparseMatrix()
661  : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
662  {
663  check_template_parameters();
664  resize(0, 0);
665  }
666 
668  inline SparseMatrix(Index rows, Index cols)
669  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
670  {
671  check_template_parameters();
672  resize(rows, cols);
673  }
674 
676  template<typename OtherDerived>
677  inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
678  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
679  {
680  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
681  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
682  check_template_parameters();
683  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
684  if (needToTranspose)
685  *this = other.derived();
686  else
687  {
688  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
689  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
690  #endif
691  internal::call_assignment_no_alias(*this, other.derived());
692  }
693  }
694 
696  template<typename OtherDerived, unsigned int UpLo>
697  inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
698  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
699  {
700  check_template_parameters();
701  Base::operator=(other);
702  }
703 
705  inline SparseMatrix(const SparseMatrix& other)
706  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
707  {
708  check_template_parameters();
709  *this = other.derived();
710  }
711 
713  template<typename OtherDerived>
714  SparseMatrix(const ReturnByValue<OtherDerived>& other)
715  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
716  {
717  check_template_parameters();
718  initAssignment(other);
719  other.evalTo(*this);
720  }
721 
723  template<typename OtherDerived>
724  explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
725  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
726  {
727  check_template_parameters();
728  *this = other.derived();
729  }
730 
733  inline void swap(SparseMatrix& other)
734  {
735  //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
736  std::swap(m_outerIndex, other.m_outerIndex);
737  std::swap(m_innerSize, other.m_innerSize);
738  std::swap(m_outerSize, other.m_outerSize);
739  std::swap(m_innerNonZeros, other.m_innerNonZeros);
740  m_data.swap(other.m_data);
741  }
742 
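  // Sets *this to the identity matrix; only valid for square matrices (see the assertion
  // below), and the result is stored in compressed mode.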
745  inline void setIdentity()
746  {
747  eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
748  this->m_data.resize(rows());
749  Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
750  Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
751  Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
752  std::free(m_innerNonZeros);
753  m_innerNonZeros = 0;
754  }
755  inline SparseMatrix& operator=(const SparseMatrix& other)
756  {
757  if (other.isRValue())
758  {
759  swap(other.const_cast_derived());
760  }
761  else if(this!=&other)
762  {
763  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
764  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
765  #endif
766  initAssignment(other);
767  if(other.isCompressed())
768  {
769  internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
770  m_data = other.m_data;
771  }
772  else
773  {
774  Base::operator=(other);
775  }
776  }
777  return *this;
778  }
779 
780 #ifndef EIGEN_PARSED_BY_DOXYGEN
781  template<typename OtherDerived>
782  inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
783  { return Base::operator=(other.derived()); }
784 #endif // EIGEN_PARSED_BY_DOXYGEN
785 
786  template<typename OtherDerived>
787  EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
788 
789  friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
790  {
791  EIGEN_DBG_SPARSE(
792  s << "Nonzero entries:\n";
793  if(m.isCompressed())
794  {
795  for (Index i=0; i<m.nonZeros(); ++i)
796  s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
797  }
798  else
799  {
800  for (Index i=0; i<m.outerSize(); ++i)
801  {
802  Index p = m.m_outerIndex[i];
803  Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
804  Index k=p;
805  for (; k<pe; ++k) {
806  s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
807  }
808  for (; k<m.m_outerIndex[i+1]; ++k) {
809  s << "(_,_) ";
810  }
811  }
812  }
813  s << std::endl;
814  s << std::endl;
815  s << "Outer pointers:\n";
816  for (Index i=0; i<m.outerSize(); ++i) {
817  s << m.m_outerIndex[i] << " ";
818  }
819  s << " $" << std::endl;
820  if(!m.isCompressed())
821  {
822  s << "Inner non zeros:\n";
823  for (Index i=0; i<m.outerSize(); ++i) {
824  s << m.m_innerNonZeros[i] << " ";
825  }
826  s << " $" << std::endl;
827  }
828  s << std::endl;
829  );
830  s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
831  return s;
832  }
833 
835  inline ~SparseMatrix()
836  {
837  std::free(m_outerIndex);
838  std::free(m_innerNonZeros);
839  }
840 
842  Scalar sum() const;
843 
844 # ifdef EIGEN_SPARSEMATRIX_PLUGIN
845 # include EIGEN_SPARSEMATRIX_PLUGIN
846 # endif
847 
848 protected:
849 
850  template<typename Other>
851  void initAssignment(const Other& other)
852  {
853  resize(other.rows(), other.cols());
854  if(m_innerNonZeros)
855  {
856  std::free(m_innerNonZeros);
857  m_innerNonZeros = 0;
858  }
859  }
860 
863  EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
864 
867  class SingletonVector
868  {
869  StorageIndex m_index;
870  StorageIndex m_value;
871  public:
872  typedef StorageIndex value_type;
873  SingletonVector(Index i, Index v)
874  : m_index(convert_index(i)), m_value(convert_index(v))
875  {}
876 
877  StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
878  };
879 
882  EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
883 
884 public:
887  EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
888  {
889  const Index outer = IsRowMajor ? row : col;
890  const Index inner = IsRowMajor ? col : row;
891 
892  eigen_assert(!isCompressed());
893  eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
894 
895  Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
896  m_data.index(p) = convert_index(inner);
897  return (m_data.value(p) = Scalar(0));
898  }
899 
900 private:
901  static void check_template_parameters()
902  {
903  EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
904  EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
905  }
906 
907  struct default_prunning_func {
908  default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
909  inline bool operator() (const Index&, const Index&, const Scalar& value) const
910  {
911  return !internal::isMuchSmallerThan(value, reference, epsilon);
912  }
913  Scalar reference;
914  RealScalar epsilon;
915  };
916 };
917 
918 namespace internal {
919 
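// Helper for setFromTriplets(): fills a temporary matrix with the opposite storage order
// (count, unordered insertion, duplicate collapsing), then relies on the final transposed
// assignment "mat = trMat" to sort the entries of each inner vector.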
920 template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
921 void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
922 {
923  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
924  typedef typename SparseMatrixType::Scalar Scalar;
925  typedef typename SparseMatrixType::StorageIndex StorageIndex;
926  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());
927 
928  if(begin!=end)
929  {
930  // pass 1: count the nnz per inner-vector
931  typename SparseMatrixType::IndexVector wi(trMat.outerSize());
932  wi.setZero();
933  for(InputIterator it(begin); it!=end; ++it)
934  {
935  eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
936  wi(IsRowMajor ? it->col() : it->row())++;
937  }
938 
939  // pass 2: insert all the elements into trMat
940  trMat.reserve(wi);
941  for(InputIterator it(begin); it!=end; ++it)
942  trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
943 
944  // pass 3:
945  trMat.collapseDuplicates(dup_func);
946  }
947 
948  // pass 4: transposed copy -> implicit sorting
949  mat = trMat;
950 }
951 
952 }
953 
954 
992 template<typename Scalar, int _Options, typename _StorageIndex>
993 template<typename InputIterators>
994 void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
995 {
996  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
997 }
998 
1008 template<typename Scalar, int _Options, typename _StorageIndex>
1009 template<typename InputIterators,typename DupFunctor>
1010 void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
1011 {
1012  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
1013 }
1014 
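// In-place duplicate collapsing: wi(i) holds the position where inner index i was first
// written for the current outer vector, so later occurrences are merged into it with
// dup_func. The matrix is turned back into compressed mode at the end.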
1016 template<typename Scalar, int _Options, typename _StorageIndex>
1017 template<typename DupFunctor>
1018 void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
1019 {
1020  eigen_assert(!isCompressed());
1021  // TODO, in practice we should be able to use m_innerNonZeros for that task
1022  IndexVector wi(innerSize());
1023  wi.fill(-1);
1024  StorageIndex count = 0;
1025  // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
1026  for(Index j=0; j<outerSize(); ++j)
1027  {
1028  StorageIndex start = count;
1029  Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
1030  for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
1031  {
1032  Index i = m_data.index(k);
1033  if(wi(i)>=start)
1034  {
1035  // we have already met this entry => accumulate it
1036  m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
1037  }
1038  else
1039  {
1040  m_data.value(count) = m_data.value(k);
1041  m_data.index(count) = m_data.index(k);
1042  wi(i) = count;
1043  ++count;
1044  }
1045  }
1046  m_outerIndex[j] = start;
1047  }
1048  m_outerIndex[m_outerSize] = count;
1049 
1050  // turn the matrix into compressed form
1051  std::free(m_innerNonZeros);
1052  m_innerNonZeros = 0;
1053  m_data.resize(m_outerIndex[m_outerSize]);
1054 }
1055 
1056 template<typename Scalar, int _Options, typename _StorageIndex>
1057 template<typename OtherDerived>
1058 EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
1059 {
1060  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
1061  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1062 
1063  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1064  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1065  #endif
1066 
1067  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
1068  if (needToTranspose)
1069  {
1070  #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1071  EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1072  #endif
1073  // two passes algorithm:
1074  // 1 - compute the number of coeffs per dest inner vector
1075  // 2 - do the actual copy/eval
1076  // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1077  typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
1078  typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
1079  typedef internal::evaluator<_OtherCopy> OtherCopyEval;
1080  OtherCopy otherCopy(other.derived());
1081  OtherCopyEval otherCopyEval(otherCopy);
1082 
1083  SparseMatrix dest(other.rows(),other.cols());
1084  Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
1085 
1086  // pass 1
1087  // FIXME the above copy could be merged with that pass
1088  for (Index j=0; j<otherCopy.outerSize(); ++j)
1089  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1090  ++dest.m_outerIndex[it.index()];
1091 
1092  // prefix sum
1093  StorageIndex count = 0;
1094  IndexVector positions(dest.outerSize());
1095  for (Index j=0; j<dest.outerSize(); ++j)
1096  {
1097  StorageIndex tmp = dest.m_outerIndex[j];
1098  dest.m_outerIndex[j] = count;
1099  positions[j] = count;
1100  count += tmp;
1101  }
1102  dest.m_outerIndex[dest.outerSize()] = count;
1103  // alloc
1104  dest.m_data.resize(count);
1105  // pass 2
1106  for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
1107  {
1108  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1109  {
1110  Index pos = positions[it.index()]++;
1111  dest.m_data.index(pos) = j;
1112  dest.m_data.value(pos) = it.value();
1113  }
1114  }
1115  this->swap(dest);
1116  return *this;
1117  }
1118  else
1119  {
1120  if(other.isRValue())
1121  {
1122  initAssignment(other.derived());
1123  }
1124  // there is no special optimization
1125  return Base::operator=(other.derived());
1126  }
1127 }
1128 
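// General random insertion. A compressed matrix is first switched to uncompressed mode;
// two cheap "append" cases are then handled directly, otherwise the remaining capacity is
// redistributed (2 extra entries per inner vector) and insertUncompressed() finishes the job.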
1129 template<typename _Scalar, int _Options, typename _StorageIndex>
1130 typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
1131 {
1132  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
1133 
1134  const Index outer = IsRowMajor ? row : col;
1135  const Index inner = IsRowMajor ? col : row;
1136 
1137  if(isCompressed())
1138  {
1139  if(nonZeros()==0)
1140  {
1141  // reserve space if not already done
1142  if(m_data.allocatedSize()==0)
1143  m_data.reserve(2*m_innerSize);
1144 
1145  // turn the matrix into non-compressed mode
1146  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1147  if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1148 
1149  memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
1150 
1151  // pack all inner-vectors to the end of the pre-allocated space
1152  // and allocate the entire free-space to the first inner-vector
1153  StorageIndex end = convert_index(m_data.allocatedSize());
1154  for(Index j=1; j<=m_outerSize; ++j)
1155  m_outerIndex[j] = end;
1156  }
1157  else
1158  {
1159  // turn the matrix into non-compressed mode
1160  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1161  if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1162  for(Index j=0; j<m_outerSize; ++j)
1163  m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
1164  }
1165  }
1166 
1167  // check whether we can do a fast "push back" insertion
1168  Index data_end = m_data.allocatedSize();
1169 
1170  // First case: we are filling a new inner vector which is packed at the end.
1171  // We assume that all remaining inner-vectors are also empty and packed to the end.
1172  if(m_outerIndex[outer]==data_end)
1173  {
1174  eigen_internal_assert(m_innerNonZeros[outer]==0);
1175 
1176  // pack previous empty inner-vectors to end of the used-space
1177  // and allocate the entire free-space to the current inner-vector.
1178  StorageIndex p = convert_index(m_data.size());
1179  Index j = outer;
1180  while(j>=0 && m_innerNonZeros[j]==0)
1181  m_outerIndex[j--] = p;
1182 
1183  // push back the new element
1184  ++m_innerNonZeros[outer];
1185  m_data.append(Scalar(0), inner);
1186 
1187  // check for reallocation
1188  if(data_end != m_data.allocatedSize())
1189  {
1190  // m_data has been reallocated
1191  // -> move remaining inner-vectors back to the end of the free-space
1192  // so that the entire free-space is allocated to the current inner-vector.
1193  eigen_internal_assert(data_end < m_data.allocatedSize());
1194  StorageIndex new_end = convert_index(m_data.allocatedSize());
1195  for(Index k=outer+1; k<=m_outerSize; ++k)
1196  if(m_outerIndex[k]==data_end)
1197  m_outerIndex[k] = new_end;
1198  }
1199  return m_data.value(p);
1200  }
1201 
1202  // Second case: the next inner-vector is packed to the end
1203  // and the current inner-vector end match the used-space.
1204  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
1205  {
1206  eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);
1207 
1208  // add space for the new element
1209  ++m_innerNonZeros[outer];
1210  m_data.resize(m_data.size()+1);
1211 
1212  // check for reallocation
1213  if(data_end != m_data.allocatedSize())
1214  {
1215  // m_data has been reallocated
1216  // -> move remaining inner-vectors back to the end of the free-space
1217  // so that the entire free-space is allocated to the current inner-vector.
1218  eigen_internal_assert(data_end < m_data.allocatedSize());
1219  StorageIndex new_end = convert_index(m_data.allocatedSize());
1220  for(Index k=outer+1; k<=m_outerSize; ++k)
1221  if(m_outerIndex[k]==data_end)
1222  m_outerIndex[k] = new_end;
1223  }
1224 
1225  // and insert it at the right position (sorted insertion)
1226  Index startId = m_outerIndex[outer];
1227  Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
1228  while ( (p > startId) && (m_data.index(p-1) > inner) )
1229  {
1230  m_data.index(p) = m_data.index(p-1);
1231  m_data.value(p) = m_data.value(p-1);
1232  --p;
1233  }
1234 
1235  m_data.index(p) = convert_index(inner);
1236  return (m_data.value(p) = 0);
1237  }
1238 
1239  if(m_data.size() != m_data.allocatedSize())
1240  {
1241  // make sure the matrix is compatible with random uncompressed insertion:
1242  m_data.resize(m_data.allocatedSize());
1243  this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
1244  }
1245 
1246  return insertUncompressed(row,col);
1247 }
1248 
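// Insertion in uncompressed mode: if the target inner vector is full, room is reserved
// first, then the existing entries are shifted so the inner indices stay sorted.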
1249 template<typename _Scalar, int _Options, typename _StorageIndex>
1250 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
1251 {
1252  eigen_assert(!isCompressed());
1253 
1254  const Index outer = IsRowMajor ? row : col;
1255  const StorageIndex inner = convert_index(IsRowMajor ? col : row);
1256 
1257  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
1258  StorageIndex innerNNZ = m_innerNonZeros[outer];
1259  if(innerNNZ>=room)
1260  {
1261  // this inner vector is full, we need to reallocate the whole buffer :(
1262  reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
1263  }
1264 
1265  Index startId = m_outerIndex[outer];
1266  Index p = startId + m_innerNonZeros[outer];
1267  while ( (p > startId) && (m_data.index(p-1) > inner) )
1268  {
1269  m_data.index(p) = m_data.index(p-1);
1270  m_data.value(p) = m_data.value(p-1);
1271  --p;
1272  }
1273  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
1274 
1275  m_innerNonZeros[outer]++;
1276 
1277  m_data.index(p) = inner;
1278  return (m_data.value(p) = Scalar(0));
1279 }
1280 
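// Insertion in compressed mode: outer indices of the following vectors are updated and the
// tail of the storage is shifted; the reallocation ratio is derived from the current
// filling ratio to limit the number of reallocations.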
1281 template<typename _Scalar, int _Options, typename _StorageIndex>
1282 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
1283 {
1284  eigen_assert(isCompressed());
1285 
1286  const Index outer = IsRowMajor ? row : col;
1287  const Index inner = IsRowMajor ? col : row;
1288 
1289  Index previousOuter = outer;
1290  if (m_outerIndex[outer+1]==0)
1291  {
1292  // we start a new inner vector
1293  while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
1294  {
1295  m_outerIndex[previousOuter] = convert_index(m_data.size());
1296  --previousOuter;
1297  }
1298  m_outerIndex[outer+1] = m_outerIndex[outer];
1299  }
1300 
1301  // here we have to handle the tricky case where the outerIndex array
1302  // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
1303  // the 2nd inner vector...
1304  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
1305  && (std::size_t(m_outerIndex[outer+1]) == m_data.size());
1306 
1307  std::size_t startId = m_outerIndex[outer];
1308  // FIXME let's make sure sizeof(long int) == sizeof(std::size_t)
1309  std::size_t p = m_outerIndex[outer+1];
1310  ++m_outerIndex[outer+1];
1311 
1312  double reallocRatio = 1;
1313  if (m_data.allocatedSize()<=m_data.size())
1314  {
1315  // if there is no preallocated memory, let's reserve a minimum of 32 elements
1316  if (m_data.size()==0)
1317  {
1318  m_data.reserve(32);
1319  }
1320  else
1321  {
1322  // we need to reallocate the data, to reduce multiple reallocations
1323  // we use a smart resize algorithm based on the current filling ratio
1324  // in addition, we use double to avoid integers overflows
1325  double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
1326  reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
1327  // furthermore we bound the realloc ratio to:
1328  // 1) reduce repeated small reallocations when the matrix is almost filled
1329  // 2) avoid allocating too much memory when the matrix is almost empty
1330  reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
1331  }
1332  }
1333  m_data.resize(m_data.size()+1,reallocRatio);
1334 
1335  if (!isLastVec)
1336  {
1337  if (previousOuter==-1)
1338  {
1339  // oops wrong guess.
1340  // let's correct the outer offsets
1341  for (Index k=0; k<=(outer+1); ++k)
1342  m_outerIndex[k] = 0;
1343  Index k=outer+1;
1344  while(m_outerIndex[k]==0)
1345  m_outerIndex[k++] = 1;
1346  while (k<=m_outerSize && m_outerIndex[k]!=0)
1347  m_outerIndex[k++]++;
1348  p = 0;
1349  --k;
1350  k = m_outerIndex[k]-1;
1351  while (k>0)
1352  {
1353  m_data.index(k) = m_data.index(k-1);
1354  m_data.value(k) = m_data.value(k-1);
1355  k--;
1356  }
1357  }
1358  else
1359  {
1360  // we are not inserting into the last inner vec
1361  // update outer indices:
1362  Index j = outer+2;
1363  while (j<=m_outerSize && m_outerIndex[j]!=0)
1364  m_outerIndex[j++]++;
1365  --j;
1366  // shift data of last vecs:
1367  Index k = m_outerIndex[j]-1;
1368  while (k>=Index(p))
1369  {
1370  m_data.index(k) = m_data.index(k-1);
1371  m_data.value(k) = m_data.value(k-1);
1372  k--;
1373  }
1374  }
1375  }
1376 
1377  while ( (p > startId) && (m_data.index(p-1) > inner) )
1378  {
1379  m_data.index(p) = m_data.index(p-1);
1380  m_data.value(p) = m_data.value(p-1);
1381  --p;
1382  }
1383 
1384  m_data.index(p) = inner;
1385  return (m_data.value(p) = Scalar(0));
1386 }
1387 
1388 namespace internal {
1389 
1390 template<typename _Scalar, int _Options, typename _StorageIndex>
1391 struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
1392  : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
1393 {
1394  typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
1395  typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
1396  evaluator() : Base() {}
1397  explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
1398 };
1399 
1400 }
1401 
1402 } // end namespace Eigen
1403 
1404 #endif // EIGEN_SPARSEMATRIX_H