author    Stanislaw Halik <sthalik@misaki.pl>    2017-03-25 14:17:07 +0100
committer Stanislaw Halik <sthalik@misaki.pl>    2017-03-25 14:17:07 +0100
commit    35f7829af10c61e33dd2e2a7a015058e11a11ea0 (patch)
tree      7135010dcf8fd0a49f3020d52112709bcb883bd6 /eigen/Eigen/src/SparseCore
parent    6e8724193e40a932faf9064b664b529e7301c578 (diff)
update
Diffstat (limited to 'eigen/Eigen/src/SparseCore')
-rw-r--r--  eigen/Eigen/src/SparseCore/AmbiVector.h                          98
-rw-r--r--  eigen/Eigen/src/SparseCore/CMakeLists.txt                         6
-rw-r--r--  eigen/Eigen/src/SparseCore/CompressedStorage.h                  138
-rw-r--r--  eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h    216
-rw-r--r--  eigen/Eigen/src/SparseCore/MappedSparseMatrix.h                 164
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseAssign.h                       216
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseBlock.h                        681
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseColEtree.h                      44
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseCompressedBase.h               357
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h                662
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h                 155
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseDenseProduct.h                 417
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h              228
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseDot.h                           17
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseFuzzy.h                         29
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseMap.h                          305
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseMatrix.h                       637
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseMatrixBase.h                   269
-rw-r--r--  eigen/Eigen/src/SparseCore/SparsePermutation.h                  170
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseProduct.h                      291
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseRedux.h                          5
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseRef.h                          397
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h              556
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseSolverBase.h                   124
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h      84
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseTranspose.h                     99
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseTriangularView.h               252
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseUtil.h                         106
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseVector.h                       208
-rw-r--r--  eigen/Eigen/src/SparseCore/SparseView.h                         230
-rw-r--r--  eigen/Eigen/src/SparseCore/TriangularSolver.h                   117
31 files changed, 4790 insertions, 2488 deletions
diff --git a/eigen/Eigen/src/SparseCore/AmbiVector.h b/eigen/Eigen/src/SparseCore/AmbiVector.h
index 220c645..8a5cc91 100644
--- a/eigen/Eigen/src/SparseCore/AmbiVector.h
+++ b/eigen/Eigen/src/SparseCore/AmbiVector.h
@@ -19,15 +19,15 @@ namespace internal {
*
* See BasicSparseLLT and SparseProduct for usage examples.
*/
-template<typename _Scalar, typename _Index>
+template<typename _Scalar, typename _StorageIndex>
class AmbiVector
{
public:
typedef _Scalar Scalar;
- typedef _Index Index;
+ typedef _StorageIndex StorageIndex;
typedef typename NumTraits<Scalar>::Real RealScalar;
- AmbiVector(Index size)
+ explicit AmbiVector(Index size)
: m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
{
resize(size);
@@ -39,7 +39,7 @@ class AmbiVector
Index nonZeros() const;
/** Specifies a sub-vector to work on */
- void setBounds(Index start, Index end) { m_start = start; m_end = end; }
+ void setBounds(Index start, Index end) { m_start = convert_index(start); m_end = convert_index(end); }
void setZero();
@@ -55,12 +55,16 @@ class AmbiVector
{
if (m_allocatedSize < size)
reallocate(size);
- m_size = size;
+ m_size = convert_index(size);
}
- Index size() const { return m_size; }
+ StorageIndex size() const { return m_size; }
protected:
+ StorageIndex convert_index(Index idx)
+ {
+ return internal::convert_index<StorageIndex>(idx);
+ }
void reallocate(Index size)
{
@@ -70,15 +74,15 @@ class AmbiVector
if (size<1000)
{
Index allocSize = (size * sizeof(ListEl) + sizeof(Scalar) - 1)/sizeof(Scalar);
- m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl);
+ m_allocatedElements = convert_index((allocSize*sizeof(Scalar))/sizeof(ListEl));
m_buffer = new Scalar[allocSize];
}
else
{
- m_allocatedElements = (size*sizeof(Scalar))/sizeof(ListEl);
+ m_allocatedElements = convert_index((size*sizeof(Scalar))/sizeof(ListEl));
m_buffer = new Scalar[size];
}
- m_size = size;
+ m_size = convert_index(size);
m_start = 0;
m_end = m_size;
}
@@ -86,7 +90,7 @@ class AmbiVector
void reallocateSparse()
{
Index copyElements = m_allocatedElements;
- m_allocatedElements = (std::min)(Index(m_allocatedElements*1.5),m_size);
+ m_allocatedElements = (std::min)(StorageIndex(m_allocatedElements*1.5),m_size);
Index allocSize = m_allocatedElements * sizeof(ListEl);
allocSize = (allocSize + sizeof(Scalar) - 1)/sizeof(Scalar);
Scalar* newBuffer = new Scalar[allocSize];
@@ -99,30 +103,30 @@ class AmbiVector
// element type of the linked list
struct ListEl
{
- Index next;
- Index index;
+ StorageIndex next;
+ StorageIndex index;
Scalar value;
};
// used to store data in both mode
Scalar* m_buffer;
Scalar m_zero;
- Index m_size;
- Index m_start;
- Index m_end;
- Index m_allocatedSize;
- Index m_allocatedElements;
- Index m_mode;
+ StorageIndex m_size;
+ StorageIndex m_start;
+ StorageIndex m_end;
+ StorageIndex m_allocatedSize;
+ StorageIndex m_allocatedElements;
+ StorageIndex m_mode;
// linked list mode
- Index m_llStart;
- Index m_llCurrent;
- Index m_llSize;
+ StorageIndex m_llStart;
+ StorageIndex m_llCurrent;
+ StorageIndex m_llSize;
};
/** \returns the number of non zeros in the current sub vector */
-template<typename _Scalar,typename _Index>
-_Index AmbiVector<_Scalar,_Index>::nonZeros() const
+template<typename _Scalar,typename _StorageIndex>
+Index AmbiVector<_Scalar,_StorageIndex>::nonZeros() const
{
if (m_mode==IsSparse)
return m_llSize;
@@ -130,8 +134,8 @@ _Index AmbiVector<_Scalar,_Index>::nonZeros() const
return m_end - m_start;
}
-template<typename _Scalar,typename _Index>
-void AmbiVector<_Scalar,_Index>::init(double estimatedDensity)
+template<typename _Scalar,typename _StorageIndex>
+void AmbiVector<_Scalar,_StorageIndex>::init(double estimatedDensity)
{
if (estimatedDensity>0.1)
init(IsDense);
@@ -139,8 +143,8 @@ void AmbiVector<_Scalar,_Index>::init(double estimatedDensity)
init(IsSparse);
}
-template<typename _Scalar,typename _Index>
-void AmbiVector<_Scalar,_Index>::init(int mode)
+template<typename _Scalar,typename _StorageIndex>
+void AmbiVector<_Scalar,_StorageIndex>::init(int mode)
{
m_mode = mode;
if (m_mode==IsSparse)
@@ -155,15 +159,15 @@ void AmbiVector<_Scalar,_Index>::init(int mode)
*
* Don't worry, this function is extremely cheap.
*/
-template<typename _Scalar,typename _Index>
-void AmbiVector<_Scalar,_Index>::restart()
+template<typename _Scalar,typename _StorageIndex>
+void AmbiVector<_Scalar,_StorageIndex>::restart()
{
m_llCurrent = m_llStart;
}
/** Set all coefficients of current subvector to zero */
-template<typename _Scalar,typename _Index>
-void AmbiVector<_Scalar,_Index>::setZero()
+template<typename _Scalar,typename _StorageIndex>
+void AmbiVector<_Scalar,_StorageIndex>::setZero()
{
if (m_mode==IsDense)
{
@@ -178,8 +182,8 @@ void AmbiVector<_Scalar,_Index>::setZero()
}
}
-template<typename _Scalar,typename _Index>
-_Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i)
+template<typename _Scalar,typename _StorageIndex>
+_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeffRef(Index i)
{
if (m_mode==IsDense)
return m_buffer[i];
@@ -195,7 +199,7 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i)
m_llCurrent = 0;
++m_llSize;
llElements[0].value = Scalar(0);
- llElements[0].index = i;
+ llElements[0].index = convert_index(i);
llElements[0].next = -1;
return llElements[0].value;
}
@@ -204,7 +208,7 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i)
// this is going to be the new first element of the list
ListEl& el = llElements[m_llSize];
el.value = Scalar(0);
- el.index = i;
+ el.index = convert_index(i);
el.next = m_llStart;
m_llStart = m_llSize;
++m_llSize;
@@ -213,7 +217,7 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i)
}
else
{
- Index nextel = llElements[m_llCurrent].next;
+ StorageIndex nextel = llElements[m_llCurrent].next;
eigen_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index");
while (nextel >= 0 && llElements[nextel].index<=i)
{
@@ -237,7 +241,7 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i)
// let's insert a new coefficient
ListEl& el = llElements[m_llSize];
el.value = Scalar(0);
- el.index = i;
+ el.index = convert_index(i);
el.next = llElements[m_llCurrent].next;
llElements[m_llCurrent].next = m_llSize;
++m_llSize;
@@ -247,8 +251,8 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i)
}
}
-template<typename _Scalar,typename _Index>
-_Scalar& AmbiVector<_Scalar,_Index>::coeff(_Index i)
+template<typename _Scalar,typename _StorageIndex>
+_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeff(Index i)
{
if (m_mode==IsDense)
return m_buffer[i];
@@ -275,8 +279,8 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeff(_Index i)
}
/** Iterator over the nonzero coefficients */
-template<typename _Scalar,typename _Index>
-class AmbiVector<_Scalar,_Index>::Iterator
+template<typename _Scalar,typename _StorageIndex>
+class AmbiVector<_Scalar,_StorageIndex>::Iterator
{
public:
typedef _Scalar Scalar;
@@ -288,7 +292,7 @@ class AmbiVector<_Scalar,_Index>::Iterator
* In practice, all coefficients having a magnitude smaller than \a epsilon
* are skipped.
*/
- Iterator(const AmbiVector& vec, const RealScalar& epsilon = 0)
+ explicit Iterator(const AmbiVector& vec, const RealScalar& epsilon = 0)
: m_vector(vec)
{
using std::abs;
@@ -320,7 +324,7 @@ class AmbiVector<_Scalar,_Index>::Iterator
}
}
- Index index() const { return m_cachedIndex; }
+ StorageIndex index() const { return m_cachedIndex; }
Scalar value() const { return m_cachedValue; }
operator bool() const { return m_cachedIndex>=0; }
@@ -332,7 +336,7 @@ class AmbiVector<_Scalar,_Index>::Iterator
{
do {
++m_cachedIndex;
- } while (m_cachedIndex<m_vector.m_end && abs(m_vector.m_buffer[m_cachedIndex])<m_epsilon);
+ } while (m_cachedIndex<m_vector.m_end && abs(m_vector.m_buffer[m_cachedIndex])<=m_epsilon);
if (m_cachedIndex<m_vector.m_end)
m_cachedValue = m_vector.m_buffer[m_cachedIndex];
else
@@ -343,7 +347,7 @@ class AmbiVector<_Scalar,_Index>::Iterator
ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
do {
m_currentEl = llElements[m_currentEl].next;
- } while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<m_epsilon);
+ } while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon);
if (m_currentEl<0)
{
m_cachedIndex = -1;
@@ -359,9 +363,9 @@ class AmbiVector<_Scalar,_Index>::Iterator
protected:
const AmbiVector& m_vector; // the target vector
- Index m_currentEl; // the current element in sparse/linked-list mode
+ StorageIndex m_currentEl; // the current element in sparse/linked-list mode
RealScalar m_epsilon; // epsilon used to prune zero coefficients
- Index m_cachedIndex; // current coordinate
+ StorageIndex m_cachedIndex; // current coordinate
Scalar m_cachedValue; // current value
bool m_isDense; // mode of the vector
};
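The AmbiVector hunks above split Eigen's former all-purpose `_Index` template parameter into the API-level `Index` and the stored `_StorageIndex`, routing every value that lands in a member or linked-list node through the new `convert_index()` helper; the iterator hunks also relax the pruning test from `<` to `<=` so an epsilon of zero still skips exact zeros. A minimal standalone sketch of the narrowing idea, assuming a 64-bit API index (the names here are illustrative, not Eigen's internals):

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Hypothetical standalone analogue of internal::convert_index<T>:
    // narrow a wide API-level index to the storage index type, checking
    // (as a debug assertion) that the value is actually representable.
    template <typename StorageIndex>
    StorageIndex convert_index(std::int64_t idx)
    {
        assert(idx >= 0 &&
               idx <= static_cast<std::int64_t>(std::numeric_limits<StorageIndex>::max()));
        return static_cast<StorageIndex>(idx);
    }

With a 16-bit storage index, for example, the assertion fires as soon as an index above 32767 is narrowed, instead of the value wrapping silently.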
diff --git a/eigen/Eigen/src/SparseCore/CMakeLists.txt b/eigen/Eigen/src/SparseCore/CMakeLists.txt
deleted file mode 100644
index d860452..0000000
--- a/eigen/Eigen/src/SparseCore/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-FILE(GLOB Eigen_SparseCore_SRCS "*.h")
-
-INSTALL(FILES
- ${Eigen_SparseCore_SRCS}
- DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/SparseCore COMPONENT Devel
- )
diff --git a/eigen/Eigen/src/SparseCore/CompressedStorage.h b/eigen/Eigen/src/SparseCore/CompressedStorage.h
index 34cad3d..d89fa0d 100644
--- a/eigen/Eigen/src/SparseCore/CompressedStorage.h
+++ b/eigen/Eigen/src/SparseCore/CompressedStorage.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -18,13 +18,13 @@ namespace internal {
* Stores a sparse set of values as a list of values and a list of indices.
*
*/
-template<typename _Scalar,typename _Index>
+template<typename _Scalar,typename _StorageIndex>
class CompressedStorage
{
public:
typedef _Scalar Scalar;
- typedef _Index Index;
+ typedef _StorageIndex StorageIndex;
protected:
@@ -36,7 +36,7 @@ class CompressedStorage
: m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
{}
- CompressedStorage(size_t size)
+ explicit CompressedStorage(Index size)
: m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
{
resize(size);
@@ -51,8 +51,11 @@ class CompressedStorage
CompressedStorage& operator=(const CompressedStorage& other)
{
resize(other.size());
- internal::smart_copy(other.m_values, other.m_values + m_size, m_values);
- internal::smart_copy(other.m_indices, other.m_indices + m_size, m_indices);
+ if(other.size()>0)
+ {
+ internal::smart_copy(other.m_values, other.m_values + m_size, m_values);
+ internal::smart_copy(other.m_indices, other.m_indices + m_size, m_indices);
+ }
return *this;
}
@@ -70,9 +73,9 @@ class CompressedStorage
delete[] m_indices;
}
- void reserve(size_t size)
+ void reserve(Index size)
{
- size_t newAllocatedSize = m_size + size;
+ Index newAllocatedSize = m_size + size;
if (newAllocatedSize > m_allocatedSize)
reallocate(newAllocatedSize);
}
@@ -83,44 +86,40 @@ class CompressedStorage
reallocate(m_size);
}
- void resize(size_t size, double reserveSizeFactor = 0)
+ void resize(Index size, double reserveSizeFactor = 0)
{
if (m_allocatedSize<size)
- reallocate(size + size_t(reserveSizeFactor*double(size)));
+ {
+ Index realloc_size = (std::min<Index>)(NumTraits<StorageIndex>::highest(), size + Index(reserveSizeFactor*double(size)));
+ if(realloc_size<size)
+ internal::throw_std_bad_alloc();
+ reallocate(realloc_size);
+ }
m_size = size;
}
void append(const Scalar& v, Index i)
{
- Index id = static_cast<Index>(m_size);
+ Index id = m_size;
resize(m_size+1, 1);
m_values[id] = v;
- m_indices[id] = i;
+ m_indices[id] = internal::convert_index<StorageIndex>(i);
}
- inline size_t size() const { return m_size; }
- inline size_t allocatedSize() const { return m_allocatedSize; }
+ inline Index size() const { return m_size; }
+ inline Index allocatedSize() const { return m_allocatedSize; }
inline void clear() { m_size = 0; }
const Scalar* valuePtr() const { return m_values; }
Scalar* valuePtr() { return m_values; }
- const Index* indexPtr() const { return m_indices; }
- Index* indexPtr() { return m_indices; }
-
- inline Scalar& value(size_t i) { return m_values[i]; }
- inline const Scalar& value(size_t i) const { return m_values[i]; }
+ const StorageIndex* indexPtr() const { return m_indices; }
+ StorageIndex* indexPtr() { return m_indices; }
- inline Index& index(size_t i) { return m_indices[i]; }
- inline const Index& index(size_t i) const { return m_indices[i]; }
+ inline Scalar& value(Index i) { eigen_internal_assert(m_values!=0); return m_values[i]; }
+ inline const Scalar& value(Index i) const { eigen_internal_assert(m_values!=0); return m_values[i]; }
- static CompressedStorage Map(Index* indices, Scalar* values, size_t size)
- {
- CompressedStorage res;
- res.m_indices = indices;
- res.m_values = values;
- res.m_allocatedSize = res.m_size = size;
- return res;
- }
+ inline StorageIndex& index(Index i) { eigen_internal_assert(m_indices!=0); return m_indices[i]; }
+ inline const StorageIndex& index(Index i) const { eigen_internal_assert(m_indices!=0); return m_indices[i]; }
/** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */
inline Index searchLowerIndex(Index key) const
@@ -129,17 +128,17 @@ class CompressedStorage
}
/** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */
- inline Index searchLowerIndex(size_t start, size_t end, Index key) const
+ inline Index searchLowerIndex(Index start, Index end, Index key) const
{
while(end>start)
{
- size_t mid = (end+start)>>1;
+ Index mid = (end+start)>>1;
if (m_indices[mid]<key)
start = mid+1;
else
end = mid;
}
- return static_cast<Index>(start);
+ return start;
}
/** \returns the stored value at index \a key
@@ -152,20 +151,20 @@ class CompressedStorage
return m_values[m_size-1];
// ^^ optimization: let's first check if it is the last coefficient
// (very common in high level algorithms)
- const size_t id = searchLowerIndex(0,m_size-1,key);
+ const Index id = searchLowerIndex(0,m_size-1,key);
return ((id<m_size) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
}
/** Like at(), but the search is performed in the range [start,end) */
- inline Scalar atInRange(size_t start, size_t end, Index key, const Scalar& defaultValue = Scalar(0)) const
+ inline Scalar atInRange(Index start, Index end, Index key, const Scalar &defaultValue = Scalar(0)) const
{
if (start>=end)
- return Scalar(0);
+ return defaultValue;
else if (end>start && key==m_indices[end-1])
return m_values[end-1];
// ^^ optimization: let's first check if it is the last coefficient
// (very common in high level algorithms)
- const size_t id = searchLowerIndex(start,end-1,key);
+ const Index id = searchLowerIndex(start,end-1,key);
return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
}
@@ -174,16 +173,35 @@ class CompressedStorage
* such that the keys are sorted. */
inline Scalar& atWithInsertion(Index key, const Scalar& defaultValue = Scalar(0))
{
- size_t id = searchLowerIndex(0,m_size,key);
+ Index id = searchLowerIndex(0,m_size,key);
if (id>=m_size || m_indices[id]!=key)
{
- resize(m_size+1,1);
- for (size_t j=m_size-1; j>id; --j)
+ if (m_allocatedSize<m_size+1)
+ {
+ m_allocatedSize = 2*(m_size+1);
+ internal::scoped_array<Scalar> newValues(m_allocatedSize);
+ internal::scoped_array<StorageIndex> newIndices(m_allocatedSize);
+
+ // copy first chunk
+ internal::smart_copy(m_values, m_values +id, newValues.ptr());
+ internal::smart_copy(m_indices, m_indices+id, newIndices.ptr());
+
+ // copy the rest
+ if(m_size>id)
+ {
+ internal::smart_copy(m_values +id, m_values +m_size, newValues.ptr() +id+1);
+ internal::smart_copy(m_indices+id, m_indices+m_size, newIndices.ptr()+id+1);
+ }
+ std::swap(m_values,newValues.ptr());
+ std::swap(m_indices,newIndices.ptr());
+ }
+ else if(m_size>id)
{
- m_indices[j] = m_indices[j-1];
- m_values[j] = m_values[j-1];
+ internal::smart_memmove(m_values +id, m_values +m_size, m_values +id+1);
+ internal::smart_memmove(m_indices+id, m_indices+m_size, m_indices+id+1);
}
- m_indices[id] = key;
+ m_size++;
+ m_indices[id] = internal::convert_index<StorageIndex>(key);
m_values[id] = defaultValue;
}
return m_values[id];
@@ -191,9 +209,9 @@ class CompressedStorage
void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
- size_t k = 0;
- size_t n = size();
- for (size_t i=0; i<n; ++i)
+ Index k = 0;
+ Index n = size();
+ for (Index i=0; i<n; ++i)
{
if (!internal::isMuchSmallerThan(value(i), reference, epsilon))
{
@@ -207,29 +225,29 @@ class CompressedStorage
protected:
- inline void reallocate(size_t size)
+ inline void reallocate(Index size)
{
- Scalar* newValues = new Scalar[size];
- Index* newIndices = new Index[size];
- size_t copySize = (std::min)(size, m_size);
- // copy
+ #ifdef EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN
+ EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN
+ #endif
+ eigen_internal_assert(size!=m_allocatedSize);
+ internal::scoped_array<Scalar> newValues(size);
+ internal::scoped_array<StorageIndex> newIndices(size);
+ Index copySize = (std::min)(size, m_size);
if (copySize>0) {
- internal::smart_copy(m_values, m_values+copySize, newValues);
- internal::smart_copy(m_indices, m_indices+copySize, newIndices);
+ internal::smart_copy(m_values, m_values+copySize, newValues.ptr());
+ internal::smart_copy(m_indices, m_indices+copySize, newIndices.ptr());
}
- // delete old stuff
- delete[] m_values;
- delete[] m_indices;
- m_values = newValues;
- m_indices = newIndices;
+ std::swap(m_values,newValues.ptr());
+ std::swap(m_indices,newIndices.ptr());
m_allocatedSize = size;
}
protected:
Scalar* m_values;
- Index* m_indices;
- size_t m_size;
- size_t m_allocatedSize;
+ StorageIndex* m_indices;
+ Index m_size;
+ Index m_allocatedSize;
};
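The rewritten `CompressedStorage::resize()` above no longer grows blindly: the reserve-inflated target is clamped to `NumTraits<StorageIndex>::highest()`, and if even the requested size cannot be represented it calls `internal::throw_std_bad_alloc()`. A sketch of that guard in isolation (a hypothetical helper, not Eigen's code):

    #include <algorithm>
    #include <cstdint>
    #include <limits>
    #include <new>

    // Hypothetical sketch of the overflow guard added to resize(): the
    // reserve-inflated allocation size is clamped to the largest
    // representable StorageIndex, and if even the requested size does not
    // fit, allocation fails loudly instead of silently truncating indices.
    template <typename StorageIndex>
    std::int64_t guarded_alloc_size(std::int64_t size, double reserveSizeFactor)
    {
        const std::int64_t highest =
            static_cast<std::int64_t>(std::numeric_limits<StorageIndex>::max());
        std::int64_t realloc_size =
            std::min(highest, size + static_cast<std::int64_t>(reserveSizeFactor * double(size)));
        if (realloc_size < size)             // only possible when highest < size
            throw std::bad_alloc();          // stands in for internal::throw_std_bad_alloc()
        return realloc_size;
    }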
diff --git a/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
index 5c320e2..492eb0a 100644
--- a/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
+++ b/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -15,27 +15,31 @@ namespace Eigen {
namespace internal {
template<typename Lhs, typename Rhs, typename ResultType>
-static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, bool sortedInsertion = false)
{
typedef typename remove_all<Lhs>::type::Scalar Scalar;
- typedef typename remove_all<Lhs>::type::Index Index;
// make sure to call innerSize/outerSize since we fake the storage order.
Index rows = lhs.innerSize();
Index cols = rhs.outerSize();
eigen_assert(lhs.outerSize() == rhs.innerSize());
-
- std::vector<bool> mask(rows,false);
- Matrix<Scalar,Dynamic,1> values(rows);
- Matrix<Index,Dynamic,1> indices(rows);
-
+
+ ei_declare_aligned_stack_constructed_variable(bool, mask, rows, 0);
+ ei_declare_aligned_stack_constructed_variable(Scalar, values, rows, 0);
+ ei_declare_aligned_stack_constructed_variable(Index, indices, rows, 0);
+
+ std::memset(mask,0,sizeof(bool)*rows);
+
+ evaluator<Lhs> lhsEval(lhs);
+ evaluator<Rhs> rhsEval(rhs);
+
// estimate the number of non zero entries
// given a rhs column containing Y non zeros, we assume that the respective Y columns
// of the lhs differs in average of one non zeros, thus the number of non zeros for
// the product of a rhs column with the lhs is X+Y where X is the average number of non zero
// per column of the lhs.
// Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
- Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros();
+ Index estimated_nnz_prod = lhsEval.nonZerosEstimate() + rhsEval.nonZerosEstimate();
res.setZero();
res.reserve(Index(estimated_nnz_prod));
@@ -45,11 +49,11 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
res.startVec(j);
Index nnz = 0;
- for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
+ for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
{
Scalar y = rhsIt.value();
Index k = rhsIt.index();
- for (typename Lhs::InnerIterator lhsIt(lhs, k); lhsIt; ++lhsIt)
+ for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt)
{
Index i = lhsIt.index();
Scalar x = lhsIt.value();
@@ -64,53 +68,51 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
values[i] += x * y;
}
}
-
- // unordered insertion
- for(Index k=0; k<nnz; ++k)
- {
- Index i = indices[k];
- res.insertBackByOuterInnerUnordered(j,i) = values[i];
- mask[i] = false;
- }
-
-#if 0
- // alternative ordered insertion code:
-
- Index t200 = rows/(log2(200)*1.39);
- Index t = (rows*100)/139;
-
- // FIXME reserve nnz non zeros
- // FIXME implement fast sort algorithms for very small nnz
- // if the result is sparse enough => use a quick sort
- // otherwise => loop through the entire vector
- // In order to avoid to perform an expensive log2 when the
- // result is clearly very sparse we use a linear bound up to 200.
- //if((nnz<200 && nnz<t200) || nnz * log2(nnz) < t)
- //res.startVec(j);
- if(true)
+ if(!sortedInsertion)
{
- if(nnz>1) std::sort(indices.data(),indices.data()+nnz);
+ // unordered insertion
for(Index k=0; k<nnz; ++k)
{
Index i = indices[k];
- res.insertBackByOuterInner(j,i) = values[i];
+ res.insertBackByOuterInnerUnordered(j,i) = values[i];
mask[i] = false;
}
}
else
{
- // dense path
- for(Index i=0; i<rows; ++i)
+ // alternative ordered insertion code:
+ const Index t200 = rows/11; // 11 == (log2(200)*1.39)
+ const Index t = (rows*100)/139;
+
+ // FIXME reserve nnz non zeros
+ // FIXME implement faster sorting algorithms for very small nnz
+ // if the result is sparse enough => use a quick sort
+ // otherwise => loop through the entire vector
+ // In order to avoid to perform an expensive log2 when the
+ // result is clearly very sparse we use a linear bound up to 200.
+ if((nnz<200 && nnz<t200) || nnz * numext::log2(int(nnz)) < t)
{
- if(mask[i])
+ if(nnz>1) std::sort(indices,indices+nnz);
+ for(Index k=0; k<nnz; ++k)
{
- mask[i] = false;
+ Index i = indices[k];
res.insertBackByOuterInner(j,i) = values[i];
+ mask[i] = false;
+ }
+ }
+ else
+ {
+ // dense path
+ for(Index i=0; i<rows; ++i)
+ {
+ if(mask[i])
+ {
+ mask[i] = false;
+ res.insertBackByOuterInner(j,i) = values[i];
+ }
}
}
}
-#endif
-
}
res.finalize();
}
@@ -134,13 +136,28 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
- ColMajorMatrix resCol(lhs.rows(),rhs.cols());
- internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
- // sort the non zeros:
- RowMajorMatrix resRow(resCol);
- res = resRow;
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrixAux;
+ typedef typename sparse_eval<ColMajorMatrixAux,ResultType::RowsAtCompileTime,ResultType::ColsAtCompileTime,ColMajorMatrixAux::Flags>::type ColMajorMatrix;
+
+ // If the result is tall and thin (in the extreme case a column vector)
+ // then it is faster to sort the coefficients inplace instead of transposing twice.
+ // FIXME, the following heuristic is probably not very good.
+ if(lhs.rows()>rhs.cols())
+ {
+ ColMajorMatrix resCol(lhs.rows(),rhs.cols());
+ // perform sorted insertion
+ internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol, true);
+ res = resCol.markAsRValue();
+ }
+ else
+ {
+ ColMajorMatrixAux resCol(lhs.rows(),rhs.cols());
+      // resort to transposing to sort the entries
+ internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrixAux>(lhs, rhs, resCol, false);
+ RowMajorMatrix resRow(resCol);
+ res = resRow.markAsRValue();
+ }
}
};
@@ -149,7 +166,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,C
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
RowMajorMatrix rhsRow = rhs;
RowMajorMatrix resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<RowMajorMatrix,Lhs,RowMajorMatrix>(rhsRow, lhs, resRow);
@@ -162,7 +179,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
RowMajorMatrix lhsRow = lhs;
RowMajorMatrix resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Rhs,RowMajorMatrix,RowMajorMatrix>(rhs, lhsRow, resRow);
@@ -175,7 +192,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
RowMajorMatrix resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
res = resRow;
@@ -190,7 +207,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
ColMajorMatrix resCol(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
res = resCol;
@@ -202,7 +219,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,C
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
ColMajorMatrix lhsCol = lhs;
ColMajorMatrix resCol(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<ColMajorMatrix,Rhs,ColMajorMatrix>(lhsCol, rhs, resCol);
@@ -215,7 +232,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
ColMajorMatrix rhsCol = rhs;
ColMajorMatrix resCol(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Lhs,ColMajorMatrix,ColMajorMatrix>(lhs, rhsCol, resCol);
@@ -228,8 +245,8 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
RowMajorMatrix resRow(lhs.rows(),rhs.cols());
internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
// sort the non zeros:
@@ -240,6 +257,89 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,R
} // end namespace internal
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, typename ResultType>
+static void sparse_sparse_to_dense_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+{
+ typedef typename remove_all<Lhs>::type::Scalar Scalar;
+ Index cols = rhs.outerSize();
+ eigen_assert(lhs.outerSize() == rhs.innerSize());
+
+ evaluator<Lhs> lhsEval(lhs);
+ evaluator<Rhs> rhsEval(rhs);
+
+ for (Index j=0; j<cols; ++j)
+ {
+ for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
+ {
+ Scalar y = rhsIt.value();
+ Index k = rhsIt.index();
+ for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt)
+ {
+ Index i = lhsIt.index();
+ Scalar x = lhsIt.value();
+ res.coeffRef(i,j) += x * y;
+ }
+ }
+ }
+}
+
+
+} // end namespace internal
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, typename ResultType,
+ int LhsStorageOrder = (traits<Lhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,
+ int RhsStorageOrder = (traits<Rhs>::Flags&RowMajorBit) ? RowMajor : ColMajor>
+struct sparse_sparse_to_dense_product_selector;
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ internal::sparse_sparse_to_dense_product_impl<Lhs,Rhs,ResultType>(lhs, rhs, res);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
+ ColMajorMatrix lhsCol(lhs);
+ internal::sparse_sparse_to_dense_product_impl<ColMajorMatrix,Rhs,ResultType>(lhsCol, rhs, res);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
+ ColMajorMatrix rhsCol(rhs);
+ internal::sparse_sparse_to_dense_product_impl<Lhs,ColMajorMatrix,ResultType>(lhs, rhsCol, res);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ Transpose<ResultType> trRes(res);
+ internal::sparse_sparse_to_dense_product_impl<Rhs,Lhs,Transpose<ResultType> >(rhs, lhs, trRes);
+ }
+};
+
+
+} // end namespace internal
+
} // end namespace Eigen
#endif // EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
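The ordered-insertion branch resurrected above (previously dead `#if 0` code) picks between two ways of ordering a result column: `std::sort` on the `nnz` collected indices, costing roughly `nnz*log2(nnz)`, or a dense sweep over all `rows` entries; the constants 100/139 and 11 ≈ log2(200)·1.39 fold in the measured cost factors. A hypothetical restatement of that predicate:

    #include <cmath>
    #include <cstdint>

    // Hypothetical restatement of the per-column heuristic used when
    // sortedInsertion is enabled: below the crossover, sort the collected
    // indices; above it, sweep the whole dense column instead.
    bool use_sort_path(std::int64_t nnz, std::int64_t rows)
    {
        if (nnz < 2) return true;                  // trivially sorted
        const std::int64_t t200 = rows / 11;       // linear bound, 11 ~ log2(200)*1.39
        const std::int64_t t    = (rows * 100) / 139;
        return (nnz < 200 && nnz < t200) ||
               double(nnz) * std::log2(double(nnz)) < double(t);
    }

The linear bound up to 200 exists so that very sparse columns never pay for an expensive `log2` evaluation at all.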
diff --git a/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h b/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h
index ab1a266..67718c8 100644
--- a/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h
+++ b/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -10,9 +10,10 @@
#ifndef EIGEN_MAPPED_SPARSEMATRIX_H
#define EIGEN_MAPPED_SPARSEMATRIX_H
-namespace Eigen {
+namespace Eigen {
-/** \class MappedSparseMatrix
+/** \deprecated Use Map<SparseMatrix<> >
+ * \class MappedSparseMatrix
*
* \brief Sparse matrix
*
@@ -22,160 +23,45 @@ namespace Eigen {
*
*/
namespace internal {
-template<typename _Scalar, int _Flags, typename _Index>
-struct traits<MappedSparseMatrix<_Scalar, _Flags, _Index> > : traits<SparseMatrix<_Scalar, _Flags, _Index> >
+template<typename _Scalar, int _Flags, typename _StorageIndex>
+struct traits<MappedSparseMatrix<_Scalar, _Flags, _StorageIndex> > : traits<SparseMatrix<_Scalar, _Flags, _StorageIndex> >
{};
-}
+} // end namespace internal
-template<typename _Scalar, int _Flags, typename _Index>
+template<typename _Scalar, int _Flags, typename _StorageIndex>
class MappedSparseMatrix
- : public SparseMatrixBase<MappedSparseMatrix<_Scalar, _Flags, _Index> >
+ : public Map<SparseMatrix<_Scalar, _Flags, _StorageIndex> >
{
- public:
- EIGEN_SPARSE_PUBLIC_INTERFACE(MappedSparseMatrix)
- enum { IsRowMajor = Base::IsRowMajor };
-
- protected:
-
- Index m_outerSize;
- Index m_innerSize;
- Index m_nnz;
- Index* m_outerIndex;
- Index* m_innerIndices;
- Scalar* m_values;
+ typedef Map<SparseMatrix<_Scalar, _Flags, _StorageIndex> > Base;
public:
-
- inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
- inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
- inline Index innerSize() const { return m_innerSize; }
- inline Index outerSize() const { return m_outerSize; }
- bool isCompressed() const { return true; }
-
- //----------------------------------------
- // direct access interface
- inline const Scalar* valuePtr() const { return m_values; }
- inline Scalar* valuePtr() { return m_values; }
-
- inline const Index* innerIndexPtr() const { return m_innerIndices; }
- inline Index* innerIndexPtr() { return m_innerIndices; }
-
- inline const Index* outerIndexPtr() const { return m_outerIndex; }
- inline Index* outerIndexPtr() { return m_outerIndex; }
- //----------------------------------------
-
- inline Scalar coeff(Index row, Index col) const
- {
- const Index outer = IsRowMajor ? row : col;
- const Index inner = IsRowMajor ? col : row;
+ typedef typename Base::StorageIndex StorageIndex;
+ typedef typename Base::Scalar Scalar;
- Index start = m_outerIndex[outer];
- Index end = m_outerIndex[outer+1];
- if (start==end)
- return Scalar(0);
- else if (end>0 && inner==m_innerIndices[end-1])
- return m_values[end-1];
- // ^^ optimization: let's first check if it is the last coefficient
- // (very common in high level algorithms)
-
- const Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner);
- const Index id = r-&m_innerIndices[0];
- return ((*r==inner) && (id<end)) ? m_values[id] : Scalar(0);
- }
-
- inline Scalar& coeffRef(Index row, Index col)
- {
- const Index outer = IsRowMajor ? row : col;
- const Index inner = IsRowMajor ? col : row;
-
- Index start = m_outerIndex[outer];
- Index end = m_outerIndex[outer+1];
- eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
- eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient");
- Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner);
- const Index id = r-&m_innerIndices[0];
- eigen_assert((*r==inner) && (id<end) && "coeffRef cannot be called on a zero coefficient");
- return m_values[id];
- }
-
- class InnerIterator;
- class ReverseInnerIterator;
-
- /** \returns the number of non zero coefficients */
- inline Index nonZeros() const { return m_nnz; }
-
- inline MappedSparseMatrix(Index rows, Index cols, Index nnz, Index* outerIndexPtr, Index* innerIndexPtr, Scalar* valuePtr)
- : m_outerSize(IsRowMajor?rows:cols), m_innerSize(IsRowMajor?cols:rows), m_nnz(nnz), m_outerIndex(outerIndexPtr),
- m_innerIndices(innerIndexPtr), m_values(valuePtr)
+ inline MappedSparseMatrix(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr, StorageIndex* innerIndexPtr, Scalar* valuePtr, StorageIndex* innerNonZeroPtr = 0)
+ : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZeroPtr)
{}
/** Empty destructor */
inline ~MappedSparseMatrix() {}
};
-template<typename Scalar, int _Flags, typename _Index>
-class MappedSparseMatrix<Scalar,_Flags,_Index>::InnerIterator
-{
- public:
- InnerIterator(const MappedSparseMatrix& mat, Index outer)
- : m_matrix(mat),
- m_outer(outer),
- m_id(mat.outerIndexPtr()[outer]),
- m_start(m_id),
- m_end(mat.outerIndexPtr()[outer+1])
- {}
-
- inline InnerIterator& operator++() { m_id++; return *this; }
-
- inline Scalar value() const { return m_matrix.valuePtr()[m_id]; }
- inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix.valuePtr()[m_id]); }
-
- inline Index index() const { return m_matrix.innerIndexPtr()[m_id]; }
- inline Index row() const { return IsRowMajor ? m_outer : index(); }
- inline Index col() const { return IsRowMajor ? index() : m_outer; }
-
- inline operator bool() const { return (m_id < m_end) && (m_id>=m_start); }
-
- protected:
- const MappedSparseMatrix& m_matrix;
- const Index m_outer;
- Index m_id;
- const Index m_start;
- const Index m_end;
-};
+namespace internal {
-template<typename Scalar, int _Flags, typename _Index>
-class MappedSparseMatrix<Scalar,_Flags,_Index>::ReverseInnerIterator
+template<typename _Scalar, int _Options, typename _StorageIndex>
+struct evaluator<MappedSparseMatrix<_Scalar,_Options,_StorageIndex> >
+ : evaluator<SparseCompressedBase<MappedSparseMatrix<_Scalar,_Options,_StorageIndex> > >
{
- public:
- ReverseInnerIterator(const MappedSparseMatrix& mat, Index outer)
- : m_matrix(mat),
- m_outer(outer),
- m_id(mat.outerIndexPtr()[outer+1]),
- m_start(mat.outerIndexPtr()[outer]),
- m_end(m_id)
- {}
-
- inline ReverseInnerIterator& operator--() { m_id--; return *this; }
-
- inline Scalar value() const { return m_matrix.valuePtr()[m_id-1]; }
- inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix.valuePtr()[m_id-1]); }
-
- inline Index index() const { return m_matrix.innerIndexPtr()[m_id-1]; }
- inline Index row() const { return IsRowMajor ? m_outer : index(); }
- inline Index col() const { return IsRowMajor ? index() : m_outer; }
-
- inline operator bool() const { return (m_id <= m_end) && (m_id>m_start); }
-
- protected:
- const MappedSparseMatrix& m_matrix;
- const Index m_outer;
- Index m_id;
- const Index m_start;
- const Index m_end;
+ typedef MappedSparseMatrix<_Scalar,_Options,_StorageIndex> XprType;
+ typedef evaluator<SparseCompressedBase<XprType> > Base;
+
+ evaluator() : Base() {}
+ explicit evaluator(const XprType &mat) : Base(mat) {}
};
+}
+
} // end namespace Eigen
#endif // EIGEN_MAPPED_SPARSEMATRIX_H
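After this rewrite, `MappedSparseMatrix` is only a deprecated shell over `Map<SparseMatrix<> >`, which now carries the whole mapping logic, including the optional `innerNonZeroPtr` for uncompressed storage. A minimal usage sketch of the underlying `Map`, with illustrative data:

    #include <Eigen/SparseCore>

    // Minimal sketch: viewing pre-existing compressed-column (CSC) arrays
    // as a sparse matrix through Map<SparseMatrix<> >, the class that this
    // commit makes the single implementation behind MappedSparseMatrix.
    int main()
    {
        // 3x3 matrix with nonzeros (0,0)=1, (2,1)=2, (1,2)=3 in CSC layout.
        int    outer[4]  = {0, 1, 2, 3};   // column start offsets
        int    inner[3]  = {0, 2, 1};      // row indices of the nonzeros
        double values[3] = {1.0, 2.0, 3.0};

        Eigen::Map<Eigen::SparseMatrix<double> > m(3, 3, 3, outer, inner, values);
        return m.coeff(2, 1) == 2.0 ? 0 : 1;
    }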
diff --git a/eigen/Eigen/src/SparseCore/SparseAssign.h b/eigen/Eigen/src/SparseCore/SparseAssign.h
new file mode 100644
index 0000000..18352a8
--- /dev/null
+++ b/eigen/Eigen/src/SparseCore/SparseAssign.h
@@ -0,0 +1,216 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEASSIGN_H
+#define EIGEN_SPARSEASSIGN_H
+
+namespace Eigen {
+
+template<typename Derived>
+template<typename OtherDerived>
+Derived& SparseMatrixBase<Derived>::operator=(const EigenBase<OtherDerived> &other)
+{
+ internal::call_assignment_no_alias(derived(), other.derived());
+ return derived();
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+Derived& SparseMatrixBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
+{
+ // TODO use the evaluator mechanism
+ other.evalTo(derived());
+ return derived();
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+inline Derived& SparseMatrixBase<Derived>::operator=(const SparseMatrixBase<OtherDerived>& other)
+{
+ // by default sparse evaluation do not alias, so we can safely bypass the generic call_assignment routine
+ internal::Assignment<Derived,OtherDerived,internal::assign_op<Scalar,typename OtherDerived::Scalar> >
+ ::run(derived(), other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>());
+ return derived();
+}
+
+template<typename Derived>
+inline Derived& SparseMatrixBase<Derived>::operator=(const Derived& other)
+{
+ internal::call_assignment_no_alias(derived(), other.derived());
+ return derived();
+}
+
+namespace internal {
+
+template<>
+struct storage_kind_to_evaluator_kind<Sparse> {
+ typedef IteratorBased Kind;
+};
+
+template<>
+struct storage_kind_to_shape<Sparse> {
+ typedef SparseShape Shape;
+};
+
+struct Sparse2Sparse {};
+struct Sparse2Dense {};
+
+template<> struct AssignmentKind<SparseShape, SparseShape> { typedef Sparse2Sparse Kind; };
+template<> struct AssignmentKind<SparseShape, SparseTriangularShape> { typedef Sparse2Sparse Kind; };
+template<> struct AssignmentKind<DenseShape, SparseShape> { typedef Sparse2Dense Kind; };
+template<> struct AssignmentKind<DenseShape, SparseTriangularShape> { typedef Sparse2Dense Kind; };
+
+
+template<typename DstXprType, typename SrcXprType>
+void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)
+{
+ typedef typename DstXprType::Scalar Scalar;
+ typedef internal::evaluator<DstXprType> DstEvaluatorType;
+ typedef internal::evaluator<SrcXprType> SrcEvaluatorType;
+
+ SrcEvaluatorType srcEvaluator(src);
+
+ const bool transpose = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit);
+ const Index outerEvaluationSize = (SrcEvaluatorType::Flags&RowMajorBit) ? src.rows() : src.cols();
+ if ((!transpose) && src.isRValue())
+ {
+ // eval without temporary
+ dst.resize(src.rows(), src.cols());
+ dst.setZero();
+ dst.reserve((std::max)(src.rows(),src.cols())*2);
+ for (Index j=0; j<outerEvaluationSize; ++j)
+ {
+ dst.startVec(j);
+ for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)
+ {
+ Scalar v = it.value();
+ dst.insertBackByOuterInner(j,it.index()) = v;
+ }
+ }
+ dst.finalize();
+ }
+ else
+ {
+ // eval through a temporary
+ eigen_assert(( ((internal::traits<DstXprType>::SupportedAccessPatterns & OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
+ (!((DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit)))) &&
+ "the transpose operation is supposed to be handled in SparseMatrix::operator=");
+
+ enum { Flip = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit) };
+
+
+ DstXprType temp(src.rows(), src.cols());
+
+ temp.reserve((std::max)(src.rows(),src.cols())*2);
+ for (Index j=0; j<outerEvaluationSize; ++j)
+ {
+ temp.startVec(j);
+ for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)
+ {
+ Scalar v = it.value();
+ temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v;
+ }
+ }
+ temp.finalize();
+
+ dst = temp.markAsRValue();
+ }
+}
+
+// Generic Sparse to Sparse assignment
+template< typename DstXprType, typename SrcXprType, typename Functor>
+struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Sparse>
+{
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
+ {
+ assign_sparse_to_sparse(dst.derived(), src.derived());
+ }
+};
+
+// Generic Sparse to Dense assignment
+template< typename DstXprType, typename SrcXprType, typename Functor>
+struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense>
+{
+ static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
+ {
+ if(internal::is_same<Functor,internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> >::value)
+ dst.setZero();
+
+ internal::evaluator<SrcXprType> srcEval(src);
+ resize_if_allowed(dst, src, func);
+ internal::evaluator<DstXprType> dstEval(dst);
+
+ const Index outerEvaluationSize = (internal::evaluator<SrcXprType>::Flags&RowMajorBit) ? src.rows() : src.cols();
+ for (Index j=0; j<outerEvaluationSize; ++j)
+ for (typename internal::evaluator<SrcXprType>::InnerIterator i(srcEval,j); i; ++i)
+ func.assignCoeff(dstEval.coeffRef(i.row(),i.col()), i.value());
+ }
+};
+
+// Specialization for "dst = dec.solve(rhs)"
+// NOTE we need to specialize it for Sparse2Sparse to avoid ambiguous specialization error
+template<typename DstXprType, typename DecType, typename RhsType, typename Scalar>
+struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar,Scalar>, Sparse2Sparse>
+{
+ typedef Solve<DecType,RhsType> SrcXprType;
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
+ {
+ Index dstRows = src.rows();
+ Index dstCols = src.cols();
+ if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
+ dst.resize(dstRows, dstCols);
+
+ src.dec()._solve_impl(src.rhs(), dst);
+ }
+};
+
+struct Diagonal2Sparse {};
+
+template<> struct AssignmentKind<SparseShape,DiagonalShape> { typedef Diagonal2Sparse Kind; };
+
+template< typename DstXprType, typename SrcXprType, typename Functor>
+struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Sparse>
+{
+ typedef typename DstXprType::StorageIndex StorageIndex;
+ typedef typename DstXprType::Scalar Scalar;
+ typedef Array<StorageIndex,Dynamic,1> ArrayXI;
+ typedef Array<Scalar,Dynamic,1> ArrayXS;
+ template<int Options>
+ static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
+ {
+ Index dstRows = src.rows();
+ Index dstCols = src.cols();
+ if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
+ dst.resize(dstRows, dstCols);
+
+ Index size = src.diagonal().size();
+ dst.makeCompressed();
+ dst.resizeNonZeros(size);
+ Map<ArrayXI>(dst.innerIndexPtr(), size).setLinSpaced(0,StorageIndex(size)-1);
+ Map<ArrayXI>(dst.outerIndexPtr(), size+1).setLinSpaced(0,StorageIndex(size));
+ Map<ArrayXS>(dst.valuePtr(), size) = src.diagonal();
+ }
+
+ template<typename DstDerived>
+ static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
+ {
+ dst.diagonal() = src.diagonal();
+ }
+
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
+ { dst.diagonal() += src.diagonal(); }
+
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
+ { dst.diagonal() -= src.diagonal(); }
+};
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEASSIGN_H
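The new `Sparse2Dense` assignment above zeroes the destination once (for plain assignment only) and then touches just the stored nonzeros through the source evaluator's `InnerIterator`. A minimal free-standing sketch of that traversal, assuming a column-major `SparseMatrix<double>` source and using the public iterator rather than the internal evaluator:

    #include <Eigen/Dense>
    #include <Eigen/SparseCore>

    // Minimal sketch of the Sparse2Dense path: zero the destination once,
    // then visit only the stored nonzeros, which is the same iteration
    // order the generic Assignment specialization uses.
    void sparse_to_dense(const Eigen::SparseMatrix<double>& src, Eigen::MatrixXd& dst)
    {
        dst.setZero(src.rows(), src.cols());
        for (int j = 0; j < src.outerSize(); ++j)
            for (Eigen::SparseMatrix<double>::InnerIterator it(src, j); it; ++it)
                dst(it.row(), it.col()) = it.value();
    }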
diff --git a/eigen/Eigen/src/SparseCore/SparseBlock.h b/eigen/Eigen/src/SparseCore/SparseBlock.h
index ac4124b..511e92b 100644
--- a/eigen/Eigen/src/SparseCore/SparseBlock.h
+++ b/eigen/Eigen/src/SparseCore/SparseBlock.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -10,8 +10,9 @@
#ifndef EIGEN_SPARSE_BLOCK_H
#define EIGEN_SPARSE_BLOCK_H
-namespace Eigen {
+namespace Eigen {
+// Subset of columns or rows
template<typename XprType, int BlockRows, int BlockCols>
class BlockImpl<XprType,BlockRows,BlockCols,true,Sparse>
: public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,true> >
@@ -22,185 +23,189 @@ public:
enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
protected:
enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
+ typedef SparseMatrixBase<BlockType> Base;
+ using Base::convert_index;
public:
EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
-
- class InnerIterator: public XprType::InnerIterator
- {
- typedef typename BlockImpl::Index Index;
- public:
- inline InnerIterator(const BlockType& xpr, Index outer)
- : XprType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
- {}
- inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
- inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
- protected:
- Index m_outer;
- };
- class ReverseInnerIterator: public XprType::ReverseInnerIterator
- {
- typedef typename BlockImpl::Index Index;
- public:
- inline ReverseInnerIterator(const BlockType& xpr, Index outer)
- : XprType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
- {}
- inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
- inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
- protected:
- Index m_outer;
- };
- inline BlockImpl(const XprType& xpr, int i)
- : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize)
+ inline BlockImpl(XprType& xpr, Index i)
+ : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
{}
- inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols)
- : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols)
+ inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
+ : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
{}
-
- inline const Scalar coeff(int row, int col) const
+
+ EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+
+ Index nonZeros() const
{
- return m_matrix.coeff(row + IsRowMajor ? m_outerStart : 0, col +IsRowMajor ? 0 : m_outerStart);
+ typedef internal::evaluator<XprType> EvaluatorType;
+ EvaluatorType matEval(m_matrix);
+ Index nnz = 0;
+ Index end = m_outerStart + m_outerSize.value();
+ for(Index j=m_outerStart; j<end; ++j)
+ for(typename EvaluatorType::InnerIterator it(matEval, j); it; ++it)
+ ++nnz;
+ return nnz;
}
-
- inline const Scalar coeff(int index) const
+
+ inline const Scalar coeff(Index row, Index col) const
+ {
+ return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
+ }
+
+ inline const Scalar coeff(Index index) const
{
return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
}
- EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
- EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+ inline const XprType& nestedExpression() const { return m_matrix; }
+ inline XprType& nestedExpression() { return m_matrix; }
+ Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
+ Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
+ Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
protected:
- typename XprType::Nested m_matrix;
+ typename internal::ref_selector<XprType>::non_const_type m_matrix;
Index m_outerStart;
const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
- EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
- private:
- Index nonZeros() const;
+ protected:
+ // Disable assignment with clear error message.
+ // Note that simply removing operator= yields compilation errors with ICC+MSVC
+ template<typename T>
+ BlockImpl& operator=(const T&)
+ {
+ EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
+ return *this;
+ }
};
/***************************************************************************
-* specialisation for SparseMatrix
+* specialization for SparseMatrix
***************************************************************************/
-template<typename _Scalar, int _Options, typename _Index, int BlockRows, int BlockCols>
-class BlockImpl<SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols,true,Sparse>
- : public SparseMatrixBase<Block<SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols,true> >
+namespace internal {
+
+template<typename SparseMatrixType, int BlockRows, int BlockCols>
+class sparse_matrix_block_impl
+ : public SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> >
{
- typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType;
typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _MatrixTypeNested;
typedef Block<SparseMatrixType, BlockRows, BlockCols, true> BlockType;
- typedef Block<const SparseMatrixType, BlockRows, BlockCols, true> ConstBlockType;
+ typedef SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> > Base;
+ using Base::convert_index;
public:
enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
protected:
+ typedef typename Base::IndexVector IndexVector;
enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
public:
-
- class InnerIterator: public SparseMatrixType::InnerIterator
- {
- public:
- inline InnerIterator(const BlockType& xpr, Index outer)
- : SparseMatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
- {}
- inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
- inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
- protected:
- Index m_outer;
- };
- class ReverseInnerIterator: public SparseMatrixType::ReverseInnerIterator
- {
- public:
- inline ReverseInnerIterator(const BlockType& xpr, Index outer)
- : SparseMatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
- {}
- inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
- inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
- protected:
- Index m_outer;
- };
- inline BlockImpl(const SparseMatrixType& xpr, int i)
- : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize)
+ inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index i)
+ : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
{}
- inline BlockImpl(const SparseMatrixType& xpr, int startRow, int startCol, int blockRows, int blockCols)
- : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols)
+ inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
+ : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
{}
template<typename OtherDerived>
inline BlockType& operator=(const SparseMatrixBase<OtherDerived>& other)
{
typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _NestedMatrixType;
- _NestedMatrixType& matrix = const_cast<_NestedMatrixType&>(m_matrix);;
- // This assignement is slow if this vector set is not empty
+ _NestedMatrixType& matrix = m_matrix;
+ // This assignment is slow if this vector set is not empty
// and/or it is not at the end of the nonzeros of the underlying matrix.
// 1 - eval to a temporary to avoid transposition and/or aliasing issues
- SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, Index> tmp(other);
+ Ref<const SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, StorageIndex> > tmp(other.derived());
+ eigen_internal_assert(tmp.outerSize()==m_outerSize.value());
// 2 - let's check whether there is enough allocated memory
Index nnz = tmp.nonZeros();
- Index start = m_outerStart==0 ? 0 : matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block
- Index end = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending posiiton of the current block
+ Index start = m_outerStart==0 ? 0 : m_matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block
+ Index end = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending position of the current block
Index block_size = end - start; // available room in the current block
Index tail_size = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end;
-
+
Index free_size = m_matrix.isCompressed()
? Index(matrix.data().allocatedSize()) + block_size
: block_size;
- if(nnz>free_size)
+ Index tmp_start = tmp.outerIndexPtr()[0];
+
+ bool update_trailing_pointers = false;
+ if(nnz>free_size)
{
// realloc manually to reduce copies
typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz);
- std::memcpy(newdata.valuePtr(), m_matrix.data().valuePtr(), start*sizeof(Scalar));
- std::memcpy(newdata.indexPtr(), m_matrix.data().indexPtr(), start*sizeof(Index));
- std::memcpy(newdata.valuePtr() + start, tmp.data().valuePtr(), nnz*sizeof(Scalar));
- std::memcpy(newdata.indexPtr() + start, tmp.data().indexPtr(), nnz*sizeof(Index));
- std::memcpy(newdata.valuePtr()+start+nnz, matrix.data().valuePtr()+end, tail_size*sizeof(Scalar));
- std::memcpy(newdata.indexPtr()+start+nnz, matrix.data().indexPtr()+end, tail_size*sizeof(Index));
-
+ internal::smart_copy(m_matrix.valuePtr(), m_matrix.valuePtr() + start, newdata.valuePtr());
+ internal::smart_copy(m_matrix.innerIndexPtr(), m_matrix.innerIndexPtr() + start, newdata.indexPtr());
+ internal::smart_copy(tmp.valuePtr() + tmp_start, tmp.valuePtr() + tmp_start + nnz, newdata.valuePtr() + start);
+ internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, newdata.indexPtr() + start);
+
+ internal::smart_copy(matrix.valuePtr()+end, matrix.valuePtr()+end + tail_size, newdata.valuePtr()+start+nnz);
+ internal::smart_copy(matrix.innerIndexPtr()+end, matrix.innerIndexPtr()+end + tail_size, newdata.indexPtr()+start+nnz);
newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz);
matrix.data().swap(newdata);
+
+ update_trailing_pointers = true;
}
else
{
- // no need to realloc, simply copy the tail at its respective position and insert tmp
- matrix.data().resize(start + nnz + tail_size);
- std::memmove(matrix.data().valuePtr()+start+nnz, matrix.data().valuePtr()+end, tail_size*sizeof(Scalar));
- std::memmove(matrix.data().indexPtr()+start+nnz, matrix.data().indexPtr()+end, tail_size*sizeof(Index));
- std::memcpy(matrix.data().valuePtr()+start, tmp.data().valuePtr(), nnz*sizeof(Scalar));
- std::memcpy(matrix.data().indexPtr()+start, tmp.data().indexPtr(), nnz*sizeof(Index));
+ if(m_matrix.isCompressed())
+ {
+ // no need to realloc, simply copy the tail at its respective position and insert tmp
+ matrix.data().resize(start + nnz + tail_size);
+ internal::smart_memmove(matrix.valuePtr()+end, matrix.valuePtr() + end+tail_size, matrix.valuePtr() + start+nnz);
+ internal::smart_memmove(matrix.innerIndexPtr()+end, matrix.innerIndexPtr() + end+tail_size, matrix.innerIndexPtr() + start+nnz);
+ update_trailing_pointers = true;
+ }
+
+ internal::smart_copy(tmp.valuePtr() + tmp_start, tmp.valuePtr() + tmp_start + nnz, matrix.valuePtr() + start);
+ internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, matrix.innerIndexPtr() + start);
}
-
- // update innerNonZeros
- if(!m_matrix.isCompressed())
- for(Index j=0; j<m_outerSize.value(); ++j)
- matrix.innerNonZeroPtr()[m_outerStart+j] = tmp.innerVector(j).nonZeros();
-
- // update outer index pointers
- Index p = start;
- for(Index k=0; k<m_outerSize.value(); ++k)
+
+ // update outer index pointers and innerNonZeros
+ if(IsVectorAtCompileTime)
{
- matrix.outerIndexPtr()[m_outerStart+k] = p;
- p += tmp.innerVector(k).nonZeros();
+ if(!m_matrix.isCompressed())
+ matrix.innerNonZeroPtr()[m_outerStart] = StorageIndex(nnz);
+ matrix.outerIndexPtr()[m_outerStart] = StorageIndex(start);
}
- std::ptrdiff_t offset = nnz - block_size;
- for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
+ else
{
- matrix.outerIndexPtr()[k] += offset;
+ StorageIndex p = StorageIndex(start);
+ for(Index k=0; k<m_outerSize.value(); ++k)
+ {
+ StorageIndex nnz_k = internal::convert_index<StorageIndex>(tmp.innerVector(k).nonZeros());
+ if(!m_matrix.isCompressed())
+ matrix.innerNonZeroPtr()[m_outerStart+k] = nnz_k;
+ matrix.outerIndexPtr()[m_outerStart+k] = p;
+ p += nnz_k;
+ }
+ }
+
+ if(update_trailing_pointers)
+ {
+ StorageIndex offset = internal::convert_index<StorageIndex>(nnz - block_size);
+ for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
+ {
+ matrix.outerIndexPtr()[k] += offset;
+ }
}
return derived();
@@ -212,50 +217,46 @@ public:
}
inline const Scalar* valuePtr() const
- { return m_matrix.valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+ { return m_matrix.valuePtr(); }
inline Scalar* valuePtr()
- { return m_matrix.const_cast_derived().valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+ { return m_matrix.valuePtr(); }
- inline const Index* innerIndexPtr() const
- { return m_matrix.innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
- inline Index* innerIndexPtr()
- { return m_matrix.const_cast_derived().innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+ inline const StorageIndex* innerIndexPtr() const
+ { return m_matrix.innerIndexPtr(); }
+ inline StorageIndex* innerIndexPtr()
+ { return m_matrix.innerIndexPtr(); }
- inline const Index* outerIndexPtr() const
+ inline const StorageIndex* outerIndexPtr() const
+ { return m_matrix.outerIndexPtr() + m_outerStart; }
+ inline StorageIndex* outerIndexPtr()
{ return m_matrix.outerIndexPtr() + m_outerStart; }
- inline Index* outerIndexPtr()
- { return m_matrix.const_cast_derived().outerIndexPtr() + m_outerStart; }
- Index nonZeros() const
- {
- if(m_matrix.isCompressed())
- return std::size_t(m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()])
- - std::size_t(m_matrix.outerIndexPtr()[m_outerStart]);
- else if(m_outerSize.value()==0)
- return 0;
- else
- return Map<const Matrix<Index,OuterSize,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
- }
-
- inline Scalar& coeffRef(int row, int col)
+ inline const StorageIndex* innerNonZeroPtr() const
+ { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }
+ inline StorageIndex* innerNonZeroPtr()
+ { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }
+
+ bool isCompressed() const { return m_matrix.innerNonZeroPtr()==0; }
+
+ inline Scalar& coeffRef(Index row, Index col)
{
- return m_matrix.const_cast_derived().coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
+ return m_matrix.coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
}
-
- inline const Scalar coeff(int row, int col) const
+
+ inline const Scalar coeff(Index row, Index col) const
{
return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
}
-
- inline const Scalar coeff(int index) const
+
+ inline const Scalar coeff(Index index) const
{
return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
}
const Scalar& lastCoeff() const
{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(BlockImpl);
- eigen_assert(nonZeros()>0);
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(sparse_matrix_block_impl);
+ eigen_assert(Base::nonZeros()>0);
if(m_matrix.isCompressed())
return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
else
@@ -265,110 +266,62 @@ public:
EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+ inline const SparseMatrixType& nestedExpression() const { return m_matrix; }
+ inline SparseMatrixType& nestedExpression() { return m_matrix; }
+ Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
+ Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
+ Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+
protected:
- typename SparseMatrixType::Nested m_matrix;
+ typename internal::ref_selector<SparseMatrixType>::non_const_type m_matrix;
Index m_outerStart;
const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
};
+} // namespace internal
-template<typename _Scalar, int _Options, typename _Index, int BlockRows, int BlockCols>
-class BlockImpl<const SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols,true,Sparse>
- : public SparseMatrixBase<Block<const SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols,true> >
+template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
+class BlockImpl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
+ : public internal::sparse_matrix_block_impl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
{
- typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType;
- typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _MatrixTypeNested;
- typedef Block<const SparseMatrixType, BlockRows, BlockCols, true> BlockType;
public:
- enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
- EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
-protected:
- enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
-public:
-
- class InnerIterator: public SparseMatrixType::InnerIterator
- {
- public:
- inline InnerIterator(const BlockType& xpr, Index outer)
- : SparseMatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
- {}
- inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
- inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
- protected:
- Index m_outer;
- };
- class ReverseInnerIterator: public SparseMatrixType::ReverseInnerIterator
- {
- public:
- inline ReverseInnerIterator(const BlockType& xpr, Index outer)
- : SparseMatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
- {}
- inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
- inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
- protected:
- Index m_outer;
- };
-
- inline BlockImpl(const SparseMatrixType& xpr, int i)
- : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize)
- {}
-
- inline BlockImpl(const SparseMatrixType& xpr, int startRow, int startCol, int blockRows, int blockCols)
- : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols)
- {}
-
- inline const Scalar* valuePtr() const
- { return m_matrix.valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
-
- inline const Index* innerIndexPtr() const
- { return m_matrix.innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
-
- inline const Index* outerIndexPtr() const
- { return m_matrix.outerIndexPtr() + m_outerStart; }
-
- Index nonZeros() const
- {
- if(m_matrix.isCompressed())
- return std::size_t(m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()])
- - std::size_t(m_matrix.outerIndexPtr()[m_outerStart]);
- else if(m_outerSize.value()==0)
- return 0;
- else
- return Map<const Matrix<Index,OuterSize,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
- }
-
- inline const Scalar coeff(int row, int col) const
- {
- return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
- }
-
- inline const Scalar coeff(int index) const
- {
- return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
- }
-
- const Scalar& lastCoeff() const
- {
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(BlockImpl);
- eigen_assert(nonZeros()>0);
- if(m_matrix.isCompressed())
- return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
- else
- return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1];
- }
-
- EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
- EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
-
- protected:
-
- EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
+ typedef _StorageIndex StorageIndex;
+ typedef SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;
+ typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
+ inline BlockImpl(SparseMatrixType& xpr, Index i)
+ : Base(xpr, i)
+ {}
+
+ inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
+ : Base(xpr, startRow, startCol, blockRows, blockCols)
+ {}
+
+ using Base::operator=;
+};
- typename SparseMatrixType::Nested m_matrix;
- Index m_outerStart;
- const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
+template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
+class BlockImpl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
+ : public internal::sparse_matrix_block_impl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
+{
+public:
+ typedef _StorageIndex StorageIndex;
+ typedef const SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;
+ typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
+ inline BlockImpl(SparseMatrixType& xpr, Index i)
+ : Base(xpr, i)
+ {}
+
+ inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
+ : Base(xpr, startRow, startCol, blockRows, blockCols)
+ {}
+
+ using Base::operator=;
+private:
+ template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr, Index i);
+ template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr);
};
//----------
@@ -397,7 +350,7 @@ SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
return Block<Derived,Dynamic,Dynamic,true>(derived(),
IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
-
+
}
/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
@@ -410,129 +363,241 @@ SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
return Block<const Derived,Dynamic,Dynamic,true>(derived(),
IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
-
+
}
/** Generic implementation of sparse Block expression.
- * Real-only.
+ * Read-only.
*/
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
class BlockImpl<XprType,BlockRows,BlockCols,InnerPanel,Sparse>
: public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,InnerPanel> >, internal::no_assignment_operator
{
- typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
- typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
+ typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
+ typedef SparseMatrixBase<BlockType> Base;
+ using Base::convert_index;
public:
enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
+ typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
+
/** Column or Row constructor
*/
- inline BlockImpl(const XprType& xpr, int i)
+ inline BlockImpl(XprType& xpr, Index i)
: m_matrix(xpr),
- m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
- m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
+ m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? convert_index(i) : 0),
+ m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? convert_index(i) : 0),
m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
m_blockCols(BlockCols==1 ? 1 : xpr.cols())
{}
/** Dynamic-size constructor
*/
- inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols)
- : m_matrix(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(blockRows), m_blockCols(blockCols)
+ inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
+ : m_matrix(xpr), m_startRow(convert_index(startRow)), m_startCol(convert_index(startCol)), m_blockRows(convert_index(blockRows)), m_blockCols(convert_index(blockCols))
{}
- inline int rows() const { return m_blockRows.value(); }
- inline int cols() const { return m_blockCols.value(); }
+ inline Index rows() const { return m_blockRows.value(); }
+ inline Index cols() const { return m_blockCols.value(); }
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
- return m_matrix.const_cast_derived()
- .coeffRef(row + m_startRow.value(), col + m_startCol.value());
+ return m_matrix.coeffRef(row + m_startRow.value(), col + m_startCol.value());
}
- inline const Scalar coeff(int row, int col) const
+ inline const Scalar coeff(Index row, Index col) const
{
return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value());
}
- inline Scalar& coeffRef(int index)
+ inline Scalar& coeffRef(Index index)
{
- return m_matrix.const_cast_derived()
- .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
- m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+ return m_matrix.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+ m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
}
- inline const Scalar coeff(int index) const
+ inline const Scalar coeff(Index index) const
{
- return m_matrix
- .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
- m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+ return m_matrix.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+ m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
}
-
- inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; }
-
- class InnerIterator : public _MatrixTypeNested::InnerIterator
- {
- typedef typename _MatrixTypeNested::InnerIterator Base;
- const BlockType& m_block;
- Index m_end;
- public:
-
- EIGEN_STRONG_INLINE InnerIterator(const BlockType& block, Index outer)
- : Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())),
- m_block(block),
- m_end(IsRowMajor ? block.m_startCol.value()+block.m_blockCols.value() : block.m_startRow.value()+block.m_blockRows.value())
- {
- while( (Base::operator bool()) && (Base::index() < (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value())) )
- Base::operator++();
- }
- inline Index index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); }
- inline Index outer() const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); }
- inline Index row() const { return Base::row() - m_block.m_startRow.value(); }
- inline Index col() const { return Base::col() - m_block.m_startCol.value(); }
-
- inline operator bool() const { return Base::operator bool() && Base::index() < m_end; }
- };
- class ReverseInnerIterator : public _MatrixTypeNested::ReverseInnerIterator
- {
- typedef typename _MatrixTypeNested::ReverseInnerIterator Base;
- const BlockType& m_block;
- Index m_begin;
- public:
-
- EIGEN_STRONG_INLINE ReverseInnerIterator(const BlockType& block, Index outer)
- : Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())),
- m_block(block),
- m_begin(IsRowMajor ? block.m_startCol.value() : block.m_startRow.value())
- {
- while( (Base::operator bool()) && (Base::index() >= (IsRowMajor ? m_block.m_startCol.value()+block.m_blockCols.value() : m_block.m_startRow.value()+block.m_blockRows.value())) )
- Base::operator--();
- }
+ inline const XprType& nestedExpression() const { return m_matrix; }
+ inline XprType& nestedExpression() { return m_matrix; }
+ Index startRow() const { return m_startRow.value(); }
+ Index startCol() const { return m_startCol.value(); }
+ Index blockRows() const { return m_blockRows.value(); }
+ Index blockCols() const { return m_blockCols.value(); }
- inline Index index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); }
- inline Index outer() const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); }
- inline Index row() const { return Base::row() - m_block.m_startRow.value(); }
- inline Index col() const { return Base::col() - m_block.m_startCol.value(); }
-
- inline operator bool() const { return Base::operator bool() && Base::index() >= m_begin; }
- };
protected:
- friend class InnerIterator;
- friend class ReverseInnerIterator;
-
- EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
+// friend class internal::GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel>;
+ friend struct internal::unary_evaluator<Block<XprType,BlockRows,BlockCols,InnerPanel>, internal::IteratorBased, Scalar >;
- typename XprType::Nested m_matrix;
+ Index nonZeros() const { return Dynamic; }
+
+ typename internal::ref_selector<XprType>::non_const_type m_matrix;
const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;
- private:
- Index nonZeros() const;
+
+ protected:
+ // Disable assignment with a clear error message.
+ // Note that simply removing operator= yields compilation errors with ICC+MSVC
+ template<typename T>
+ BlockImpl& operator=(const T&)
+ {
+ EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
+ return *this;
+ }
+
+};
+
+namespace internal {
+
+template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
+struct unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased >
+ : public evaluator_base<Block<ArgType,BlockRows,BlockCols,InnerPanel> >
+{
+ class InnerVectorInnerIterator;
+ class OuterVectorInnerIterator;
+ public:
+ typedef Block<ArgType,BlockRows,BlockCols,InnerPanel> XprType;
+ typedef typename XprType::StorageIndex StorageIndex;
+ typedef typename XprType::Scalar Scalar;
+
+ enum {
+ IsRowMajor = XprType::IsRowMajor,
+
+ OuterVector = (BlockCols==1 && ArgType::IsRowMajor)
+ | // FIXME: use | instead of || to silence GCC 4.4.0's warning "suggest parentheses around &&";
+ // revert to || as soon as that workaround is no longer needed.
+ (BlockRows==1 && !ArgType::IsRowMajor),
+
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
+ Flags = XprType::Flags
+ };
+
+ typedef typename internal::conditional<OuterVector,OuterVectorInnerIterator,InnerVectorInnerIterator>::type InnerIterator;
+
+ explicit unary_evaluator(const XprType& op)
+ : m_argImpl(op.nestedExpression()), m_block(op)
+ {}
+
+ inline Index nonZerosEstimate() const {
+ Index nnz = m_block.nonZeros();
+ if(nnz<0)
+ return m_argImpl.nonZerosEstimate() * m_block.size() / m_block.nestedExpression().size();
+ return nnz;
+ }
+
+ protected:
+ typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
+
+ evaluator<ArgType> m_argImpl;
+ const XprType &m_block;
+};
+
+template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
+class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::InnerVectorInnerIterator
+ : public EvalIterator
+{
+ enum { IsRowMajor = unary_evaluator::IsRowMajor };
+ const XprType& m_block;
+ Index m_end;
+public:
+
+ EIGEN_STRONG_INLINE InnerVectorInnerIterator(const unary_evaluator& aEval, Index outer)
+ : EvalIterator(aEval.m_argImpl, outer + (IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol())),
+ m_block(aEval.m_block),
+ m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows())
+ {
+ while( (EvalIterator::operator bool()) && (EvalIterator::index() < (IsRowMajor ? m_block.startCol() : m_block.startRow())) )
+ EvalIterator::operator++();
+ }
+
+ inline StorageIndex index() const { return EvalIterator::index() - convert_index<StorageIndex>(IsRowMajor ? m_block.startCol() : m_block.startRow()); }
+ inline Index outer() const { return EvalIterator::outer() - (IsRowMajor ? m_block.startRow() : m_block.startCol()); }
+ inline Index row() const { return EvalIterator::row() - m_block.startRow(); }
+ inline Index col() const { return EvalIterator::col() - m_block.startCol(); }
+
+ inline operator bool() const { return EvalIterator::operator bool() && EvalIterator::index() < m_end; }
+};
+
+template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
+class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::OuterVectorInnerIterator
+{
+ enum { IsRowMajor = unary_evaluator::IsRowMajor };
+ const unary_evaluator& m_eval;
+ Index m_outerPos;
+ const Index m_innerIndex;
+ Index m_end;
+ EvalIterator m_it;
+public:
+
+ EIGEN_STRONG_INLINE OuterVectorInnerIterator(const unary_evaluator& aEval, Index outer)
+ : m_eval(aEval),
+ m_outerPos( (IsRowMajor ? aEval.m_block.startCol() : aEval.m_block.startRow()) ),
+ m_innerIndex(IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol()),
+ m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows()),
+ m_it(m_eval.m_argImpl, m_outerPos)
+ {
+ EIGEN_UNUSED_VARIABLE(outer);
+ eigen_assert(outer==0);
+
+ while(m_it && m_it.index() < m_innerIndex) ++m_it;
+ if((!m_it) || (m_it.index()!=m_innerIndex))
+ ++(*this);
+ }
+
+ inline StorageIndex index() const { return convert_index<StorageIndex>(m_outerPos - (IsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow())); }
+ inline Index outer() const { return 0; }
+ inline Index row() const { return IsRowMajor ? 0 : index(); }
+ inline Index col() const { return IsRowMajor ? index() : 0; }
+
+ inline Scalar value() const { return m_it.value(); }
+ inline Scalar& valueRef() { return m_it.valueRef(); }
+
+ inline OuterVectorInnerIterator& operator++()
+ {
+ // search for the next non-zero entry
+ while(++m_outerPos<m_end)
+ {
+ // Restart iterator at the next inner-vector:
+ m_it.~EvalIterator();
+ ::new (&m_it) EvalIterator(m_eval.m_argImpl, m_outerPos);
+ // search for the key m_innerIndex in the current outer-vector
+ while(m_it && m_it.index() < m_innerIndex) ++m_it;
+ if(m_it && m_it.index()==m_innerIndex) break;
+ }
+ return *this;
+ }
+
+ inline operator bool() const { return m_outerPos < m_end; }
};
+template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
+struct unary_evaluator<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
+ : evaluator<SparseCompressedBase<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >
+{
+ typedef Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType;
+ typedef evaluator<SparseCompressedBase<XprType> > Base;
+ explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
+};
+
+template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
+struct unary_evaluator<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
+ : evaluator<SparseCompressedBase<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >
+{
+ typedef Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType;
+ typedef evaluator<SparseCompressedBase<XprType> > Base;
+ explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
+};
+
+} // end namespace internal
+
+
} // end namespace Eigen
#endif // EIGEN_SPARSE_BLOCK_H
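Usage sketch (annotation, not part of this commit): the two block paths reworked above, assuming Eigen 3.3 semantics; the matrix, vector, and values are illustrative only.

#include <Eigen/SparseCore>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(4,4);   // column-major by default
  A.insert(0,1) = 1.0;
  A.insert(2,1) = 2.0;
  A.makeCompressed();

  // col(1) is an inner panel of a column-major matrix, so this assignment
  // runs through internal::sparse_matrix_block_impl::operator= defined above.
  Eigen::SparseVector<double> v(4);
  v.insert(3) = 5.0;
  A.col(1) = v;

  // row(2) is an outer-vector block of a column-major matrix; summing it
  // exercises OuterVectorInnerIterator, which probes each column in turn.
  std::cout << A.row(2).sum() << "\n";  // prints 0: column 1 now holds only A(3,1)=5
  return 0;
}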
diff --git a/eigen/Eigen/src/SparseCore/SparseColEtree.h b/eigen/Eigen/src/SparseCore/SparseColEtree.h
index f8745f4..ebe02d1 100644
--- a/eigen/Eigen/src/SparseCore/SparseColEtree.h
+++ b/eigen/Eigen/src/SparseCore/SparseColEtree.h
@@ -58,30 +58,29 @@ Index etree_find (Index i, IndexVector& pp)
* \param perm The permutation to apply to the column of \b mat
*/
template <typename MatrixType, typename IndexVector>
-int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::Index *perm=0)
+int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::StorageIndex *perm=0)
{
- typedef typename MatrixType::Index Index;
- Index nc = mat.cols(); // Number of columns
- Index m = mat.rows();
- Index diagSize = (std::min)(nc,m);
+ typedef typename MatrixType::StorageIndex StorageIndex;
+ StorageIndex nc = convert_index<StorageIndex>(mat.cols()); // Number of columns
+ StorageIndex m = convert_index<StorageIndex>(mat.rows());
+ StorageIndex diagSize = (std::min)(nc,m);
IndexVector root(nc); // root of subtree of etree
root.setZero();
IndexVector pp(nc); // disjoint sets
pp.setZero(); // Initialize disjoint sets
parent.resize(mat.cols());
//Compute first nonzero column in each row
- Index row,col;
firstRowElt.resize(m);
firstRowElt.setConstant(nc);
firstRowElt.segment(0, diagSize).setLinSpaced(diagSize, 0, diagSize-1);
bool found_diag;
- for (col = 0; col < nc; col++)
+ for (StorageIndex col = 0; col < nc; col++)
{
- Index pcol = col;
+ StorageIndex pcol = col;
if(perm) pcol = perm[col];
for (typename MatrixType::InnerIterator it(mat, pcol); it; ++it)
{
- row = it.row();
+ Index row = it.row();
firstRowElt(row) = (std::min)(firstRowElt(row), col);
}
}
@@ -89,8 +88,8 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl
except use (firstRowElt[r],c) in place of an edge (r,c) of A.
Thus each row clique in A'*A is replaced by a star
centered at its first vertex, which has the same fill. */
- Index rset, cset, rroot;
- for (col = 0; col < nc; col++)
+ StorageIndex rset, cset, rroot;
+ for (StorageIndex col = 0; col < nc; col++)
{
found_diag = col>=m;
pp(col) = col;
@@ -99,7 +98,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl
parent(col) = nc;
/* The diagonal element is treated here even if it does not exist in the matrix
* hence the loop is executed once more */
- Index pcol = col;
+ StorageIndex pcol = col;
if(perm) pcol = perm[col];
for (typename MatrixType::InnerIterator it(mat, pcol); it||!found_diag; ++it)
{ // A sequence of interleaved find and union is performed
@@ -107,7 +106,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl
if(it) i = it.index();
if (i == col) found_diag = true;
- row = firstRowElt(i);
+ StorageIndex row = firstRowElt(i);
if (row >= col) continue;
rset = internal::etree_find(row, pp); // Find the name of the set containing row
rroot = root(rset);
@@ -127,10 +126,11 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl
* Depth-first search from vertex n. No recursion.
* This routine was contributed by Cédric Doucet, CEDRAT Group, Meylan, France.
*/
-template <typename Index, typename IndexVector>
-void nr_etdfs (Index n, IndexVector& parent, IndexVector& first_kid, IndexVector& next_kid, IndexVector& post, Index postnum)
+template <typename IndexVector>
+void nr_etdfs (typename IndexVector::Scalar n, IndexVector& parent, IndexVector& first_kid, IndexVector& next_kid, IndexVector& post, typename IndexVector::Scalar postnum)
{
- Index current = n, first, next;
+ typedef typename IndexVector::Scalar StorageIndex;
+ StorageIndex current = n, first, next;
while (postnum != n)
{
// No kid for the current node
@@ -174,22 +174,22 @@ void nr_etdfs (Index n, IndexVector& parent, IndexVector& first_kid, IndexVector
* \param parent Input tree
* \param post postordered tree
*/
-template <typename Index, typename IndexVector>
-void treePostorder(Index n, IndexVector& parent, IndexVector& post)
+template <typename IndexVector>
+void treePostorder(typename IndexVector::Scalar n, IndexVector& parent, IndexVector& post)
{
+ typedef typename IndexVector::Scalar StorageIndex;
IndexVector first_kid, next_kid; // Linked list of children
- Index postnum;
+ StorageIndex postnum;
// Allocate storage for working arrays and results
first_kid.resize(n+1);
next_kid.setZero(n+1);
post.setZero(n+1);
// Set up structure describing children
- Index v, dad;
first_kid.setConstant(-1);
- for (v = n-1; v >= 0; v--)
+ for (StorageIndex v = n-1; v >= 0; v--)
{
- dad = parent(v);
+ StorageIndex dad = parent(v);
next_kid(v) = first_kid(dad);
first_kid(dad) = v;
}
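Usage sketch (annotation, not part of this commit): how the retyped etree helpers above are driven. coletree and treePostorder are internal API consumed by SparseLU/SparseQR; the wrapper below is purely illustrative.

#include <Eigen/SparseCore>

typedef Eigen::SparseMatrix<double> SpMat;
typedef Eigen::Matrix<SpMat::StorageIndex, Eigen::Dynamic, 1> IndexVector;

void etreeSketch(const SpMat& A)
{
  IndexVector parent, firstRowElt, post;
  // Column elimination tree of A, i.e. the etree of A'*A; parent and
  // firstRowElt are resized inside coletree.
  Eigen::internal::coletree(A, parent, firstRowElt);
  // Post-order the tree; n is now passed as IndexVector::Scalar (StorageIndex)
  // instead of through a separate Index template parameter.
  Eigen::internal::treePostorder(SpMat::StorageIndex(A.cols()), parent, post);
}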
diff --git a/eigen/Eigen/src/SparseCore/SparseCompressedBase.h b/eigen/Eigen/src/SparseCore/SparseCompressedBase.h
new file mode 100644
index 0000000..e0b3c22
--- /dev/null
+++ b/eigen/Eigen/src/SparseCore/SparseCompressedBase.h
@@ -0,0 +1,357 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_COMPRESSED_BASE_H
+#define EIGEN_SPARSE_COMPRESSED_BASE_H
+
+namespace Eigen {
+
+template<typename Derived> class SparseCompressedBase;
+
+namespace internal {
+
+template<typename Derived>
+struct traits<SparseCompressedBase<Derived> > : traits<Derived>
+{};
+
+} // end namespace internal
+
+/** \ingroup SparseCore_Module
+ * \class SparseCompressedBase
+ * \brief Common base class for sparse [compressed]-{row|column}-storage format.
+ *
+ * This class defines the common interface for all derived classes implementing the compressed sparse storage format, such as:
+ * - SparseMatrix
+ * - Ref<SparseMatrixType,Options>
+ * - Map<SparseMatrixType>
+ *
+ */
+template<typename Derived>
+class SparseCompressedBase
+ : public SparseMatrixBase<Derived>
+{
+ public:
+ typedef SparseMatrixBase<Derived> Base;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseCompressedBase)
+ using Base::operator=;
+ using Base::IsRowMajor;
+
+ class InnerIterator;
+ class ReverseInnerIterator;
+
+ protected:
+ typedef typename Base::IndexVector IndexVector;
+ Eigen::Map<IndexVector> innerNonZeros() { return Eigen::Map<IndexVector>(innerNonZeroPtr(), isCompressed()?0:derived().outerSize()); }
+ const Eigen::Map<const IndexVector> innerNonZeros() const { return Eigen::Map<const IndexVector>(innerNonZeroPtr(), isCompressed()?0:derived().outerSize()); }
+
+ public:
+
+ /** \returns the number of non-zero coefficients */
+ inline Index nonZeros() const
+ {
+ if(Derived::IsVectorAtCompileTime && outerIndexPtr()==0)
+ return derived().nonZeros();
+ else if(isCompressed())
+ return outerIndexPtr()[derived().outerSize()]-outerIndexPtr()[0];
+ else if(derived().outerSize()==0)
+ return 0;
+ else
+ return innerNonZeros().sum();
+ }
+
+ /** \returns a const pointer to the array of values.
+ * This function is aimed at interoperability with other libraries.
+ * \sa innerIndexPtr(), outerIndexPtr() */
+ inline const Scalar* valuePtr() const { return derived().valuePtr(); }
+ /** \returns a non-const pointer to the array of values.
+ * This function is aimed at interoperability with other libraries.
+ * \sa innerIndexPtr(), outerIndexPtr() */
+ inline Scalar* valuePtr() { return derived().valuePtr(); }
+
+ /** \returns a const pointer to the array of inner indices.
+ * This function is aimed at interoperability with other libraries.
+ * \sa valuePtr(), outerIndexPtr() */
+ inline const StorageIndex* innerIndexPtr() const { return derived().innerIndexPtr(); }
+ /** \returns a non-const pointer to the array of inner indices.
+ * This function is aimed at interoperability with other libraries.
+ * \sa valuePtr(), outerIndexPtr() */
+ inline StorageIndex* innerIndexPtr() { return derived().innerIndexPtr(); }
+
+ /** \returns a const pointer to the array of the starting positions of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \warning it returns the null pointer 0 for SparseVector
+ * \sa valuePtr(), innerIndexPtr() */
+ inline const StorageIndex* outerIndexPtr() const { return derived().outerIndexPtr(); }
+ /** \returns a non-const pointer to the array of the starting positions of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \warning it returns the null pointer 0 for SparseVector
+ * \sa valuePtr(), innerIndexPtr() */
+ inline StorageIndex* outerIndexPtr() { return derived().outerIndexPtr(); }
+
+ /** \returns a const pointer to the array of the number of non zeros of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \warning it returns the null pointer 0 in compressed mode */
+ inline const StorageIndex* innerNonZeroPtr() const { return derived().innerNonZeroPtr(); }
+ /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \warning it returns the null pointer 0 in compressed mode */
+ inline StorageIndex* innerNonZeroPtr() { return derived().innerNonZeroPtr(); }
+
+ /** \returns whether \c *this is in compressed form. */
+ inline bool isCompressed() const { return innerNonZeroPtr()==0; }
+
+ /** \returns a read-only view of the stored coefficients as a 1D array expression.
+ *
+ * \warning this method is for \b compressed \b storage \b only, and it will trigger an assertion otherwise.
+ *
+ * \sa valuePtr(), isCompressed() */
+ const Map<const Array<Scalar,Dynamic,1> > coeffs() const { eigen_assert(isCompressed()); return Array<Scalar,Dynamic,1>::Map(valuePtr(),nonZeros()); }
+
+ /** \returns a read-write view of the stored coefficients as a 1D array expression
+ *
+ * \warning this method is for \b compressed \b storage \b only, and it will trigger an assertion otherwise.
+ *
+ * Here is an example:
+ * \include SparseMatrix_coeffs.cpp
+ * and the output is:
+ * \include SparseMatrix_coeffs.out
+ *
+ * \sa valuePtr(), isCompressed() */
+ Map<Array<Scalar,Dynamic,1> > coeffs() { eigen_assert(isCompressed()); return Array<Scalar,Dynamic,1>::Map(valuePtr(),nonZeros()); }
+
+ protected:
+ /** Default constructor. Does nothing. */
+ SparseCompressedBase() {}
+ private:
+ template<typename OtherDerived> explicit SparseCompressedBase(const SparseCompressedBase<OtherDerived>&);
+};
+
+template<typename Derived>
+class SparseCompressedBase<Derived>::InnerIterator
+{
+ public:
+ InnerIterator()
+ : m_values(0), m_indices(0), m_outer(0), m_id(0), m_end(0)
+ {}
+
+ InnerIterator(const InnerIterator& other)
+ : m_values(other.m_values), m_indices(other.m_indices), m_outer(other.m_outer), m_id(other.m_id), m_end(other.m_end)
+ {}
+
+ InnerIterator& operator=(const InnerIterator& other)
+ {
+ m_values = other.m_values;
+ m_indices = other.m_indices;
+ const_cast<OuterType&>(m_outer).setValue(other.m_outer.value());
+ m_id = other.m_id;
+ m_end = other.m_end;
+ return *this;
+ }
+
+ InnerIterator(const SparseCompressedBase& mat, Index outer)
+ : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer)
+ {
+ if(Derived::IsVectorAtCompileTime && mat.outerIndexPtr()==0)
+ {
+ m_id = 0;
+ m_end = mat.nonZeros();
+ }
+ else
+ {
+ m_id = mat.outerIndexPtr()[outer];
+ if(mat.isCompressed())
+ m_end = mat.outerIndexPtr()[outer+1];
+ else
+ m_end = m_id + mat.innerNonZeroPtr()[outer];
+ }
+ }
+
+ explicit InnerIterator(const SparseCompressedBase& mat)
+ : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(0), m_id(0), m_end(mat.nonZeros())
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
+ }
+
+ explicit InnerIterator(const internal::CompressedStorage<Scalar,StorageIndex>& data)
+ : m_values(data.valuePtr()), m_indices(data.indexPtr()), m_outer(0), m_id(0), m_end(data.size())
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
+ }
+
+ inline InnerIterator& operator++() { m_id++; return *this; }
+ inline InnerIterator& operator+=(Index i) { m_id += i ; return *this; }
+
+ inline InnerIterator operator+(Index i)
+ {
+ InnerIterator result = *this;
+ result += i;
+ return result;
+ }
+
+ inline const Scalar& value() const { return m_values[m_id]; }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
+
+ inline StorageIndex index() const { return m_indices[m_id]; }
+ inline Index outer() const { return m_outer.value(); }
+ inline Index row() const { return IsRowMajor ? m_outer.value() : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer.value(); }
+
+ inline operator bool() const { return (m_id < m_end); }
+
+ protected:
+ const Scalar* m_values;
+ const StorageIndex* m_indices;
+ typedef internal::variable_if_dynamic<Index,Derived::IsVectorAtCompileTime?0:Dynamic> OuterType;
+ const OuterType m_outer;
+ Index m_id;
+ Index m_end;
+ private:
+ // If you get here, then you're not using the right InnerIterator type, e.g.:
+ // SparseMatrix<double,RowMajor> A;
+ // SparseMatrix<double>::InnerIterator it(A,0);
+ template<typename T> InnerIterator(const SparseMatrixBase<T>&, Index outer);
+};
+
+template<typename Derived>
+class SparseCompressedBase<Derived>::ReverseInnerIterator
+{
+ public:
+ ReverseInnerIterator(const SparseCompressedBase& mat, Index outer)
+ : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer)
+ {
+ if(Derived::IsVectorAtCompileTime && mat.outerIndexPtr()==0)
+ {
+ m_start = 0;
+ m_id = mat.nonZeros();
+ }
+ else
+ {
+ m_start = mat.outerIndexPtr()[outer];
+ if(mat.isCompressed())
+ m_id = mat.outerIndexPtr()[outer+1];
+ else
+ m_id = m_start + mat.innerNonZeroPtr()[outer];
+ }
+ }
+
+ explicit ReverseInnerIterator(const SparseCompressedBase& mat)
+ : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(0), m_start(0), m_id(mat.nonZeros())
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
+ }
+
+ explicit ReverseInnerIterator(const internal::CompressedStorage<Scalar,StorageIndex>& data)
+ : m_values(data.valuePtr()), m_indices(data.indexPtr()), m_outer(0), m_start(0), m_id(data.size())
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
+ }
+
+ inline ReverseInnerIterator& operator--() { --m_id; return *this; }
+ inline ReverseInnerIterator& operator-=(Index i) { m_id -= i; return *this; }
+
+ inline ReverseInnerIterator operator-(Index i)
+ {
+ ReverseInnerIterator result = *this;
+ result -= i;
+ return result;
+ }
+
+ inline const Scalar& value() const { return m_values[m_id-1]; }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }
+
+ inline StorageIndex index() const { return m_indices[m_id-1]; }
+ inline Index outer() const { return m_outer.value(); }
+ inline Index row() const { return IsRowMajor ? m_outer.value() : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer.value(); }
+
+ inline operator bool() const { return (m_id > m_start); }
+
+ protected:
+ const Scalar* m_values;
+ const StorageIndex* m_indices;
+ typedef internal::variable_if_dynamic<Index,Derived::IsVectorAtCompileTime?0:Dynamic> OuterType;
+ const OuterType m_outer;
+ Index m_start;
+ Index m_id;
+};
+
+namespace internal {
+
+template<typename Derived>
+struct evaluator<SparseCompressedBase<Derived> >
+ : evaluator_base<Derived>
+{
+ typedef typename Derived::Scalar Scalar;
+ typedef typename Derived::InnerIterator InnerIterator;
+
+ enum {
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ Flags = Derived::Flags
+ };
+
+ evaluator() : m_matrix(0), m_zero(0)
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+ explicit evaluator(const Derived &mat) : m_matrix(&mat), m_zero(0)
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+ inline Index nonZerosEstimate() const {
+ return m_matrix->nonZeros();
+ }
+
+ operator Derived&() { return m_matrix->const_cast_derived(); }
+ operator const Derived&() const { return *m_matrix; }
+
+ typedef typename DenseCoeffsBase<Derived,ReadOnlyAccessors>::CoeffReturnType CoeffReturnType;
+ const Scalar& coeff(Index row, Index col) const
+ {
+ Index p = find(row,col);
+
+ if(p==Dynamic)
+ return m_zero;
+ else
+ return m_matrix->const_cast_derived().valuePtr()[p];
+ }
+
+ Scalar& coeffRef(Index row, Index col)
+ {
+ Index p = find(row,col);
+ eigen_assert(p!=Dynamic && "written coefficient does not exist");
+ return m_matrix->const_cast_derived().valuePtr()[p];
+ }
+
+protected:
+
+ Index find(Index row, Index col) const
+ {
+ eigen_internal_assert(row>=0 && row<m_matrix->rows() && col>=0 && col<m_matrix->cols());
+
+ const Index outer = Derived::IsRowMajor ? row : col;
+ const Index inner = Derived::IsRowMajor ? col : row;
+
+ Index start = m_matrix->outerIndexPtr()[outer];
+ Index end = m_matrix->isCompressed() ? m_matrix->outerIndexPtr()[outer+1] : m_matrix->outerIndexPtr()[outer] + m_matrix->innerNonZeroPtr()[outer];
+ eigen_assert(end>=start && "you are using a non finalized sparse matrix or written coefficient does not exist");
+ const Index p = std::lower_bound(m_matrix->innerIndexPtr()+start, m_matrix->innerIndexPtr()+end,inner) - m_matrix->innerIndexPtr();
+
+ return ((p<end) && (m_matrix->innerIndexPtr()[p]==inner)) ? p : Dynamic;
+ }
+
+ const Derived *m_matrix;
+ const Scalar m_zero;
+};
+
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_COMPRESSED_BASE_H
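Usage sketch (annotation, not part of this commit): the interoperability accessors that SparseCompressedBase now centralizes, plus the new coeffs() view, assuming the default int StorageIndex.

#include <Eigen/SparseCore>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(3,3);
  A.insert(0,0) = 1.0; A.insert(2,1) = 2.0; A.insert(1,2) = 3.0;
  A.makeCompressed();                     // innerNonZeroPtr() becomes 0

  // Raw CSC buffers, e.g. for handing A to an external solver:
  const double* val = A.valuePtr();       // the nonZeros() stored values
  const int*    idx = A.innerIndexPtr();  // row index of each stored value
  const int*    ptr = A.outerIndexPtr();  // column start offsets, size cols()+1

  for (Eigen::Index j = 0; j < A.outerSize(); ++j)
    for (int k = ptr[j]; k < ptr[j+1]; ++k)
      std::cout << idx[k] << "," << j << " -> " << val[k] << "\n";

  // coeffs(): 1D array view of the stored values, compressed storage only.
  A.coeffs() *= 2.0;
  return 0;
}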
diff --git a/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h b/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
index 5462737..c41c07a 100644
--- a/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
+++ b/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -28,88 +28,57 @@ namespace Eigen {
// generic sparse
// 4 - dense op dense product dense
// generic dense
-
-namespace internal {
-
-template<> struct promote_storage_type<Dense,Sparse>
-{ typedef Sparse ret; };
-
-template<> struct promote_storage_type<Sparse,Dense>
-{ typedef Sparse ret; };
-
-template<typename BinaryOp, typename Lhs, typename Rhs, typename Derived,
- typename _LhsStorageMode = typename traits<Lhs>::StorageKind,
- typename _RhsStorageMode = typename traits<Rhs>::StorageKind>
-class sparse_cwise_binary_op_inner_iterator_selector;
-
-} // end namespace internal
+//
+// TODO: to ease the compiler's job, we could specialize product/quotient with a scalar
+// and fall back to the cwise-unary evaluator using bind1st_op and bind2nd_op.
template<typename BinaryOp, typename Lhs, typename Rhs>
class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Sparse>
: public SparseMatrixBase<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
{
public:
- class InnerIterator;
- class ReverseInnerIterator;
typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> Derived;
+ typedef SparseMatrixBase<Derived> Base;
EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
CwiseBinaryOpImpl()
{
EIGEN_STATIC_ASSERT((
(!internal::is_same<typename internal::traits<Lhs>::StorageKind,
typename internal::traits<Rhs>::StorageKind>::value)
- || ((Lhs::Flags&RowMajorBit) == (Rhs::Flags&RowMajorBit))),
+ || ((internal::evaluator<Lhs>::Flags&RowMajorBit) == (internal::evaluator<Rhs>::Flags&RowMajorBit))),
THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH);
}
};
-template<typename BinaryOp, typename Lhs, typename Rhs>
-class CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator
- : public internal::sparse_cwise_binary_op_inner_iterator_selector<BinaryOp,Lhs,Rhs,typename CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator>
-{
- public:
- typedef typename Lhs::Index Index;
- typedef internal::sparse_cwise_binary_op_inner_iterator_selector<
- BinaryOp,Lhs,Rhs, InnerIterator> Base;
-
- // NOTE: we have to prefix Index by "typename Lhs::" to avoid an ICE with VC11
- EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, typename Lhs::Index outer)
- : Base(binOp.derived(),outer)
- {}
-};
-
-/***************************************************************************
-* Implementation of inner-iterators
-***************************************************************************/
-
-// template<typename T> struct internal::func_is_conjunction { enum { ret = false }; };
-// template<typename T> struct internal::func_is_conjunction<internal::scalar_product_op<T> > { enum { ret = true }; };
-
-// TODO generalize the internal::scalar_product_op specialization to all conjunctions if any !
-
namespace internal {
-// sparse - sparse (generic)
-template<typename BinaryOp, typename Lhs, typename Rhs, typename Derived>
-class sparse_cwise_binary_op_inner_iterator_selector<BinaryOp, Lhs, Rhs, Derived, Sparse, Sparse>
-{
- typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> CwiseBinaryXpr;
- typedef typename traits<CwiseBinaryXpr>::Scalar Scalar;
- typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
- typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
- typedef typename _LhsNested::InnerIterator LhsIterator;
- typedef typename _RhsNested::InnerIterator RhsIterator;
- typedef typename Lhs::Index Index;
+
+// Generic "sparse OP sparse"
+template<typename XprType> struct binary_sparse_evaluator;
+template<typename BinaryOp, typename Lhs, typename Rhs>
+struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IteratorBased, IteratorBased>
+ : evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
+{
+protected:
+ typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
+ typedef typename evaluator<Rhs>::InnerIterator RhsIterator;
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
+ typedef typename traits<XprType>::Scalar Scalar;
+ typedef typename XprType::StorageIndex StorageIndex;
+public:
+
+ class InnerIterator
+ {
public:
-
- EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
- : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor())
+
+ EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)
+ : m_lhsIter(aEval.m_lhsImpl,outer), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor)
{
this->operator++();
}
- EIGEN_STRONG_INLINE Derived& operator++()
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
{
if (m_lhsIter && m_rhsIter && (m_lhsIter.index() == m_rhsIter.index()))
{
@@ -135,12 +104,13 @@ class sparse_cwise_binary_op_inner_iterator_selector<BinaryOp, Lhs, Rhs, Derived
m_value = 0; // this is to avoid a compilation warning
m_id = -1;
}
- return *static_cast<Derived*>(this);
+ return *this;
}
EIGEN_STRONG_INLINE Scalar value() const { return m_value; }
- EIGEN_STRONG_INLINE Index index() const { return m_id; }
+ EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; }
+ EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); }
EIGEN_STRONG_INLINE Index row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); }
EIGEN_STRONG_INLINE Index col() const { return Lhs::IsRowMajor ? index() : m_lhsIter.col(); }
@@ -151,25 +121,301 @@ class sparse_cwise_binary_op_inner_iterator_selector<BinaryOp, Lhs, Rhs, Derived
RhsIterator m_rhsIter;
const BinaryOp& m_functor;
Scalar m_value;
- Index m_id;
+ StorageIndex m_id;
+ };
+
+
+ enum {
+ CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
+ Flags = XprType::Flags
+ };
+
+ explicit binary_evaluator(const XprType& xpr)
+ : m_functor(xpr.functor()),
+ m_lhsImpl(xpr.lhs()),
+ m_rhsImpl(xpr.rhs())
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+ inline Index nonZerosEstimate() const {
+ return m_lhsImpl.nonZerosEstimate() + m_rhsImpl.nonZerosEstimate();
+ }
+
+protected:
+ const BinaryOp m_functor;
+ evaluator<Lhs> m_lhsImpl;
+ evaluator<Rhs> m_rhsImpl;
};
-// sparse - sparse (product)
-template<typename T, typename Lhs, typename Rhs, typename Derived>
-class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs, Rhs, Derived, Sparse, Sparse>
+// dense op sparse
+template<typename BinaryOp, typename Lhs, typename Rhs>
+struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IteratorBased>
+ : evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
{
- typedef scalar_product_op<T> BinaryFunc;
- typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
- typedef typename CwiseBinaryXpr::Scalar Scalar;
- typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
- typedef typename _LhsNested::InnerIterator LhsIterator;
- typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
- typedef typename _RhsNested::InnerIterator RhsIterator;
- typedef typename Lhs::Index Index;
+protected:
+ typedef typename evaluator<Rhs>::InnerIterator RhsIterator;
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
+ typedef typename traits<XprType>::Scalar Scalar;
+ typedef typename XprType::StorageIndex StorageIndex;
+public:
+
+ class InnerIterator
+ {
+ enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit };
public:
- EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
- : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor())
+ EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)
+ : m_lhsEval(aEval.m_lhsImpl), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor), m_value(0), m_id(-1), m_innerSize(aEval.m_expr.rhs().innerSize())
+ {
+ this->operator++();
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ {
+ ++m_id;
+ if(m_id<m_innerSize)
+ {
+ Scalar lhsVal = m_lhsEval.coeff(IsRowMajor?m_rhsIter.outer():m_id,
+ IsRowMajor?m_id:m_rhsIter.outer());
+ if(m_rhsIter && m_rhsIter.index()==m_id)
+ {
+ m_value = m_functor(lhsVal, m_rhsIter.value());
+ ++m_rhsIter;
+ }
+ else
+ m_value = m_functor(lhsVal, Scalar(0));
+ }
+
+ return *this;
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const { eigen_internal_assert(m_id<m_innerSize); return m_value; }
+
+ EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; }
+ EIGEN_STRONG_INLINE Index outer() const { return m_rhsIter.outer(); }
+ EIGEN_STRONG_INLINE Index row() const { return IsRowMajor ? m_rhsIter.outer() : m_id; }
+ EIGEN_STRONG_INLINE Index col() const { return IsRowMajor ? m_id : m_rhsIter.outer(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_id<m_innerSize; }
+
+ protected:
+ const evaluator<Lhs> &m_lhsEval;
+ RhsIterator m_rhsIter;
+ const BinaryOp& m_functor;
+ Scalar m_value;
+ StorageIndex m_id;
+ StorageIndex m_innerSize;
+ };
+
+
+ enum {
+ CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
+ Flags = XprType::Flags
+ };
+
+ explicit binary_evaluator(const XprType& xpr)
+ : m_functor(xpr.functor()),
+ m_lhsImpl(xpr.lhs()),
+ m_rhsImpl(xpr.rhs()),
+ m_expr(xpr)
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+ inline Index nonZerosEstimate() const {
+ return m_expr.size();
+ }
+
+protected:
+ const BinaryOp m_functor;
+ evaluator<Lhs> m_lhsImpl;
+ evaluator<Rhs> m_rhsImpl;
+ const XprType &m_expr;
+};
+
+// sparse op dense
+template<typename BinaryOp, typename Lhs, typename Rhs>
+struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IteratorBased, IndexBased>
+ : evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
+{
+protected:
+ typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
+ typedef typename traits<XprType>::Scalar Scalar;
+ typedef typename XprType::StorageIndex StorageIndex;
+public:
+
+ class InnerIterator
+ {
+ enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit };
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)
+ : m_lhsIter(aEval.m_lhsImpl,outer), m_rhsEval(aEval.m_rhsImpl), m_functor(aEval.m_functor), m_value(0), m_id(-1), m_innerSize(aEval.m_expr.lhs().innerSize())
+ {
+ this->operator++();
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ {
+ ++m_id;
+ if(m_id<m_innerSize)
+ {
+ Scalar rhsVal = m_rhsEval.coeff(IsRowMajor?m_lhsIter.outer():m_id,
+ IsRowMajor?m_id:m_lhsIter.outer());
+ if(m_lhsIter && m_lhsIter.index()==m_id)
+ {
+ m_value = m_functor(m_lhsIter.value(), rhsVal);
+ ++m_lhsIter;
+ }
+ else
+ m_value = m_functor(Scalar(0),rhsVal);
+ }
+
+ return *this;
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const { eigen_internal_assert(m_id<m_innerSize); return m_value; }
+
+ EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; }
+ EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); }
+ EIGEN_STRONG_INLINE Index row() const { return IsRowMajor ? m_lhsIter.outer() : m_id; }
+ EIGEN_STRONG_INLINE Index col() const { return IsRowMajor ? m_id : m_lhsIter.outer(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_id<m_innerSize; }
+
+ protected:
+ LhsIterator m_lhsIter;
+ const evaluator<Rhs> &m_rhsEval;
+ const BinaryOp& m_functor;
+ Scalar m_value;
+ StorageIndex m_id;
+ StorageIndex m_innerSize;
+ };
+
+
+ enum {
+ CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
+ Flags = XprType::Flags
+ };
+
+ explicit binary_evaluator(const XprType& xpr)
+ : m_functor(xpr.functor()),
+ m_lhsImpl(xpr.lhs()),
+ m_rhsImpl(xpr.rhs()),
+ m_expr(xpr)
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+ inline Index nonZerosEstimate() const {
+ return m_expr.size();
+ }
+
+protected:
+ const BinaryOp m_functor;
+ evaluator<Lhs> m_lhsImpl;
+ evaluator<Rhs> m_rhsImpl;
+ const XprType &m_expr;
+};
+
+template<typename T,
+ typename LhsKind = typename evaluator_traits<typename T::Lhs>::Kind,
+ typename RhsKind = typename evaluator_traits<typename T::Rhs>::Kind,
+ typename LhsScalar = typename traits<typename T::Lhs>::Scalar,
+ typename RhsScalar = typename traits<typename T::Rhs>::Scalar> struct sparse_conjunction_evaluator;
+
+// "sparse .* sparse"
+template<typename T1, typename T2, typename Lhs, typename Rhs>
+struct binary_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs>, IteratorBased, IteratorBased>
+ : sparse_conjunction_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> >
+{
+ typedef CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> XprType;
+ typedef sparse_conjunction_evaluator<XprType> Base;
+ explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
+};
+// "dense .* sparse"
+template<typename T1, typename T2, typename Lhs, typename Rhs>
+struct binary_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs>, IndexBased, IteratorBased>
+ : sparse_conjunction_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> >
+{
+ typedef CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> XprType;
+ typedef sparse_conjunction_evaluator<XprType> Base;
+ explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
+};
+// "sparse .* dense"
+template<typename T1, typename T2, typename Lhs, typename Rhs>
+struct binary_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs>, IteratorBased, IndexBased>
+ : sparse_conjunction_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> >
+{
+ typedef CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> XprType;
+ typedef sparse_conjunction_evaluator<XprType> Base;
+ explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
+};
+
+// "sparse ./ dense"
+template<typename T1, typename T2, typename Lhs, typename Rhs>
+struct binary_evaluator<CwiseBinaryOp<scalar_quotient_op<T1,T2>, Lhs, Rhs>, IteratorBased, IndexBased>
+ : sparse_conjunction_evaluator<CwiseBinaryOp<scalar_quotient_op<T1,T2>, Lhs, Rhs> >
+{
+ typedef CwiseBinaryOp<scalar_quotient_op<T1,T2>, Lhs, Rhs> XprType;
+ typedef sparse_conjunction_evaluator<XprType> Base;
+ explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
+};
+
+// "sparse && sparse"
+template<typename Lhs, typename Rhs>
+struct binary_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs>, IteratorBased, IteratorBased>
+ : sparse_conjunction_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> >
+{
+ typedef CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> XprType;
+ typedef sparse_conjunction_evaluator<XprType> Base;
+ explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
+};
+// "dense && sparse"
+template<typename Lhs, typename Rhs>
+struct binary_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs>, IndexBased, IteratorBased>
+ : sparse_conjunction_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> >
+{
+ typedef CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> XprType;
+ typedef sparse_conjunction_evaluator<XprType> Base;
+ explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
+};
+// "sparse && dense"
+template<typename Lhs, typename Rhs>
+struct binary_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs>, IteratorBased, IndexBased>
+ : sparse_conjunction_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> >
+{
+ typedef CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> XprType;
+ typedef sparse_conjunction_evaluator<XprType> Base;
+ explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
+};
+
+// "sparse ^ sparse"
+template<typename XprType>
+struct sparse_conjunction_evaluator<XprType, IteratorBased, IteratorBased>
+ : evaluator_base<XprType>
+{
+protected:
+ typedef typename XprType::Functor BinaryOp;
+ typedef typename XprType::Lhs LhsArg;
+ typedef typename XprType::Rhs RhsArg;
+ typedef typename evaluator<LhsArg>::InnerIterator LhsIterator;
+ typedef typename evaluator<RhsArg>::InnerIterator RhsIterator;
+ typedef typename XprType::StorageIndex StorageIndex;
+ typedef typename traits<XprType>::Scalar Scalar;
+public:
+
+ class InnerIterator
+ {
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const sparse_conjunction_evaluator& aEval, Index outer)
+ : m_lhsIter(aEval.m_lhsImpl,outer), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor)
{
while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
{
@@ -180,7 +426,7 @@ class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs,
}
}
- EIGEN_STRONG_INLINE Derived& operator++()
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
{
++m_lhsIter;
++m_rhsIter;
@@ -191,12 +437,13 @@ class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs,
else
++m_rhsIter;
}
- return *static_cast<Derived*>(this);
+ return *this;
}
-
+
EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); }
- EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); }
+ EIGEN_STRONG_INLINE StorageIndex index() const { return m_lhsIter.index(); }
+ EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); }
EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
@@ -205,91 +452,182 @@ class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs,
protected:
LhsIterator m_lhsIter;
RhsIterator m_rhsIter;
- const BinaryFunc& m_functor;
+ const BinaryOp& m_functor;
+ };
+
+
+ enum {
+ CoeffReadCost = evaluator<LhsArg>::CoeffReadCost + evaluator<RhsArg>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
+ Flags = XprType::Flags
+ };
+
+ explicit sparse_conjunction_evaluator(const XprType& xpr)
+ : m_functor(xpr.functor()),
+ m_lhsImpl(xpr.lhs()),
+ m_rhsImpl(xpr.rhs())
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+ inline Index nonZerosEstimate() const {
+ return (std::min)(m_lhsImpl.nonZerosEstimate(), m_rhsImpl.nonZerosEstimate());
+ }
+
+protected:
+ const BinaryOp m_functor;
+ evaluator<LhsArg> m_lhsImpl;
+ evaluator<RhsArg> m_rhsImpl;
};
-// sparse - dense (product)
-template<typename T, typename Lhs, typename Rhs, typename Derived>
-class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs, Rhs, Derived, Sparse, Dense>
+// "dense ^ sparse"
+template<typename XprType>
+struct sparse_conjunction_evaluator<XprType, IndexBased, IteratorBased>
+ : evaluator_base<XprType>
{
- typedef scalar_product_op<T> BinaryFunc;
- typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
- typedef typename CwiseBinaryXpr::Scalar Scalar;
- typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
- typedef typename traits<CwiseBinaryXpr>::RhsNested RhsNested;
- typedef typename _LhsNested::InnerIterator LhsIterator;
- typedef typename Lhs::Index Index;
- enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit };
- public:
+protected:
+ typedef typename XprType::Functor BinaryOp;
+ typedef typename XprType::Lhs LhsArg;
+ typedef typename XprType::Rhs RhsArg;
+ typedef evaluator<LhsArg> LhsEvaluator;
+ typedef typename evaluator<RhsArg>::InnerIterator RhsIterator;
+ typedef typename XprType::StorageIndex StorageIndex;
+ typedef typename traits<XprType>::Scalar Scalar;
+public:
+
+ class InnerIterator
+ {
+ enum { IsRowMajor = (int(RhsArg::Flags)&RowMajorBit)==RowMajorBit };
- EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
- : m_rhs(xpr.rhs()), m_lhsIter(xpr.lhs(),outer), m_functor(xpr.functor()), m_outer(outer)
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const sparse_conjunction_evaluator& aEval, Index outer)
+ : m_lhsEval(aEval.m_lhsImpl), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor), m_outer(outer)
{}
- EIGEN_STRONG_INLINE Derived& operator++()
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
{
- ++m_lhsIter;
- return *static_cast<Derived*>(this);
+ ++m_rhsIter;
+ return *this;
}
EIGEN_STRONG_INLINE Scalar value() const
- { return m_functor(m_lhsIter.value(),
- m_rhs.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); }
+ { return m_functor(m_lhsEval.coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); }
- EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); }
- EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
- EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
-
- EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; }
+ EIGEN_STRONG_INLINE StorageIndex index() const { return m_rhsIter.index(); }
+ EIGEN_STRONG_INLINE Index outer() const { return m_rhsIter.outer(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_rhsIter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); }
+ EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; }
+
protected:
- RhsNested m_rhs;
- LhsIterator m_lhsIter;
- const BinaryFunc m_functor;
+ const LhsEvaluator &m_lhsEval;
+ RhsIterator m_rhsIter;
+ const BinaryOp& m_functor;
const Index m_outer;
+ };
+
+
+ enum {
+ CoeffReadCost = evaluator<LhsArg>::CoeffReadCost + evaluator<RhsArg>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
+ Flags = XprType::Flags
+ };
+
+ explicit sparse_conjunction_evaluator(const XprType& xpr)
+ : m_functor(xpr.functor()),
+ m_lhsImpl(xpr.lhs()),
+ m_rhsImpl(xpr.rhs())
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+ inline Index nonZerosEstimate() const {
+ return m_rhsImpl.nonZerosEstimate();
+ }
+
+protected:
+ const BinaryOp m_functor;
+ evaluator<LhsArg> m_lhsImpl;
+ evaluator<RhsArg> m_rhsImpl;
};
-// sparse - dense (product)
-template<typename T, typename Lhs, typename Rhs, typename Derived>
-class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs, Rhs, Derived, Dense, Sparse>
+// "sparse ^ dense"
+template<typename XprType>
+struct sparse_conjunction_evaluator<XprType, IteratorBased, IndexBased>
+ : evaluator_base<XprType>
{
- typedef scalar_product_op<T> BinaryFunc;
- typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
- typedef typename CwiseBinaryXpr::Scalar Scalar;
- typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
- typedef typename _RhsNested::InnerIterator RhsIterator;
- typedef typename Lhs::Index Index;
+protected:
+ typedef typename XprType::Functor BinaryOp;
+ typedef typename XprType::Lhs LhsArg;
+ typedef typename XprType::Rhs RhsArg;
+ typedef typename evaluator<LhsArg>::InnerIterator LhsIterator;
+ typedef evaluator<RhsArg> RhsEvaluator;
+ typedef typename XprType::StorageIndex StorageIndex;
+ typedef typename traits<XprType>::Scalar Scalar;
+public:
+
+ class InnerIterator
+ {
+ enum { IsRowMajor = (int(LhsArg::Flags)&RowMajorBit)==RowMajorBit };
- enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit };
public:
-
- EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
- : m_xpr(xpr), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()), m_outer(outer)
+
+ EIGEN_STRONG_INLINE InnerIterator(const sparse_conjunction_evaluator& aEval, Index outer)
+ : m_lhsIter(aEval.m_lhsImpl,outer), m_rhsEval(aEval.m_rhsImpl), m_functor(aEval.m_functor), m_outer(outer)
{}
- EIGEN_STRONG_INLINE Derived& operator++()
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
{
- ++m_rhsIter;
- return *static_cast<Derived*>(this);
+ ++m_lhsIter;
+ return *this;
}
EIGEN_STRONG_INLINE Scalar value() const
- { return m_functor(m_xpr.lhs().coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); }
-
- EIGEN_STRONG_INLINE Index index() const { return m_rhsIter.index(); }
- EIGEN_STRONG_INLINE Index row() const { return m_rhsIter.row(); }
- EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); }
+ { return m_functor(m_lhsIter.value(),
+ m_rhsEval.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); }
- EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; }
+ EIGEN_STRONG_INLINE StorageIndex index() const { return m_lhsIter.index(); }
+ EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
+ EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; }
+
protected:
- const CwiseBinaryXpr& m_xpr;
- RhsIterator m_rhsIter;
- const BinaryFunc& m_functor;
+ LhsIterator m_lhsIter;
+ const evaluator<RhsArg> &m_rhsEval;
+ const BinaryOp& m_functor;
const Index m_outer;
+ };
+
+
+ enum {
+ CoeffReadCost = evaluator<LhsArg>::CoeffReadCost + evaluator<RhsArg>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
+ Flags = XprType::Flags
+ };
+
+ explicit sparse_conjunction_evaluator(const XprType& xpr)
+ : m_functor(xpr.functor()),
+ m_lhsImpl(xpr.lhs()),
+ m_rhsImpl(xpr.rhs())
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+ inline Index nonZerosEstimate() const {
+ return m_lhsImpl.nonZerosEstimate();
+ }
+
+protected:
+ const BinaryOp m_functor;
+ evaluator<LhsArg> m_lhsImpl;
+ evaluator<RhsArg> m_rhsImpl;
};
-} // end namespace internal
+}
/***************************************************************************
* Implementation of SparseMatrixBase and SparseCwise functions/operators
@@ -297,6 +635,22 @@ class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs,
template<typename Derived>
template<typename OtherDerived>
+Derived& SparseMatrixBase<Derived>::operator+=(const EigenBase<OtherDerived> &other)
+{
+ call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());
+ return derived();
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+Derived& SparseMatrixBase<Derived>::operator-=(const EigenBase<OtherDerived> &other)
+{
+ call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());
+ return derived();
+}
+
+template<typename Derived>
+template<typename OtherDerived>
EIGEN_STRONG_INLINE Derived &
SparseMatrixBase<Derived>::operator-=(const SparseMatrixBase<OtherDerived> &other)
{
@@ -313,12 +667,56 @@ SparseMatrixBase<Derived>::operator+=(const SparseMatrixBase<OtherDerived>& othe
template<typename Derived>
template<typename OtherDerived>
+Derived& SparseMatrixBase<Derived>::operator+=(const DiagonalBase<OtherDerived>& other)
+{
+ call_assignment_no_alias(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());
+ return derived();
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+Derived& SparseMatrixBase<Derived>::operator-=(const DiagonalBase<OtherDerived>& other)
+{
+ call_assignment_no_alias(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());
+ return derived();
+}
+
+template<typename Derived>
+template<typename OtherDerived>
EIGEN_STRONG_INLINE const typename SparseMatrixBase<Derived>::template CwiseProductDenseReturnType<OtherDerived>::Type
SparseMatrixBase<Derived>::cwiseProduct(const MatrixBase<OtherDerived> &other) const
{
return typename CwiseProductDenseReturnType<OtherDerived>::Type(derived(), other.derived());
}
+template<typename DenseDerived, typename SparseDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>
+operator+(const MatrixBase<DenseDerived> &a, const SparseMatrixBase<SparseDerived> &b)
+{
+ return CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived());
+}
+
+template<typename SparseDerived, typename DenseDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>
+operator+(const SparseMatrixBase<SparseDerived> &a, const MatrixBase<DenseDerived> &b)
+{
+ return CwiseBinaryOp<internal::scalar_sum_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived());
+}
+
+template<typename DenseDerived, typename SparseDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>
+operator-(const MatrixBase<DenseDerived> &a, const SparseMatrixBase<SparseDerived> &b)
+{
+ return CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived());
+}
+
+template<typename SparseDerived, typename DenseDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>
+operator-(const SparseMatrixBase<SparseDerived> &a, const MatrixBase<DenseDerived> &b)
+{
+ return CwiseBinaryOp<internal::scalar_difference_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived());
+}
+
} // end namespace Eigen
#endif // EIGEN_SPARSE_CWISE_BINARY_OP_H
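The four operators added at the end of this file are what make mixed dense/sparse addition and subtraction compile as ordinary expressions. A minimal usage sketch, assuming Eigen 3.3-era headers (sizes and values are illustrative):

  #include <Eigen/Dense>
  #include <Eigen/Sparse>
  #include <iostream>

  int main()
  {
    Eigen::MatrixXd d = Eigen::MatrixXd::Ones(3, 3);   // dense operand
    Eigen::SparseMatrix<double> s(3, 3);               // sparse operand
    s.insert(0, 0) = 2.0;                              // one stored nonzero
    s.makeCompressed();

    // Both expressions resolve to the CwiseBinaryOp overloads defined above;
    // evaluation reads the dense side by coefficient and the sparse side by iterator.
    Eigen::MatrixXd sum  = d + s;                      // sum(0,0)  == 3
    Eigen::MatrixXd diff = s - d;                      // diff(0,0) == 1, elsewhere -1
    std::cout << sum(0, 0) << ' ' << diff(0, 0) << '\n';
  }

Because the result of such a mixed operation is dense in general, it is assigned to a plain MatrixXd rather than to a SparseMatrix.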
diff --git a/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h b/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
index 5a50c78..ea79737 100644
--- a/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
+++ b/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -12,138 +12,121 @@
namespace Eigen {
-template<typename UnaryOp, typename MatrixType>
-class CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>
- : public SparseMatrixBase<CwiseUnaryOp<UnaryOp, MatrixType> >
+namespace internal {
+
+template<typename UnaryOp, typename ArgType>
+struct unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>
+ : public evaluator_base<CwiseUnaryOp<UnaryOp,ArgType> >
{
public:
+ typedef CwiseUnaryOp<UnaryOp, ArgType> XprType;
class InnerIterator;
- class ReverseInnerIterator;
-
- typedef CwiseUnaryOp<UnaryOp, MatrixType> Derived;
- EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
+
+ enum {
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost,
+ Flags = XprType::Flags
+ };
+
+ explicit unary_evaluator(const XprType& op) : m_functor(op.functor()), m_argImpl(op.nestedExpression())
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+ inline Index nonZerosEstimate() const {
+ return m_argImpl.nonZerosEstimate();
+ }
protected:
- typedef typename internal::traits<Derived>::_XprTypeNested _MatrixTypeNested;
- typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator;
- typedef typename _MatrixTypeNested::ReverseInnerIterator MatrixTypeReverseIterator;
+ typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
+
+ const UnaryOp m_functor;
+ evaluator<ArgType> m_argImpl;
};
-template<typename UnaryOp, typename MatrixType>
-class CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::InnerIterator
- : public CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::MatrixTypeIterator
+template<typename UnaryOp, typename ArgType>
+class unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::InnerIterator
+ : public unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalIterator
{
- typedef typename CwiseUnaryOpImpl::Scalar Scalar;
- typedef typename CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::MatrixTypeIterator Base;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalIterator Base;
public:
- EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryOpImpl& unaryOp, typename CwiseUnaryOpImpl::Index outer)
- : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor())
+ EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer)
+ : Base(unaryOp.m_argImpl,outer), m_functor(unaryOp.m_functor)
{}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{ Base::operator++(); return *this; }
- EIGEN_STRONG_INLINE typename CwiseUnaryOpImpl::Scalar value() const { return m_functor(Base::value()); }
-
- protected:
- const UnaryOp m_functor;
- private:
- typename CwiseUnaryOpImpl::Scalar& valueRef();
-};
-
-template<typename UnaryOp, typename MatrixType>
-class CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::ReverseInnerIterator
- : public CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::MatrixTypeReverseIterator
-{
- typedef typename CwiseUnaryOpImpl::Scalar Scalar;
- typedef typename CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::MatrixTypeReverseIterator Base;
- public:
-
- EIGEN_STRONG_INLINE ReverseInnerIterator(const CwiseUnaryOpImpl& unaryOp, typename CwiseUnaryOpImpl::Index outer)
- : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor())
- {}
-
- EIGEN_STRONG_INLINE ReverseInnerIterator& operator--()
- { Base::operator--(); return *this; }
-
- EIGEN_STRONG_INLINE typename CwiseUnaryOpImpl::Scalar value() const { return m_functor(Base::value()); }
+ EIGEN_STRONG_INLINE Scalar value() const { return m_functor(Base::value()); }
protected:
const UnaryOp m_functor;
private:
- typename CwiseUnaryOpImpl::Scalar& valueRef();
+ Scalar& valueRef();
};
-template<typename ViewOp, typename MatrixType>
-class CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>
- : public SparseMatrixBase<CwiseUnaryView<ViewOp, MatrixType> >
+template<typename ViewOp, typename ArgType>
+struct unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>
+ : public evaluator_base<CwiseUnaryView<ViewOp,ArgType> >
{
public:
+ typedef CwiseUnaryView<ViewOp, ArgType> XprType;
class InnerIterator;
- class ReverseInnerIterator;
-
- typedef CwiseUnaryView<ViewOp, MatrixType> Derived;
- EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
+
+ enum {
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<ViewOp>::Cost,
+ Flags = XprType::Flags
+ };
+
+ explicit unary_evaluator(const XprType& op) : m_functor(op.functor()), m_argImpl(op.nestedExpression())
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<ViewOp>::Cost);
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
protected:
- typedef typename internal::traits<Derived>::_MatrixTypeNested _MatrixTypeNested;
- typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator;
- typedef typename _MatrixTypeNested::ReverseInnerIterator MatrixTypeReverseIterator;
+ typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
+
+ const ViewOp m_functor;
+ evaluator<ArgType> m_argImpl;
};
-template<typename ViewOp, typename MatrixType>
-class CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::InnerIterator
- : public CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::MatrixTypeIterator
+template<typename ViewOp, typename ArgType>
+class unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::InnerIterator
+ : public unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalIterator
{
- typedef typename CwiseUnaryViewImpl::Scalar Scalar;
- typedef typename CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::MatrixTypeIterator Base;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalIterator Base;
public:
- EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryViewImpl& unaryOp, typename CwiseUnaryViewImpl::Index outer)
- : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor())
+ EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer)
+ : Base(unaryOp.m_argImpl,outer), m_functor(unaryOp.m_functor)
{}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{ Base::operator++(); return *this; }
- EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar value() const { return m_functor(Base::value()); }
- EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar& valueRef() { return m_functor(Base::valueRef()); }
+ EIGEN_STRONG_INLINE Scalar value() const { return m_functor(Base::value()); }
+ EIGEN_STRONG_INLINE Scalar& valueRef() { return m_functor(Base::valueRef()); }
protected:
const ViewOp m_functor;
};
-template<typename ViewOp, typename MatrixType>
-class CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::ReverseInnerIterator
- : public CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::MatrixTypeReverseIterator
-{
- typedef typename CwiseUnaryViewImpl::Scalar Scalar;
- typedef typename CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::MatrixTypeReverseIterator Base;
- public:
-
- EIGEN_STRONG_INLINE ReverseInnerIterator(const CwiseUnaryViewImpl& unaryOp, typename CwiseUnaryViewImpl::Index outer)
- : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor())
- {}
-
- EIGEN_STRONG_INLINE ReverseInnerIterator& operator--()
- { Base::operator--(); return *this; }
-
- EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar value() const { return m_functor(Base::value()); }
- EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar& valueRef() { return m_functor(Base::valueRef()); }
-
- protected:
- const ViewOp m_functor;
-};
+} // end namespace internal
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
SparseMatrixBase<Derived>::operator*=(const Scalar& other)
{
+ typedef typename internal::evaluator<Derived>::InnerIterator EvalIterator;
+ internal::evaluator<Derived> thisEval(derived());
for (Index j=0; j<outerSize(); ++j)
- for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+ for (EvalIterator i(thisEval,j); i; ++i)
i.valueRef() *= other;
return derived();
}
@@ -152,8 +135,10 @@ template<typename Derived>
EIGEN_STRONG_INLINE Derived&
SparseMatrixBase<Derived>::operator/=(const Scalar& other)
{
+ typedef typename internal::evaluator<Derived>::InnerIterator EvalIterator;
+ internal::evaluator<Derived> thisEval(derived());
for (Index j=0; j<outerSize(); ++j)
- for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+ for (EvalIterator i(thisEval,j); i; ++i)
i.valueRef() /= other;
return derived();
}
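After this change, operator*= and operator/= traverse the matrix through an internal::evaluator's InnerIterator rather than the expression's own iterator, so only explicitly stored coefficients are touched. A small sketch of the observable behavior (values are illustrative):

  #include <Eigen/Sparse>
  #include <cassert>

  int main()
  {
    Eigen::SparseMatrix<double> m(2, 2);
    m.insert(0, 0) = 4.0;
    m.insert(1, 1) = 8.0;
    m.makeCompressed();

    m *= 0.5;  // scales the two stored nonzeros in place
    m /= 2.0;  // likewise; implicit zeros are never visited

    assert(m.coeff(0, 0) == 1.0 && m.coeff(1, 1) == 2.0);
    assert(m.nonZeros() == 2);  // sparsity pattern is unchanged
  }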
diff --git a/eigen/Eigen/src/SparseCore/SparseDenseProduct.h b/eigen/Eigen/src/SparseCore/SparseDenseProduct.h
index ccb6ae7..0547db5 100644
--- a/eigen/Eigen/src/SparseCore/SparseDenseProduct.h
+++ b/eigen/Eigen/src/SparseCore/SparseDenseProduct.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -12,196 +12,93 @@
namespace Eigen {
-template<typename Lhs, typename Rhs, int InnerSize> struct SparseDenseProductReturnType
-{
- typedef SparseTimeDenseProduct<Lhs,Rhs> Type;
-};
-
-template<typename Lhs, typename Rhs> struct SparseDenseProductReturnType<Lhs,Rhs,1>
-{
- typedef typename internal::conditional<
- Lhs::IsRowMajor,
- SparseDenseOuterProduct<Rhs,Lhs,true>,
- SparseDenseOuterProduct<Lhs,Rhs,false> >::type Type;
-};
-
-template<typename Lhs, typename Rhs, int InnerSize> struct DenseSparseProductReturnType
-{
- typedef DenseTimeSparseProduct<Lhs,Rhs> Type;
-};
-
-template<typename Lhs, typename Rhs> struct DenseSparseProductReturnType<Lhs,Rhs,1>
-{
- typedef typename internal::conditional<
- Rhs::IsRowMajor,
- SparseDenseOuterProduct<Rhs,Lhs,true>,
- SparseDenseOuterProduct<Lhs,Rhs,false> >::type Type;
-};
-
namespace internal {
-template<typename Lhs, typename Rhs, bool Tr>
-struct traits<SparseDenseOuterProduct<Lhs,Rhs,Tr> >
-{
- typedef Sparse StorageKind;
- typedef typename scalar_product_traits<typename traits<Lhs>::Scalar,
- typename traits<Rhs>::Scalar>::ReturnType Scalar;
- typedef typename Lhs::Index Index;
- typedef typename Lhs::Nested LhsNested;
- typedef typename Rhs::Nested RhsNested;
- typedef typename remove_all<LhsNested>::type _LhsNested;
- typedef typename remove_all<RhsNested>::type _RhsNested;
-
- enum {
- LhsCoeffReadCost = traits<_LhsNested>::CoeffReadCost,
- RhsCoeffReadCost = traits<_RhsNested>::CoeffReadCost,
-
- RowsAtCompileTime = Tr ? int(traits<Rhs>::RowsAtCompileTime) : int(traits<Lhs>::RowsAtCompileTime),
- ColsAtCompileTime = Tr ? int(traits<Lhs>::ColsAtCompileTime) : int(traits<Rhs>::ColsAtCompileTime),
- MaxRowsAtCompileTime = Tr ? int(traits<Rhs>::MaxRowsAtCompileTime) : int(traits<Lhs>::MaxRowsAtCompileTime),
- MaxColsAtCompileTime = Tr ? int(traits<Lhs>::MaxColsAtCompileTime) : int(traits<Rhs>::MaxColsAtCompileTime),
-
- Flags = Tr ? RowMajorBit : 0,
-
- CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + NumTraits<Scalar>::MulCost
- };
-};
-
-} // end namespace internal
-
-template<typename Lhs, typename Rhs, bool Tr>
-class SparseDenseOuterProduct
- : public SparseMatrixBase<SparseDenseOuterProduct<Lhs,Rhs,Tr> >
-{
- public:
-
- typedef SparseMatrixBase<SparseDenseOuterProduct> Base;
- EIGEN_DENSE_PUBLIC_INTERFACE(SparseDenseOuterProduct)
- typedef internal::traits<SparseDenseOuterProduct> Traits;
-
- private:
-
- typedef typename Traits::LhsNested LhsNested;
- typedef typename Traits::RhsNested RhsNested;
- typedef typename Traits::_LhsNested _LhsNested;
- typedef typename Traits::_RhsNested _RhsNested;
-
- public:
-
- class InnerIterator;
-
- EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Lhs& lhs, const Rhs& rhs)
- : m_lhs(lhs), m_rhs(rhs)
- {
- EIGEN_STATIC_ASSERT(!Tr,YOU_MADE_A_PROGRAMMING_MISTAKE);
- }
-
- EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Rhs& rhs, const Lhs& lhs)
- : m_lhs(lhs), m_rhs(rhs)
- {
- EIGEN_STATIC_ASSERT(Tr,YOU_MADE_A_PROGRAMMING_MISTAKE);
- }
-
- EIGEN_STRONG_INLINE Index rows() const { return Tr ? m_rhs.rows() : m_lhs.rows(); }
- EIGEN_STRONG_INLINE Index cols() const { return Tr ? m_lhs.cols() : m_rhs.cols(); }
-
- EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
- EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
-
- protected:
- LhsNested m_lhs;
- RhsNested m_rhs;
-};
-
-template<typename Lhs, typename Rhs, bool Transpose>
-class SparseDenseOuterProduct<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNested::InnerIterator
-{
- typedef typename _LhsNested::InnerIterator Base;
- typedef typename SparseDenseOuterProduct::Index Index;
- public:
- EIGEN_STRONG_INLINE InnerIterator(const SparseDenseOuterProduct& prod, Index outer)
- : Base(prod.lhs(), 0), m_outer(outer), m_factor(get(prod.rhs(), outer, typename internal::traits<Rhs>::StorageKind() ))
- { }
-
- inline Index outer() const { return m_outer; }
- inline Index row() const { return Transpose ? m_outer : Base::index(); }
- inline Index col() const { return Transpose ? Base::index() : m_outer; }
-
- inline Scalar value() const { return Base::value() * m_factor; }
-
- protected:
- static Scalar get(const _RhsNested &rhs, Index outer, Dense = Dense())
- {
- return rhs.coeff(outer);
- }
-
- static Scalar get(const _RhsNested &rhs, Index outer, Sparse = Sparse())
- {
- typename Traits::_RhsNested::InnerIterator it(rhs, outer);
- if (it && it.index()==0)
- return it.value();
-
- return Scalar(0);
- }
-
- Index m_outer;
- Scalar m_factor;
-};
-
-namespace internal {
-template<typename Lhs, typename Rhs>
-struct traits<SparseTimeDenseProduct<Lhs,Rhs> >
- : traits<ProductBase<SparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs> >
-{
- typedef Dense StorageKind;
- typedef MatrixXpr XprKind;
-};
+template <> struct product_promote_storage_type<Sparse,Dense, OuterProduct> { typedef Sparse ret; };
+template <> struct product_promote_storage_type<Dense,Sparse, OuterProduct> { typedef Sparse ret; };
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,
+ typename AlphaType,
int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor,
bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>
struct sparse_time_dense_product_impl;
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
-struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, RowMajor, true>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, true>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
- typedef typename Lhs::Index Index;
- typedef typename Lhs::InnerIterator LhsInnerIterator;
+ typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
+ typedef evaluator<Lhs> LhsEval;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
+ LhsEval lhsEval(lhs);
+
+ Index n = lhs.outerSize();
+#ifdef EIGEN_HAS_OPENMP
+ Eigen::initParallel();
+ Index threads = Eigen::nbThreads();
+#endif
+
for(Index c=0; c<rhs.cols(); ++c)
{
- Index n = lhs.outerSize();
- for(Index j=0; j<n; ++j)
+#ifdef EIGEN_HAS_OPENMP
+ // This 20000 threshold has been found experimentally on 2D and 3D Poisson problems.
+ // It roughly represents the minimal amount of work needed for parallelization to pay off.
+ if(threads>1 && lhsEval.nonZerosEstimate() > 20000)
+ {
+ #pragma omp parallel for schedule(dynamic,(n+threads*4-1)/(threads*4)) num_threads(threads)
+ for(Index i=0; i<n; ++i)
+ processRow(lhsEval,rhs,res,alpha,i,c);
+ }
+ else
+#endif
{
- typename Res::Scalar tmp(0);
- for(LhsInnerIterator it(lhs,j); it ;++it)
- tmp += it.value() * rhs.coeff(it.index(),c);
- res.coeffRef(j,c) += alpha * tmp;
+ for(Index i=0; i<n; ++i)
+ processRow(lhsEval,rhs,res,alpha,i,c);
}
}
}
+
+ static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha, Index i, Index col)
+ {
+ typename Res::Scalar tmp(0);
+ for(LhsInnerIterator it(lhsEval,i); it ;++it)
+ tmp += it.value() * rhs.coeff(it.index(),col);
+ res.coeffRef(i,col) += alpha * tmp;
+ }
+
};
-template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
-struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, ColMajor, true>
+// FIXME: what is the purpose of the following specialization? Is it for the BlockedSparse format?
+// -> let's disable it for now as it is conflicting with generic scalar*matrix and matrix*scalar operators
+// template<typename T1, typename T2/*, int _Options, typename _StrideType*/>
+// struct ScalarBinaryOpTraits<T1, Ref<T2/*, _Options, _StrideType*/> >
+// {
+// enum {
+// Defined = 1
+// };
+// typedef typename CwiseUnaryOp<scalar_multiple2_op<T1, typename T2::Scalar>, T2>::PlainObject ReturnType;
+// };
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType, ColMajor, true>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
- typedef typename Lhs::InnerIterator LhsInnerIterator;
- typedef typename Lhs::Index Index;
- static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
+ typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
+ static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
+ evaluator<Lhs> lhsEval(lhs);
for(Index c=0; c<rhs.cols(); ++c)
{
for(Index j=0; j<lhs.outerSize(); ++j)
{
- typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
- for(LhsInnerIterator it(lhs,j); it ;++it)
+// typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
+ typename ScalarBinaryOpTraits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c));
+ for(LhsInnerIterator it(lhsEval,j); it ;++it)
res.coeffRef(it.index(),c) += it.value() * rhs_j;
}
}
@@ -209,38 +106,38 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, C
};
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
-struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, RowMajor, false>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, false>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
- typedef typename Lhs::InnerIterator LhsInnerIterator;
- typedef typename Lhs::Index Index;
+ typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
+ evaluator<Lhs> lhsEval(lhs);
for(Index j=0; j<lhs.outerSize(); ++j)
{
typename Res::RowXpr res_j(res.row(j));
- for(LhsInnerIterator it(lhs,j); it ;++it)
+ for(LhsInnerIterator it(lhsEval,j); it ;++it)
res_j += (alpha*it.value()) * rhs.row(it.index());
}
}
};
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
-struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, ColMajor, false>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, ColMajor, false>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
- typedef typename Lhs::InnerIterator LhsInnerIterator;
- typedef typename Lhs::Index Index;
+ typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
+ evaluator<Lhs> lhsEval(lhs);
for(Index j=0; j<lhs.outerSize(); ++j)
{
typename Rhs::ConstRowXpr rhs_j(rhs.row(j));
- for(LhsInnerIterator it(lhs,j); it ;++it)
+ for(LhsInnerIterator it(lhsEval,j); it ;++it)
res.row(it.index()) += (alpha*it.value()) * rhs_j;
}
}
@@ -249,63 +146,175 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, C
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType>
inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
- sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType>::run(lhs, rhs, res, alpha);
+ sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType>::run(lhs, rhs, res, alpha);
}
} // end namespace internal
-template<typename Lhs, typename Rhs>
-class SparseTimeDenseProduct
- : public ProductBase<SparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs>
-{
- public:
- EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseTimeDenseProduct)
-
- SparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
- {}
-
- template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
- {
- internal::sparse_time_dense_product(m_lhs, m_rhs, dest, alpha);
- }
+namespace internal {
- private:
- SparseTimeDenseProduct& operator=(const SparseTimeDenseProduct&);
+template<typename Lhs, typename Rhs, int ProductType>
+struct generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
+ : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,SparseShape,DenseShape,ProductType> >
+{
+ typedef typename Product<Lhs,Rhs>::Scalar Scalar;
+
+ template<typename Dest>
+ static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
+ {
+ typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? 1 : Rhs::ColsAtCompileTime>::type LhsNested;
+ typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==0) ? 1 : Dynamic>::type RhsNested;
+ LhsNested lhsNested(lhs);
+ RhsNested rhsNested(rhs);
+ internal::sparse_time_dense_product(lhsNested, rhsNested, dst, alpha);
+ }
};
+template<typename Lhs, typename Rhs, int ProductType>
+struct generic_product_impl<Lhs, Rhs, SparseTriangularShape, DenseShape, ProductType>
+ : generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
+{};
-// dense = dense * sparse
-namespace internal {
-template<typename Lhs, typename Rhs>
-struct traits<DenseTimeSparseProduct<Lhs,Rhs> >
- : traits<ProductBase<DenseTimeSparseProduct<Lhs,Rhs>, Lhs, Rhs> >
+template<typename Lhs, typename Rhs, int ProductType>
+struct generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
+ : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,SparseShape,ProductType> >
{
- typedef Dense StorageKind;
+ typedef typename Product<Lhs,Rhs>::Scalar Scalar;
+
+ template<typename Dst>
+ static void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
+ {
+ typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? Dynamic : 1>::type LhsNested;
+ typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==RowMajorBit) ? 1 : Lhs::RowsAtCompileTime>::type RhsNested;
+ LhsNested lhsNested(lhs);
+ RhsNested rhsNested(rhs);
+
+ // transpose everything
+ Transpose<Dst> dstT(dst);
+ internal::sparse_time_dense_product(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
+ }
};
-} // end namespace internal
-template<typename Lhs, typename Rhs>
-class DenseTimeSparseProduct
- : public ProductBase<DenseTimeSparseProduct<Lhs,Rhs>, Lhs, Rhs>
+template<typename Lhs, typename Rhs, int ProductType>
+struct generic_product_impl<Lhs, Rhs, DenseShape, SparseTriangularShape, ProductType>
+ : generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
+{};
+
+template<typename LhsT, typename RhsT, bool NeedToTranspose>
+struct sparse_dense_outer_product_evaluator
{
+protected:
+ typedef typename conditional<NeedToTranspose,RhsT,LhsT>::type Lhs1;
+ typedef typename conditional<NeedToTranspose,LhsT,RhsT>::type ActualRhs;
+ typedef Product<LhsT,RhsT,DefaultProduct> ProdXprType;
+
+ // if the actual left-hand side is a dense vector,
+ // then build a sparse-view so that we can seamlessly iterate over it.
+ typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
+ Lhs1, SparseView<Lhs1> >::type ActualLhs;
+ typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
+ Lhs1 const&, SparseView<Lhs1> >::type LhsArg;
+
+ typedef evaluator<ActualLhs> LhsEval;
+ typedef evaluator<ActualRhs> RhsEval;
+ typedef typename evaluator<ActualLhs>::InnerIterator LhsIterator;
+ typedef typename ProdXprType::Scalar Scalar;
+
+public:
+ enum {
+ Flags = NeedToTranspose ? RowMajorBit : 0,
+ CoeffReadCost = HugeCost
+ };
+
+ class InnerIterator : public LhsIterator
+ {
public:
- EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseProduct)
-
- DenseTimeSparseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ InnerIterator(const sparse_dense_outer_product_evaluator &xprEval, Index outer)
+ : LhsIterator(xprEval.m_lhsXprImpl, 0),
+ m_outer(outer),
+ m_empty(false),
+ m_factor(get(xprEval.m_rhsXprImpl, outer, typename internal::traits<ActualRhs>::StorageKind() ))
{}
+
+ EIGEN_STRONG_INLINE Index outer() const { return m_outer; }
+ EIGEN_STRONG_INLINE Index row() const { return NeedToTranspose ? m_outer : LhsIterator::index(); }
+ EIGEN_STRONG_INLINE Index col() const { return NeedToTranspose ? LhsIterator::index() : m_outer; }
- template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
+ EIGEN_STRONG_INLINE Scalar value() const { return LhsIterator::value() * m_factor; }
+ EIGEN_STRONG_INLINE operator bool() const { return LhsIterator::operator bool() && (!m_empty); }
+
+ protected:
+ Scalar get(const RhsEval &rhs, Index outer, Dense = Dense()) const
+ {
+ return rhs.coeff(outer);
+ }
+
+ Scalar get(const RhsEval &rhs, Index outer, Sparse = Sparse())
{
- Transpose<const _LhsNested> lhs_t(m_lhs);
- Transpose<const _RhsNested> rhs_t(m_rhs);
- Transpose<Dest> dest_t(dest);
- internal::sparse_time_dense_product(rhs_t, lhs_t, dest_t, alpha);
+ typename RhsEval::InnerIterator it(rhs, outer);
+ if (it && it.index()==0 && it.value()!=Scalar(0))
+ return it.value();
+ m_empty = true;
+ return Scalar(0);
}
+
+ Index m_outer;
+ bool m_empty;
+ Scalar m_factor;
+ };
+
+ sparse_dense_outer_product_evaluator(const Lhs1 &lhs, const ActualRhs &rhs)
+ : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+ // transpose case
+ sparse_dense_outer_product_evaluator(const ActualRhs &rhs, const Lhs1 &lhs)
+ : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+protected:
+ const LhsArg m_lhs;
+ evaluator<ActualLhs> m_lhsXprImpl;
+ evaluator<ActualRhs> m_rhsXprImpl;
+};
- private:
- DenseTimeSparseProduct& operator=(const DenseTimeSparseProduct&);
+// sparse * dense outer product
+template<typename Lhs, typename Rhs>
+struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, SparseShape, DenseShape>
+ : sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor>
+{
+ typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor> Base;
+
+ typedef Product<Lhs, Rhs> XprType;
+ typedef typename XprType::PlainObject PlainObject;
+
+ explicit product_evaluator(const XprType& xpr)
+ : Base(xpr.lhs(), xpr.rhs())
+ {}
+
};
+template<typename Lhs, typename Rhs>
+struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, DenseShape, SparseShape>
+ : sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor>
+{
+ typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor> Base;
+
+ typedef Product<Lhs, Rhs> XprType;
+ typedef typename XprType::PlainObject PlainObject;
+
+ explicit product_evaluator(const XprType& xpr)
+ : Base(xpr.lhs(), xpr.rhs())
+ {}
+
+};
+
+} // end namespace internal
+
} // end namespace Eigen
#endif // EIGEN_SPARSEDENSEPRODUCT_H
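With the evaluator-based generic_product_impl specializations above, both sparse*dense and dense*sparse products funnel into sparse_time_dense_product; the dense*sparse case is handled by transposing all three operands. A minimal sketch, assuming a build with OpenMP so that the row-major path can parallelize once nonZerosEstimate() exceeds the experimentally chosen 20000 threshold (this toy size stays on the sequential path):

  #include <Eigen/Dense>
  #include <Eigen/Sparse>

  int main()
  {
    Eigen::SparseMatrix<double, Eigen::RowMajor> A(100, 100);
    A.setIdentity();                                // 100 stored nonzeros

    Eigen::VectorXd x = Eigen::VectorXd::Constant(100, 2.0);
    Eigen::VectorXd y = A * x;                      // sparse * dense, row-major kernel
    Eigen::RowVectorXd z = x.transpose() * A;       // dense * sparse, runs as (A^T x)^T

    return (y.isApprox(x) && z.isApprox(x.transpose())) ? 0 : 1;
  }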
diff --git a/eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h b/eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h
index 1bb590e..941c03b 100644
--- a/eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h
+++ b/eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -26,171 +26,113 @@ namespace Eigen {
namespace internal {
-template<typename Lhs, typename Rhs>
-struct traits<SparseDiagonalProduct<Lhs, Rhs> >
-{
- typedef typename remove_all<Lhs>::type _Lhs;
- typedef typename remove_all<Rhs>::type _Rhs;
- typedef typename _Lhs::Scalar Scalar;
- typedef typename promote_index_type<typename traits<Lhs>::Index,
- typename traits<Rhs>::Index>::type Index;
- typedef Sparse StorageKind;
- typedef MatrixXpr XprKind;
- enum {
- RowsAtCompileTime = _Lhs::RowsAtCompileTime,
- ColsAtCompileTime = _Rhs::ColsAtCompileTime,
-
- MaxRowsAtCompileTime = _Lhs::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = _Rhs::MaxColsAtCompileTime,
-
- SparseFlags = is_diagonal<_Lhs>::ret ? int(_Rhs::Flags) : int(_Lhs::Flags),
- Flags = (SparseFlags&RowMajorBit),
- CoeffReadCost = Dynamic
- };
+enum {
+ SDP_AsScalarProduct,
+ SDP_AsCwiseProduct
};
+
+template<typename SparseXprType, typename DiagonalCoeffType, int SDP_Tag>
+struct sparse_diagonal_product_evaluator;
-enum {SDP_IsDiagonal, SDP_IsSparseRowMajor, SDP_IsSparseColMajor};
-template<typename Lhs, typename Rhs, typename SparseDiagonalProductType, int RhsMode, int LhsMode>
-class sparse_diagonal_product_inner_iterator_selector;
-
-} // end namespace internal
-
-template<typename Lhs, typename Rhs>
-class SparseDiagonalProduct
- : public SparseMatrixBase<SparseDiagonalProduct<Lhs,Rhs> >,
- internal::no_assignment_operator
+template<typename Lhs, typename Rhs, int ProductTag>
+struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, ProductTag, DiagonalShape, SparseShape>
+ : public sparse_diagonal_product_evaluator<Rhs, typename Lhs::DiagonalVectorType, Rhs::Flags&RowMajorBit?SDP_AsScalarProduct:SDP_AsCwiseProduct>
{
- typedef typename Lhs::Nested LhsNested;
- typedef typename Rhs::Nested RhsNested;
-
- typedef typename internal::remove_all<LhsNested>::type _LhsNested;
- typedef typename internal::remove_all<RhsNested>::type _RhsNested;
-
- enum {
- LhsMode = internal::is_diagonal<_LhsNested>::ret ? internal::SDP_IsDiagonal
- : (_LhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor,
- RhsMode = internal::is_diagonal<_RhsNested>::ret ? internal::SDP_IsDiagonal
- : (_RhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor
- };
-
- public:
-
- EIGEN_SPARSE_PUBLIC_INTERFACE(SparseDiagonalProduct)
-
- typedef internal::sparse_diagonal_product_inner_iterator_selector
- <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator;
-
- // We do not want ReverseInnerIterator for diagonal-sparse products,
- // but this dummy declaration is needed to make diag * sparse * diag compile.
- class ReverseInnerIterator;
-
- EIGEN_STRONG_INLINE SparseDiagonalProduct(const Lhs& lhs, const Rhs& rhs)
- : m_lhs(lhs), m_rhs(rhs)
- {
- eigen_assert(lhs.cols() == rhs.rows() && "invalid sparse matrix * diagonal matrix product");
- }
-
- EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
- EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
-
- EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
- EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
-
- protected:
- LhsNested m_lhs;
- RhsNested m_rhs;
+ typedef Product<Lhs, Rhs, DefaultProduct> XprType;
+ enum { CoeffReadCost = HugeCost, Flags = Rhs::Flags&RowMajorBit, Alignment = 0 }; // FIXME CoeffReadCost & Flags
+
+ typedef sparse_diagonal_product_evaluator<Rhs, typename Lhs::DiagonalVectorType, Rhs::Flags&RowMajorBit?SDP_AsScalarProduct:SDP_AsCwiseProduct> Base;
+ explicit product_evaluator(const XprType& xpr) : Base(xpr.rhs(), xpr.lhs().diagonal()) {}
};
-namespace internal {
-
-template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
-class sparse_diagonal_product_inner_iterator_selector
-<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseRowMajor>
- : public CwiseUnaryOp<scalar_multiple_op<typename Lhs::Scalar>,const Rhs>::InnerIterator
+template<typename Lhs, typename Rhs, int ProductTag>
+struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, ProductTag, SparseShape, DiagonalShape>
+ : public sparse_diagonal_product_evaluator<Lhs, Transpose<const typename Rhs::DiagonalVectorType>, Lhs::Flags&RowMajorBit?SDP_AsCwiseProduct:SDP_AsScalarProduct>
{
- typedef typename CwiseUnaryOp<scalar_multiple_op<typename Lhs::Scalar>,const Rhs>::InnerIterator Base;
- typedef typename Lhs::Index Index;
- public:
- inline sparse_diagonal_product_inner_iterator_selector(
- const SparseDiagonalProductType& expr, Index outer)
- : Base(expr.rhs()*(expr.lhs().diagonal().coeff(outer)), outer)
- {}
+ typedef Product<Lhs, Rhs, DefaultProduct> XprType;
+ enum { CoeffReadCost = HugeCost, Flags = Lhs::Flags&RowMajorBit, Alignment = 0 }; // FIXME CoeffReadCost & Flags
+
+ typedef sparse_diagonal_product_evaluator<Lhs, Transpose<const typename Rhs::DiagonalVectorType>, Lhs::Flags&RowMajorBit?SDP_AsCwiseProduct:SDP_AsScalarProduct> Base;
+ explicit product_evaluator(const XprType& xpr) : Base(xpr.lhs(), xpr.rhs().diagonal().transpose()) {}
};
-template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
-class sparse_diagonal_product_inner_iterator_selector
-<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseColMajor>
- : public CwiseBinaryOp<
- scalar_product_op<typename Lhs::Scalar>,
- const typename Rhs::ConstInnerVectorReturnType,
- const typename Lhs::DiagonalVectorType>::InnerIterator
+template<typename SparseXprType, typename DiagonalCoeffType>
+struct sparse_diagonal_product_evaluator<SparseXprType, DiagonalCoeffType, SDP_AsScalarProduct>
{
- typedef typename CwiseBinaryOp<
- scalar_product_op<typename Lhs::Scalar>,
- const typename Rhs::ConstInnerVectorReturnType,
- const typename Lhs::DiagonalVectorType>::InnerIterator Base;
- typedef typename Lhs::Index Index;
- Index m_outer;
+protected:
+ typedef typename evaluator<SparseXprType>::InnerIterator SparseXprInnerIterator;
+ typedef typename SparseXprType::Scalar Scalar;
+
+public:
+ class InnerIterator : public SparseXprInnerIterator
+ {
public:
- inline sparse_diagonal_product_inner_iterator_selector(
- const SparseDiagonalProductType& expr, Index outer)
- : Base(expr.rhs().innerVector(outer) .cwiseProduct(expr.lhs().diagonal()), 0), m_outer(outer)
+ InnerIterator(const sparse_diagonal_product_evaluator &xprEval, Index outer)
+ : SparseXprInnerIterator(xprEval.m_sparseXprImpl, outer),
+ m_coeff(xprEval.m_diagCoeffImpl.coeff(outer))
{}
- inline Index outer() const { return m_outer; }
- inline Index col() const { return m_outer; }
-};
+ EIGEN_STRONG_INLINE Scalar value() const { return m_coeff * SparseXprInnerIterator::value(); }
+ protected:
+ typename DiagonalCoeffType::Scalar m_coeff;
+ };
+
+ sparse_diagonal_product_evaluator(const SparseXprType &sparseXpr, const DiagonalCoeffType &diagCoeff)
+ : m_sparseXprImpl(sparseXpr), m_diagCoeffImpl(diagCoeff)
+ {}
-template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
-class sparse_diagonal_product_inner_iterator_selector
-<Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseColMajor,SDP_IsDiagonal>
- : public CwiseUnaryOp<scalar_multiple_op<typename Rhs::Scalar>,const Lhs>::InnerIterator
-{
- typedef typename CwiseUnaryOp<scalar_multiple_op<typename Rhs::Scalar>,const Lhs>::InnerIterator Base;
- typedef typename Lhs::Index Index;
- public:
- inline sparse_diagonal_product_inner_iterator_selector(
- const SparseDiagonalProductType& expr, Index outer)
- : Base(expr.lhs()*expr.rhs().diagonal().coeff(outer), outer)
- {}
+ Index nonZerosEstimate() const { return m_sparseXprImpl.nonZerosEstimate(); }
+
+protected:
+ evaluator<SparseXprType> m_sparseXprImpl;
+ evaluator<DiagonalCoeffType> m_diagCoeffImpl;
};
-template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
-class sparse_diagonal_product_inner_iterator_selector
-<Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseRowMajor,SDP_IsDiagonal>
- : public CwiseBinaryOp<
- scalar_product_op<typename Rhs::Scalar>,
- const typename Lhs::ConstInnerVectorReturnType,
- const Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator
+
+template<typename SparseXprType, typename DiagCoeffType>
+struct sparse_diagonal_product_evaluator<SparseXprType, DiagCoeffType, SDP_AsCwiseProduct>
{
- typedef typename CwiseBinaryOp<
- scalar_product_op<typename Rhs::Scalar>,
- const typename Lhs::ConstInnerVectorReturnType,
- const Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator Base;
- typedef typename Lhs::Index Index;
- Index m_outer;
+ typedef typename SparseXprType::Scalar Scalar;
+ typedef typename SparseXprType::StorageIndex StorageIndex;
+
+ typedef typename nested_eval<DiagCoeffType,SparseXprType::IsRowMajor ? SparseXprType::RowsAtCompileTime
+ : SparseXprType::ColsAtCompileTime>::type DiagCoeffNested;
+
+ class InnerIterator
+ {
+ typedef typename evaluator<SparseXprType>::InnerIterator SparseXprIter;
public:
- inline sparse_diagonal_product_inner_iterator_selector(
- const SparseDiagonalProductType& expr, Index outer)
- : Base(expr.lhs().innerVector(outer) .cwiseProduct(expr.rhs().diagonal().transpose()), 0), m_outer(outer)
+ InnerIterator(const sparse_diagonal_product_evaluator &xprEval, Index outer)
+ : m_sparseIter(xprEval.m_sparseXprEval, outer), m_diagCoeffNested(xprEval.m_diagCoeffNested)
{}
- inline Index outer() const { return m_outer; }
- inline Index row() const { return m_outer; }
+ inline Scalar value() const { return m_sparseIter.value() * m_diagCoeffNested.coeff(index()); }
+ inline StorageIndex index() const { return m_sparseIter.index(); }
+ inline Index outer() const { return m_sparseIter.outer(); }
+ inline Index col() const { return SparseXprType::IsRowMajor ? m_sparseIter.index() : m_sparseIter.outer(); }
+ inline Index row() const { return SparseXprType::IsRowMajor ? m_sparseIter.outer() : m_sparseIter.index(); }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++() { ++m_sparseIter; return *this; }
+ inline operator bool() const { return m_sparseIter; }
+
+ protected:
+ SparseXprIter m_sparseIter;
+ DiagCoeffNested m_diagCoeffNested;
+ };
+
+ sparse_diagonal_product_evaluator(const SparseXprType &sparseXpr, const DiagCoeffType &diagCoeff)
+ : m_sparseXprEval(sparseXpr), m_diagCoeffNested(diagCoeff)
+ {}
+
+ Index nonZerosEstimate() const { return m_sparseXprEval.nonZerosEstimate(); }
+
+protected:
+ evaluator<SparseXprType> m_sparseXprEval;
+ DiagCoeffNested m_diagCoeffNested;
};
} // end namespace internal
-// SparseMatrixBase functions
-
-template<typename Derived>
-template<typename OtherDerived>
-const SparseDiagonalProduct<Derived,OtherDerived>
-SparseMatrixBase<Derived>::operator*(const DiagonalBase<OtherDerived> &other) const
-{
- return SparseDiagonalProduct<Derived,OtherDerived>(this->derived(), other.derived());
-}
-
} // end namespace Eigen
#endif // EIGEN_SPARSE_DIAGONAL_PRODUCT_H
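The pair of evaluators above reduces both product orders to two kernels: SDP_AsScalarProduct scales every stored value of an inner vector by a single diagonal coefficient (the diagonal is indexed along the outer dimension), while SDP_AsCwiseProduct multiplies each stored value by the diagonal coefficient matching its inner index. Which kernel is picked depends on the product order and on the sparse operand's storage order. A minimal usage sketch against the public API (sizes and values are illustrative, not part of the patch):

#include <Eigen/Sparse>
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(3,3);
  A.insert(0,0) = 1.0;
  A.insert(1,2) = 2.0;
  A.makeCompressed();

  Eigen::Vector3d d(2.0, 3.0, 4.0);

  // diagonal * sparse: row i of A is scaled by d(i)
  Eigen::SparseMatrix<double> L = d.asDiagonal() * A;
  // sparse * diagonal: column j of A is scaled by d(j)
  Eigen::SparseMatrix<double> R = A * d.asDiagonal();

  std::cout << L << "\n" << R << "\n";
}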
diff --git a/eigen/Eigen/src/SparseCore/SparseDot.h b/eigen/Eigen/src/SparseCore/SparseDot.h
index db39c9a..38bc4aa 100644
--- a/eigen/Eigen/src/SparseCore/SparseDot.h
+++ b/eigen/Eigen/src/SparseCore/SparseDot.h
@@ -26,7 +26,8 @@ SparseMatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
eigen_assert(size() == other.size());
eigen_assert(other.size()>0 && "you are using a non initialized vector");
- typename Derived::InnerIterator i(derived(),0);
+ internal::evaluator<Derived> thisEval(derived());
+ typename internal::evaluator<Derived>::InnerIterator i(thisEval, 0);
Scalar res(0);
while (i)
{
@@ -49,16 +50,12 @@ SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) cons
eigen_assert(size() == other.size());
- typedef typename Derived::Nested Nested;
- typedef typename OtherDerived::Nested OtherNested;
- typedef typename internal::remove_all<Nested>::type NestedCleaned;
- typedef typename internal::remove_all<OtherNested>::type OtherNestedCleaned;
+ internal::evaluator<Derived> thisEval(derived());
+ typename internal::evaluator<Derived>::InnerIterator i(thisEval, 0);
+
+ internal::evaluator<OtherDerived> otherEval(other.derived());
+ typename internal::evaluator<OtherDerived>::InnerIterator j(otherEval, 0);
- Nested nthis(derived());
- OtherNested nother(other.derived());
-
- typename NestedCleaned::InnerIterator i(nthis,0);
- typename OtherNestedCleaned::InnerIterator j(nother,0);
Scalar res(0);
while (i && j)
{
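The hunk cuts off before the loop body, which merges the two sorted index sequences: on matching inner indices the product of the values is accumulated (with the first factor conjugated for complex scalars), otherwise whichever iterator lags behind is advanced. A standalone sketch of that merge over plain sorted arrays, assuming real scalars:

#include <vector>
#include <cstddef>

// Dot product of two sparse vectors stored as sorted (index, value) pairs.
double sparse_dot(const std::vector<int>& ia, const std::vector<double>& va,
                  const std::vector<int>& ib, const std::vector<double>& vb)
{
  double res = 0;
  std::size_t i = 0, j = 0;
  while (i < ia.size() && j < ib.size())
  {
    if (ia[i] == ib[j])       // matching inner index: accumulate the product
      res += va[i++] * vb[j++];
    else if (ia[i] < ib[j])   // advance the sequence that lags behind
      ++i;
    else
      ++j;
  }
  return res;
}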
diff --git a/eigen/Eigen/src/SparseCore/SparseFuzzy.h b/eigen/Eigen/src/SparseCore/SparseFuzzy.h
index 45f36e9..7d47eb9 100644
--- a/eigen/Eigen/src/SparseCore/SparseFuzzy.h
+++ b/eigen/Eigen/src/SparseCore/SparseFuzzy.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -10,17 +10,20 @@
#ifndef EIGEN_SPARSE_FUZZY_H
#define EIGEN_SPARSE_FUZZY_H
-// template<typename Derived>
-// template<typename OtherDerived>
-// bool SparseMatrixBase<Derived>::isApprox(
-// const OtherDerived& other,
-// typename NumTraits<Scalar>::Real prec
-// ) const
-// {
-// const typename internal::nested<Derived,2>::type nested(derived());
-// const typename internal::nested<OtherDerived,2>::type otherNested(other.derived());
-// return (nested - otherNested).cwise().abs2().sum()
-// <= prec * prec * (std::min)(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum());
-// }
+namespace Eigen {
+
+template<typename Derived>
+template<typename OtherDerived>
+bool SparseMatrixBase<Derived>::isApprox(const SparseMatrixBase<OtherDerived>& other, const RealScalar &prec) const
+{
+ const typename internal::nested_eval<Derived,2,PlainObject>::type actualA(derived());
+ typename internal::conditional<bool(IsRowMajor)==bool(OtherDerived::IsRowMajor),
+ const typename internal::nested_eval<OtherDerived,2,PlainObject>::type,
+ const PlainObject>::type actualB(other.derived());
+
+ return (actualA - actualB).squaredNorm() <= prec * prec * numext::mini(actualA.squaredNorm(), actualB.squaredNorm());
+}
+
+} // end namespace Eigen
#endif // EIGEN_SPARSE_FUZZY_H
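A short usage sketch of the now-implemented sparse isApprox; the matrices and perturbation are illustrative:

#include <Eigen/Sparse>
#include <cassert>

int main()
{
  Eigen::SparseMatrix<double> A(2,2), B(2,2);
  A.insert(0,0) = 1.0;
  B.insert(0,0) = 1.0 + 1e-14;  // tiny relative perturbation
  A.makeCompressed();
  B.makeCompressed();

  // Relative test: (A-B).squaredNorm() <= prec^2 * min(|A|^2, |B|^2)
  assert(A.isApprox(B));
  assert(!A.isApprox(2.0 * B));
}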
diff --git a/eigen/Eigen/src/SparseCore/SparseMap.h b/eigen/Eigen/src/SparseCore/SparseMap.h
new file mode 100644
index 0000000..f99be33
--- /dev/null
+++ b/eigen/Eigen/src/SparseCore/SparseMap.h
@@ -0,0 +1,305 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_MAP_H
+#define EIGEN_SPARSE_MAP_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+struct traits<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
+ : public traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >
+{
+ typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;
+ typedef traits<PlainObjectType> TraitsBase;
+ enum {
+ Flags = TraitsBase::Flags & (~NestByRefBit)
+ };
+};
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+struct traits<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
+ : public traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >
+{
+ typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;
+ typedef traits<PlainObjectType> TraitsBase;
+ enum {
+ Flags = TraitsBase::Flags & (~ (NestByRefBit | LvalueBit))
+ };
+};
+
+} // end namespace internal
+
+template<typename Derived,
+ int Level = internal::accessors_level<Derived>::has_write_access ? WriteAccessors : ReadOnlyAccessors
+> class SparseMapBase;
+
+/** \ingroup SparseCore_Module
+ * class SparseMapBase
+ * \brief Common base class for Map and Ref instance of sparse matrix and vector.
+ */
+template<typename Derived>
+class SparseMapBase<Derived,ReadOnlyAccessors>
+ : public SparseCompressedBase<Derived>
+{
+ public:
+ typedef SparseCompressedBase<Derived> Base;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::StorageIndex StorageIndex;
+ enum { IsRowMajor = Base::IsRowMajor };
+ using Base::operator=;
+ protected:
+
+ typedef typename internal::conditional<
+ bool(internal::is_lvalue<Derived>::value),
+ Scalar *, const Scalar *>::type ScalarPointer;
+ typedef typename internal::conditional<
+ bool(internal::is_lvalue<Derived>::value),
+ StorageIndex *, const StorageIndex *>::type IndexPointer;
+
+ Index m_outerSize;
+ Index m_innerSize;
+ Array<StorageIndex,2,1> m_zero_nnz;
+ IndexPointer m_outerIndex;
+ IndexPointer m_innerIndices;
+ ScalarPointer m_values;
+ IndexPointer m_innerNonZeros;
+
+ public:
+
+ /** \copydoc SparseMatrixBase::rows() */
+ inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
+ /** \copydoc SparseMatrixBase::cols() */
+ inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
+ /** \copydoc SparseMatrixBase::innerSize() */
+ inline Index innerSize() const { return m_innerSize; }
+ /** \copydoc SparseMatrixBase::outerSize() */
+ inline Index outerSize() const { return m_outerSize; }
+ /** \copydoc SparseCompressedBase::nonZeros */
+ inline Index nonZeros() const { return m_zero_nnz[1]; }
+
+ /** \copydoc SparseCompressedBase::isCompressed */
+ bool isCompressed() const { return m_innerNonZeros==0; }
+
+ //----------------------------------------
+ // direct access interface
+ /** \copydoc SparseMatrix::valuePtr */
+ inline const Scalar* valuePtr() const { return m_values; }
+ /** \copydoc SparseMatrix::innerIndexPtr */
+ inline const StorageIndex* innerIndexPtr() const { return m_innerIndices; }
+ /** \copydoc SparseMatrix::outerIndexPtr */
+ inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
+ /** \copydoc SparseMatrix::innerNonZeroPtr */
+ inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
+ //----------------------------------------
+
+ /** \copydoc SparseMatrix::coeff */
+ inline Scalar coeff(Index row, Index col) const
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index start = m_outerIndex[outer];
+ Index end = isCompressed() ? m_outerIndex[outer+1] : start + m_innerNonZeros[outer];
+ if (start==end)
+ return Scalar(0);
+ else if (end>0 && inner==m_innerIndices[end-1])
+ return m_values[end-1];
+ // ^^ optimization: let's first check if it is the last coefficient
+ // (very common in high level algorithms)
+
+ const StorageIndex* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner);
+ const Index id = r-&m_innerIndices[0];
+ return ((*r==inner) && (id<end)) ? m_values[id] : Scalar(0);
+ }
+
+ inline SparseMapBase(Index rows, Index cols, Index nnz, IndexPointer outerIndexPtr, IndexPointer innerIndexPtr,
+ ScalarPointer valuePtr, IndexPointer innerNonZerosPtr = 0)
+ : m_outerSize(IsRowMajor?rows:cols), m_innerSize(IsRowMajor?cols:rows), m_zero_nnz(0,internal::convert_index<StorageIndex>(nnz)), m_outerIndex(outerIndexPtr),
+ m_innerIndices(innerIndexPtr), m_values(valuePtr), m_innerNonZeros(innerNonZerosPtr)
+ {}
+
+ // for vectors
+ inline SparseMapBase(Index size, Index nnz, IndexPointer innerIndexPtr, ScalarPointer valuePtr)
+ : m_outerSize(1), m_innerSize(size), m_zero_nnz(0,internal::convert_index<StorageIndex>(nnz)), m_outerIndex(m_zero_nnz.data()),
+ m_innerIndices(innerIndexPtr), m_values(valuePtr), m_innerNonZeros(0)
+ {}
+
+ /** Empty destructor */
+ inline ~SparseMapBase() {}
+
+ protected:
+ inline SparseMapBase() {}
+};
+
+/** \ingroup SparseCore_Module
+ * class SparseMapBase
+ * \brief Common base class for writable Map and Ref instance of sparse matrix and vector.
+ */
+template<typename Derived>
+class SparseMapBase<Derived,WriteAccessors>
+ : public SparseMapBase<Derived,ReadOnlyAccessors>
+{
+ typedef MapBase<Derived, ReadOnlyAccessors> ReadOnlyMapBase;
+
+ public:
+ typedef SparseMapBase<Derived, ReadOnlyAccessors> Base;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::StorageIndex StorageIndex;
+ enum { IsRowMajor = Base::IsRowMajor };
+
+ using Base::operator=;
+
+ public:
+
+ //----------------------------------------
+ // direct access interface
+ using Base::valuePtr;
+ using Base::innerIndexPtr;
+ using Base::outerIndexPtr;
+ using Base::innerNonZeroPtr;
+ /** \copydoc SparseMatrix::valuePtr */
+ inline Scalar* valuePtr() { return Base::m_values; }
+ /** \copydoc SparseMatrix::innerIndexPtr */
+ inline StorageIndex* innerIndexPtr() { return Base::m_innerIndices; }
+ /** \copydoc SparseMatrix::outerIndexPtr */
+ inline StorageIndex* outerIndexPtr() { return Base::m_outerIndex; }
+ /** \copydoc SparseMatrix::innerNonZeroPtr */
+ inline StorageIndex* innerNonZeroPtr() { return Base::m_innerNonZeros; }
+ //----------------------------------------
+
+ /** \copydoc SparseMatrix::coeffRef */
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index start = Base::m_outerIndex[outer];
+ Index end = Base::isCompressed() ? Base::m_outerIndex[outer+1] : start + Base::m_innerNonZeros[outer];
+ eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
+ eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient");
+ StorageIndex* r = std::lower_bound(&Base::m_innerIndices[start],&Base::m_innerIndices[end],inner);
+ const Index id = r - &Base::m_innerIndices[0];
+ eigen_assert((*r==inner) && (id<end) && "coeffRef cannot be called on a zero coefficient");
+ return const_cast<Scalar*>(Base::m_values)[id];
+ }
+
+ inline SparseMapBase(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr, StorageIndex* innerIndexPtr,
+ Scalar* valuePtr, StorageIndex* innerNonZerosPtr = 0)
+ : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)
+ {}
+
+ // for vectors
+ inline SparseMapBase(Index size, Index nnz, StorageIndex* innerIndexPtr, Scalar* valuePtr)
+ : Base(size, nnz, innerIndexPtr, valuePtr)
+ {}
+
+ /** Empty destructor */
+ inline ~SparseMapBase() {}
+
+ protected:
+ inline SparseMapBase() {}
+};
+
+/** \ingroup SparseCore_Module
+ *
+ * \brief Specialization of class Map for SparseMatrix-like storage.
+ *
+ * \tparam SparseMatrixType the equivalent sparse matrix type of the referenced data, it must be a template instance of class SparseMatrix.
+ *
+ * \sa class Map, class SparseMatrix, class Ref<SparseMatrixType,Options>
+ */
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+class Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType>
+ : public SparseMapBase<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
+#else
+template<typename SparseMatrixType>
+class Map<SparseMatrixType>
+ : public SparseMapBase<Derived,WriteAccessors>
+#endif
+{
+ public:
+ typedef SparseMapBase<Map> Base;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Map)
+ enum { IsRowMajor = Base::IsRowMajor };
+
+ public:
+
+ /** Constructs a read-write Map to a sparse matrix of size \a rows x \a cols, containing \a nnz non-zero coefficients,
+ * stored as a sparse format as defined by the pointers \a outerIndexPtr, \a innerIndexPtr, and \a valuePtr.
+ * If the optional parameter \a innerNonZerosPtr is the null pointer, then a standard compressed format is assumed.
+ *
+ * This constructor is available only if \c SparseMatrixType is non-const.
+ *
+ * More details on the expected storage schemes are given in the \ref TutorialSparse "manual pages".
+ */
+ inline Map(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr,
+ StorageIndex* innerIndexPtr, Scalar* valuePtr, StorageIndex* innerNonZerosPtr = 0)
+ : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)
+ {}
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** Empty destructor */
+ inline ~Map() {}
+};
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+class Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType>
+ : public SparseMapBase<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
+{
+ public:
+ typedef SparseMapBase<Map> Base;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Map)
+ enum { IsRowMajor = Base::IsRowMajor };
+
+ public:
+#endif
+ /** This is the const version of the above constructor.
+ *
+ * This constructor is available only if \c SparseMatrixType is const, e.g.:
+ * \code Map<const SparseMatrix<double> > \endcode
+ */
+ inline Map(Index rows, Index cols, Index nnz, const StorageIndex* outerIndexPtr,
+ const StorageIndex* innerIndexPtr, const Scalar* valuePtr, const StorageIndex* innerNonZerosPtr = 0)
+ : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)
+ {}
+
+ /** Empty destructor */
+ inline ~Map() {}
+};
+
+namespace internal {
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+struct evaluator<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
+ : evaluator<SparseCompressedBase<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
+{
+ typedef evaluator<SparseCompressedBase<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
+ typedef Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
+ evaluator() : Base() {}
+ explicit evaluator(const XprType &mat) : Base(mat) {}
+};
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+struct evaluator<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
+ : evaluator<SparseCompressedBase<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
+{
+ typedef evaluator<SparseCompressedBase<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
+ typedef Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
+ evaluator() : Base() {}
+ explicit evaluator(const XprType &mat) : Base(mat) {}
+};
+
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_MAP_H
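A sketch of the new Map in use, wrapping externally owned compressed-column arrays without copying them (the arrays below encode the 2x2 matrix [[1,0],[2,3]]):

#include <Eigen/Sparse>
#include <iostream>

int main()
{
  // CSC storage: column start offsets, row indices, values.
  int    outer[]  = {0, 2, 3};
  int    inner[]  = {0, 1, 1};
  double values[] = {1.0, 2.0, 3.0};

  // No copy is made; writes through coeffRef go back to `values`.
  Eigen::Map<Eigen::SparseMatrix<double> > M(2, 2, 3, outer, inner, values);

  std::cout << M.coeff(1,0) << "\n";  // prints 2
  M.coeffRef(1,1) = 5.0;              // allowed: this entry already exists
}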
diff --git a/eigen/Eigen/src/SparseCore/SparseMatrix.h b/eigen/Eigen/src/SparseCore/SparseMatrix.h
index 3b8946a..323c232 100644
--- a/eigen/Eigen/src/SparseCore/SparseMatrix.h
+++ b/eigen/Eigen/src/SparseCore/SparseMatrix.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -32,18 +32,22 @@ namespace Eigen {
* \tparam _Scalar the scalar type, i.e. the type of the coefficients
* \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
* is ColMajor or RowMajor. The default is 0 which means column-major.
- * \tparam _Index the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
+ * \tparam _StorageIndex the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
+ *
+ * \warning In %Eigen 3.2, the undocumented type \c SparseMatrix::Index was improperly defined as the storage index type (e.g., int),
+ * whereas it is now (starting from %Eigen 3.3) deprecated and always defined as Eigen::Index.
+ * Code making use of \c SparseMatrix::Index thus likely has to be changed to use \c SparseMatrix::StorageIndex instead.
*
* This class can be extended with the help of the plugin mechanism described on the page
- * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
+ * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
*/
namespace internal {
-template<typename _Scalar, int _Options, typename _Index>
-struct traits<SparseMatrix<_Scalar, _Options, _Index> >
+template<typename _Scalar, int _Options, typename _StorageIndex>
+struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
{
typedef _Scalar Scalar;
- typedef _Index Index;
+ typedef _StorageIndex StorageIndex;
typedef Sparse StorageKind;
typedef MatrixXpr XprKind;
enum {
@@ -51,22 +55,21 @@ struct traits<SparseMatrix<_Scalar, _Options, _Index> >
ColsAtCompileTime = Dynamic,
MaxRowsAtCompileTime = Dynamic,
MaxColsAtCompileTime = Dynamic,
- Flags = _Options | NestByRefBit | LvalueBit,
- CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
SupportedAccessPatterns = InnerRandomAccessPattern
};
};
-template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
-struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
+template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
+struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
{
- typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
- typedef typename nested<MatrixType>::type MatrixTypeNested;
+ typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType;
+ typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
typedef _Scalar Scalar;
typedef Dense StorageKind;
- typedef _Index Index;
+ typedef _StorageIndex StorageIndex;
typedef MatrixXpr XprKind;
enum {
@@ -74,47 +77,61 @@ struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex>
ColsAtCompileTime = 1,
MaxRowsAtCompileTime = Dynamic,
MaxColsAtCompileTime = 1,
- Flags = 0,
- CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10
+ Flags = LvalueBit
+ };
+};
+
+template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
+struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
+ : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
+{
+ enum {
+ Flags = 0
};
};
} // end namespace internal
-template<typename _Scalar, int _Options, typename _Index>
+template<typename _Scalar, int _Options, typename _StorageIndex>
class SparseMatrix
- : public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
+ : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
{
+ typedef SparseCompressedBase<SparseMatrix> Base;
+ using Base::convert_index;
+ friend class SparseVector<_Scalar,0,_StorageIndex>;
public:
+ using Base::isCompressed;
+ using Base::nonZeros;
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
- EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
- EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)
+ using Base::operator+=;
+ using Base::operator-=;
typedef MappedSparseMatrix<Scalar,Flags> Map;
+ typedef Diagonal<SparseMatrix> DiagonalReturnType;
+ typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
+ typedef typename Base::InnerIterator InnerIterator;
+ typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
+
+
using Base::IsRowMajor;
- typedef internal::CompressedStorage<Scalar,Index> Storage;
+ typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
enum {
Options = _Options
};
+ typedef typename Base::IndexVector IndexVector;
+ typedef typename Base::ScalarVector ScalarVector;
protected:
-
typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
Index m_outerSize;
Index m_innerSize;
- Index* m_outerIndex;
- Index* m_innerNonZeros; // optional, if null then the data is compressed
+ StorageIndex* m_outerIndex;
+ StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
Storage m_data;
-
- Eigen::Map<Matrix<Index,Dynamic,1> > innerNonZeros() { return Eigen::Map<Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
- const Eigen::Map<const Matrix<Index,Dynamic,1> > innerNonZeros() const { return Eigen::Map<const Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
public:
- /** \returns whether \c *this is in compressed form. */
- inline bool isCompressed() const { return m_innerNonZeros==0; }
-
/** \returns the number of rows of the matrix */
inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
/** \returns the number of columns of the matrix */
@@ -137,29 +154,29 @@ class SparseMatrix
/** \returns a const pointer to the array of inner indices.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), outerIndexPtr() */
- inline const Index* innerIndexPtr() const { return m_data.indexPtr(); }
+ inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
/** \returns a non-const pointer to the array of inner indices.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), outerIndexPtr() */
- inline Index* innerIndexPtr() { return m_data.indexPtr(); }
+ inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
/** \returns a const pointer to the array of the starting positions of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), innerIndexPtr() */
- inline const Index* outerIndexPtr() const { return m_outerIndex; }
+ inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
/** \returns a non-const pointer to the array of the starting positions of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), innerIndexPtr() */
- inline Index* outerIndexPtr() { return m_outerIndex; }
+ inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
/** \returns a const pointer to the array of the number of non zeros of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \warning it returns the null pointer 0 in compressed mode */
- inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; }
+ inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
/** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \warning it returns the null pointer 0 in compressed mode */
- inline Index* innerNonZeroPtr() { return m_innerNonZeros; }
+ inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
/** \internal */
inline Storage& data() { return m_data; }
@@ -175,7 +192,7 @@ class SparseMatrix
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
- return m_data.atInRange(m_outerIndex[outer], end, inner);
+ return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
}
/** \returns a non-const reference to the value of the matrix at position \a i, \a j
@@ -198,7 +215,7 @@ class SparseMatrix
eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
if(end<=start)
return insert(row,col);
- const Index p = m_data.searchLowerIndex(start,end-1,inner);
+ const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
if((p<end) && (m_data.index(p)==inner))
return m_data.value(p);
else
@@ -209,45 +226,34 @@ class SparseMatrix
* The non zero coefficient must \b not already exist.
*
* If the matrix \c *this is in compressed mode, then \c *this is turned into uncompressed
- * mode while reserving room for 2 non zeros per inner vector. It is strongly recommended to first
- * call reserve(const SizesType &) to reserve a more appropriate number of elements per
- * inner vector that better match your scenario.
+ * mode while reserving room for 2 x this->innerSize() non zeros if reserve(Index) has not been called earlier.
+ * In this case, the insertion procedure is optimized for a \e sequential insertion mode where elements are assumed to be
+ * inserted by increasing outer-indices.
+ *
+ * If that's not the case, then it is strongly recommended to either use a triplet-list to assemble the matrix, or to first
+ * call reserve(const SizesType &) to reserve the appropriate number of non-zero elements per inner vector.
*
- * This function performs a sorted insertion in O(1) if the elements of each inner vector are
- * inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
+ * Assuming memory has been appropriately reserved, this function performs a sorted insertion in O(1)
+ * if the elements of each inner vector are inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
*
*/
- Scalar& insert(Index row, Index col)
- {
- eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
-
- if(isCompressed())
- {
- reserve(Matrix<Index,Dynamic,1>::Constant(outerSize(), 2));
- }
- return insertUncompressed(row,col);
- }
+ Scalar& insert(Index row, Index col);
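The sequential-insertion pattern recommended in the comment above, sketched with illustrative sizes and a hypothetical fill pattern:

#include <Eigen/Sparse>

int main()
{
  Eigen::SparseMatrix<double> mat(1000, 1000);
  mat.reserve(Eigen::VectorXi::Constant(1000, 6)); // ~6 nonzeros per column

  // Fill by increasing outer index (columns here) for cheap insertions.
  for (int j = 0; j < 1000; ++j)
    for (int k = 0; k < 3; ++k)
      mat.insert((j + 97 * k) % 1000, j) = 1.0;    // arbitrary sparsity pattern

  mat.makeCompressed(); // pack the entries back into compressed mode
}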
public:
- class InnerIterator;
- class ReverseInnerIterator;
-
- /** Removes all non zeros but keep allocated memory */
+ /** Removes all non zeros but keeps allocated memory
+ *
+ * This function does not free the currently allocated memory. To release as much memory as possible,
+ * call \code mat.data().squeeze(); \endcode after resizing it.
+ *
+ * \sa resize(Index,Index), data()
+ */
inline void setZero()
{
m_data.clear();
- memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
- if(m_innerNonZeros)
- memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index));
- }
-
- /** \returns the number of non zero coefficients */
- inline Index nonZeros() const
- {
+ memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
if(m_innerNonZeros)
- return innerNonZeros().sum();
- return static_cast<Index>(m_data.size());
+ memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
}
/** Preallocates \a reserveSize non zeros.
@@ -262,22 +268,25 @@ class SparseMatrix
#ifdef EIGEN_PARSED_BY_DOXYGEN
/** Preallocates \a reserveSize[\c j] non zeros for each column (resp. row) \c j.
*
- * This function turns the matrix in non-compressed mode */
+ * This function turns the matrix into non-compressed mode.
+ *
+ * The type \c SizesType must expose the following interface:
+ \code
+ typedef value_type;
+ const value_type& operator[](i) const;
+ \endcode
+ * for \c i in the [0,this->outerSize()[ range.
+ * Typical choices include std::vector<int>, Eigen::VectorXi, Eigen::VectorXi::Constant, etc.
+ */
template<class SizesType>
inline void reserve(const SizesType& reserveSizes);
#else
template<class SizesType>
- inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif = typename SizesType::value_type())
- {
- EIGEN_UNUSED_VARIABLE(enableif);
- reserveInnerVectors(reserveSizes);
- }
- template<class SizesType>
- inline void reserve(const SizesType& reserveSizes, const typename SizesType::Scalar& enableif =
- #if (!defined(_MSC_VER)) || (_MSC_VER>=1500) // MSVC 2005 fails to compile with this typename
+ inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
+ #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
typename
#endif
- SizesType::Scalar())
+ SizesType::value_type())
{
EIGEN_UNUSED_VARIABLE(enableif);
reserveInnerVectors(reserveSizes);
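Any type exposing the value_type/operator[] interface documented above can drive the reservation; a sketch using std::vector<int> with illustrative per-column sizes:

#include <Eigen/Sparse>
#include <vector>

int main()
{
  Eigen::SparseMatrix<double> mat(4, 4);

  // Reserve a different number of nonzeros for each column.
  std::vector<int> sizes;
  sizes.push_back(1); sizes.push_back(2);
  sizes.push_back(4); sizes.push_back(2);
  mat.reserve(sizes);

  mat.insert(0, 0) = 1.0; // fits in the reserved space, no reallocation
  mat.makeCompressed();
}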
@@ -289,15 +298,15 @@ class SparseMatrix
{
if(isCompressed())
{
- std::size_t totalReserveSize = 0;
+ Index totalReserveSize = 0;
// turn the matrix into non-compressed mode
- m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
+ m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
if (!m_innerNonZeros) internal::throw_std_bad_alloc();
// temporarily use m_innerSizes to hold the new starting points.
- Index* newOuterIndex = m_innerNonZeros;
+ StorageIndex* newOuterIndex = m_innerNonZeros;
- Index count = 0;
+ StorageIndex count = 0;
for(Index j=0; j<m_outerSize; ++j)
{
newOuterIndex[j] = count;
@@ -305,10 +314,10 @@ class SparseMatrix
totalReserveSize += reserveSizes[j];
}
m_data.reserve(totalReserveSize);
- Index previousOuterIndex = m_outerIndex[m_outerSize];
+ StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
for(Index j=m_outerSize-1; j>=0; --j)
{
- Index innerNNZ = previousOuterIndex - m_outerIndex[j];
+ StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
for(Index i=innerNNZ-1; i>=0; --i)
{
m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
@@ -324,15 +333,15 @@ class SparseMatrix
}
else
{
- Index* newOuterIndex = static_cast<Index*>(std::malloc((m_outerSize+1)*sizeof(Index)));
+ StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
if (!newOuterIndex) internal::throw_std_bad_alloc();
- Index count = 0;
+ StorageIndex count = 0;
for(Index j=0; j<m_outerSize; ++j)
{
newOuterIndex[j] = count;
- Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
- Index toReserve = std::max<Index>(reserveSizes[j], alreadyReserved);
+ StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
+ StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
count += toReserve + m_innerNonZeros[j];
}
newOuterIndex[m_outerSize] = count;
@@ -343,7 +352,7 @@ class SparseMatrix
Index offset = newOuterIndex[j] - m_outerIndex[j];
if(offset>0)
{
- Index innerNNZ = m_innerNonZeros[j];
+ StorageIndex innerNNZ = m_innerNonZeros[j];
for(Index i=innerNNZ-1; i>=0; --i)
{
m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
@@ -380,11 +389,11 @@ class SparseMatrix
* \sa insertBack, startVec */
inline Scalar& insertBackByOuterInner(Index outer, Index inner)
{
- eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
+ eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
Index p = m_outerIndex[outer+1];
++m_outerIndex[outer+1];
- m_data.append(0, inner);
+ m_data.append(Scalar(0), inner);
return m_data.value(p);
}
@@ -394,7 +403,7 @@ class SparseMatrix
{
Index p = m_outerIndex[outer+1];
++m_outerIndex[outer+1];
- m_data.append(0, inner);
+ m_data.append(Scalar(0), inner);
return m_data.value(p);
}
@@ -414,7 +423,7 @@ class SparseMatrix
{
if(isCompressed())
{
- Index size = static_cast<Index>(m_data.size());
+ StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
Index i = m_outerSize;
// find the last filled column
while (i>=0 && m_outerIndex[i]==0)
@@ -433,7 +442,13 @@ class SparseMatrix
template<typename InputIterators>
void setFromTriplets(const InputIterators& begin, const InputIterators& end);
- void sumupDuplicates();
+ template<typename InputIterators,typename DupFunctor>
+ void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
+
+ void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }
+
+ template<typename DupFunctor>
+ void collapseDuplicates(DupFunctor dup_func = DupFunctor());
//---
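The usual triplet-based assembly behind these declarations, sketched with illustrative entries (the DupFunctor overload is documented with its own example further down):

#include <Eigen/Sparse>
#include <vector>

int main()
{
  typedef Eigen::Triplet<double> T;
  std::vector<T> triplets;
  triplets.push_back(T(0, 0, 1.0));
  triplets.push_back(T(1, 2, 2.0));
  triplets.push_back(T(0, 0, 3.0)); // duplicate entry: summed by default

  Eigen::SparseMatrix<double> mat(2, 3);
  mat.setFromTriplets(triplets.begin(), triplets.end());
  // mat.coeff(0,0) == 4.0
}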
@@ -451,6 +466,8 @@ class SparseMatrix
if(isCompressed())
return;
+ eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);
+
Index oldStart = m_outerIndex[1];
m_outerIndex[1] = m_innerNonZeros[0];
for(Index j=1; j<m_outerSize; ++j)
@@ -479,7 +496,7 @@ class SparseMatrix
{
if(m_innerNonZeros != 0)
return;
- m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
+ m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
for (Index i = 0; i < m_outerSize; i++)
{
m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
@@ -503,10 +520,9 @@ class SparseMatrix
void prune(const KeepFunc& keep = KeepFunc())
{
// TODO optimize the uncompressed mode to avoid moving and allocating the data twice
- // TODO also implement a unit test
makeCompressed();
- Index k = 0;
+ StorageIndex k = 0;
for(Index j=0; j<m_outerSize; ++j)
{
Index previousStart = m_outerIndex[j];
@@ -527,7 +543,12 @@ class SparseMatrix
}
/** Resizes the matrix to a \a rows x \a cols matrix leaving old values untouched.
- * \sa resizeNonZeros(Index), reserve(), setZero()
+ *
+ * If the sizes of the matrix are decreased, then the matrix is turned into \b uncompressed-mode
+ * and the storage of the out-of-bounds coefficients is kept and reserved.
+ * Call makeCompressed() to pack the entries and squeeze extra memory.
+ *
+ * \sa reserve(), setZero(), makeCompressed()
*/
void conservativeResize(Index rows, Index cols)
{
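A sketch of the shrink-then-pack sequence suggested by the documentation above:

#include <Eigen/Sparse>

int main()
{
  Eigen::SparseMatrix<double> mat(4, 4);
  mat.insert(3, 3) = 1.0;
  mat.makeCompressed();

  mat.conservativeResize(2, 2); // out-of-bounds storage is kept,
                                // matrix switches to uncompressed mode
  mat.makeCompressed();         // pack entries and squeeze extra memory
}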
@@ -539,13 +560,13 @@ class SparseMatrix
Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
- Index newInnerSize = IsRowMajor ? cols : rows;
+ StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);
// Deals with inner non zeros
if (m_innerNonZeros)
{
// Resize m_innerNonZeros
- Index *newInnerNonZeros = static_cast<Index*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(Index)));
+ StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
if (!newInnerNonZeros) internal::throw_std_bad_alloc();
m_innerNonZeros = newInnerNonZeros;
@@ -555,7 +576,7 @@ class SparseMatrix
else if (innerChange < 0)
{
// Inner size decreased: allocate a new m_innerNonZeros
- m_innerNonZeros = static_cast<Index*>(std::malloc((m_outerSize+outerChange+1) * sizeof(Index)));
+ m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));
if (!m_innerNonZeros) internal::throw_std_bad_alloc();
for(Index i = 0; i < m_outerSize; i++)
m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
@@ -566,8 +587,8 @@ class SparseMatrix
{
for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
{
- Index &n = m_innerNonZeros[i];
- Index start = m_outerIndex[i];
+ StorageIndex &n = m_innerNonZeros[i];
+ StorageIndex start = m_outerIndex[i];
while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
}
}
@@ -578,12 +599,12 @@ class SparseMatrix
if (outerChange == 0)
return;
- Index *newOuterIndex = static_cast<Index*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(Index)));
+ StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
if (!newOuterIndex) internal::throw_std_bad_alloc();
m_outerIndex = newOuterIndex;
if (outerChange > 0)
{
- Index last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
+ StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
m_outerIndex[i] = last;
}
@@ -591,7 +612,11 @@ class SparseMatrix
}
/** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
- * \sa resizeNonZeros(Index), reserve(), setZero()
+ *
+ * This function does not free the currently allocated memory. To release as much memory as possible,
+ * call \code mat.data().squeeze(); \endcode after resizing it.
+ *
+ * \sa reserve(), setZero()
*/
void resize(Index rows, Index cols)
{
@@ -601,7 +626,7 @@ class SparseMatrix
if (m_outerSize != outerSize || m_outerSize==0)
{
std::free(m_outerIndex);
- m_outerIndex = static_cast<Index*>(std::malloc((outerSize + 1) * sizeof(Index)));
+ m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
if (!m_outerIndex) internal::throw_std_bad_alloc();
m_outerSize = outerSize;
@@ -611,19 +636,24 @@ class SparseMatrix
std::free(m_innerNonZeros);
m_innerNonZeros = 0;
}
- memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
+ memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
}
/** \internal
* Resize the nonzero vector to \a size */
void resizeNonZeros(Index size)
{
- // TODO remove this function
m_data.resize(size);
}
- /** \returns a const expression of the diagonal coefficients */
- const Diagonal<const SparseMatrix> diagonal() const { return *this; }
+ /** \returns a const expression of the diagonal coefficients. */
+ const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }
+
+ /** \returns a read-write expression of the diagonal coefficients.
+ * \warning If the diagonal entries are written, then all diagonal
+ * entries \b must already exist, otherwise an assertion will be raised.
+ */
+ DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
/** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
inline SparseMatrix()
@@ -649,7 +679,16 @@ class SparseMatrix
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
check_template_parameters();
- *this = other.derived();
+ const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
+ if (needToTranspose)
+ *this = other.derived();
+ else
+ {
+ #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
+ EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
+ #endif
+ internal::call_assignment_no_alias(*this, other.derived());
+ }
}
/** Constructs a sparse matrix from the sparse selfadjoint view \a other */
@@ -658,7 +697,7 @@ class SparseMatrix
: m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
check_template_parameters();
- *this = other;
+ Base::operator=(other);
}
/** Copy constructor (it performs a deep copy) */
@@ -678,6 +717,15 @@ class SparseMatrix
initAssignment(other);
other.evalTo(*this);
}
+
+ /** \brief Copy constructor with in-place evaluation */
+ template<typename OtherDerived>
+ explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
+ : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+ {
+ check_template_parameters();
+ *this = other.derived();
+ }
/** Swaps the content of two sparse matrices of the same type.
* This is a fast operation that simply swaps the underlying pointers and parameters. */
@@ -697,9 +745,9 @@ class SparseMatrix
{
eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
this->m_data.resize(rows());
- Eigen::Map<Matrix<Index, Dynamic, 1> >(this->m_data.indexPtr(), rows()).setLinSpaced(0, rows()-1);
- Eigen::Map<Matrix<Scalar, Dynamic, 1> >(this->m_data.valuePtr(), rows()).setOnes();
- Eigen::Map<Matrix<Index, Dynamic, 1> >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows());
+ Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
+ Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
+ Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
std::free(m_innerNonZeros);
m_innerNonZeros = 0;
}
@@ -711,10 +759,13 @@ class SparseMatrix
}
else if(this!=&other)
{
+ #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
+ EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
+ #endif
initAssignment(other);
if(other.isCompressed())
{
- memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
+ internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
m_data = other.m_data;
}
else
@@ -725,22 +776,11 @@ class SparseMatrix
return *this;
}
- #ifndef EIGEN_PARSED_BY_DOXYGEN
- template<typename Lhs, typename Rhs>
- inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
- { return Base::operator=(product); }
-
- template<typename OtherDerived>
- inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
- {
- initAssignment(other);
- return Base::operator=(other.derived());
- }
-
+#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename OtherDerived>
inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
{ return Base::operator=(other.derived()); }
- #endif
+#endif // EIGEN_PARSED_BY_DOXYGEN
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
@@ -750,30 +790,38 @@ class SparseMatrix
EIGEN_DBG_SPARSE(
s << "Nonzero entries:\n";
if(m.isCompressed())
+ {
for (Index i=0; i<m.nonZeros(); ++i)
s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
+ }
else
+ {
for (Index i=0; i<m.outerSize(); ++i)
{
Index p = m.m_outerIndex[i];
Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
Index k=p;
- for (; k<pe; ++k)
+ for (; k<pe; ++k) {
s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
- for (; k<m.m_outerIndex[i+1]; ++k)
+ }
+ for (; k<m.m_outerIndex[i+1]; ++k) {
s << "(_,_) ";
+ }
}
+ }
s << std::endl;
s << std::endl;
s << "Outer pointers:\n";
- for (Index i=0; i<m.outerSize(); ++i)
+ for (Index i=0; i<m.outerSize(); ++i) {
s << m.m_outerIndex[i] << " ";
+ }
s << " $" << std::endl;
if(!m.isCompressed())
{
s << "Inner non zeros:\n";
- for (Index i=0; i<m.outerSize(); ++i)
+ for (Index i=0; i<m.outerSize(); ++i) {
s << m.m_innerNonZeros[i] << " ";
+ }
s << " $" << std::endl;
}
s << std::endl;
@@ -789,10 +837,8 @@ class SparseMatrix
std::free(m_innerNonZeros);
}
-#ifndef EIGEN_PARSED_BY_DOXYGEN
/** Overloaded for performance */
Scalar sum() const;
-#endif
# ifdef EIGEN_SPARSEMATRIX_PLUGIN
# include EIGEN_SPARSEMATRIX_PLUGIN
@@ -819,15 +865,15 @@ protected:
* A vector object that is equal to 0 everywhere but v at the position i */
class SingletonVector
{
- Index m_index;
- Index m_value;
+ StorageIndex m_index;
+ StorageIndex m_value;
public:
- typedef Index value_type;
+ typedef StorageIndex value_type;
SingletonVector(Index i, Index v)
- : m_index(i), m_value(v)
+ : m_index(convert_index(i)), m_value(convert_index(v))
{}
- Index operator[](Index i) const { return i==m_index ? m_value : 0; }
+ StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
};
/** \internal
@@ -846,14 +892,14 @@ public:
eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
- m_data.index(p) = inner;
+ m_data.index(p) = convert_index(inner);
return (m_data.value(p) = 0);
}
private:
static void check_template_parameters()
{
- EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
+ EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
}
@@ -868,87 +914,20 @@ private:
};
};
-template<typename Scalar, int _Options, typename _Index>
-class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
-{
- public:
- InnerIterator(const SparseMatrix& mat, Index outer)
- : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer])
- {
- if(mat.isCompressed())
- m_end = mat.m_outerIndex[outer+1];
- else
- m_end = m_id + mat.m_innerNonZeros[outer];
- }
-
- inline InnerIterator& operator++() { m_id++; return *this; }
-
- inline const Scalar& value() const { return m_values[m_id]; }
- inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
-
- inline Index index() const { return m_indices[m_id]; }
- inline Index outer() const { return m_outer; }
- inline Index row() const { return IsRowMajor ? m_outer : index(); }
- inline Index col() const { return IsRowMajor ? index() : m_outer; }
-
- inline operator bool() const { return (m_id < m_end); }
-
- protected:
- const Scalar* m_values;
- const Index* m_indices;
- const Index m_outer;
- Index m_id;
- Index m_end;
-};
-
-template<typename Scalar, int _Options, typename _Index>
-class SparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator
-{
- public:
- ReverseInnerIterator(const SparseMatrix& mat, Index outer)
- : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_start(mat.m_outerIndex[outer])
- {
- if(mat.isCompressed())
- m_id = mat.m_outerIndex[outer+1];
- else
- m_id = m_start + mat.m_innerNonZeros[outer];
- }
-
- inline ReverseInnerIterator& operator--() { --m_id; return *this; }
-
- inline const Scalar& value() const { return m_values[m_id-1]; }
- inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }
-
- inline Index index() const { return m_indices[m_id-1]; }
- inline Index outer() const { return m_outer; }
- inline Index row() const { return IsRowMajor ? m_outer : index(); }
- inline Index col() const { return IsRowMajor ? index() : m_outer; }
-
- inline operator bool() const { return (m_id > m_start); }
-
- protected:
- const Scalar* m_values;
- const Index* m_indices;
- const Index m_outer;
- Index m_id;
- const Index m_start;
-};
-
namespace internal {
-template<typename InputIterator, typename SparseMatrixType>
-void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, int Options = 0)
+template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
+void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
{
- EIGEN_UNUSED_VARIABLE(Options);
enum { IsRowMajor = SparseMatrixType::IsRowMajor };
typedef typename SparseMatrixType::Scalar Scalar;
- typedef typename SparseMatrixType::Index Index;
- SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,Index> trMat(mat.rows(),mat.cols());
+ typedef typename SparseMatrixType::StorageIndex StorageIndex;
+ SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());
if(begin!=end)
{
// pass 1: count the nnz per inner-vector
- Matrix<Index,Dynamic,1> wi(trMat.outerSize());
+ typename SparseMatrixType::IndexVector wi(trMat.outerSize());
wi.setZero();
for(InputIterator it(begin); it!=end; ++it)
{
@@ -962,7 +941,7 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
// pass 3:
- trMat.sumupDuplicates();
+ trMat.collapseDuplicates(dup_func);
}
// pass 4: transposed copy -> implicit sorting
@@ -1009,26 +988,43 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
* an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
 * be explicitly stored in a std::vector, for instance.
*/
-template<typename Scalar, int _Options, typename _Index>
+template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators>
-void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
+void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
+{
+ internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
+}
+
+/** The same as setFromTriplets but when duplicates are met the functor \a dup_func is applied:
+ * \code
+ * value = dup_func(OldValue, NewValue)
+ * \endcode
+ * Here is a C++11 example keeping the latest entry only:
+ * \code
+ * mat.setFromTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });
+ * \endcode
+ */
+template<typename Scalar, int _Options, typename _StorageIndex>
+template<typename InputIterators,typename DupFunctor>
+void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
{
- internal::set_from_triplets(begin, end, *this);
+ internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
}
/** \internal */
-template<typename Scalar, int _Options, typename _Index>
-void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
+template<typename Scalar, int _Options, typename _StorageIndex>
+template<typename DupFunctor>
+void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
{
eigen_assert(!isCompressed());
// TODO, in practice we should be able to use m_innerNonZeros for that task
- Matrix<Index,Dynamic,1> wi(innerSize());
+ IndexVector wi(innerSize());
wi.fill(-1);
- Index count = 0;
+ StorageIndex count = 0;
// for each inner-vector, wi[inner_index] will hold the position of the first element in the index/value buffers
for(Index j=0; j<outerSize(); ++j)
{
- Index start = count;
+ StorageIndex start = count;
Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
{
@@ -1036,7 +1032,7 @@ void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
if(wi(i)>=start)
{
// we already met this entry => accumulate it
- m_data.value(wi(i)) += m_data.value(k);
+ m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
}
else
{
@@ -1056,39 +1052,48 @@ void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
m_data.resize(m_outerIndex[m_outerSize]);
}
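The scan above uses wi as a scatter table: wi[i] records where inner index i first landed within the current inner vector, so later duplicates are folded in place through dup_func. The same idea as a standalone sketch over a single inner vector (Entry and collapse are hypothetical helpers, not part of the patch):

#include <vector>
#include <cstddef>

struct Entry { int index; double value; };

// Collapse duplicate indices of one inner vector; entries need not be sorted.
// dup(old_value, new_value) decides how clashing values combine.
template <class DupFunctor>
std::vector<Entry> collapse(const std::vector<Entry>& in, int innerSize, DupFunctor dup)
{
  std::vector<Entry> out;
  std::vector<int> wi(innerSize, -1);  // wi[i] = position of index i in `out`
  for (std::size_t k = 0; k < in.size(); ++k)
  {
    const Entry& e = in[k];
    if (wi[e.index] >= 0)
      out[wi[e.index]].value = dup(out[wi[e.index]].value, e.value);
    else
    {
      wi[e.index] = static_cast<int>(out.size());
      out.push_back(e);
    }
  }
  return out;
}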
-template<typename Scalar, int _Options, typename _Index>
+template<typename Scalar, int _Options, typename _StorageIndex>
template<typename OtherDerived>
-EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other)
+EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
-
- const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+
+ #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
+ EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
+ #endif
+
+ const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
if (needToTranspose)
{
+ #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
+ EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
+ #endif
// two passes algorithm:
// 1 - compute the number of coeffs per dest inner vector
// 2 - do the actual copy/eval
// Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
- typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
+ typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
+ typedef internal::evaluator<_OtherCopy> OtherCopyEval;
OtherCopy otherCopy(other.derived());
+ OtherCopyEval otherCopyEval(otherCopy);
SparseMatrix dest(other.rows(),other.cols());
- Eigen::Map<Matrix<Index, Dynamic, 1> > (dest.m_outerIndex,dest.outerSize()).setZero();
+ Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
// pass 1
// FIXME the above copy could be merged with that pass
for (Index j=0; j<otherCopy.outerSize(); ++j)
- for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+ for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
++dest.m_outerIndex[it.index()];
// prefix sum
- Index count = 0;
- Matrix<Index,Dynamic,1> positions(dest.outerSize());
+ StorageIndex count = 0;
+ IndexVector positions(dest.outerSize());
for (Index j=0; j<dest.outerSize(); ++j)
{
- Index tmp = dest.m_outerIndex[j];
+ StorageIndex tmp = dest.m_outerIndex[j];
dest.m_outerIndex[j] = count;
positions[j] = count;
count += tmp;
@@ -1097,9 +1102,9 @@ EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Opt
// alloc
dest.m_data.resize(count);
// pass 2
- for (Index j=0; j<otherCopy.outerSize(); ++j)
+ for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
{
- for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+ for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
{
Index pos = positions[it.index()]++;
dest.m_data.index(pos) = j;
@@ -1112,26 +1117,148 @@ EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Opt
else
{
if(other.isRValue())
+ {
initAssignment(other.derived());
+ }
// there is no special optimization
return Base::operator=(other.derived());
}
}
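
In user terms, the needToTranspose branch above fires whenever source and destination storage orders differ; a hedged sketch (matrices left empty for brevity):

    #include <Eigen/SparseCore>

    int main()
    {
      Eigen::SparseMatrix<double, Eigen::ColMajor> A(100,100);
      Eigen::SparseMatrix<double, Eigen::RowMajor> B = A;  // needToTranspose: two-pass copy above
      Eigen::SparseMatrix<double, Eigen::ColMajor> C = A;  // same order: generic assignment path
      return 0;
    }
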
-template<typename _Scalar, int _Options, typename _Index>
-EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col)
+template<typename _Scalar, int _Options, typename _StorageIndex>
+typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
+{
+ eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
+
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ if(isCompressed())
+ {
+ if(nonZeros()==0)
+ {
+ // reserve space if not already done
+ if(m_data.allocatedSize()==0)
+ m_data.reserve(2*m_innerSize);
+
+ // turn the matrix into non-compressed mode
+ m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
+ if(!m_innerNonZeros) internal::throw_std_bad_alloc();
+
+ memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
+
+ // pack all inner-vectors to the end of the pre-allocated space
+ // and allocate the entire free-space to the first inner-vector
+ StorageIndex end = convert_index(m_data.allocatedSize());
+ for(Index j=1; j<=m_outerSize; ++j)
+ m_outerIndex[j] = end;
+ }
+ else
+ {
+ // turn the matrix into non-compressed mode
+ m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
+ if(!m_innerNonZeros) internal::throw_std_bad_alloc();
+ for(Index j=0; j<m_outerSize; ++j)
+ m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
+ }
+ }
+
+ // check whether we can do a fast "push back" insertion
+ Index data_end = m_data.allocatedSize();
+
+ // First case: we are filling a new inner vector which is packed at the end.
+ // We assume that all remaining inner-vectors are also empty and packed to the end.
+ if(m_outerIndex[outer]==data_end)
+ {
+ eigen_internal_assert(m_innerNonZeros[outer]==0);
+
+    // pack previous empty inner-vectors to the end of the used-space
+ // and allocate the entire free-space to the current inner-vector.
+ StorageIndex p = convert_index(m_data.size());
+ Index j = outer;
+ while(j>=0 && m_innerNonZeros[j]==0)
+ m_outerIndex[j--] = p;
+
+ // push back the new element
+ ++m_innerNonZeros[outer];
+ m_data.append(Scalar(0), inner);
+
+ // check for reallocation
+ if(data_end != m_data.allocatedSize())
+ {
+ // m_data has been reallocated
+ // -> move remaining inner-vectors back to the end of the free-space
+ // so that the entire free-space is allocated to the current inner-vector.
+ eigen_internal_assert(data_end < m_data.allocatedSize());
+ StorageIndex new_end = convert_index(m_data.allocatedSize());
+ for(Index k=outer+1; k<=m_outerSize; ++k)
+ if(m_outerIndex[k]==data_end)
+ m_outerIndex[k] = new_end;
+ }
+ return m_data.value(p);
+ }
+
+ // Second case: the next inner-vector is packed to the end
+  // and the end of the current inner-vector matches the used-space.
+ if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
+ {
+ eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);
+
+ // add space for the new element
+ ++m_innerNonZeros[outer];
+ m_data.resize(m_data.size()+1);
+
+ // check for reallocation
+ if(data_end != m_data.allocatedSize())
+ {
+ // m_data has been reallocated
+ // -> move remaining inner-vectors back to the end of the free-space
+ // so that the entire free-space is allocated to the current inner-vector.
+ eigen_internal_assert(data_end < m_data.allocatedSize());
+ StorageIndex new_end = convert_index(m_data.allocatedSize());
+ for(Index k=outer+1; k<=m_outerSize; ++k)
+ if(m_outerIndex[k]==data_end)
+ m_outerIndex[k] = new_end;
+ }
+
+ // and insert it at the right position (sorted insertion)
+ Index startId = m_outerIndex[outer];
+ Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
+ while ( (p > startId) && (m_data.index(p-1) > inner) )
+ {
+ m_data.index(p) = m_data.index(p-1);
+ m_data.value(p) = m_data.value(p-1);
+ --p;
+ }
+
+ m_data.index(p) = convert_index(inner);
+ return (m_data.value(p) = 0);
+ }
+
+ if(m_data.size() != m_data.allocatedSize())
+ {
+    // make sure the matrix is compatible with random uncompressed insertion:
+ m_data.resize(m_data.allocatedSize());
+ this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
+ }
+
+ return insertUncompressed(row,col);
+}
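
A hedged usage sketch of the insertion paths above (sizes and values invented for illustration): reserving per-column space and inserting entries with increasing inner indices stays on the fast "push back" path, and makeCompressed() returns to compressed storage afterwards.

    #include <Eigen/SparseCore>

    int main()
    {
      const int n = 5;
      Eigen::SparseMatrix<double> m(n, n);
      m.reserve(Eigen::VectorXi::Constant(n, 2));   // ~2 entries per column (assumed)
      for(int j = 0; j < n; ++j)
      {
        m.insert(j, j) = 2.0;                       // push-back style: new, sorted entries
        if(j+1 < n) m.insert(j+1, j) = -1.0;
      }
      m.makeCompressed();                           // back to compressed (CCS) form
      return 0;
    }
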
+
+template<typename _Scalar, int _Options, typename _StorageIndex>
+EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
{
eigen_assert(!isCompressed());
const Index outer = IsRowMajor ? row : col;
- const Index inner = IsRowMajor ? col : row;
+ const StorageIndex inner = convert_index(IsRowMajor ? col : row);
Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
- Index innerNNZ = m_innerNonZeros[outer];
+ StorageIndex innerNNZ = m_innerNonZeros[outer];
if(innerNNZ>=room)
{
// this inner vector is full, we need to reallocate the whole buffer :(
- reserve(SingletonVector(outer,std::max<Index>(2,innerNNZ)));
+ reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
}
Index startId = m_outerIndex[outer];
@@ -1142,7 +1269,7 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse
m_data.value(p) = m_data.value(p-1);
--p;
}
- eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exist, you must call coeffRef to this end");
+ eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
m_innerNonZeros[outer]++;
@@ -1150,8 +1277,8 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse
return (m_data.value(p) = 0);
}
-template<typename _Scalar, int _Options, typename _Index>
-EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertCompressed(Index row, Index col)
+template<typename _Scalar, int _Options, typename _StorageIndex>
+EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
{
eigen_assert(isCompressed());
@@ -1164,7 +1291,7 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse
// we start a new inner vector
while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
{
- m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
+ m_outerIndex[previousOuter] = convert_index(m_data.size());
--previousOuter;
}
m_outerIndex[outer+1] = m_outerIndex[outer];
@@ -1174,11 +1301,11 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse
      // starts with: [ 0 0 0 0 0 1 ...] and we are inserting into, e.g.,
// the 2nd inner vector...
bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
- && (size_t(m_outerIndex[outer+1]) == m_data.size());
+ && (std::size_t(m_outerIndex[outer+1]) == m_data.size());
- size_t startId = m_outerIndex[outer];
- // FIXME let's make sure sizeof(long int) == sizeof(size_t)
- size_t p = m_outerIndex[outer+1];
+ std::size_t startId = m_outerIndex[outer];
+ // FIXME let's make sure sizeof(long int) == sizeof(std::size_t)
+ std::size_t p = m_outerIndex[outer+1];
++m_outerIndex[outer+1];
double reallocRatio = 1;
@@ -1257,6 +1384,20 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse
return (m_data.value(p) = 0);
}
+namespace internal {
+
+template<typename _Scalar, int _Options, typename _StorageIndex>
+struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
+ : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
+{
+ typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
+ typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
+ evaluator() : Base() {}
+ explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
+};
+
+}
+
} // end namespace Eigen
#endif // EIGEN_SPARSEMATRIX_H
diff --git a/eigen/Eigen/src/SparseCore/SparseMatrixBase.h b/eigen/Eigen/src/SparseCore/SparseMatrixBase.h
index 6f4a47c..c6b548f 100644
--- a/eigen/Eigen/src/SparseCore/SparseMatrixBase.h
+++ b/eigen/Eigen/src/SparseCore/SparseMatrixBase.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -18,39 +18,41 @@ namespace Eigen {
*
* \brief Base class of any sparse matrices or sparse expressions
*
- * \tparam Derived
+ * \tparam Derived is the derived type, e.g. a sparse matrix type, or an expression, etc.
*
* This class can be extended with the help of the plugin mechanism described on the page
- * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIXBASE_PLUGIN.
+ * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEMATRIXBASE_PLUGIN.
*/
template<typename Derived> class SparseMatrixBase
-#ifndef EIGEN_PARSED_BY_DOXYGEN
- : public internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
- typename NumTraits<typename internal::traits<Derived>::Scalar>::Real,
- EigenBase<Derived> >
-#else
: public EigenBase<Derived>
-#endif // not EIGEN_PARSED_BY_DOXYGEN
{
public:
typedef typename internal::traits<Derived>::Scalar Scalar;
+
+    /** The numeric type of the expression's coefficients, e.g. float, double, int or std::complex<float>, etc.
+ *
+ * It is an alias for the Scalar type */
+ typedef Scalar value_type;
+
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
typedef typename internal::traits<Derived>::StorageKind StorageKind;
- typedef typename internal::traits<Derived>::Index Index;
- typedef typename internal::traits<Derived>::Index StorageIndex;
+
+ /** The integer type used to \b store indices within a SparseMatrix.
+      * For a \c SparseMatrix<Scalar,Options,IndexType> it is an alias for the third template parameter \c IndexType. */
+ typedef typename internal::traits<Derived>::StorageIndex StorageIndex;
+
typedef typename internal::add_const_on_value_type_if_arithmetic<
typename internal::packet_traits<Scalar>::type
>::type PacketReturnType;
typedef SparseMatrixBase StorageBaseType;
+
+ typedef Matrix<StorageIndex,Dynamic,1> IndexVector;
+ typedef Matrix<Scalar,Dynamic,1> ScalarVector;
template<typename OtherDerived>
- Derived& operator=(const EigenBase<OtherDerived> &other)
- {
- other.derived().evalTo(derived());
- return derived();
- }
+ Derived& operator=(const EigenBase<OtherDerived> &other);
enum {
@@ -90,11 +92,6 @@ template<typename Derived> class SparseMatrixBase
* constructed from this one. See the \ref flags "list of flags".
*/
- CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
- /**< This is a rough measure of how expensive it is to read one coefficient from
- * this expression.
- */
-
IsRowMajor = Flags&RowMajorBit ? 1 : 0,
InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime)
@@ -110,10 +107,11 @@ template<typename Derived> class SparseMatrixBase
CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, Eigen::Transpose<const Derived> >,
Transpose<const Derived>
>::type AdjointReturnType;
+ typedef Transpose<Derived> TransposeReturnType;
+ typedef typename internal::add_const<Transpose<const Derived> >::type ConstTransposeReturnType;
-
- typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor, Index> PlainObject;
-
+    // FIXME storage order does not match evaluator storage order
+ typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor, StorageIndex> PlainObject;
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** This is the "real scalar" type; if the \a Scalar type is already real numbers
@@ -131,6 +129,8 @@ template<typename Derived> class SparseMatrixBase
/** \internal Represents a matrix with all coefficients equal to one another*/
typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Matrix<Scalar,Dynamic,Dynamic> > ConstantReturnType;
+ /** type of the equivalent dense matrix */
+ typedef Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> DenseMatrixType;
/** type of the equivalent square matrix */
typedef Matrix<Scalar,EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime),
EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
@@ -140,11 +140,20 @@ template<typename Derived> class SparseMatrixBase
inline Derived& const_cast_derived() const
{ return *static_cast<Derived*>(const_cast<SparseMatrixBase*>(this)); }
- typedef internal::special_scalar_op_base<Derived, Scalar, RealScalar, EigenBase<Derived> > Base;
- using Base::operator*;
+ typedef EigenBase<Derived> Base;
+
#endif // not EIGEN_PARSED_BY_DOXYGEN
#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+#define EIGEN_DOC_UNARY_ADDONS(METHOD,OP) /** <p>This method does not change the sparsity of \c *this: the OP is applied to explicitly stored coefficients only. \sa SparseCompressedBase::coeffs() </p> */
+#define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL /** <p> \warning This method returns a read-only expression for any sparse matrices. \sa \ref TutorialSparse_SubMatrices "Sparse block operations" </p> */
+#define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND) /** <p> \warning This method returns a read-write expression for COND sparse matrices only. Otherwise, the returned expression is read-only. \sa \ref TutorialSparse_SubMatrices "Sparse block operations" </p> */
+#else
+#define EIGEN_DOC_UNARY_ADDONS(X,Y)
+#define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
+#define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND)
+#endif
# include "../plugins/CommonCwiseUnaryOps.h"
# include "../plugins/CommonCwiseBinaryOps.h"
# include "../plugins/MatrixCwiseUnaryOps.h"
@@ -153,8 +162,10 @@ template<typename Derived> class SparseMatrixBase
# ifdef EIGEN_SPARSEMATRIXBASE_PLUGIN
# include EIGEN_SPARSEMATRIXBASE_PLUGIN
# endif
-# undef EIGEN_CURRENT_STORAGE_BASE_CLASS
#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
+#undef EIGEN_DOC_UNARY_ADDONS
+#undef EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
+#undef EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF
/** \returns the number of rows. \sa cols() */
inline Index rows() const { return derived().rows(); }
@@ -163,9 +174,6 @@ template<typename Derived> class SparseMatrixBase
/** \returns the number of coefficients, which is \a rows()*cols().
* \sa rows(), cols(). */
inline Index size() const { return rows() * cols(); }
- /** \returns the number of nonzero coefficients which is in practice the number
- * of stored coefficients. */
- inline Index nonZeros() const { return derived().nonZeros(); }
/** \returns true if either the number of rows or the number of columns is equal to 1.
* In other words, this function returns
* \code rows()==1 || cols()==1 \endcode
@@ -185,93 +193,23 @@ template<typename Derived> class SparseMatrixBase
template<typename OtherDerived>
- Derived& operator=(const ReturnByValue<OtherDerived>& other)
- {
- other.evalTo(derived());
- return derived();
- }
-
+ Derived& operator=(const ReturnByValue<OtherDerived>& other);
template<typename OtherDerived>
- inline Derived& operator=(const SparseMatrixBase<OtherDerived>& other)
- {
- return assign(other.derived());
- }
+ inline Derived& operator=(const SparseMatrixBase<OtherDerived>& other);
- inline Derived& operator=(const Derived& other)
- {
-// if (other.isRValue())
-// derived().swap(other.const_cast_derived());
-// else
- return assign(other.derived());
- }
+ inline Derived& operator=(const Derived& other);
protected:
template<typename OtherDerived>
- inline Derived& assign(const OtherDerived& other)
- {
- const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
- const Index outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? other.rows() : other.cols();
- if ((!transpose) && other.isRValue())
- {
- // eval without temporary
- derived().resize(other.rows(), other.cols());
- derived().setZero();
- derived().reserve((std::max)(this->rows(),this->cols())*2);
- for (Index j=0; j<outerSize; ++j)
- {
- derived().startVec(j);
- for (typename OtherDerived::InnerIterator it(other, j); it; ++it)
- {
- Scalar v = it.value();
- derived().insertBackByOuterInner(j,it.index()) = v;
- }
- }
- derived().finalize();
- }
- else
- {
- assignGeneric(other);
- }
- return derived();
- }
+ inline Derived& assign(const OtherDerived& other);
template<typename OtherDerived>
- inline void assignGeneric(const OtherDerived& other)
- {
- //const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
- eigen_assert(( ((internal::traits<Derived>::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
- (!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit)))) &&
- "the transpose operation is supposed to be handled in SparseMatrix::operator=");
-
- enum { Flip = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit) };
-
- const Index outerSize = other.outerSize();
- //typedef typename internal::conditional<transpose, LinkedVectorMatrix<Scalar,Flags&RowMajorBit>, Derived>::type TempType;
-      // thanks to shallow copies, we always eval to a temporary
- Derived temp(other.rows(), other.cols());
-
- temp.reserve((std::max)(this->rows(),this->cols())*2);
- for (Index j=0; j<outerSize; ++j)
- {
- temp.startVec(j);
- for (typename OtherDerived::InnerIterator it(other.derived(), j); it; ++it)
- {
- Scalar v = it.value();
- temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v;
- }
- }
- temp.finalize();
-
- derived() = temp.markAsRValue();
- }
+ inline void assignGeneric(const OtherDerived& other);
public:
- template<typename Lhs, typename Rhs>
- inline Derived& operator=(const SparseSparseProduct<Lhs,Rhs>& product);
-
friend std::ostream & operator << (std::ostream & s, const SparseMatrixBase& m)
{
typedef typename Derived::Nested Nested;
@@ -279,11 +217,12 @@ template<typename Derived> class SparseMatrixBase
if (Flags&RowMajorBit)
{
- const Nested nm(m.derived());
+ Nested nm(m.derived());
+ internal::evaluator<NestedCleaned> thisEval(nm);
for (Index row=0; row<nm.outerSize(); ++row)
{
Index col = 0;
- for (typename NestedCleaned::InnerIterator it(nm.derived(), row); it; ++it)
+ for (typename internal::evaluator<NestedCleaned>::InnerIterator it(thisEval, row); it; ++it)
{
for ( ; col<it.index(); ++col)
s << "0 ";
@@ -297,10 +236,11 @@ template<typename Derived> class SparseMatrixBase
}
else
{
- const Nested nm(m.derived());
+ Nested nm(m.derived());
+ internal::evaluator<NestedCleaned> thisEval(nm);
if (m.cols() == 1) {
Index row = 0;
- for (typename NestedCleaned::InnerIterator it(nm.derived(), 0); it; ++it)
+ for (typename internal::evaluator<NestedCleaned>::InnerIterator it(thisEval, 0); it; ++it)
{
for ( ; row<it.index(); ++row)
s << "0" << std::endl;
@@ -312,8 +252,8 @@ template<typename Derived> class SparseMatrixBase
}
else
{
- SparseMatrix<Scalar, RowMajorBit, Index> trans = m;
- s << static_cast<const SparseMatrixBase<SparseMatrix<Scalar, RowMajorBit, Index> >&>(trans);
+ SparseMatrix<Scalar, RowMajorBit, StorageIndex> trans = m;
+ s << static_cast<const SparseMatrixBase<SparseMatrix<Scalar, RowMajorBit, StorageIndex> >&>(trans);
}
}
return s;
@@ -323,12 +263,22 @@ template<typename Derived> class SparseMatrixBase
Derived& operator+=(const SparseMatrixBase<OtherDerived>& other);
template<typename OtherDerived>
Derived& operator-=(const SparseMatrixBase<OtherDerived>& other);
+
+ template<typename OtherDerived>
+ Derived& operator+=(const DiagonalBase<OtherDerived>& other);
+ template<typename OtherDerived>
+ Derived& operator-=(const DiagonalBase<OtherDerived>& other);
+
+ template<typename OtherDerived>
+ Derived& operator+=(const EigenBase<OtherDerived> &other);
+ template<typename OtherDerived>
+ Derived& operator-=(const EigenBase<OtherDerived> &other);
Derived& operator*=(const Scalar& other);
Derived& operator/=(const Scalar& other);
template<typename OtherDerived> struct CwiseProductDenseReturnType {
- typedef CwiseBinaryOp<internal::scalar_product_op<typename internal::scalar_product_traits<
+ typedef CwiseBinaryOp<internal::scalar_product_op<typename ScalarBinaryOpTraits<
typename internal::traits<Derived>::Scalar,
typename internal::traits<OtherDerived>::Scalar
>::ReturnType>,
@@ -341,36 +291,37 @@ template<typename Derived> class SparseMatrixBase
EIGEN_STRONG_INLINE const typename CwiseProductDenseReturnType<OtherDerived>::Type
cwiseProduct(const MatrixBase<OtherDerived> &other) const;
- // sparse * sparse
- template<typename OtherDerived>
- const typename SparseSparseProductReturnType<Derived,OtherDerived>::Type
- operator*(const SparseMatrixBase<OtherDerived> &other) const;
-
// sparse * diagonal
template<typename OtherDerived>
- const SparseDiagonalProduct<Derived,OtherDerived>
- operator*(const DiagonalBase<OtherDerived> &other) const;
+ const Product<Derived,OtherDerived>
+ operator*(const DiagonalBase<OtherDerived> &other) const
+ { return Product<Derived,OtherDerived>(derived(), other.derived()); }
// diagonal * sparse
template<typename OtherDerived> friend
- const SparseDiagonalProduct<OtherDerived,Derived>
+ const Product<OtherDerived,Derived>
operator*(const DiagonalBase<OtherDerived> &lhs, const SparseMatrixBase& rhs)
- { return SparseDiagonalProduct<OtherDerived,Derived>(lhs.derived(), rhs.derived()); }
-
- /** dense * sparse (return a dense object unless it is an outer product) */
- template<typename OtherDerived> friend
- const typename DenseSparseProductReturnType<OtherDerived,Derived>::Type
- operator*(const MatrixBase<OtherDerived>& lhs, const Derived& rhs)
- { return typename DenseSparseProductReturnType<OtherDerived,Derived>::Type(lhs.derived(),rhs); }
-
- /** sparse * dense (returns a dense object unless it is an outer product) */
+ { return Product<OtherDerived,Derived>(lhs.derived(), rhs.derived()); }
+
+ // sparse * sparse
+ template<typename OtherDerived>
+ const Product<Derived,OtherDerived,AliasFreeProduct>
+ operator*(const SparseMatrixBase<OtherDerived> &other) const;
+
+ // sparse * dense
template<typename OtherDerived>
- const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
+ const Product<Derived,OtherDerived>
operator*(const MatrixBase<OtherDerived> &other) const
- { return typename SparseDenseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived()); }
+ { return Product<Derived,OtherDerived>(derived(), other.derived()); }
+
+ // dense * sparse
+ template<typename OtherDerived> friend
+ const Product<OtherDerived,Derived>
+ operator*(const MatrixBase<OtherDerived> &lhs, const SparseMatrixBase& rhs)
+ { return Product<OtherDerived,Derived>(lhs.derived(), rhs.derived()); }
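
Taken together, the overloads above route every product kind through the unified Product<> expression; a small sketch (empty matrices, for brevity):

    Eigen::SparseMatrix<double> S(10,10), T(10,10);
    Eigen::MatrixXd D = Eigen::MatrixXd::Zero(10,10);
    Eigen::VectorXd v = Eigen::VectorXd::Zero(10);
    Eigen::MatrixXd R1 = S * D;              // sparse * dense  -> dense
    Eigen::MatrixXd R2 = D * S;              // dense  * sparse -> dense
    Eigen::SparseMatrix<double> R3 = S * T;  // sparse * sparse (alias-free product)
    Eigen::VectorXd y = S * v;               // sparse * dense vector
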
/** \returns an expression of P H P^-1 where H is the matrix represented by \c *this */
- SparseSymmetricPermutationProduct<Derived,Upper|Lower> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
+ SparseSymmetricPermutationProduct<Derived,Upper|Lower> twistedBy(const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) const
{
return SparseSymmetricPermutationProduct<Derived,Upper|Lower>(derived(), perm);
}
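
A usage sketch for twistedBy (assuming a symmetric H filled elsewhere; the expression is evaluated on assignment):

    const int n = 10;
    Eigen::SparseMatrix<double> H(n, n);                                // assumed symmetric
    Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> P(n);
    P.setIdentity();
    Eigen::SparseMatrix<double> res(n, n);
    res = H.twistedBy(P);   // expression of P * H * P^-1
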
@@ -378,22 +329,16 @@ template<typename Derived> class SparseMatrixBase
template<typename OtherDerived>
Derived& operator*=(const SparseMatrixBase<OtherDerived>& other);
- #ifdef EIGEN2_SUPPORT
- // deprecated
- template<typename OtherDerived>
- typename internal::plain_matrix_type_column_major<OtherDerived>::type
- solveTriangular(const MatrixBase<OtherDerived>& other) const;
-
- // deprecated
- template<typename OtherDerived>
- void solveTriangularInPlace(MatrixBase<OtherDerived>& other) const;
- #endif // EIGEN2_SUPPORT
-
template<int Mode>
- inline const SparseTriangularView<Derived, Mode> triangularView() const;
+ inline const TriangularView<const Derived, Mode> triangularView() const;
+
+ template<unsigned int UpLo> struct SelfAdjointViewReturnType { typedef SparseSelfAdjointView<Derived, UpLo> Type; };
+ template<unsigned int UpLo> struct ConstSelfAdjointViewReturnType { typedef const SparseSelfAdjointView<const Derived, UpLo> Type; };
- template<unsigned int UpLo> inline const SparseSelfAdjointView<Derived, UpLo> selfadjointView() const;
- template<unsigned int UpLo> inline SparseSelfAdjointView<Derived, UpLo> selfadjointView();
+ template<unsigned int UpLo> inline
+ typename ConstSelfAdjointViewReturnType<UpLo>::Type selfadjointView() const;
+ template<unsigned int UpLo> inline
+ typename SelfAdjointViewReturnType<UpLo>::Type selfadjointView();
template<typename OtherDerived> Scalar dot(const MatrixBase<OtherDerived>& other) const;
template<typename OtherDerived> Scalar dot(const SparseMatrixBase<OtherDerived>& other) const;
@@ -401,9 +346,9 @@ template<typename Derived> class SparseMatrixBase
RealScalar norm() const;
RealScalar blueNorm() const;
- Transpose<Derived> transpose() { return derived(); }
- const Transpose<const Derived> transpose() const { return derived(); }
- const AdjointReturnType adjoint() const { return transpose(); }
+ TransposeReturnType transpose() { return TransposeReturnType(derived()); }
+ const ConstTransposeReturnType transpose() const { return ConstTransposeReturnType(derived()); }
+ const AdjointReturnType adjoint() const { return AdjointReturnType(transpose()); }
// inner-vector
typedef Block<Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> InnerVectorReturnType;
@@ -417,25 +362,14 @@ template<typename Derived> class SparseMatrixBase
InnerVectorsReturnType innerVectors(Index outerStart, Index outerSize);
const ConstInnerVectorsReturnType innerVectors(Index outerStart, Index outerSize) const;
- /** \internal use operator= */
- template<typename DenseDerived>
- void evalTo(MatrixBase<DenseDerived>& dst) const
- {
- dst.setZero();
- for (Index j=0; j<outerSize(); ++j)
- for (typename Derived::InnerIterator i(derived(),j); i; ++i)
- dst.coeffRef(i.row(),i.col()) = i.value();
- }
-
- Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> toDense() const
+ DenseMatrixType toDense() const
{
- return derived();
+ return DenseMatrixType(derived());
}
template<typename OtherDerived>
bool isApprox(const SparseMatrixBase<OtherDerived>& other,
- const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const
- { return toDense().isApprox(other.toDense(),prec); }
+ const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
template<typename OtherDerived>
bool isApprox(const MatrixBase<OtherDerived>& other,
@@ -451,10 +385,19 @@ template<typename Derived> class SparseMatrixBase
{ return typename internal::eval<Derived>::type(derived()); }
Scalar sum() const;
+
+ inline const SparseView<Derived>
+ pruned(const Scalar& reference = Scalar(0), const RealScalar& epsilon = NumTraits<Scalar>::dummy_precision()) const;
protected:
bool m_isRValue;
+
+ static inline StorageIndex convert_index(const Index idx) {
+ return internal::convert_index<StorageIndex>(idx);
+ }
+ private:
+ template<typename Dest> void evalTo(Dest &) const;
};
} // end namespace Eigen
diff --git a/eigen/Eigen/src/SparseCore/SparsePermutation.h b/eigen/Eigen/src/SparseCore/SparsePermutation.h
index 75e2100..ef38357 100644
--- a/eigen/Eigen/src/SparseCore/SparsePermutation.h
+++ b/eigen/Eigen/src/SparseCore/SparsePermutation.h
@@ -16,131 +16,161 @@ namespace Eigen {
namespace internal {
-template<typename PermutationType, typename MatrixType, int Side, bool Transposed>
-struct traits<permut_sparsematrix_product_retval<PermutationType, MatrixType, Side, Transposed> >
+template<typename ExpressionType, int Side, bool Transposed>
+struct permutation_matrix_product<ExpressionType, Side, Transposed, SparseShape>
{
- typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
- typedef typename MatrixTypeNestedCleaned::Scalar Scalar;
- typedef typename MatrixTypeNestedCleaned::Index Index;
- enum {
- SrcStorageOrder = MatrixTypeNestedCleaned::Flags&RowMajorBit ? RowMajor : ColMajor,
- MoveOuter = SrcStorageOrder==RowMajor ? Side==OnTheLeft : Side==OnTheRight
- };
+ typedef typename nested_eval<ExpressionType, 1>::type MatrixType;
+ typedef typename remove_all<MatrixType>::type MatrixTypeCleaned;
- typedef typename internal::conditional<MoveOuter,
- SparseMatrix<Scalar,SrcStorageOrder,Index>,
- SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,Index> >::type ReturnType;
-};
-
-template<typename PermutationType, typename MatrixType, int Side, bool Transposed>
-struct permut_sparsematrix_product_retval
- : public ReturnByValue<permut_sparsematrix_product_retval<PermutationType, MatrixType, Side, Transposed> >
-{
- typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
- typedef typename MatrixTypeNestedCleaned::Scalar Scalar;
- typedef typename MatrixTypeNestedCleaned::Index Index;
+ typedef typename MatrixTypeCleaned::Scalar Scalar;
+ typedef typename MatrixTypeCleaned::StorageIndex StorageIndex;
enum {
- SrcStorageOrder = MatrixTypeNestedCleaned::Flags&RowMajorBit ? RowMajor : ColMajor,
+ SrcStorageOrder = MatrixTypeCleaned::Flags&RowMajorBit ? RowMajor : ColMajor,
MoveOuter = SrcStorageOrder==RowMajor ? Side==OnTheLeft : Side==OnTheRight
};
+
+ typedef typename internal::conditional<MoveOuter,
+ SparseMatrix<Scalar,SrcStorageOrder,StorageIndex>,
+ SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,StorageIndex> >::type ReturnType;
- permut_sparsematrix_product_retval(const PermutationType& perm, const MatrixType& matrix)
- : m_permutation(perm), m_matrix(matrix)
- {}
-
- inline int rows() const { return m_matrix.rows(); }
- inline int cols() const { return m_matrix.cols(); }
-
- template<typename Dest> inline void evalTo(Dest& dst) const
+ template<typename Dest,typename PermutationType>
+ static inline void run(Dest& dst, const PermutationType& perm, const ExpressionType& xpr)
{
+ MatrixType mat(xpr);
if(MoveOuter)
{
- SparseMatrix<Scalar,SrcStorageOrder,Index> tmp(m_matrix.rows(), m_matrix.cols());
- Matrix<Index,Dynamic,1> sizes(m_matrix.outerSize());
- for(Index j=0; j<m_matrix.outerSize(); ++j)
+ SparseMatrix<Scalar,SrcStorageOrder,StorageIndex> tmp(mat.rows(), mat.cols());
+ Matrix<StorageIndex,Dynamic,1> sizes(mat.outerSize());
+ for(Index j=0; j<mat.outerSize(); ++j)
{
- Index jp = m_permutation.indices().coeff(j);
- sizes[((Side==OnTheLeft) ^ Transposed) ? jp : j] = m_matrix.innerVector(((Side==OnTheRight) ^ Transposed) ? jp : j).nonZeros();
+ Index jp = perm.indices().coeff(j);
+ sizes[((Side==OnTheLeft) ^ Transposed) ? jp : j] = StorageIndex(mat.innerVector(((Side==OnTheRight) ^ Transposed) ? jp : j).nonZeros());
}
tmp.reserve(sizes);
- for(Index j=0; j<m_matrix.outerSize(); ++j)
+ for(Index j=0; j<mat.outerSize(); ++j)
{
- Index jp = m_permutation.indices().coeff(j);
+ Index jp = perm.indices().coeff(j);
Index jsrc = ((Side==OnTheRight) ^ Transposed) ? jp : j;
Index jdst = ((Side==OnTheLeft) ^ Transposed) ? jp : j;
- for(typename MatrixTypeNestedCleaned::InnerIterator it(m_matrix,jsrc); it; ++it)
+ for(typename MatrixTypeCleaned::InnerIterator it(mat,jsrc); it; ++it)
tmp.insertByOuterInner(jdst,it.index()) = it.value();
}
dst = tmp;
}
else
{
- SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,Index> tmp(m_matrix.rows(), m_matrix.cols());
- Matrix<Index,Dynamic,1> sizes(tmp.outerSize());
+ SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,StorageIndex> tmp(mat.rows(), mat.cols());
+ Matrix<StorageIndex,Dynamic,1> sizes(tmp.outerSize());
sizes.setZero();
- PermutationMatrix<Dynamic,Dynamic,Index> perm;
+ PermutationMatrix<Dynamic,Dynamic,StorageIndex> perm_cpy;
if((Side==OnTheLeft) ^ Transposed)
- perm = m_permutation;
+ perm_cpy = perm;
else
- perm = m_permutation.transpose();
+ perm_cpy = perm.transpose();
- for(Index j=0; j<m_matrix.outerSize(); ++j)
- for(typename MatrixTypeNestedCleaned::InnerIterator it(m_matrix,j); it; ++it)
- sizes[perm.indices().coeff(it.index())]++;
+ for(Index j=0; j<mat.outerSize(); ++j)
+ for(typename MatrixTypeCleaned::InnerIterator it(mat,j); it; ++it)
+ sizes[perm_cpy.indices().coeff(it.index())]++;
tmp.reserve(sizes);
- for(Index j=0; j<m_matrix.outerSize(); ++j)
- for(typename MatrixTypeNestedCleaned::InnerIterator it(m_matrix,j); it; ++it)
- tmp.insertByOuterInner(perm.indices().coeff(it.index()),j) = it.value();
+ for(Index j=0; j<mat.outerSize(); ++j)
+ for(typename MatrixTypeCleaned::InnerIterator it(mat,j); it; ++it)
+ tmp.insertByOuterInner(perm_cpy.indices().coeff(it.index()),j) = it.value();
dst = tmp;
}
}
-
- protected:
- const PermutationType& m_permutation;
- typename MatrixType::Nested m_matrix;
};
}
+namespace internal {
+
+template <int ProductTag> struct product_promote_storage_type<Sparse, PermutationStorage, ProductTag> { typedef Sparse ret; };
+template <int ProductTag> struct product_promote_storage_type<PermutationStorage, Sparse, ProductTag> { typedef Sparse ret; };
+
+// TODO, the following two overloads are only needed to define the right temporary type through
+// typename traits<permutation_sparse_matrix_product<Rhs,Lhs,OnTheRight,false> >::ReturnType
+// whereas it should be correctly handled by traits<Product<> >::PlainObject
+
+template<typename Lhs, typename Rhs, int ProductTag>
+struct product_evaluator<Product<Lhs, Rhs, AliasFreeProduct>, ProductTag, PermutationShape, SparseShape>
+ : public evaluator<typename permutation_matrix_product<Rhs,OnTheLeft,false,SparseShape>::ReturnType>
+{
+ typedef Product<Lhs, Rhs, AliasFreeProduct> XprType;
+ typedef typename permutation_matrix_product<Rhs,OnTheLeft,false,SparseShape>::ReturnType PlainObject;
+ typedef evaluator<PlainObject> Base;
+
+ enum {
+ Flags = Base::Flags | EvalBeforeNestingBit
+ };
+
+ explicit product_evaluator(const XprType& xpr)
+ : m_result(xpr.rows(), xpr.cols())
+ {
+ ::new (static_cast<Base*>(this)) Base(m_result);
+ generic_product_impl<Lhs, Rhs, PermutationShape, SparseShape, ProductTag>::evalTo(m_result, xpr.lhs(), xpr.rhs());
+ }
+protected:
+ PlainObject m_result;
+};
+
+template<typename Lhs, typename Rhs, int ProductTag>
+struct product_evaluator<Product<Lhs, Rhs, AliasFreeProduct>, ProductTag, SparseShape, PermutationShape >
+ : public evaluator<typename permutation_matrix_product<Lhs,OnTheRight,false,SparseShape>::ReturnType>
+{
+ typedef Product<Lhs, Rhs, AliasFreeProduct> XprType;
+ typedef typename permutation_matrix_product<Lhs,OnTheRight,false,SparseShape>::ReturnType PlainObject;
+ typedef evaluator<PlainObject> Base;
+
+ enum {
+ Flags = Base::Flags | EvalBeforeNestingBit
+ };
+
+ explicit product_evaluator(const XprType& xpr)
+ : m_result(xpr.rows(), xpr.cols())
+ {
+ ::new (static_cast<Base*>(this)) Base(m_result);
+ generic_product_impl<Lhs, Rhs, SparseShape, PermutationShape, ProductTag>::evalTo(m_result, xpr.lhs(), xpr.rhs());
+ }
+
+protected:
+ PlainObject m_result;
+};
+
+} // end namespace internal
/** \returns the matrix with the permutation applied to the columns
*/
template<typename SparseDerived, typename PermDerived>
-inline const internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheRight, false>
+inline const Product<SparseDerived, PermDerived, AliasFreeProduct>
operator*(const SparseMatrixBase<SparseDerived>& matrix, const PermutationBase<PermDerived>& perm)
-{
- return internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheRight, false>(perm, matrix.derived());
-}
+{ return Product<SparseDerived, PermDerived, AliasFreeProduct>(matrix.derived(), perm.derived()); }
/** \returns the matrix with the permutation applied to the rows
*/
template<typename SparseDerived, typename PermDerived>
-inline const internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheLeft, false>
+inline const Product<PermDerived, SparseDerived, AliasFreeProduct>
operator*( const PermutationBase<PermDerived>& perm, const SparseMatrixBase<SparseDerived>& matrix)
-{
- return internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheLeft, false>(perm, matrix.derived());
-}
-
+{ return Product<PermDerived, SparseDerived, AliasFreeProduct>(perm.derived(), matrix.derived()); }
/** \returns the matrix with the inverse permutation applied to the columns.
*/
-template<typename SparseDerived, typename PermDerived>
-inline const internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheRight, true>
-operator*(const SparseMatrixBase<SparseDerived>& matrix, const Transpose<PermutationBase<PermDerived> >& tperm)
+template<typename SparseDerived, typename PermutationType>
+inline const Product<SparseDerived, Inverse<PermutationType>, AliasFreeProduct>
+operator*(const SparseMatrixBase<SparseDerived>& matrix, const InverseImpl<PermutationType, PermutationStorage>& tperm)
{
- return internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheRight, true>(tperm.nestedPermutation(), matrix.derived());
+ return Product<SparseDerived, Inverse<PermutationType>, AliasFreeProduct>(matrix.derived(), tperm.derived());
}
/** \returns the matrix with the inverse permutation applied to the rows.
*/
-template<typename SparseDerived, typename PermDerived>
-inline const internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheLeft, true>
-operator*(const Transpose<PermutationBase<PermDerived> >& tperm, const SparseMatrixBase<SparseDerived>& matrix)
+template<typename SparseDerived, typename PermutationType>
+inline const Product<Inverse<PermutationType>, SparseDerived, AliasFreeProduct>
+operator*(const InverseImpl<PermutationType,PermutationStorage>& tperm, const SparseMatrixBase<SparseDerived>& matrix)
{
- return internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheLeft, true>(tperm.nestedPermutation(), matrix.derived());
+ return Product<Inverse<PermutationType>, SparseDerived, AliasFreeProduct>(tperm.derived(), matrix.derived());
}
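
A hedged usage sketch of the four permutation products defined above (values invented for illustration):

    #include <Eigen/SparseCore>

    int main()
    {
      const int n = 4;
      Eigen::SparseMatrix<double> A(n, n);
      A.insert(0, 1) = 1.0;
      A.insert(2, 3) = 2.0;
      Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> P(n);
      P.indices() << 1, 0, 3, 2;  // swap rows/columns pairwise
      Eigen::SparseMatrix<double> rowPerm = P * A;                  // permute rows
      Eigen::SparseMatrix<double> colPerm = A * P;                  // permute columns
      Eigen::SparseMatrix<double> undone  = P.inverse() * rowPerm;  // inverse permutation
      return 0;
    }
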
} // end namespace Eigen
diff --git a/eigen/Eigen/src/SparseCore/SparseProduct.h b/eigen/Eigen/src/SparseCore/SparseProduct.h
index cf76630..4cbf687 100644
--- a/eigen/Eigen/src/SparseCore/SparseProduct.h
+++ b/eigen/Eigen/src/SparseCore/SparseProduct.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -12,158 +12,6 @@
namespace Eigen {
-template<typename Lhs, typename Rhs>
-struct SparseSparseProductReturnType
-{
- typedef typename internal::traits<Lhs>::Scalar Scalar;
- typedef typename internal::traits<Lhs>::Index Index;
- enum {
- LhsRowMajor = internal::traits<Lhs>::Flags & RowMajorBit,
- RhsRowMajor = internal::traits<Rhs>::Flags & RowMajorBit,
- TransposeRhs = (!LhsRowMajor) && RhsRowMajor,
- TransposeLhs = LhsRowMajor && (!RhsRowMajor)
- };
-
- typedef typename internal::conditional<TransposeLhs,
- SparseMatrix<Scalar,0,Index>,
- typename internal::nested<Lhs,Rhs::RowsAtCompileTime>::type>::type LhsNested;
-
- typedef typename internal::conditional<TransposeRhs,
- SparseMatrix<Scalar,0,Index>,
- typename internal::nested<Rhs,Lhs::RowsAtCompileTime>::type>::type RhsNested;
-
- typedef SparseSparseProduct<LhsNested, RhsNested> Type;
-};
-
-namespace internal {
-template<typename LhsNested, typename RhsNested>
-struct traits<SparseSparseProduct<LhsNested, RhsNested> >
-{
- typedef MatrixXpr XprKind;
- // clean the nested types:
- typedef typename remove_all<LhsNested>::type _LhsNested;
- typedef typename remove_all<RhsNested>::type _RhsNested;
- typedef typename _LhsNested::Scalar Scalar;
- typedef typename promote_index_type<typename traits<_LhsNested>::Index,
- typename traits<_RhsNested>::Index>::type Index;
-
- enum {
- LhsCoeffReadCost = _LhsNested::CoeffReadCost,
- RhsCoeffReadCost = _RhsNested::CoeffReadCost,
- LhsFlags = _LhsNested::Flags,
- RhsFlags = _RhsNested::Flags,
-
- RowsAtCompileTime = _LhsNested::RowsAtCompileTime,
- ColsAtCompileTime = _RhsNested::ColsAtCompileTime,
- MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime,
-
- InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime),
-
- EvalToRowMajor = (RhsFlags & LhsFlags & RowMajorBit),
-
- RemovedBits = ~(EvalToRowMajor ? 0 : RowMajorBit),
-
- Flags = (int(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits)
- | EvalBeforeAssigningBit
- | EvalBeforeNestingBit,
-
- CoeffReadCost = Dynamic
- };
-
- typedef Sparse StorageKind;
-};
-
-} // end namespace internal
-
-template<typename LhsNested, typename RhsNested>
-class SparseSparseProduct : internal::no_assignment_operator,
- public SparseMatrixBase<SparseSparseProduct<LhsNested, RhsNested> >
-{
- public:
-
- typedef SparseMatrixBase<SparseSparseProduct> Base;
- EIGEN_DENSE_PUBLIC_INTERFACE(SparseSparseProduct)
-
- private:
-
- typedef typename internal::traits<SparseSparseProduct>::_LhsNested _LhsNested;
- typedef typename internal::traits<SparseSparseProduct>::_RhsNested _RhsNested;
-
- public:
-
- template<typename Lhs, typename Rhs>
- EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs)
- : m_lhs(lhs), m_rhs(rhs), m_tolerance(0), m_conservative(true)
- {
- init();
- }
-
- template<typename Lhs, typename Rhs>
- EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs, const RealScalar& tolerance)
- : m_lhs(lhs), m_rhs(rhs), m_tolerance(tolerance), m_conservative(false)
- {
- init();
- }
-
- SparseSparseProduct pruned(const Scalar& reference = 0, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision()) const
- {
- using std::abs;
- return SparseSparseProduct(m_lhs,m_rhs,abs(reference)*epsilon);
- }
-
- template<typename Dest>
- void evalTo(Dest& result) const
- {
- if(m_conservative)
- internal::conservative_sparse_sparse_product_selector<_LhsNested, _RhsNested, Dest>::run(lhs(),rhs(),result);
- else
- internal::sparse_sparse_product_with_pruning_selector<_LhsNested, _RhsNested, Dest>::run(lhs(),rhs(),result,m_tolerance);
- }
-
- EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
- EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
-
- EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
- EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
-
- protected:
- void init()
- {
- eigen_assert(m_lhs.cols() == m_rhs.rows());
-
- enum {
- ProductIsValid = _LhsNested::ColsAtCompileTime==Dynamic
- || _RhsNested::RowsAtCompileTime==Dynamic
- || int(_LhsNested::ColsAtCompileTime)==int(_RhsNested::RowsAtCompileTime),
- AreVectors = _LhsNested::IsVectorAtCompileTime && _RhsNested::IsVectorAtCompileTime,
- SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(_LhsNested,_RhsNested)
- };
- // note to the lost user:
- // * for a dot product use: v1.dot(v2)
- // * for a coeff-wise product use: v1.cwise()*v2
- EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
- INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
- EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
- INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
- EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
- }
-
- LhsNested m_lhs;
- RhsNested m_rhs;
- RealScalar m_tolerance;
- bool m_conservative;
-};
-
-// sparse = sparse * sparse
-template<typename Derived>
-template<typename Lhs, typename Rhs>
-inline Derived& SparseMatrixBase<Derived>::operator=(const SparseSparseProduct<Lhs,Rhs>& product)
-{
- product.evalTo(derived());
- return derived();
-}
-
/** \returns an expression of the product of two sparse matrices.
  * By default a conservative product preserving the symbolic non-zeros is performed.
* The automatic pruning of the small values can be achieved by calling the pruned() function
@@ -177,12 +25,145 @@ inline Derived& SparseMatrixBase<Derived>::operator=(const SparseSparseProduct<L
* */
template<typename Derived>
template<typename OtherDerived>
-inline const typename SparseSparseProductReturnType<Derived,OtherDerived>::Type
+inline const Product<Derived,OtherDerived,AliasFreeProduct>
SparseMatrixBase<Derived>::operator*(const SparseMatrixBase<OtherDerived> &other) const
{
- return typename SparseSparseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+ return Product<Derived,OtherDerived,AliasFreeProduct>(derived(), other.derived());
}
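
Matching the doc above, a hedged sketch of the two product flavors (matrices left empty for brevity):

    Eigen::SparseMatrix<double> A(100,100), B(100,100);
    Eigen::SparseMatrix<double> C  = A * B;            // conservative: keeps symbolic non-zeros
    Eigen::SparseMatrix<double> Cp = (A * B).pruned(); // prunes numerical zeros during the product
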
+namespace internal {
+
+// sparse * sparse
+template<typename Lhs, typename Rhs, int ProductType>
+struct generic_product_impl<Lhs, Rhs, SparseShape, SparseShape, ProductType>
+{
+ template<typename Dest>
+ static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
+ {
+ evalTo(dst, lhs, rhs, typename evaluator_traits<Dest>::Shape());
+ }
+
+ // dense += sparse * sparse
+ template<typename Dest,typename ActualLhs>
+ static void addTo(Dest& dst, const ActualLhs& lhs, const Rhs& rhs, typename enable_if<is_same<typename evaluator_traits<Dest>::Shape,DenseShape>::value,int*>::type* = 0)
+ {
+ typedef typename nested_eval<ActualLhs,Dynamic>::type LhsNested;
+ typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
+ LhsNested lhsNested(lhs);
+ RhsNested rhsNested(rhs);
+ internal::sparse_sparse_to_dense_product_selector<typename remove_all<LhsNested>::type,
+ typename remove_all<RhsNested>::type, Dest>::run(lhsNested,rhsNested,dst);
+ }
+
+ // dense -= sparse * sparse
+ template<typename Dest>
+ static void subTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, typename enable_if<is_same<typename evaluator_traits<Dest>::Shape,DenseShape>::value,int*>::type* = 0)
+ {
+ addTo(dst, -lhs, rhs);
+ }
+
+protected:
+
+ // sparse = sparse * sparse
+ template<typename Dest>
+ static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, SparseShape)
+ {
+ typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
+ typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
+ LhsNested lhsNested(lhs);
+ RhsNested rhsNested(rhs);
+ internal::conservative_sparse_sparse_product_selector<typename remove_all<LhsNested>::type,
+ typename remove_all<RhsNested>::type, Dest>::run(lhsNested,rhsNested,dst);
+ }
+
+ // dense = sparse * sparse
+ template<typename Dest>
+ static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, DenseShape)
+ {
+ dst.setZero();
+ addTo(dst, lhs, rhs);
+ }
+};
+
+// sparse * sparse-triangular
+template<typename Lhs, typename Rhs, int ProductType>
+struct generic_product_impl<Lhs, Rhs, SparseShape, SparseTriangularShape, ProductType>
+ : public generic_product_impl<Lhs, Rhs, SparseShape, SparseShape, ProductType>
+{};
+
+// sparse-triangular * sparse
+template<typename Lhs, typename Rhs, int ProductType>
+struct generic_product_impl<Lhs, Rhs, SparseTriangularShape, SparseShape, ProductType>
+ : public generic_product_impl<Lhs, Rhs, SparseShape, SparseShape, ProductType>
+{};
+
+// dense = sparse-product (can be sparse*sparse, sparse*perm, etc.)
+template< typename DstXprType, typename Lhs, typename Rhs>
+struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>
+{
+ typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
+ {
+ Index dstRows = src.rows();
+ Index dstCols = src.cols();
+ if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
+ dst.resize(dstRows, dstCols);
+
+ generic_product_impl<Lhs, Rhs>::evalTo(dst,src.lhs(),src.rhs());
+ }
+};
+
+// dense += sparse-product (can be sparse*sparse, sparse*perm, etc.)
+template< typename DstXprType, typename Lhs, typename Rhs>
+struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::add_assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>
+{
+ typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
+ {
+ generic_product_impl<Lhs, Rhs>::addTo(dst,src.lhs(),src.rhs());
+ }
+};
+
+// dense -= sparse-product (can be sparse*sparse, sparse*perm, etc.)
+template< typename DstXprType, typename Lhs, typename Rhs>
+struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::sub_assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>
+{
+ typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
+ {
+ generic_product_impl<Lhs, Rhs>::subTo(dst,src.lhs(),src.rhs());
+ }
+};
+
+template<typename Lhs, typename Rhs, int Options>
+struct unary_evaluator<SparseView<Product<Lhs, Rhs, Options> >, IteratorBased>
+ : public evaluator<typename Product<Lhs, Rhs, DefaultProduct>::PlainObject>
+{
+ typedef SparseView<Product<Lhs, Rhs, Options> > XprType;
+ typedef typename XprType::PlainObject PlainObject;
+ typedef evaluator<PlainObject> Base;
+
+ explicit unary_evaluator(const XprType& xpr)
+ : m_result(xpr.rows(), xpr.cols())
+ {
+ using std::abs;
+ ::new (static_cast<Base*>(this)) Base(m_result);
+ typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
+ typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
+ LhsNested lhsNested(xpr.nestedExpression().lhs());
+ RhsNested rhsNested(xpr.nestedExpression().rhs());
+
+ internal::sparse_sparse_product_with_pruning_selector<typename remove_all<LhsNested>::type,
+ typename remove_all<RhsNested>::type, PlainObject>::run(lhsNested,rhsNested,m_result,
+ abs(xpr.reference())*xpr.epsilon());
+ }
+
+protected:
+ PlainObject m_result;
+};
+
+} // end namespace internal
+
} // end namespace Eigen
#endif // EIGEN_SPARSEPRODUCT_H
diff --git a/eigen/Eigen/src/SparseCore/SparseRedux.h b/eigen/Eigen/src/SparseCore/SparseRedux.h
index 51ed9ae..4587749 100644
--- a/eigen/Eigen/src/SparseCore/SparseRedux.h
+++ b/eigen/Eigen/src/SparseCore/SparseRedux.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -18,8 +18,9 @@ SparseMatrixBase<Derived>::sum() const
{
eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix");
Scalar res(0);
+ internal::evaluator<Derived> thisEval(derived());
for (Index j=0; j<outerSize(); ++j)
- for (typename Derived::InnerIterator iter(derived(),j); iter; ++iter)
+ for (typename internal::evaluator<Derived>::InnerIterator iter(thisEval,j); iter; ++iter)
res += iter.value();
return res;
}
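
This change is part of the patch-wide move to evaluator-based traversal. A hedged generic sketch of the pattern (the helper name sparse_sum_sketch is hypothetical; it relies on Eigen internals and is shown for illustration only, user code would normally iterate on a concrete SparseMatrix with its own InnerIterator):

    #include <Eigen/SparseCore>

    template<typename Derived>
    typename Derived::Scalar sparse_sum_sketch(const Eigen::SparseMatrixBase<Derived>& m)
    {
      typedef Eigen::internal::evaluator<Derived> Evaluator;
      typename Derived::Scalar res(0);
      Evaluator eval(m.derived());                 // evaluate the expression once
      for (Eigen::Index j = 0; j < m.outerSize(); ++j)
        for (typename Evaluator::InnerIterator it(eval, j); it; ++it)
          res += it.value();                       // visit stored coefficients only
      return res;
    }
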
diff --git a/eigen/Eigen/src/SparseCore/SparseRef.h b/eigen/Eigen/src/SparseCore/SparseRef.h
new file mode 100644
index 0000000..d91f38f
--- /dev/null
+++ b/eigen/Eigen/src/SparseCore/SparseRef.h
@@ -0,0 +1,397 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_REF_H
+#define EIGEN_SPARSE_REF_H
+
+namespace Eigen {
+
+enum {
+ StandardCompressedFormat = 2 /**< used by Ref<SparseMatrix> to specify whether the input storage must be in standard compressed form */
+};
+
+namespace internal {
+
+template<typename Derived> class SparseRefBase;
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>
+struct traits<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
+ : public traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >
+{
+ typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;
+ enum {
+ Options = _Options,
+ Flags = traits<PlainObjectType>::Flags | CompressedAccessBit | NestByRefBit
+ };
+
+ template<typename Derived> struct match {
+ enum {
+ StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)),
+ MatchAtCompileTime = (Derived::Flags&CompressedAccessBit) && StorageOrderMatch
+ };
+ typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;
+ };
+
+};
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>
+struct traits<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
+ : public traits<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
+{
+ enum {
+ Flags = (traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >::Flags | CompressedAccessBit | NestByRefBit) & ~LvalueBit
+ };
+};
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>
+struct traits<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
+ : public traits<SparseVector<MatScalar,MatOptions,MatIndex> >
+{
+ typedef SparseVector<MatScalar,MatOptions,MatIndex> PlainObjectType;
+ enum {
+ Options = _Options,
+ Flags = traits<PlainObjectType>::Flags | CompressedAccessBit | NestByRefBit
+ };
+
+ template<typename Derived> struct match {
+ enum {
+ MatchAtCompileTime = (Derived::Flags&CompressedAccessBit) && Derived::IsVectorAtCompileTime
+ };
+ typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;
+ };
+
+};
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>
+struct traits<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
+ : public traits<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
+{
+ enum {
+ Flags = (traits<SparseVector<MatScalar,MatOptions,MatIndex> >::Flags | CompressedAccessBit | NestByRefBit) & ~LvalueBit
+ };
+};
+
+template<typename Derived>
+struct traits<SparseRefBase<Derived> > : public traits<Derived> {};
+
+template<typename Derived> class SparseRefBase
+ : public SparseMapBase<Derived>
+{
+public:
+
+ typedef SparseMapBase<Derived> Base;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseRefBase)
+
+ SparseRefBase()
+ : Base(RowsAtCompileTime==Dynamic?0:RowsAtCompileTime,ColsAtCompileTime==Dynamic?0:ColsAtCompileTime, 0, 0, 0, 0, 0)
+ {}
+
+protected:
+
+ template<typename Expression>
+ void construct(Expression& expr)
+ {
+ if(expr.outerIndexPtr()==0)
+ ::new (static_cast<Base*>(this)) Base(expr.size(), expr.nonZeros(), expr.innerIndexPtr(), expr.valuePtr());
+ else
+ ::new (static_cast<Base*>(this)) Base(expr.rows(), expr.cols(), expr.nonZeros(), expr.outerIndexPtr(), expr.innerIndexPtr(), expr.valuePtr(), expr.innerNonZeroPtr());
+ }
+};
+
+} // namespace internal
+
+
+/**
+ * \ingroup SparseCore_Module
+ *
+ * \brief A sparse matrix expression referencing an existing sparse expression
+ *
+ * \tparam SparseMatrixType the equivalent sparse matrix type of the referenced data; it must be a template instance of class SparseMatrix.
+ * \tparam Options specifies whether a standard compressed format is required; \c Options can be \c #StandardCompressedFormat or \c 0.
+ *                 The default is \c 0.
+ *
+ * \sa class Ref
+ */
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+class Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType >
+ : public internal::SparseRefBase<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType > >
+#else
+template<typename SparseMatrixType, int Options>
+class Ref<SparseMatrixType, Options>
+ : public SparseMapBase<Derived,WriteAccessors> // yes, using Derived here is weird, but it works!
+#endif
+{
+ typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;
+ typedef internal::traits<Ref> Traits;
+ template<int OtherOptions>
+ inline Ref(const SparseMatrix<MatScalar,OtherOptions,MatIndex>& expr);
+ template<int OtherOptions>
+ inline Ref(const MappedSparseMatrix<MatScalar,OtherOptions,MatIndex>& expr);
+ public:
+
+ typedef internal::SparseRefBase<Ref> Base;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Ref)
+
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ template<int OtherOptions>
+ inline Ref(SparseMatrix<MatScalar,OtherOptions,MatIndex>& expr)
+ {
+ EIGEN_STATIC_ASSERT(bool(Traits::template match<SparseMatrix<MatScalar,OtherOptions,MatIndex> >::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
+ eigen_assert( ((Options & int(StandardCompressedFormat))==0) || (expr.isCompressed()) );
+ Base::construct(expr.derived());
+ }
+
+ template<int OtherOptions>
+ inline Ref(MappedSparseMatrix<MatScalar,OtherOptions,MatIndex>& expr)
+ {
+ EIGEN_STATIC_ASSERT(bool(Traits::template match<SparseMatrix<MatScalar,OtherOptions,MatIndex> >::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
+ eigen_assert( ((Options & int(StandardCompressedFormat))==0) || (expr.isCompressed()) );
+ Base::construct(expr.derived());
+ }
+
+ template<typename Derived>
+ inline Ref(const SparseCompressedBase<Derived>& expr)
+ #else
+ /** Implicit constructor from any sparse expression (2D matrix or 1D vector) */
+ template<typename Derived>
+ inline Ref(SparseCompressedBase<Derived>& expr)
+ #endif
+ {
+ EIGEN_STATIC_ASSERT(bool(internal::is_lvalue<Derived>::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
+ EIGEN_STATIC_ASSERT(bool(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
+ eigen_assert( ((Options & int(StandardCompressedFormat))==0) || (expr.isCompressed()) );
+ Base::construct(expr.const_cast_derived());
+ }
+};
+
+// this is the const ref version
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+class Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType>
+ : public internal::SparseRefBase<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
+{
+ typedef SparseMatrix<MatScalar,MatOptions,MatIndex> TPlainObjectType;
+ typedef internal::traits<Ref> Traits;
+ public:
+
+ typedef internal::SparseRefBase<Ref> Base;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Ref)
+
+ template<typename Derived>
+ inline Ref(const SparseMatrixBase<Derived>& expr) : m_hasCopy(false)
+ {
+ construct(expr.derived(), typename Traits::template match<Derived>::type());
+ }
+
+ inline Ref(const Ref& other) : Base(other), m_hasCopy(false) {
+ // the copy constructor must not copy m_object, to avoid an unnecessary malloc and copy
+ }
+
+ template<typename OtherRef>
+ inline Ref(const RefBase<OtherRef>& other) : m_hasCopy(false) {
+ construct(other.derived(), typename Traits::template match<OtherRef>::type());
+ }
+
+ ~Ref() {
+ if(m_hasCopy) {
+ TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
+ obj->~TPlainObjectType();
+ }
+ }
+
+ protected:
+
+ template<typename Expression>
+ void construct(const Expression& expr,internal::true_type)
+ {
+ if((Options & int(StandardCompressedFormat)) && (!expr.isCompressed()))
+ {
+ TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
+ ::new (obj) TPlainObjectType(expr);
+ m_hasCopy = true;
+ Base::construct(*obj);
+ }
+ else
+ {
+ Base::construct(expr);
+ }
+ }
+
+ template<typename Expression>
+ void construct(const Expression& expr, internal::false_type)
+ {
+ TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
+ ::new (obj) TPlainObjectType(expr);
+ m_hasCopy = true;
+ Base::construct(*obj);
+ }
+
+ protected:
+ char m_object_bytes[sizeof(TPlainObjectType)];
+ bool m_hasCopy;
+};
+
+
+
+/**
+ * \ingroup SparseCore_Module
+ *
+ * \brief A sparse vector expression referencing an existing sparse vector expression
+ *
+ * \tparam SparseVectorType the equivalent sparse vector type of the referenced data; it must be a template instance of class SparseVector.
+ *
+ * \sa class Ref
+ */
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+class Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType >
+ : public internal::SparseRefBase<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType > >
+#else
+template<typename SparseVectorType>
+class Ref<SparseVectorType>
+ : public SparseMapBase<Derived,WriteAccessors>
+#endif
+{
+ typedef SparseVector<MatScalar,MatOptions,MatIndex> PlainObjectType;
+ typedef internal::traits<Ref> Traits;
+ template<int OtherOptions>
+ inline Ref(const SparseVector<MatScalar,OtherOptions,MatIndex>& expr);
+ public:
+
+ typedef internal::SparseRefBase<Ref> Base;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Ref)
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ template<int OtherOptions>
+ inline Ref(SparseVector<MatScalar,OtherOptions,MatIndex>& expr)
+ {
+ EIGEN_STATIC_ASSERT(bool(Traits::template match<SparseVector<MatScalar,OtherOptions,MatIndex> >::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
+ Base::construct(expr.derived());
+ }
+
+ template<typename Derived>
+ inline Ref(const SparseCompressedBase<Derived>& expr)
+ #else
+ /** Implicit constructor from any 1D sparse vector expression */
+ template<typename Derived>
+ inline Ref(SparseCompressedBase<Derived>& expr)
+ #endif
+ {
+ EIGEN_STATIC_ASSERT(bool(internal::is_lvalue<Derived>::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
+ EIGEN_STATIC_ASSERT(bool(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
+ Base::construct(expr.const_cast_derived());
+ }
+};
+
+// this is the const ref version
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+class Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType>
+ : public internal::SparseRefBase<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
+{
+ typedef SparseVector<MatScalar,MatOptions,MatIndex> TPlainObjectType;
+ typedef internal::traits<Ref> Traits;
+ public:
+
+ typedef internal::SparseRefBase<Ref> Base;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Ref)
+
+ template<typename Derived>
+ inline Ref(const SparseMatrixBase<Derived>& expr) : m_hasCopy(false)
+ {
+ construct(expr.derived(), typename Traits::template match<Derived>::type());
+ }
+
+ inline Ref(const Ref& other) : Base(other), m_hasCopy(false) {
+ // the copy constructor must not copy m_object, to avoid an unnecessary malloc and copy
+ }
+
+ template<typename OtherRef>
+ inline Ref(const RefBase<OtherRef>& other) : m_hasCopy(false) {
+ construct(other.derived(), typename Traits::template match<OtherRef>::type());
+ }
+
+ ~Ref() {
+ if(m_hasCopy) {
+ TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
+ obj->~TPlainObjectType();
+ }
+ }
+
+ protected:
+
+ template<typename Expression>
+ void construct(const Expression& expr,internal::true_type)
+ {
+ Base::construct(expr);
+ }
+
+ template<typename Expression>
+ void construct(const Expression& expr, internal::false_type)
+ {
+ TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
+ ::new (obj) TPlainObjectType(expr);
+ m_hasCopy = true;
+ Base::construct(*obj);
+ }
+
+ protected:
+ char m_object_bytes[sizeof(TPlainObjectType)];
+ bool m_hasCopy;
+};
+
+namespace internal {
+
+// FIXME: shall we introduce a general evaluator_ref that we can specialize for any sparse object once, and thus remove this copy-paste?
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+struct evaluator<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
+ : evaluator<SparseCompressedBase<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
+{
+ typedef evaluator<SparseCompressedBase<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
+ typedef Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
+ evaluator() : Base() {}
+ explicit evaluator(const XprType &mat) : Base(mat) {}
+};
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+struct evaluator<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
+ : evaluator<SparseCompressedBase<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
+{
+ typedef evaluator<SparseCompressedBase<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
+ typedef Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
+ evaluator() : Base() {}
+ explicit evaluator(const XprType &mat) : Base(mat) {}
+};
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+struct evaluator<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
+ : evaluator<SparseCompressedBase<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
+{
+ typedef evaluator<SparseCompressedBase<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
+ typedef Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
+ evaluator() : Base() {}
+ explicit evaluator(const XprType &mat) : Base(mat) {}
+};
+
+template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
+struct evaluator<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
+ : evaluator<SparseCompressedBase<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
+{
+ typedef evaluator<SparseCompressedBase<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
+ typedef Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
+ evaluator() : Base() {}
+ explicit evaluator(const XprType &mat) : Base(mat) {}
+};
+
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_REF_H
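
Usage sketch (illustrative only; the helper name accumulate_diagonal is hypothetical): a Ref<const SparseMatrix<double> > binds to any compatible compressed sparse expression without copying, and the construct() overloads above fall back to an internal temporary only when no direct mapping is possible (e.g. a const Ref requested with StandardCompressedFormat on an uncompressed input).

    #include <Eigen/SparseCore>
    #include <iostream>

    // Accepts any compatible column-major compressed sparse expression without copying.
    double accumulate_diagonal(const Eigen::Ref<const Eigen::SparseMatrix<double> >& m)
    {
      double sum = 0;
      for (Eigen::Index k = 0; k < m.outerSize(); ++k)
        sum += m.coeff(k, k);
      return sum;
    }

    int main()
    {
      Eigen::SparseMatrix<double> A(3, 3);
      A.insert(0, 0) = 1.0;
      A.insert(1, 1) = 2.0;
      A.insert(2, 2) = 3.0;
      // an uncompressed A would force a copy if StandardCompressedFormat were requested
      A.makeCompressed();
      std::cout << accumulate_diagonal(A) << "\n"; // prints 6
      return 0;
    }
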
diff --git a/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h b/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h
index 0eda96b..9e39be7 100644
--- a/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h
+++ b/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -11,14 +11,14 @@
#define EIGEN_SPARSE_SELFADJOINTVIEW_H
namespace Eigen {
-
+
/** \ingroup SparseCore_Module
* \class SparseSelfAdjointView
*
* \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
*
 * \param MatrixType the type of the sparse matrix storing the coefficients
- * \param UpLo can be either \c #Lower or \c #Upper
+ * \param Mode can be either \c #Lower or \c #Upper
*
 * This class is an expression of a selfadjoint matrix from a triangular part of a matrix
 * with given sparse storage of the coefficients. It is the return type of SparseMatrixBase::selfadjointView()
@@ -26,38 +26,39 @@ namespace Eigen {
*
* \sa SparseMatrixBase::selfadjointView()
*/
-template<typename Lhs, typename Rhs, int UpLo>
-class SparseSelfAdjointTimeDenseProduct;
-
-template<typename Lhs, typename Rhs, int UpLo>
-class DenseTimeSparseSelfAdjointProduct;
-
namespace internal {
-template<typename MatrixType, unsigned int UpLo>
-struct traits<SparseSelfAdjointView<MatrixType,UpLo> > : traits<MatrixType> {
+template<typename MatrixType, unsigned int Mode>
+struct traits<SparseSelfAdjointView<MatrixType,Mode> > : traits<MatrixType> {
};
-template<int SrcUpLo,int DstUpLo,typename MatrixType,int DestOrder>
-void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
+template<int SrcMode,int DstMode,typename MatrixType,int DestOrder>
+void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);
-template<int UpLo,typename MatrixType,int DestOrder>
-void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
+template<int Mode,typename MatrixType,int DestOrder>
+void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);
}
-template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
- : public EigenBase<SparseSelfAdjointView<MatrixType,UpLo> >
+template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
+ : public EigenBase<SparseSelfAdjointView<MatrixType,_Mode> >
{
public:
+
+ enum {
+ Mode = _Mode,
+ RowsAtCompileTime = internal::traits<SparseSelfAdjointView>::RowsAtCompileTime,
+ ColsAtCompileTime = internal::traits<SparseSelfAdjointView>::ColsAtCompileTime
+ };
+ typedef EigenBase<SparseSelfAdjointView> Base;
typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::Index Index;
- typedef Matrix<Index,Dynamic,1> VectorI;
- typedef typename MatrixType::Nested MatrixTypeNested;
+ typedef typename MatrixType::StorageIndex StorageIndex;
+ typedef Matrix<StorageIndex,Dynamic,1> VectorI;
+ typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;
typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
-
- inline SparseSelfAdjointView(const MatrixType& matrix) : m_matrix(matrix)
+
+ explicit inline SparseSelfAdjointView(MatrixType& matrix) : m_matrix(matrix)
{
eigen_assert(rows()==cols() && "SelfAdjointView is only for squared matrices");
}
@@ -67,7 +68,7 @@ template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
/** \internal \returns a reference to the nested matrix */
const _MatrixTypeNested& matrix() const { return m_matrix; }
- _MatrixTypeNested& matrix() { return m_matrix.const_cast_derived(); }
+ typename internal::remove_reference<MatrixTypeNested>::type& matrix() { return m_matrix; }
/** \returns an expression of the matrix product between a sparse self-adjoint matrix \c *this and a sparse matrix \a rhs.
*
@@ -75,10 +76,10 @@ template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
* Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product.
*/
template<typename OtherDerived>
- SparseSparseProduct<typename OtherDerived::PlainObject, OtherDerived>
+ Product<SparseSelfAdjointView, OtherDerived>
operator*(const SparseMatrixBase<OtherDerived>& rhs) const
{
- return SparseSparseProduct<typename OtherDerived::PlainObject, OtherDerived>(*this, rhs.derived());
+ return Product<SparseSelfAdjointView, OtherDerived>(*this, rhs.derived());
}
/** \returns an expression of the matrix product between a sparse matrix \a lhs and a sparse self-adjoint matrix \a rhs.
@@ -87,26 +88,26 @@ template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
* Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product.
*/
template<typename OtherDerived> friend
- SparseSparseProduct<OtherDerived, typename OtherDerived::PlainObject >
+ Product<OtherDerived, SparseSelfAdjointView>
operator*(const SparseMatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
{
- return SparseSparseProduct<OtherDerived, typename OtherDerived::PlainObject>(lhs.derived(), rhs);
+ return Product<OtherDerived, SparseSelfAdjointView>(lhs.derived(), rhs);
}
/** Efficient sparse self-adjoint matrix times dense vector/matrix product */
template<typename OtherDerived>
- SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>
+ Product<SparseSelfAdjointView,OtherDerived>
operator*(const MatrixBase<OtherDerived>& rhs) const
{
- return SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>(m_matrix, rhs.derived());
+ return Product<SparseSelfAdjointView,OtherDerived>(*this, rhs.derived());
}
/** Efficient dense vector/matrix times sparse self-adjoint matrix product */
template<typename OtherDerived> friend
- DenseTimeSparseSelfAdjointProduct<OtherDerived,MatrixType,UpLo>
+ Product<OtherDerived,SparseSelfAdjointView>
operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
{
- return DenseTimeSparseSelfAdjointProduct<OtherDerived,_MatrixTypeNested,UpLo>(lhs.derived(), rhs.m_matrix);
+ return Product<OtherDerived,SparseSelfAdjointView>(lhs.derived(), rhs);
}
/** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
@@ -120,56 +121,48 @@ template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
template<typename DerivedU>
SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));
- /** \internal triggered by sparse_matrix = SparseSelfadjointView; */
- template<typename DestScalar,int StorageOrder> void evalTo(SparseMatrix<DestScalar,StorageOrder,Index>& _dest) const
- {
- internal::permute_symm_to_fullsymm<UpLo>(m_matrix, _dest);
- }
-
- template<typename DestScalar> void evalTo(DynamicSparseMatrix<DestScalar,ColMajor,Index>& _dest) const
- {
- // TODO directly evaluate into _dest;
- SparseMatrix<DestScalar,ColMajor,Index> tmp(_dest.rows(),_dest.cols());
- internal::permute_symm_to_fullsymm<UpLo>(m_matrix, tmp);
- _dest = tmp;
- }
-
/** \returns an expression of P H P^-1 */
- SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
+ // TODO implement twists in a more evaluator-friendly fashion
+ SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) const
{
- return SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo>(m_matrix, perm);
+ return SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode>(m_matrix, perm);
}
-
- template<typename SrcMatrixType,int SrcUpLo>
- SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcUpLo>& permutedMatrix)
+
+ template<typename SrcMatrixType,int SrcMode>
+ SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcMode>& permutedMatrix)
{
- permutedMatrix.evalTo(*this);
+ internal::call_assignment_no_alias_no_transpose(*this, permutedMatrix);
return *this;
}
-
SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)
{
- PermutationMatrix<Dynamic> pnull;
+ PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
return *this = src.twistedBy(pnull);
}
- template<typename SrcMatrixType,unsigned int SrcUpLo>
- SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcUpLo>& src)
+ template<typename SrcMatrixType,unsigned int SrcMode>
+ SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcMode>& src)
{
- PermutationMatrix<Dynamic> pnull;
+ PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
return *this = src.twistedBy(pnull);
}
-
- // const SparseLLT<PlainObject, UpLo> llt() const;
- // const SparseLDLT<PlainObject, UpLo> ldlt() const;
-
+ void resize(Index rows, Index cols)
+ {
+ EIGEN_ONLY_USED_FOR_DEBUG(rows);
+ EIGEN_ONLY_USED_FOR_DEBUG(cols);
+ eigen_assert(rows == this->rows() && cols == this->cols()
+ && "SparseSelfadjointView::resize() does not actually allow to resize.");
+ }
+
protected:
- typename MatrixType::Nested m_matrix;
- mutable VectorI m_countPerRow;
- mutable VectorI m_countPerCol;
+ MatrixTypeNested m_matrix;
+ //mutable VectorI m_countPerRow;
+ //mutable VectorI m_countPerCol;
+ private:
+ template<typename Dest> void evalTo(Dest &) const;
};
/***************************************************************************
@@ -178,146 +171,268 @@ template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
template<typename Derived>
template<unsigned int UpLo>
-const SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView() const
+typename SparseMatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView() const
{
- return derived();
+ return SparseSelfAdjointView<const Derived, UpLo>(derived());
}
template<typename Derived>
template<unsigned int UpLo>
-SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView()
+typename SparseMatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView()
{
- return derived();
+ return SparseSelfAdjointView<Derived, UpLo>(derived());
}
/***************************************************************************
* Implementation of SparseSelfAdjointView methods
***************************************************************************/
-template<typename MatrixType, unsigned int UpLo>
+template<typename MatrixType, unsigned int Mode>
template<typename DerivedU>
-SparseSelfAdjointView<MatrixType,UpLo>&
-SparseSelfAdjointView<MatrixType,UpLo>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
+SparseSelfAdjointView<MatrixType,Mode>&
+SparseSelfAdjointView<MatrixType,Mode>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
{
- SparseMatrix<Scalar,MatrixType::Flags&RowMajorBit?RowMajor:ColMajor> tmp = u * u.adjoint();
+ SparseMatrix<Scalar,(MatrixType::Flags&RowMajorBit)?RowMajor:ColMajor> tmp = u * u.adjoint();
if(alpha==Scalar(0))
- m_matrix.const_cast_derived() = tmp.template triangularView<UpLo>();
+ m_matrix = tmp.template triangularView<Mode>();
else
- m_matrix.const_cast_derived() += alpha * tmp.template triangularView<UpLo>();
+ m_matrix += alpha * tmp.template triangularView<Mode>();
return *this;
}
+namespace internal {
+
+// TODO currently a selfadjoint expression has the form SelfAdjointView<.,.>
+// in the future selfadjoint-ness should be defined by the expression traits
+// such that Transpose<SelfAdjointView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work)
+template<typename MatrixType, unsigned int Mode>
+struct evaluator_traits<SparseSelfAdjointView<MatrixType,Mode> >
+{
+ typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
+ typedef SparseSelfAdjointShape Shape;
+};
+
+struct SparseSelfAdjoint2Sparse {};
+
+template<> struct AssignmentKind<SparseShape,SparseSelfAdjointShape> { typedef SparseSelfAdjoint2Sparse Kind; };
+template<> struct AssignmentKind<SparseSelfAdjointShape,SparseShape> { typedef Sparse2Sparse Kind; };
+
+template< typename DstXprType, typename SrcXprType, typename Functor>
+struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse>
+{
+ typedef typename DstXprType::StorageIndex StorageIndex;
+ typedef internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> AssignOpType;
+
+ template<typename DestScalar,int StorageOrder>
+ static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignOpType&/*func*/)
+ {
+ internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), dst);
+ }
+
+ // FIXME: the handling of += and -= in sparse matrices should be cleaned up so that the next two overloads could be reduced to:
+ template<typename DestScalar,int StorageOrder,typename AssignFunc>
+ static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignFunc& func)
+ {
+ SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
+ run(tmp, src, AssignOpType());
+ call_assignment_no_alias_no_transpose(dst, tmp, func);
+ }
+
+ template<typename DestScalar,int StorageOrder>
+ static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,
+ const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>& /* func */)
+ {
+ SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
+ run(tmp, src, AssignOpType());
+ dst += tmp;
+ }
+
+ template<typename DestScalar,int StorageOrder>
+ static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,
+ const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>& /* func */)
+ {
+ SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
+ run(tmp, src, AssignOpType());
+ dst -= tmp;
+ }
+
+ template<typename DestScalar>
+ static void run(DynamicSparseMatrix<DestScalar,ColMajor,StorageIndex>& dst, const SrcXprType &src, const AssignOpType&/*func*/)
+ {
+ // TODO directly evaluate into dst;
+ SparseMatrix<DestScalar,ColMajor,StorageIndex> tmp(dst.rows(),dst.cols());
+ internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), tmp);
+ dst = tmp;
+ }
+};
+
+} // end namespace internal
+
/***************************************************************************
* Implementation of sparse self-adjoint time dense matrix
***************************************************************************/
namespace internal {
-template<typename Lhs, typename Rhs, int UpLo>
-struct traits<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo> >
- : traits<ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
-{
- typedef Dense StorageKind;
-};
-}
-template<typename Lhs, typename Rhs, int UpLo>
-class SparseSelfAdjointTimeDenseProduct
- : public ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
+template<int Mode, typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
+inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
- public:
- EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseSelfAdjointTimeDenseProduct)
-
- SparseSelfAdjointTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
- {}
+ EIGEN_ONLY_USED_FOR_DEBUG(alpha);
+
+ typedef typename internal::nested_eval<SparseLhsType,DenseRhsType::MaxColsAtCompileTime>::type SparseLhsTypeNested;
+ typedef typename internal::remove_all<SparseLhsTypeNested>::type SparseLhsTypeNestedCleaned;
+ typedef evaluator<SparseLhsTypeNestedCleaned> LhsEval;
+ typedef typename LhsEval::InnerIterator LhsIterator;
+ typedef typename SparseLhsType::Scalar LhsScalar;
+
+ enum {
+ LhsIsRowMajor = (LhsEval::Flags&RowMajorBit)==RowMajorBit,
+ ProcessFirstHalf =
+ ((Mode&(Upper|Lower))==(Upper|Lower))
+ || ( (Mode&Upper) && !LhsIsRowMajor)
+ || ( (Mode&Lower) && LhsIsRowMajor),
+ ProcessSecondHalf = !ProcessFirstHalf
+ };
+
+ SparseLhsTypeNested lhs_nested(lhs);
+ LhsEval lhsEval(lhs_nested);
- template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
+ // work on one column at a time
+ for (Index k=0; k<rhs.cols(); ++k)
+ {
+ for (Index j=0; j<lhs.outerSize(); ++j)
{
- EIGEN_ONLY_USED_FOR_DEBUG(alpha);
- // TODO use alpha
- eigen_assert(alpha==Scalar(1) && "alpha != 1 is not implemented yet, sorry");
- typedef typename internal::remove_all<Lhs>::type _Lhs;
- typedef typename _Lhs::InnerIterator LhsInnerIterator;
- enum {
- LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit,
- ProcessFirstHalf =
- ((UpLo&(Upper|Lower))==(Upper|Lower))
- || ( (UpLo&Upper) && !LhsIsRowMajor)
- || ( (UpLo&Lower) && LhsIsRowMajor),
- ProcessSecondHalf = !ProcessFirstHalf
- };
- for (Index j=0; j<m_lhs.outerSize(); ++j)
+ LhsIterator i(lhsEval,j);
+ // handle diagonal coeff
+ if (ProcessSecondHalf)
{
- LhsInnerIterator i(m_lhs,j);
- if (ProcessSecondHalf)
- {
- while (i && i.index()<j) ++i;
- if(i && i.index()==j)
- {
- dest.row(j) += i.value() * m_rhs.row(j);
- ++i;
- }
- }
- for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
+ while (i && i.index()<j) ++i;
+ if(i && i.index()==j)
{
- Index a = LhsIsRowMajor ? j : i.index();
- Index b = LhsIsRowMajor ? i.index() : j;
- typename Lhs::Scalar v = i.value();
- dest.row(a) += (v) * m_rhs.row(b);
- dest.row(b) += numext::conj(v) * m_rhs.row(a);
+ res(j,k) += alpha * i.value() * rhs(j,k);
+ ++i;
}
- if (ProcessFirstHalf && i && (i.index()==j))
- dest.row(j) += i.value() * m_rhs.row(j);
}
+
+ // premultiplied rhs for scatters
+ typename ScalarBinaryOpTraits<AlphaType, typename DenseRhsType::Scalar>::ReturnType rhs_j(alpha*rhs(j,k));
+ // accumulator for partial scalar product
+ typename DenseResType::Scalar res_j(0);
+ for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
+ {
+ LhsScalar lhs_ij = i.value();
+ if(!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij);
+ res_j += lhs_ij * rhs(i.index(),k);
+ res(i.index(),k) += numext::conj(lhs_ij) * rhs_j;
+ }
+ res(j,k) += alpha * res_j;
+
+ // handle diagonal coeff
+ if (ProcessFirstHalf && i && (i.index()==j))
+ res(j,k) += alpha * i.value() * rhs(j,k);
}
+ }
+}
- private:
- SparseSelfAdjointTimeDenseProduct& operator=(const SparseSelfAdjointTimeDenseProduct&);
+
+template<typename LhsView, typename Rhs, int ProductType>
+struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType>
+: generic_product_impl_base<LhsView, Rhs, generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType> >
+{
+ template<typename Dest>
+ static void scaleAndAddTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs, const typename Dest::Scalar& alpha)
+ {
+ typedef typename LhsView::_MatrixTypeNested Lhs;
+ typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
+ typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
+ LhsNested lhsNested(lhsView.matrix());
+ RhsNested rhsNested(rhs);
+
+ internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, alpha);
+ }
};
-namespace internal {
-template<typename Lhs, typename Rhs, int UpLo>
-struct traits<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo> >
- : traits<ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
-{};
-}
+template<typename Lhs, typename RhsView, int ProductType>
+struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType>
+: generic_product_impl_base<Lhs, RhsView, generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType> >
+{
+ template<typename Dest>
+ static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView, const typename Dest::Scalar& alpha)
+ {
+ typedef typename RhsView::_MatrixTypeNested Rhs;
+ typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
+ typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
+ LhsNested lhsNested(lhs);
+ RhsNested rhsNested(rhsView.matrix());
+
+ // transpose everything
+ Transpose<Dest> dstT(dst);
+ internal::sparse_selfadjoint_time_dense_product<RhsView::Mode>(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
+ }
+};
-template<typename Lhs, typename Rhs, int UpLo>
-class DenseTimeSparseSelfAdjointProduct
- : public ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
+// NOTE: these two overloads are needed to evaluate the sparse selfadjoint view into a full sparse matrix
+// TODO: maybe the copy could be handled by generic_product_impl so that these overloads would not be needed anymore
+
+template<typename LhsView, typename Rhs, int ProductTag>
+struct product_evaluator<Product<LhsView, Rhs, DefaultProduct>, ProductTag, SparseSelfAdjointShape, SparseShape>
+ : public evaluator<typename Product<typename Rhs::PlainObject, Rhs, DefaultProduct>::PlainObject>
{
- public:
- EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseSelfAdjointProduct)
+ typedef Product<LhsView, Rhs, DefaultProduct> XprType;
+ typedef typename XprType::PlainObject PlainObject;
+ typedef evaluator<PlainObject> Base;
- DenseTimeSparseSelfAdjointProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
- {}
+ product_evaluator(const XprType& xpr)
+ : m_lhs(xpr.lhs()), m_result(xpr.rows(), xpr.cols())
+ {
+ ::new (static_cast<Base*>(this)) Base(m_result);
+ generic_product_impl<typename Rhs::PlainObject, Rhs, SparseShape, SparseShape, ProductTag>::evalTo(m_result, m_lhs, xpr.rhs());
+ }
+
+protected:
+ typename Rhs::PlainObject m_lhs;
+ PlainObject m_result;
+};
- template<typename Dest> void scaleAndAddTo(Dest& /*dest*/, const Scalar& /*alpha*/) const
- {
- // TODO
- }
+template<typename Lhs, typename RhsView, int ProductTag>
+struct product_evaluator<Product<Lhs, RhsView, DefaultProduct>, ProductTag, SparseShape, SparseSelfAdjointShape>
+ : public evaluator<typename Product<Lhs, typename Lhs::PlainObject, DefaultProduct>::PlainObject>
+{
+ typedef Product<Lhs, RhsView, DefaultProduct> XprType;
+ typedef typename XprType::PlainObject PlainObject;
+ typedef evaluator<PlainObject> Base;
- private:
- DenseTimeSparseSelfAdjointProduct& operator=(const DenseTimeSparseSelfAdjointProduct&);
+ product_evaluator(const XprType& xpr)
+ : m_rhs(xpr.rhs()), m_result(xpr.rows(), xpr.cols())
+ {
+ ::new (static_cast<Base*>(this)) Base(m_result);
+ generic_product_impl<Lhs, typename Lhs::PlainObject, SparseShape, SparseShape, ProductTag>::evalTo(m_result, xpr.lhs(), m_rhs);
+ }
+
+protected:
+ typename Lhs::PlainObject m_rhs;
+ PlainObject m_result;
};
+} // namespace internal
+
/***************************************************************************
* Implementation of symmetric copies and permutations
***************************************************************************/
namespace internal {
-
-template<typename MatrixType, int UpLo>
-struct traits<SparseSymmetricPermutationProduct<MatrixType,UpLo> > : traits<MatrixType> {
-};
-template<int UpLo,typename MatrixType,int DestOrder>
-void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
+template<int Mode,typename MatrixType,int DestOrder>
+void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
- typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename MatrixType::Scalar Scalar;
- typedef SparseMatrix<Scalar,DestOrder,Index> Dest;
- typedef Matrix<Index,Dynamic,1> VectorI;
+ typedef SparseMatrix<Scalar,DestOrder,StorageIndex> Dest;
+ typedef Matrix<StorageIndex,Dynamic,1> VectorI;
+ typedef evaluator<MatrixType> MatEval;
+ typedef typename evaluator<MatrixType>::InnerIterator MatIterator;
+ MatEval matEval(mat);
Dest& dest(_dest.derived());
enum {
StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
@@ -331,17 +446,17 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
for(Index j = 0; j<size; ++j)
{
Index jp = perm ? perm[j] : j;
- for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ for(MatIterator it(matEval,j); it; ++it)
{
Index i = it.index();
Index r = it.row();
Index c = it.col();
Index ip = perm ? perm[i] : i;
- if(UpLo==(Upper|Lower))
+ if(Mode==(Upper|Lower))
count[StorageOrderMatch ? jp : ip]++;
else if(r==c)
count[ip]++;
- else if(( UpLo==Lower && r>c) || ( UpLo==Upper && r<c))
+ else if(( Mode==Lower && r>c) || ( Mode==Upper && r<c))
{
count[ip]++;
count[jp]++;
@@ -359,18 +474,18 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
count[j] = dest.outerIndexPtr()[j];
// copy data
- for(Index j = 0; j<size; ++j)
+ for(StorageIndex j = 0; j<size; ++j)
{
- for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ for(MatIterator it(matEval,j); it; ++it)
{
- Index i = it.index();
+ StorageIndex i = internal::convert_index<StorageIndex>(it.index());
Index r = it.row();
Index c = it.col();
- Index jp = perm ? perm[j] : j;
- Index ip = perm ? perm[i] : i;
+ StorageIndex jp = perm ? perm[j] : j;
+ StorageIndex ip = perm ? perm[i] : i;
- if(UpLo==(Upper|Lower))
+ if(Mode==(Upper|Lower))
{
Index k = count[StorageOrderMatch ? jp : ip]++;
dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
@@ -382,7 +497,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
dest.innerIndexPtr()[k] = ip;
dest.valuePtr()[k] = it.value();
}
- else if(( (UpLo&Lower)==Lower && r>c) || ( (UpLo&Upper)==Upper && r<c))
+ else if(( (Mode&Lower)==Lower && r>c) || ( (Mode&Upper)==Upper && r<c))
{
if(!StorageOrderMatch)
std::swap(ip,jp);
@@ -397,35 +512,40 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
}
}
-template<int _SrcUpLo,int _DstUpLo,typename MatrixType,int DstOrder>
-void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
+template<int _SrcMode,int _DstMode,typename MatrixType,int DstOrder>
+void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
- typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename MatrixType::Scalar Scalar;
- SparseMatrix<Scalar,DstOrder,Index>& dest(_dest.derived());
- typedef Matrix<Index,Dynamic,1> VectorI;
+ SparseMatrix<Scalar,DstOrder,StorageIndex>& dest(_dest.derived());
+ typedef Matrix<StorageIndex,Dynamic,1> VectorI;
+ typedef evaluator<MatrixType> MatEval;
+ typedef typename evaluator<MatrixType>::InnerIterator MatIterator;
+
enum {
SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
StorageOrderMatch = int(SrcOrder) == int(DstOrder),
- DstUpLo = DstOrder==RowMajor ? (_DstUpLo==Upper ? Lower : Upper) : _DstUpLo,
- SrcUpLo = SrcOrder==RowMajor ? (_SrcUpLo==Upper ? Lower : Upper) : _SrcUpLo
+ DstMode = DstOrder==RowMajor ? (_DstMode==Upper ? Lower : Upper) : _DstMode,
+ SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
};
+
+ MatEval matEval(mat);
Index size = mat.rows();
VectorI count(size);
count.setZero();
dest.resize(size,size);
- for(Index j = 0; j<size; ++j)
+ for(StorageIndex j = 0; j<size; ++j)
{
- Index jp = perm ? perm[j] : j;
- for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ StorageIndex jp = perm ? perm[j] : j;
+ for(MatIterator it(matEval,j); it; ++it)
{
- Index i = it.index();
- if((int(SrcUpLo)==int(Lower) && i<j) || (int(SrcUpLo)==int(Upper) && i>j))
+ StorageIndex i = it.index();
+ if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
continue;
- Index ip = perm ? perm[i] : i;
- count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
+ StorageIndex ip = perm ? perm[i] : i;
+ count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
}
}
dest.outerIndexPtr()[0] = 0;
@@ -435,23 +555,23 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
for(Index j=0; j<size; ++j)
count[j] = dest.outerIndexPtr()[j];
- for(Index j = 0; j<size; ++j)
+ for(StorageIndex j = 0; j<size; ++j)
{
- for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ for(MatIterator it(matEval,j); it; ++it)
{
- Index i = it.index();
- if((int(SrcUpLo)==int(Lower) && i<j) || (int(SrcUpLo)==int(Upper) && i>j))
+ StorageIndex i = it.index();
+ if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
continue;
- Index jp = perm ? perm[j] : j;
- Index ip = perm? perm[i] : i;
+ StorageIndex jp = perm ? perm[j] : j;
+ StorageIndex ip = perm? perm[i] : i;
- Index k = count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
- dest.innerIndexPtr()[k] = int(DstUpLo)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);
+ Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
+ dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);
if(!StorageOrderMatch) std::swap(ip,jp);
- if( ((int(DstUpLo)==int(Lower) && ip<jp) || (int(DstUpLo)==int(Upper) && ip>jp)))
+ if( ((int(DstMode)==int(Lower) && ip<jp) || (int(DstMode)==int(Upper) && ip>jp)))
dest.valuePtr()[k] = numext::conj(it.value());
else
dest.valuePtr()[k] = it.value();
@@ -461,19 +581,33 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
}
-template<typename MatrixType,int UpLo>
+// TODO implement twists in a more evaluator-friendly fashion
+
+namespace internal {
+
+template<typename MatrixType, int Mode>
+struct traits<SparseSymmetricPermutationProduct<MatrixType,Mode> > : traits<MatrixType> {
+};
+
+}
+
+template<typename MatrixType,int Mode>
class SparseSymmetricPermutationProduct
- : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,UpLo> >
+ : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,Mode> >
{
public:
typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::StorageIndex StorageIndex;
+ enum {
+ RowsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::RowsAtCompileTime,
+ ColsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::ColsAtCompileTime
+ };
protected:
- typedef PermutationMatrix<Dynamic,Dynamic,Index> Perm;
+ typedef PermutationMatrix<Dynamic,Dynamic,StorageIndex> Perm;
public:
- typedef Matrix<Index,Dynamic,1> VectorI;
+ typedef Matrix<StorageIndex,Dynamic,1> VectorI;
typedef typename MatrixType::Nested MatrixTypeNested;
- typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+ typedef typename internal::remove_all<MatrixTypeNested>::type NestedExpression;
SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
: m_matrix(mat), m_perm(perm)
@@ -481,20 +615,9 @@ class SparseSymmetricPermutationProduct
inline Index rows() const { return m_matrix.rows(); }
inline Index cols() const { return m_matrix.cols(); }
-
- template<typename DestScalar, int Options, typename DstIndex>
- void evalTo(SparseMatrix<DestScalar,Options,DstIndex>& _dest) const
- {
-// internal::permute_symm_to_fullsymm<UpLo>(m_matrix,_dest,m_perm.indices().data());
- SparseMatrix<DestScalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
- internal::permute_symm_to_fullsymm<UpLo>(m_matrix,tmp,m_perm.indices().data());
- _dest = tmp;
- }
-
- template<typename DestType,unsigned int DestUpLo> void evalTo(SparseSelfAdjointView<DestType,DestUpLo>& dest) const
- {
- internal::permute_symm_to_symm<UpLo,DestUpLo>(m_matrix,dest.matrix(),m_perm.indices().data());
- }
+
+ const NestedExpression& matrix() const { return m_matrix; }
+ const Perm& perm() const { return m_perm; }
protected:
MatrixTypeNested m_matrix;
@@ -502,6 +625,31 @@ class SparseSymmetricPermutationProduct
};
+namespace internal {
+
+template<typename DstXprType, typename MatrixType, int Mode, typename Scalar>
+struct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType,Mode>, internal::assign_op<Scalar,typename MatrixType::Scalar>, Sparse2Sparse>
+{
+ typedef SparseSymmetricPermutationProduct<MatrixType,Mode> SrcXprType;
+ typedef typename DstXprType::StorageIndex DstIndex;
+ template<int Options>
+ static void run(SparseMatrix<Scalar,Options,DstIndex> &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
+ {
+ // internal::permute_symm_to_fullsymm<Mode>(m_matrix,_dest,m_perm.indices().data());
+ SparseMatrix<Scalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
+ internal::permute_symm_to_fullsymm<Mode>(src.matrix(),tmp,src.perm().indices().data());
+ dst = tmp;
+ }
+
+ template<typename DestType,unsigned int DestMode>
+ static void run(SparseSelfAdjointView<DestType,DestMode>& dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
+ {
+ internal::permute_symm_to_symm<Mode,DestMode>(src.matrix(),dst.matrix(),src.perm().indices().data());
+ }
+};
+
+} // end namespace internal
+
} // end namespace Eigen
#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H
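
Usage sketch (illustrative only): with the generic_product_impl and Assignment specializations above, a SparseSelfAdjointView built from a single stored triangle multiplies dense operands and expands to a plain SparseMatrix.

    #include <Eigen/SparseCore>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      SpMat L(3, 3);          // only the lower triangle is stored
      L.insert(0, 0) = 4.0;
      L.insert(1, 0) = 1.0;   // implicitly defines the (0,1) coefficient too
      L.insert(1, 1) = 5.0;
      L.insert(2, 2) = 6.0;
      L.makeCompressed();

      // selfadjoint * dense, handled by sparse_selfadjoint_time_dense_product:
      Eigen::VectorXd x = Eigen::VectorXd::Ones(3);
      Eigen::VectorXd y = L.selfadjointView<Eigen::Lower>() * x;

      // expansion to a full sparse matrix, handled by permute_symm_to_fullsymm:
      SpMat full(L.selfadjointView<Eigen::Lower>());
      return 0;
    }
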
diff --git a/eigen/Eigen/src/SparseCore/SparseSolverBase.h b/eigen/Eigen/src/SparseCore/SparseSolverBase.h
new file mode 100644
index 0000000..b4c9a42
--- /dev/null
+++ b/eigen/Eigen/src/SparseCore/SparseSolverBase.h
@@ -0,0 +1,124 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSESOLVERBASE_H
+#define EIGEN_SPARSESOLVERBASE_H
+
+namespace Eigen {
+
+namespace internal {
+
+ /** \internal
+  * Helper functions to solve with a sparse right-hand side and result.
+ * The rhs is decomposed into small vertical panels which are solved through dense temporaries.
+ */
+template<typename Decomposition, typename Rhs, typename Dest>
+typename enable_if<Rhs::ColsAtCompileTime!=1 && Dest::ColsAtCompileTime!=1>::type
+solve_sparse_through_dense_panels(const Decomposition &dec, const Rhs& rhs, Dest &dest)
+{
+ EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ typedef typename Dest::Scalar DestScalar;
+ // we process the sparse rhs in blocks of NbColsAtOnce columns, temporarily stored in a dense matrix.
+ static const Index NbColsAtOnce = 4;
+ Index rhsCols = rhs.cols();
+ Index size = rhs.rows();
+ // the temporary matrices do not need more columns than NbColsAtOnce:
+ Index tmpCols = (std::min)(rhsCols, NbColsAtOnce);
+ Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmp(size,tmpCols);
+ Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmpX(size,tmpCols);
+ for(Index k=0; k<rhsCols; k+=NbColsAtOnce)
+ {
+ Index actualCols = std::min<Index>(rhsCols-k, NbColsAtOnce);
+ tmp.leftCols(actualCols) = rhs.middleCols(k,actualCols);
+ tmpX.leftCols(actualCols) = dec.solve(tmp.leftCols(actualCols));
+ dest.middleCols(k,actualCols) = tmpX.leftCols(actualCols).sparseView();
+ }
+}
+
+// Overload for vector as rhs
+template<typename Decomposition, typename Rhs, typename Dest>
+typename enable_if<Rhs::ColsAtCompileTime==1 || Dest::ColsAtCompileTime==1>::type
+solve_sparse_through_dense_panels(const Decomposition &dec, const Rhs& rhs, Dest &dest)
+{
+ typedef typename Dest::Scalar DestScalar;
+ Index size = rhs.rows();
+ Eigen::Matrix<DestScalar,Dynamic,1> rhs_dense(rhs);
+ Eigen::Matrix<DestScalar,Dynamic,1> dest_dense(size);
+ dest_dense = dec.solve(rhs_dense);
+ dest = dest_dense.sparseView();
+}
+
+} // end namespace internal
+
+/** \class SparseSolverBase
+ * \ingroup SparseCore_Module
+ * \brief A base class for sparse solvers
+ *
+ * \tparam Derived the actual type of the solver.
+ *
+ */
+template<typename Derived>
+class SparseSolverBase : internal::noncopyable
+{
+ public:
+
+ /** Default constructor */
+ SparseSolverBase()
+ : m_isInitialized(false)
+ {}
+
+ ~SparseSolverBase()
+ {}
+
+ Derived& derived() { return *static_cast<Derived*>(this); }
+ const Derived& derived() const { return *static_cast<const Derived*>(this); }
+
+ /** \returns an expression of the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const Solve<Derived, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "Solver is not initialized.");
+ eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b");
+ return Solve<Derived, Rhs>(derived(), b.derived());
+ }
+
+ /** \returns an expression of the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const Solve<Derived, Rhs>
+ solve(const SparseMatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "Solver is not initialized.");
+ eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b");
+ return Solve<Derived, Rhs>(derived(), b.derived());
+ }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** \internal default implementation of solving with a sparse rhs */
+ template<typename Rhs,typename Dest>
+ void _solve_impl(const SparseMatrixBase<Rhs> &b, SparseMatrixBase<Dest> &dest) const
+ {
+ internal::solve_sparse_through_dense_panels(derived(), b.derived(), dest.derived());
+ }
+ #endif // EIGEN_PARSED_BY_DOXYGEN
+
+ protected:
+
+ mutable bool m_isInitialized;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSESOLVERBASE_H
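
Usage sketch (illustrative only; SimplicialLLT is used here as one example of a SparseSolverBase-derived solver): both solve() overloads above are available, and a sparse right-hand side is processed panel-wise by solve_sparse_through_dense_panels.

    #include <Eigen/SparseCore>
    #include <Eigen/SparseCholesky>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      SpMat A(2, 2);
      A.insert(0, 0) = 4.0;
      A.insert(1, 1) = 9.0;
      A.makeCompressed();

      Eigen::SimplicialLLT<SpMat> solver(A); // inherits the solve() API above

      // dense right-hand side:
      Eigen::VectorXd b(2);
      b << 8.0, 18.0;
      Eigen::VectorXd x = solver.solve(b);   // x = (2, 2)

      // sparse right-hand side, solved through dense panels:
      SpMat B(2, 2);
      B.insert(0, 0) = 4.0;
      B.makeCompressed();
      SpMat X = solver.solve(B);
      return 0;
    }
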
diff --git a/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
index 55b84a4..21c4190 100644
--- a/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
+++ b/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -22,7 +22,7 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
// return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res);
typedef typename remove_all<Lhs>::type::Scalar Scalar;
- typedef typename remove_all<Lhs>::type::Index Index;
+ typedef typename remove_all<Lhs>::type::StorageIndex StorageIndex;
// make sure to call innerSize/outerSize since we fake the storage order.
Index rows = lhs.innerSize();
@@ -31,21 +31,24 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
eigen_assert(lhs.outerSize() == rhs.innerSize());
// allocate a temporary buffer
- AmbiVector<Scalar,Index> tempVector(rows);
+ AmbiVector<Scalar,StorageIndex> tempVector(rows);
+ // mimics a resizeByInnerOuter:
+ if(ResultType::IsRowMajor)
+ res.resize(cols, rows);
+ else
+ res.resize(rows, cols);
+
+ evaluator<Lhs> lhsEval(lhs);
+ evaluator<Rhs> rhsEval(rhs);
+
// estimate the number of non zero entries
// given a rhs column containing Y non zeros, we assume that the respective Y columns
// of the lhs differs in average of one non zeros, thus the number of non zeros for
// the product of a rhs column with the lhs is X+Y where X is the average number of non zero
// per column of the lhs.
// Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
- Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros();
-
- // mimics a resizeByInnerOuter:
- if(ResultType::IsRowMajor)
- res.resize(cols, rows);
- else
- res.resize(rows, cols);
+ Index estimated_nnz_prod = lhsEval.nonZerosEstimate() + rhsEval.nonZerosEstimate();
res.reserve(estimated_nnz_prod);
double ratioColRes = double(estimated_nnz_prod)/(double(lhs.rows())*double(rhs.cols()));
@@ -56,18 +59,18 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
// let's do a more accurate determination of the nnz ratio for the current column j of res
tempVector.init(ratioColRes);
tempVector.setZero();
- for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
+ for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
{
// FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index())
tempVector.restart();
Scalar x = rhsIt.value();
- for (typename Lhs::InnerIterator lhsIt(lhs, rhsIt.index()); lhsIt; ++lhsIt)
+ for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, rhsIt.index()); lhsIt; ++lhsIt)
{
tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x;
}
}
res.startVec(j);
- for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector,tolerance); it; ++it)
+ for (typename AmbiVector<Scalar,StorageIndex>::Iterator it(tempVector,tolerance); it; ++it)
res.insertBackByOuterInner(j,it.index()) = it.value();
}
res.finalize();
@@ -100,7 +103,7 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,C
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
// we need a col-major matrix to hold the result
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> SparseTemporaryType;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> SparseTemporaryType;
SparseTemporaryType _res(res.rows(), res.cols());
internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,SparseTemporaryType>(lhs, rhs, _res, tolerance);
res = _res;
@@ -126,8 +129,8 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,R
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::Index> ColMajorMatrixLhs;
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::Index> ColMajorMatrixRhs;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixLhs;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixRhs;
ColMajorMatrixLhs colLhs(lhs);
ColMajorMatrixRhs colRhs(rhs);
internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrixLhs,ColMajorMatrixRhs,ResultType>(colLhs, colRhs, res, tolerance);
@@ -140,8 +143,53 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,R
}
};
-// NOTE the 2 others cases (col row *) must never occur since they are caught
-// by ProductReturnType which transforms it to (col col *) by evaluating rhs.
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,RowMajor>
+{
+ typedef typename ResultType::RealScalar RealScalar;
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename Lhs::StorageIndex> RowMajorMatrixLhs;
+ RowMajorMatrixLhs rowLhs(lhs);
+ sparse_sparse_product_with_pruning_selector<RowMajorMatrixLhs,Rhs,ResultType,RowMajor,RowMajor>(rowLhs,rhs,res,tolerance);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,RowMajor>
+{
+ typedef typename ResultType::RealScalar RealScalar;
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename Lhs::StorageIndex> RowMajorMatrixRhs;
+ RowMajorMatrixRhs rowRhs(rhs);
+ sparse_sparse_product_with_pruning_selector<Lhs,RowMajorMatrixRhs,ResultType,RowMajor,RowMajor,RowMajor>(lhs,rowRhs,res,tolerance);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,ColMajor>
+{
+ typedef typename ResultType::RealScalar RealScalar;
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixRhs;
+ ColMajorMatrixRhs colRhs(rhs);
+ internal::sparse_sparse_product_with_pruning_impl<Lhs,ColMajorMatrixRhs,ResultType>(lhs, colRhs, res, tolerance);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,ColMajor>
+{
+ typedef typename ResultType::RealScalar RealScalar;
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixLhs;
+ ColMajorMatrixLhs colLhs(lhs);
+ internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrixLhs,Rhs,ResultType>(colLhs, rhs, res, tolerance);
+ }
+};
} // end namespace internal
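
For context, the selectors added above are reached when a pruned sparse product is assigned to a destination whose storage order differs from that of the operands. A minimal usage sketch, not part of the patch itself, with illustrative matrices and tolerance:

#include <Eigen/SparseCore>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(3,3), B(3,3);       // col-major by default
  A.insert(0,0) = 2.0; A.insert(1,1) = 1e-20; A.insert(2,2) = 1.0;
  B.setIdentity();
  Eigen::SparseMatrix<double,Eigen::RowMajor> C;    // row-major destination picks a
  C = (A * B).pruned(1.0);                          // mixed-storage-order selector
  std::cout << C.nonZeros() << "\n";                // prints 2: the 1e-20 entry is pruned
}
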
diff --git a/eigen/Eigen/src/SparseCore/SparseTranspose.h b/eigen/Eigen/src/SparseCore/SparseTranspose.h
index 76d031d..3757d4c 100644
--- a/eigen/Eigen/src/SparseCore/SparseTranspose.h
+++ b/eigen/Eigen/src/SparseCore/SparseTranspose.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -12,52 +12,81 @@
namespace Eigen {
-template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
- : public SparseMatrixBase<Transpose<MatrixType> >
-{
- typedef typename internal::remove_all<typename MatrixType::Nested>::type _MatrixTypeNested;
+namespace internal {
+ template<typename MatrixType,int CompressedAccess=int(MatrixType::Flags&CompressedAccessBit)>
+ class SparseTransposeImpl
+ : public SparseMatrixBase<Transpose<MatrixType> >
+ {};
+
+ template<typename MatrixType>
+ class SparseTransposeImpl<MatrixType,CompressedAccessBit>
+ : public SparseCompressedBase<Transpose<MatrixType> >
+ {
+ typedef SparseCompressedBase<Transpose<MatrixType> > Base;
public:
-
- EIGEN_SPARSE_PUBLIC_INTERFACE(Transpose<MatrixType> )
-
- class InnerIterator;
- class ReverseInnerIterator;
+ using Base::derived;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::StorageIndex StorageIndex;
inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); }
-};
+
+ inline const Scalar* valuePtr() const { return derived().nestedExpression().valuePtr(); }
+ inline const StorageIndex* innerIndexPtr() const { return derived().nestedExpression().innerIndexPtr(); }
+ inline const StorageIndex* outerIndexPtr() const { return derived().nestedExpression().outerIndexPtr(); }
+ inline const StorageIndex* innerNonZeroPtr() const { return derived().nestedExpression().innerNonZeroPtr(); }
-// NOTE: VC10 and VC11 trigger an ICE if don't put typename TransposeImpl<MatrixType,Sparse>:: in front of Index,
-// a typedef typename TransposeImpl<MatrixType,Sparse>::Index Index;
-// does not fix the issue.
-// An alternative is to define the nested class in the parent class itself.
-template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::InnerIterator
- : public _MatrixTypeNested::InnerIterator
+ inline Scalar* valuePtr() { return derived().nestedExpression().valuePtr(); }
+ inline StorageIndex* innerIndexPtr() { return derived().nestedExpression().innerIndexPtr(); }
+ inline StorageIndex* outerIndexPtr() { return derived().nestedExpression().outerIndexPtr(); }
+ inline StorageIndex* innerNonZeroPtr() { return derived().nestedExpression().innerNonZeroPtr(); }
+ };
+}
+
+template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
+ : public internal::SparseTransposeImpl<MatrixType>
{
- typedef typename _MatrixTypeNested::InnerIterator Base;
- typedef typename TransposeImpl::Index Index;
- public:
-
- EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, typename TransposeImpl<MatrixType,Sparse>::Index outer)
- : Base(trans.derived().nestedExpression(), outer)
- {}
- typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); }
- typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); }
+ protected:
+ typedef internal::SparseTransposeImpl<MatrixType> Base;
};
-template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::ReverseInnerIterator
- : public _MatrixTypeNested::ReverseInnerIterator
+namespace internal {
+
+template<typename ArgType>
+struct unary_evaluator<Transpose<ArgType>, IteratorBased>
+ : public evaluator_base<Transpose<ArgType> >
{
- typedef typename _MatrixTypeNested::ReverseInnerIterator Base;
- typedef typename TransposeImpl::Index Index;
+ typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
public:
+ typedef Transpose<ArgType> XprType;
+
+ inline Index nonZerosEstimate() const {
+ return m_argImpl.nonZerosEstimate();
+ }
- EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, typename TransposeImpl<MatrixType,Sparse>::Index outer)
- : Base(xpr.derived().nestedExpression(), outer)
- {}
- typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); }
- typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); }
+ class InnerIterator : public EvalIterator
+ {
+ public:
+ EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer)
+ : EvalIterator(unaryOp.m_argImpl,outer)
+ {}
+
+ Index row() const { return EvalIterator::col(); }
+ Index col() const { return EvalIterator::row(); }
+ };
+
+ enum {
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
+ Flags = XprType::Flags
+ };
+
+ explicit unary_evaluator(const XprType& op) :m_argImpl(op.nestedExpression()) {}
+
+ protected:
+ evaluator<ArgType> m_argImpl;
};
+} // end namespace internal
+
} // end namespace Eigen
#endif // EIGEN_SPARSETRANSPOSE_H
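
The evaluator above merely swaps the row()/col() reported by the nested iterator; no data is moved until the expression is assigned. A tiny sanity sketch (illustrative values, not part of the patch):

#include <Eigen/SparseCore>
#include <cassert>

int main()
{
  Eigen::SparseMatrix<double> A(2,3);
  A.insert(0,2) = 5.0;
  Eigen::SparseMatrix<double> At = A.transpose();  // evaluated via the transpose evaluator
  assert(At.rows() == 3 && At.cols() == 2);
  assert(At.coeff(2,0) == 5.0);                    // (row,col) swapped relative to A
  return 0;
}
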
diff --git a/eigen/Eigen/src/SparseCore/SparseTriangularView.h b/eigen/Eigen/src/SparseCore/SparseTriangularView.h
index 333127b..9ac1202 100644
--- a/eigen/Eigen/src/SparseCore/SparseTriangularView.h
+++ b/eigen/Eigen/src/SparseCore/SparseTriangularView.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
@@ -11,19 +11,19 @@
#ifndef EIGEN_SPARSE_TRIANGULARVIEW_H
#define EIGEN_SPARSE_TRIANGULARVIEW_H
-namespace Eigen {
-
-namespace internal {
-
-template<typename MatrixType, int Mode>
-struct traits<SparseTriangularView<MatrixType,Mode> >
-: public traits<MatrixType>
-{};
-
-} // namespace internal
-
-template<typename MatrixType, int Mode> class SparseTriangularView
- : public SparseMatrixBase<SparseTriangularView<MatrixType,Mode> >
+namespace Eigen {
+
+/** \ingroup SparseCore_Module
+ *
+ * \brief Base class for a triangular part in a \b sparse matrix
+ *
+ * This class is an abstract base class of class TriangularView, and objects of type TriangularViewImpl cannot be instantiated.
+ * It extends class TriangularView with additional methods which are available for sparse expressions only.
+ *
+ * \sa class TriangularView, SparseMatrixBase::triangularView()
+ */
+template<typename MatrixType, unsigned int Mode> class TriangularViewImpl<MatrixType,Mode,Sparse>
+ : public SparseMatrixBase<TriangularView<MatrixType,Mode> >
{
enum { SkipFirst = ((Mode&Lower) && !(MatrixType::Flags&RowMajorBit))
|| ((Mode&Upper) && (MatrixType::Flags&RowMajorBit)),
@@ -31,147 +31,157 @@ template<typename MatrixType, int Mode> class SparseTriangularView
SkipDiag = (Mode&ZeroDiag) ? 1 : 0,
HasUnitDiag = (Mode&UnitDiag) ? 1 : 0
};
+
+ typedef TriangularView<MatrixType,Mode> TriangularViewType;
+
+ protected:
+ // dummy solve function to make TriangularView happy.
+ void solve() const;
+ typedef SparseMatrixBase<TriangularViewType> Base;
public:
- EIGEN_SPARSE_PUBLIC_INTERFACE(SparseTriangularView)
-
- class InnerIterator;
- class ReverseInnerIterator;
-
- inline Index rows() const { return m_matrix.rows(); }
- inline Index cols() const { return m_matrix.cols(); }
-
+ EIGEN_SPARSE_PUBLIC_INTERFACE(TriangularViewType)
+
typedef typename MatrixType::Nested MatrixTypeNested;
typedef typename internal::remove_reference<MatrixTypeNested>::type MatrixTypeNestedNonRef;
typedef typename internal::remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;
- inline SparseTriangularView(const MatrixType& matrix) : m_matrix(matrix) {}
-
- /** \internal */
- inline const MatrixTypeNestedCleaned& nestedExpression() const { return m_matrix; }
-
- template<typename OtherDerived>
- typename internal::plain_matrix_type_column_major<OtherDerived>::type
- solve(const MatrixBase<OtherDerived>& other) const;
+ template<typename RhsType, typename DstType>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE void _solve_impl(const RhsType &rhs, DstType &dst) const {
+ if(!(internal::is_same<RhsType,DstType>::value && internal::extract_data(dst) == internal::extract_data(rhs)))
+ dst = rhs;
+ this->solveInPlace(dst);
+ }
+ /** Applies the inverse of \c *this to the dense vector or matrix \a other, "in-place" */
template<typename OtherDerived> void solveInPlace(MatrixBase<OtherDerived>& other) const;
- template<typename OtherDerived> void solveInPlace(SparseMatrixBase<OtherDerived>& other) const;
- protected:
- MatrixTypeNested m_matrix;
+ /** Applies the inverse of \c *this to the sparse vector or matrix \a other, "in-place" */
+ template<typename OtherDerived> void solveInPlace(SparseMatrixBase<OtherDerived>& other) const;
+
};
-template<typename MatrixType, int Mode>
-class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixTypeNestedCleaned::InnerIterator
+namespace internal {
+
+template<typename ArgType, unsigned int Mode>
+struct unary_evaluator<TriangularView<ArgType,Mode>, IteratorBased>
+ : evaluator_base<TriangularView<ArgType,Mode> >
{
- typedef typename MatrixTypeNestedCleaned::InnerIterator Base;
- typedef typename SparseTriangularView::Index Index;
- public:
+ typedef TriangularView<ArgType,Mode> XprType;
+
+protected:
+
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::StorageIndex StorageIndex;
+ typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
+
+ enum { SkipFirst = ((Mode&Lower) && !(ArgType::Flags&RowMajorBit))
+ || ((Mode&Upper) && (ArgType::Flags&RowMajorBit)),
+ SkipLast = !SkipFirst,
+ SkipDiag = (Mode&ZeroDiag) ? 1 : 0,
+ HasUnitDiag = (Mode&UnitDiag) ? 1 : 0
+ };
+
+public:
+
+ enum {
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
+ Flags = XprType::Flags
+ };
+
+ explicit unary_evaluator(const XprType &xpr) : m_argImpl(xpr.nestedExpression()), m_arg(xpr.nestedExpression()) {}
+
+ inline Index nonZerosEstimate() const {
+ return m_argImpl.nonZerosEstimate();
+ }
+
+ class InnerIterator : public EvalIterator
+ {
+ typedef EvalIterator Base;
+ public:
- EIGEN_STRONG_INLINE InnerIterator(const SparseTriangularView& view, Index outer)
- : Base(view.nestedExpression(), outer), m_returnOne(false)
- {
- if(SkipFirst)
+ EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& xprEval, Index outer)
+ : Base(xprEval.m_argImpl,outer), m_returnOne(false), m_containsDiag(Base::outer()<xprEval.m_arg.innerSize())
{
- while((*this) && ((HasUnitDiag||SkipDiag) ? this->index()<=outer : this->index()<outer))
- Base::operator++();
- if(HasUnitDiag)
- m_returnOne = true;
+ if(SkipFirst)
+ {
+ while((*this) && ((HasUnitDiag||SkipDiag) ? this->index()<=outer : this->index()<outer))
+ Base::operator++();
+ if(HasUnitDiag)
+ m_returnOne = m_containsDiag;
+ }
+ else if(HasUnitDiag && ((!Base::operator bool()) || Base::index()>=Base::outer()))
+ {
+ if((!SkipFirst) && Base::operator bool())
+ Base::operator++();
+ m_returnOne = m_containsDiag;
+ }
}
- else if(HasUnitDiag && ((!Base::operator bool()) || Base::index()>=Base::outer()))
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
{
- if((!SkipFirst) && Base::operator bool())
+ if(HasUnitDiag && m_returnOne)
+ m_returnOne = false;
+ else
+ {
Base::operator++();
- m_returnOne = true;
+ if(HasUnitDiag && (!SkipFirst) && ((!Base::operator bool()) || Base::index()>=Base::outer()))
+ {
+ if((!SkipFirst) && Base::operator bool())
+ Base::operator++();
+ m_returnOne = m_containsDiag;
+ }
+ }
+ return *this;
}
- }
-
- EIGEN_STRONG_INLINE InnerIterator& operator++()
- {
- if(HasUnitDiag && m_returnOne)
- m_returnOne = false;
- else
+
+ EIGEN_STRONG_INLINE operator bool() const
{
- Base::operator++();
- if(HasUnitDiag && (!SkipFirst) && ((!Base::operator bool()) || Base::index()>=Base::outer()))
+ if(HasUnitDiag && m_returnOne)
+ return true;
+ if(SkipFirst) return Base::operator bool();
+ else
{
- if((!SkipFirst) && Base::operator bool())
- Base::operator++();
- m_returnOne = true;
+ if (SkipDiag) return (Base::operator bool() && this->index() < this->outer());
+ else return (Base::operator bool() && this->index() <= this->outer());
}
}
- return *this;
- }
-
- inline Index row() const { return (MatrixType::Flags&RowMajorBit ? Base::outer() : this->index()); }
- inline Index col() const { return (MatrixType::Flags&RowMajorBit ? this->index() : Base::outer()); }
- inline Index index() const
- {
- if(HasUnitDiag && m_returnOne) return Base::outer();
- else return Base::index();
- }
- inline Scalar value() const
- {
- if(HasUnitDiag && m_returnOne) return Scalar(1);
- else return Base::value();
- }
- EIGEN_STRONG_INLINE operator bool() const
- {
- if(HasUnitDiag && m_returnOne)
- return true;
- if(SkipFirst) return Base::operator bool();
- else
+// inline Index row() const { return (ArgType::Flags&RowMajorBit ? Base::outer() : this->index()); }
+// inline Index col() const { return (ArgType::Flags&RowMajorBit ? this->index() : Base::outer()); }
+ inline StorageIndex index() const
{
- if (SkipDiag) return (Base::operator bool() && this->index() < this->outer());
- else return (Base::operator bool() && this->index() <= this->outer());
+ if(HasUnitDiag && m_returnOne) return internal::convert_index<StorageIndex>(Base::outer());
+ else return Base::index();
}
- }
- protected:
- bool m_returnOne;
-};
-
-template<typename MatrixType, int Mode>
-class SparseTriangularView<MatrixType,Mode>::ReverseInnerIterator : public MatrixTypeNestedCleaned::ReverseInnerIterator
-{
- typedef typename MatrixTypeNestedCleaned::ReverseInnerIterator Base;
- typedef typename SparseTriangularView::Index Index;
- public:
-
- EIGEN_STRONG_INLINE ReverseInnerIterator(const SparseTriangularView& view, Index outer)
- : Base(view.nestedExpression(), outer)
- {
- eigen_assert((!HasUnitDiag) && "ReverseInnerIterator does not support yet triangular views with a unit diagonal");
- if(SkipLast) {
- while((*this) && (SkipDiag ? this->index()>=outer : this->index()>outer))
- --(*this);
- }
- }
-
- EIGEN_STRONG_INLINE ReverseInnerIterator& operator--()
- { Base::operator--(); return *this; }
-
- inline Index row() const { return Base::row(); }
- inline Index col() const { return Base::col(); }
-
- EIGEN_STRONG_INLINE operator bool() const
- {
- if (SkipLast) return Base::operator bool() ;
- else
+ inline Scalar value() const
{
- if(SkipDiag) return (Base::operator bool() && this->index() > this->outer());
- else return (Base::operator bool() && this->index() >= this->outer());
+ if(HasUnitDiag && m_returnOne) return Scalar(1);
+ else return Base::value();
}
- }
+
+ protected:
+ bool m_returnOne;
+ bool m_containsDiag;
+ private:
+ Scalar& valueRef();
+ };
+
+protected:
+ evaluator<ArgType> m_argImpl;
+ const ArgType& m_arg;
};
+} // end namespace internal
+
template<typename Derived>
template<int Mode>
-inline const SparseTriangularView<Derived, Mode>
+inline const TriangularView<const Derived, Mode>
SparseMatrixBase<Derived>::triangularView() const
{
- return derived();
+ return TriangularView<const Derived, Mode>(derived());
}
} // end namespace Eigen
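
A minimal sketch of the user-facing entry point now backed by TriangularViewImpl<...,Sparse>; the matrix and right-hand side below are illustrative:

#include <Eigen/SparseCore>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> L(2,2);
  L.insert(0,0) = 2.0;
  L.insert(1,0) = 1.0;
  L.insert(1,1) = 4.0;
  Eigen::VectorXd b(2); b << 2.0, 6.0;
  L.triangularView<Eigen::Lower>().solveInPlace(b);  // forward substitution, in place
  std::cout << b.transpose() << "\n";                // prints: 1 1.25
}
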
diff --git a/eigen/Eigen/src/SparseCore/SparseUtil.h b/eigen/Eigen/src/SparseCore/SparseUtil.h
index d627546..74df0d4 100644
--- a/eigen/Eigen/src/SparseCore/SparseUtil.h
+++ b/eigen/Eigen/src/SparseCore/SparseUtil.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -37,42 +37,23 @@ EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \
}
#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
-EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =) \
-EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, +=) \
-EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \
-EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \
-EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=)
-
-#define _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, BaseClass) \
- typedef BaseClass Base; \
- typedef typename Eigen::internal::traits<Derived >::Scalar Scalar; \
- typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
- typedef typename Eigen::internal::nested<Derived >::type Nested; \
- typedef typename Eigen::internal::traits<Derived >::StorageKind StorageKind; \
- typedef typename Eigen::internal::traits<Derived >::Index Index; \
- enum { RowsAtCompileTime = Eigen::internal::traits<Derived >::RowsAtCompileTime, \
- ColsAtCompileTime = Eigen::internal::traits<Derived >::ColsAtCompileTime, \
- Flags = Eigen::internal::traits<Derived >::Flags, \
- CoeffReadCost = Eigen::internal::traits<Derived >::CoeffReadCost, \
- SizeAtCompileTime = Base::SizeAtCompileTime, \
- IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \
- using Base::derived; \
- using Base::const_cast_derived;
+EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =)
+
#define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \
- _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase<Derived >)
+ EIGEN_GENERIC_PUBLIC_INTERFACE(Derived)
+
const int CoherentAccessPattern = 0x1;
const int InnerRandomAccessPattern = 0x2 | CoherentAccessPattern;
const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern;
const int RandomAccessPattern = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern;
-template<typename _Scalar, int _Flags = 0, typename _Index = int> class SparseMatrix;
-template<typename _Scalar, int _Flags = 0, typename _Index = int> class DynamicSparseMatrix;
-template<typename _Scalar, int _Flags = 0, typename _Index = int> class SparseVector;
-template<typename _Scalar, int _Flags = 0, typename _Index = int> class MappedSparseMatrix;
+template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class SparseMatrix;
+template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class DynamicSparseMatrix;
+template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class SparseVector;
+template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class MappedSparseMatrix;
-template<typename MatrixType, int Mode> class SparseTriangularView;
template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView;
template<typename Lhs, typename Rhs> class SparseDiagonalProduct;
template<typename MatrixType> class SparseView;
@@ -84,42 +65,44 @@ template<typename Lhs, typename Rhs, bool Transpose> class SparseDenseOuterProdu
template<typename Lhs, typename Rhs> struct SparseSparseProductReturnType;
template<typename Lhs, typename Rhs,
- int InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(internal::traits<Lhs>::ColsAtCompileTime,internal::traits<Rhs>::RowsAtCompileTime)> struct DenseSparseProductReturnType;
+ int InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(internal::traits<Lhs>::ColsAtCompileTime,internal::traits<Rhs>::RowsAtCompileTime)> struct DenseSparseProductReturnType;
+
template<typename Lhs, typename Rhs,
int InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(internal::traits<Lhs>::ColsAtCompileTime,internal::traits<Rhs>::RowsAtCompileTime)> struct SparseDenseProductReturnType;
template<typename MatrixType,int UpLo> class SparseSymmetricPermutationProduct;
namespace internal {
-template<typename T,int Rows,int Cols> struct sparse_eval;
+template<typename T,int Rows,int Cols,int Flags> struct sparse_eval;
template<typename T> struct eval<T,Sparse>
- : public sparse_eval<T, traits<T>::RowsAtCompileTime,traits<T>::ColsAtCompileTime>
+ : sparse_eval<T, traits<T>::RowsAtCompileTime,traits<T>::ColsAtCompileTime,traits<T>::Flags>
{};
-template<typename T,int Cols> struct sparse_eval<T,1,Cols> {
+template<typename T,int Cols,int Flags> struct sparse_eval<T,1,Cols,Flags> {
typedef typename traits<T>::Scalar _Scalar;
- typedef typename traits<T>::Index _Index;
+ typedef typename traits<T>::StorageIndex _StorageIndex;
public:
- typedef SparseVector<_Scalar, RowMajor, _Index> type;
+ typedef SparseVector<_Scalar, RowMajor, _StorageIndex> type;
};
-template<typename T,int Rows> struct sparse_eval<T,Rows,1> {
+template<typename T,int Rows,int Flags> struct sparse_eval<T,Rows,1,Flags> {
typedef typename traits<T>::Scalar _Scalar;
- typedef typename traits<T>::Index _Index;
+ typedef typename traits<T>::StorageIndex _StorageIndex;
public:
- typedef SparseVector<_Scalar, ColMajor, _Index> type;
+ typedef SparseVector<_Scalar, ColMajor, _StorageIndex> type;
};
-template<typename T,int Rows,int Cols> struct sparse_eval {
+// TODO this seems almost identical to plain_matrix_type<T, Sparse>
+template<typename T,int Rows,int Cols,int Flags> struct sparse_eval {
typedef typename traits<T>::Scalar _Scalar;
- typedef typename traits<T>::Index _Index;
- enum { _Options = ((traits<T>::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
+ typedef typename traits<T>::StorageIndex _StorageIndex;
+ enum { _Options = ((Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
public:
- typedef SparseMatrix<_Scalar, _Options, _Index> type;
+ typedef SparseMatrix<_Scalar, _Options, _StorageIndex> type;
};
-template<typename T> struct sparse_eval<T,1,1> {
+template<typename T,int Flags> struct sparse_eval<T,1,1,Flags> {
typedef typename traits<T>::Scalar _Scalar;
public:
typedef Matrix<_Scalar, 1, 1> type;
@@ -128,12 +111,35 @@ template<typename T> struct sparse_eval<T,1,1> {
template<typename T> struct plain_matrix_type<T,Sparse>
{
typedef typename traits<T>::Scalar _Scalar;
- typedef typename traits<T>::Index _Index;
- enum { _Options = ((traits<T>::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
+ typedef typename traits<T>::StorageIndex _StorageIndex;
+ enum { _Options = ((evaluator<T>::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
public:
- typedef SparseMatrix<_Scalar, _Options, _Index> type;
+ typedef SparseMatrix<_Scalar, _Options, _StorageIndex> type;
+};
+
+template<typename T>
+struct plain_object_eval<T,Sparse>
+ : sparse_eval<T, traits<T>::RowsAtCompileTime,traits<T>::ColsAtCompileTime, evaluator<T>::Flags>
+{};
+
+template<typename Decomposition, typename RhsType>
+struct solve_traits<Decomposition,RhsType,Sparse>
+{
+ typedef typename sparse_eval<RhsType, RhsType::RowsAtCompileTime, RhsType::ColsAtCompileTime,traits<RhsType>::Flags>::type PlainObject;
};
+template<typename Derived>
+struct generic_xpr_base<Derived, MatrixXpr, Sparse>
+{
+ typedef SparseMatrixBase<Derived> type;
+};
+
+struct SparseTriangularShape { static std::string debugName() { return "SparseTriangularShape"; } };
+struct SparseSelfAdjointShape { static std::string debugName() { return "SparseSelfAdjointShape"; } };
+
+template<> struct glue_shapes<SparseShape,SelfAdjointShape> { typedef SparseSelfAdjointShape type; };
+template<> struct glue_shapes<SparseShape,TriangularShape > { typedef SparseTriangularShape type; };
+
} // end namespace internal
/** \ingroup SparseCore_Module
@@ -144,26 +150,26 @@ template<typename T> struct plain_matrix_type<T,Sparse>
*
* \sa SparseMatrix::setFromTriplets()
*/
-template<typename Scalar, typename Index=typename SparseMatrix<Scalar>::Index >
+template<typename Scalar, typename StorageIndex=typename SparseMatrix<Scalar>::StorageIndex >
class Triplet
{
public:
Triplet() : m_row(0), m_col(0), m_value(0) {}
- Triplet(const Index& i, const Index& j, const Scalar& v = Scalar(0))
+ Triplet(const StorageIndex& i, const StorageIndex& j, const Scalar& v = Scalar(0))
: m_row(i), m_col(j), m_value(v)
{}
/** \returns the row index of the element */
- const Index& row() const { return m_row; }
+ const StorageIndex& row() const { return m_row; }
/** \returns the column index of the element */
- const Index& col() const { return m_col; }
+ const StorageIndex& col() const { return m_col; }
/** \returns the value of the element */
const Scalar& value() const { return m_value; }
protected:
- Index m_row, m_col;
+ StorageIndex m_row, m_col;
Scalar m_value;
};
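
The Triplet change above is an index-type rename only; the typical setFromTriplets() flow is unchanged. A short sketch with assumed sizes and values:

#include <Eigen/SparseCore>
#include <vector>

int main()
{
  std::vector<Eigen::Triplet<double> > triplets;
  triplets.push_back(Eigen::Triplet<double>(0, 0, 1.0));
  triplets.push_back(Eigen::Triplet<double>(1, 2, 3.0));
  triplets.push_back(Eigen::Triplet<double>(1, 2, 2.0));  // duplicates are summed

  Eigen::SparseMatrix<double> A(2, 3);
  A.setFromTriplets(triplets.begin(), triplets.end());    // A.coeff(1,2) == 5.0
  return 0;
}
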
diff --git a/eigen/Eigen/src/SparseCore/SparseVector.h b/eigen/Eigen/src/SparseCore/SparseVector.h
index c7ee89c..19b0fbc 100644
--- a/eigen/Eigen/src/SparseCore/SparseVector.h
+++ b/eigen/Eigen/src/SparseCore/SparseVector.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -22,15 +22,15 @@ namespace Eigen {
* See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
*
* This class can be extended with the help of the plugin mechanism described on the page
- * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEVECTOR_PLUGIN.
+ * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEVECTOR_PLUGIN.
*/
namespace internal {
-template<typename _Scalar, int _Options, typename _Index>
-struct traits<SparseVector<_Scalar, _Options, _Index> >
+template<typename _Scalar, int _Options, typename _StorageIndex>
+struct traits<SparseVector<_Scalar, _Options, _StorageIndex> >
{
typedef _Scalar Scalar;
- typedef _Index Index;
+ typedef _StorageIndex StorageIndex;
typedef Sparse StorageKind;
typedef MatrixXpr XprKind;
enum {
@@ -40,8 +40,7 @@ struct traits<SparseVector<_Scalar, _Options, _Index> >
ColsAtCompileTime = IsColVector ? 1 : Dynamic,
MaxRowsAtCompileTime = RowsAtCompileTime,
MaxColsAtCompileTime = ColsAtCompileTime,
- Flags = _Options | NestByRefBit | LvalueBit | (IsColVector ? 0 : RowMajorBit),
- CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ Flags = _Options | NestByRefBit | LvalueBit | (IsColVector ? 0 : RowMajorBit) | CompressedAccessBit,
SupportedAccessPatterns = InnerRandomAccessPattern
};
};
@@ -61,18 +60,18 @@ struct sparse_vector_assign_selector;
}
-template<typename _Scalar, int _Options, typename _Index>
+template<typename _Scalar, int _Options, typename _StorageIndex>
class SparseVector
- : public SparseMatrixBase<SparseVector<_Scalar, _Options, _Index> >
+ : public SparseCompressedBase<SparseVector<_Scalar, _Options, _StorageIndex> >
{
- typedef SparseMatrixBase<SparseVector> SparseBase;
-
+ typedef SparseCompressedBase<SparseVector> Base;
+ using Base::convert_index;
public:
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector)
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)
- typedef internal::CompressedStorage<Scalar,Index> Storage;
+ typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
enum { IsColVector = internal::traits<SparseVector>::IsColVector };
enum {
@@ -87,9 +86,14 @@ class SparseVector
EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return m_data.valuePtr(); }
EIGEN_STRONG_INLINE Scalar* valuePtr() { return m_data.valuePtr(); }
- EIGEN_STRONG_INLINE const Index* innerIndexPtr() const { return m_data.indexPtr(); }
- EIGEN_STRONG_INLINE Index* innerIndexPtr() { return m_data.indexPtr(); }
+ EIGEN_STRONG_INLINE const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
+ EIGEN_STRONG_INLINE StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
+ inline const StorageIndex* outerIndexPtr() const { return 0; }
+ inline StorageIndex* outerIndexPtr() { return 0; }
+ inline const StorageIndex* innerNonZeroPtr() const { return 0; }
+ inline StorageIndex* innerNonZeroPtr() { return 0; }
+
/** \internal */
inline Storage& data() { return m_data; }
/** \internal */
@@ -103,13 +107,13 @@ class SparseVector
inline Scalar coeff(Index i) const
{
eigen_assert(i>=0 && i<m_size);
- return m_data.at(i);
+ return m_data.at(StorageIndex(i));
}
inline Scalar& coeffRef(Index row, Index col)
{
eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
- return coeff(IsColVector ? row : col);
+ return coeffRef(IsColVector ? row : col);
}
/** \returns a reference to the coefficient value at given index \a i
@@ -121,18 +125,19 @@ class SparseVector
inline Scalar& coeffRef(Index i)
{
eigen_assert(i>=0 && i<m_size);
- return m_data.atWithInsertion(i);
+
+ return m_data.atWithInsertion(StorageIndex(i));
}
public:
- class InnerIterator;
- class ReverseInnerIterator;
+ typedef typename Base::InnerIterator InnerIterator;
+ typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
inline void setZero() { m_data.clear(); }
/** \returns the number of non zero coefficients */
- inline Index nonZeros() const { return static_cast<Index>(m_data.size()); }
+ inline Index nonZeros() const { return m_data.size(); }
inline void startVec(Index outer)
{
@@ -151,6 +156,18 @@ class SparseVector
m_data.append(0, i);
return m_data.value(m_data.size()-1);
}
+
+ Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
+ {
+ EIGEN_UNUSED_VARIABLE(outer);
+ eigen_assert(outer==0);
+ return insertBackUnordered(inner);
+ }
+ inline Scalar& insertBackUnordered(Index i)
+ {
+ m_data.append(0, i);
+ return m_data.value(m_data.size()-1);
+ }
inline Scalar& insert(Index row, Index col)
{
@@ -177,7 +194,7 @@ class SparseVector
m_data.value(p+1) = m_data.value(p);
--p;
}
- m_data.index(p+1) = i;
+ m_data.index(p+1) = convert_index(i);
m_data.value(p+1) = 0;
return m_data.value(p+1);
}
@@ -189,28 +206,59 @@ class SparseVector
inline void finalize() {}
+ /** \copydoc SparseMatrix::prune(const Scalar&,const RealScalar&) */
void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
m_data.prune(reference,epsilon);
}
+ /** Resizes the sparse vector to \a rows x \a cols
+ *
+ * This method is provided for compatibility with matrices.
+ * For a column vector, \a cols must be equal to 1.
+ * For a row vector, \a rows must be equal to 1.
+ *
+ * \sa resize(Index)
+ */
void resize(Index rows, Index cols)
{
- eigen_assert(rows==1 || cols==1);
+ eigen_assert((IsColVector ? cols : rows)==1 && "Outer dimension must equal 1");
resize(IsColVector ? rows : cols);
}
+ /** Resizes the sparse vector to \a newSize
+ * This method deletes all entries, thus leaving an empty sparse vector.
+ *
+ * \sa conservativeResize(), setZero() */
void resize(Index newSize)
{
m_size = newSize;
m_data.clear();
}
+ /** Resizes the sparse vector to \a newSize, while leaving old values untouched.
+ *
+ * If the size of the vector is decreased, then the storage of the out-of-bounds coefficients is kept and reserved.
+ * Call .data().squeeze() to free extra memory.
+ *
+ * \sa reserve(), setZero()
+ */
+ void conservativeResize(Index newSize)
+ {
+ if (newSize < m_size)
+ {
+ Index i = 0;
+ while (i<m_data.size() && m_data.index(i)<newSize) ++i;
+ m_data.resize(i);
+ }
+ m_size = newSize;
+ }
+
void resizeNonZeros(Index size) { m_data.resize(size); }
inline SparseVector() : m_size(0) { check_template_parameters(); resize(0); }
- inline SparseVector(Index size) : m_size(0) { check_template_parameters(); resize(size); }
+ explicit inline SparseVector(Index size) : m_size(0) { check_template_parameters(); resize(size); }
inline SparseVector(Index rows, Index cols) : m_size(0) { check_template_parameters(); resize(rows,cols); }
@@ -218,12 +266,15 @@ class SparseVector
inline SparseVector(const SparseMatrixBase<OtherDerived>& other)
: m_size(0)
{
+ #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
+ EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
+ #endif
check_template_parameters();
*this = other.derived();
}
inline SparseVector(const SparseVector& other)
- : SparseBase(other), m_size(0)
+ : Base(other), m_size(0)
{
check_template_parameters();
*this = other.derived();
@@ -239,6 +290,14 @@ class SparseVector
m_data.swap(other.m_data);
}
+ template<int OtherOptions>
+ inline void swap(SparseMatrix<Scalar,OtherOptions,StorageIndex>& other)
+ {
+ eigen_assert(other.outerSize()==1);
+ std::swap(m_size, other.m_innerSize);
+ m_data.swap(other.m_data);
+ }
+
inline SparseVector& operator=(const SparseVector& other)
{
if (other.isRValue())
@@ -337,7 +396,7 @@ protected:
static void check_template_parameters()
{
- EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
+ EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
EIGEN_STATIC_ASSERT((_Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
}
@@ -345,77 +404,46 @@ protected:
Index m_size;
};
-template<typename Scalar, int _Options, typename _Index>
-class SparseVector<Scalar,_Options,_Index>::InnerIterator
-{
- public:
- InnerIterator(const SparseVector& vec, Index outer=0)
- : m_data(vec.m_data), m_id(0), m_end(static_cast<Index>(m_data.size()))
- {
- EIGEN_UNUSED_VARIABLE(outer);
- eigen_assert(outer==0);
- }
-
- InnerIterator(const internal::CompressedStorage<Scalar,Index>& data)
- : m_data(data), m_id(0), m_end(static_cast<Index>(m_data.size()))
- {}
-
- inline InnerIterator& operator++() { m_id++; return *this; }
-
- inline Scalar value() const { return m_data.value(m_id); }
- inline Scalar& valueRef() { return const_cast<Scalar&>(m_data.value(m_id)); }
-
- inline Index index() const { return m_data.index(m_id); }
- inline Index row() const { return IsColVector ? index() : 0; }
- inline Index col() const { return IsColVector ? 0 : index(); }
-
- inline operator bool() const { return (m_id < m_end); }
-
- protected:
- const internal::CompressedStorage<Scalar,Index>& m_data;
- Index m_id;
- const Index m_end;
-};
+namespace internal {
-template<typename Scalar, int _Options, typename _Index>
-class SparseVector<Scalar,_Options,_Index>::ReverseInnerIterator
+template<typename _Scalar, int _Options, typename _Index>
+struct evaluator<SparseVector<_Scalar,_Options,_Index> >
+ : evaluator_base<SparseVector<_Scalar,_Options,_Index> >
{
- public:
- ReverseInnerIterator(const SparseVector& vec, Index outer=0)
- : m_data(vec.m_data), m_id(static_cast<Index>(m_data.size())), m_start(0)
- {
- EIGEN_UNUSED_VARIABLE(outer);
- eigen_assert(outer==0);
- }
-
- ReverseInnerIterator(const internal::CompressedStorage<Scalar,Index>& data)
- : m_data(data), m_id(static_cast<Index>(m_data.size())), m_start(0)
- {}
-
- inline ReverseInnerIterator& operator--() { m_id--; return *this; }
-
- inline Scalar value() const { return m_data.value(m_id-1); }
- inline Scalar& valueRef() { return const_cast<Scalar&>(m_data.value(m_id-1)); }
-
- inline Index index() const { return m_data.index(m_id-1); }
- inline Index row() const { return IsColVector ? index() : 0; }
- inline Index col() const { return IsColVector ? 0 : index(); }
-
- inline operator bool() const { return (m_id > m_start); }
+ typedef SparseVector<_Scalar,_Options,_Index> SparseVectorType;
+ typedef evaluator_base<SparseVectorType> Base;
+ typedef typename SparseVectorType::InnerIterator InnerIterator;
+ typedef typename SparseVectorType::ReverseInnerIterator ReverseInnerIterator;
+
+ enum {
+ CoeffReadCost = NumTraits<_Scalar>::ReadCost,
+ Flags = SparseVectorType::Flags
+ };
- protected:
- const internal::CompressedStorage<Scalar,Index>& m_data;
- Index m_id;
- const Index m_start;
+ evaluator() : Base() {}
+
+ explicit evaluator(const SparseVectorType &mat) : m_matrix(&mat)
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+ inline Index nonZerosEstimate() const {
+ return m_matrix->nonZeros();
+ }
+
+ operator SparseVectorType&() { return m_matrix->const_cast_derived(); }
+ operator const SparseVectorType&() const { return *m_matrix; }
+
+ const SparseVectorType *m_matrix;
};
-namespace internal {
-
template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_Inner> {
static void run(Dest& dst, const Src& src) {
eigen_internal_assert(src.innerSize()==src.size());
- for(typename Src::InnerIterator it(src, 0); it; ++it)
+ typedef internal::evaluator<Src> SrcEvaluatorType;
+ SrcEvaluatorType srcEval(src);
+ for(typename SrcEvaluatorType::InnerIterator it(srcEval, 0); it; ++it)
dst.insert(it.index()) = it.value();
}
};
@@ -424,9 +452,11 @@ template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_Outer> {
static void run(Dest& dst, const Src& src) {
eigen_internal_assert(src.outerSize()==src.size());
- for(typename Dest::Index i=0; i<src.size(); ++i)
+ typedef internal::evaluator<Src> SrcEvaluatorType;
+ SrcEvaluatorType srcEval(src);
+ for(Index i=0; i<src.size(); ++i)
{
- typename Src::InnerIterator it(src, i);
+ typename SrcEvaluatorType::InnerIterator it(srcEval, i);
if(it)
dst.insert(i) = it.value();
}
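
A small sketch of the conservativeResize() semantics documented above (sizes and values illustrative, not from the patch):

#include <Eigen/SparseCore>
#include <iostream>

int main()
{
  Eigen::SparseVector<double> v(10);
  v.insert(3) = 1.0;
  v.insert(7) = 2.0;
  v.conservativeResize(5);     // keeps index 3, drops the out-of-bounds index 7
  std::cout << v.size() << " " << v.nonZeros() << "\n";  // prints: 5 1
}
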
diff --git a/eigen/Eigen/src/SparseCore/SparseView.h b/eigen/Eigen/src/SparseCore/SparseView.h
index 2820b39..7c4aea7 100644
--- a/eigen/Eigen/src/SparseCore/SparseView.h
+++ b/eigen/Eigen/src/SparseCore/SparseView.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010 Daniel Lowengrub <lowdanie@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
@@ -18,7 +18,7 @@ namespace internal {
template<typename MatrixType>
struct traits<SparseView<MatrixType> > : traits<MatrixType>
{
- typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::StorageIndex StorageIndex;
typedef Sparse StorageKind;
enum {
Flags = int(traits<MatrixType>::Flags) & (RowMajorBit)
@@ -27,71 +27,225 @@ struct traits<SparseView<MatrixType> > : traits<MatrixType>
} // end namespace internal
+/** \ingroup SparseCore_Module
+ * \class SparseView
+ *
+ * \brief Expression of a dense or sparse matrix with zero or too small values removed
+ *
+ * \tparam MatrixType the type of the object of which we are removing the small entries
+ *
+ * This class represents an expression of a given dense or sparse matrix from which
+ * entries smaller than \c reference * \c epsilon have been removed.
+ * It is the return type of MatrixBase::sparseView() and SparseMatrixBase::pruned()
+ * and most of the time this is the only way it is used.
+ *
+ * \sa MatrixBase::sparseView(), SparseMatrixBase::pruned()
+ */
template<typename MatrixType>
class SparseView : public SparseMatrixBase<SparseView<MatrixType> >
{
typedef typename MatrixType::Nested MatrixTypeNested;
typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+ typedef SparseMatrixBase<SparseView > Base;
public:
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseView)
+ typedef typename internal::remove_all<MatrixType>::type NestedExpression;
explicit SparseView(const MatrixType& mat, const Scalar& reference = Scalar(0),
const RealScalar &epsilon = NumTraits<Scalar>::dummy_precision())
: m_matrix(mat), m_reference(reference), m_epsilon(epsilon) {}
- class InnerIterator;
-
inline Index rows() const { return m_matrix.rows(); }
inline Index cols() const { return m_matrix.cols(); }
inline Index innerSize() const { return m_matrix.innerSize(); }
inline Index outerSize() const { return m_matrix.outerSize(); }
-
+
+ /** \returns the nested expression */
+ const typename internal::remove_all<MatrixTypeNested>::type&
+ nestedExpression() const { return m_matrix; }
+
+ Scalar reference() const { return m_reference; }
+ RealScalar epsilon() const { return m_epsilon; }
+
protected:
MatrixTypeNested m_matrix;
Scalar m_reference;
- typename NumTraits<Scalar>::Real m_epsilon;
+ RealScalar m_epsilon;
};
-template<typename MatrixType>
-class SparseView<MatrixType>::InnerIterator : public _MatrixTypeNested::InnerIterator
-{
- typedef typename SparseView::Index Index;
-public:
- typedef typename _MatrixTypeNested::InnerIterator IterBase;
- InnerIterator(const SparseView& view, Index outer) :
- IterBase(view.m_matrix, outer), m_view(view)
- {
- incrementToNonZero();
- }
-
- EIGEN_STRONG_INLINE InnerIterator& operator++()
- {
- IterBase::operator++();
- incrementToNonZero();
- return *this;
- }
-
- using IterBase::value;
+namespace internal {
-protected:
- const SparseView& m_view;
+// TODO find a way to unify the two following variants
+// This is tricky because implementing an inner iterator on top of an IndexBased evaluator is
+// not easy because the evaluators do not expose the sizes of the underlying expression.
+
+template<typename ArgType>
+struct unary_evaluator<SparseView<ArgType>, IteratorBased>
+ : public evaluator_base<SparseView<ArgType> >
+{
+ typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
+ public:
+ typedef SparseView<ArgType> XprType;
+
+ class InnerIterator : public EvalIterator
+ {
+ typedef typename XprType::Scalar Scalar;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, Index outer)
+ : EvalIterator(sve.m_argImpl,outer), m_view(sve.m_view)
+ {
+ incrementToNonZero();
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ {
+ EvalIterator::operator++();
+ incrementToNonZero();
+ return *this;
+ }
+
+ using EvalIterator::value;
+
+ protected:
+ const XprType &m_view;
+
+ private:
+ void incrementToNonZero()
+ {
+ while((bool(*this)) && internal::isMuchSmallerThan(value(), m_view.reference(), m_view.epsilon()))
+ {
+ EvalIterator::operator++();
+ }
+ }
+ };
+
+ enum {
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
+ Flags = XprType::Flags
+ };
+
+ explicit unary_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_view(xpr) {}
+
+ protected:
+ evaluator<ArgType> m_argImpl;
+ const XprType &m_view;
+};
-private:
- void incrementToNonZero()
- {
- while((bool(*this)) && internal::isMuchSmallerThan(value(), m_view.m_reference, m_view.m_epsilon))
+template<typename ArgType>
+struct unary_evaluator<SparseView<ArgType>, IndexBased>
+ : public evaluator_base<SparseView<ArgType> >
+{
+ public:
+ typedef SparseView<ArgType> XprType;
+ protected:
+ enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit };
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::StorageIndex StorageIndex;
+ public:
+
+ class InnerIterator
{
- IterBase::operator++();
- }
- }
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, Index outer)
+ : m_sve(sve), m_inner(0), m_outer(outer), m_end(sve.m_view.innerSize())
+ {
+ incrementToNonZero();
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ {
+ m_inner++;
+ incrementToNonZero();
+ return *this;
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const
+ {
+ return (IsRowMajor) ? m_sve.m_argImpl.coeff(m_outer, m_inner)
+ : m_sve.m_argImpl.coeff(m_inner, m_outer);
+ }
+
+ EIGEN_STRONG_INLINE StorageIndex index() const { return m_inner; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; }
+
+ protected:
+ const unary_evaluator &m_sve;
+ Index m_inner;
+ const Index m_outer;
+ const Index m_end;
+
+ private:
+ void incrementToNonZero()
+ {
+ while((bool(*this)) && internal::isMuchSmallerThan(value(), m_sve.m_view.reference(), m_sve.m_view.epsilon()))
+ {
+ m_inner++;
+ }
+ }
+ };
+
+ enum {
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
+ Flags = XprType::Flags
+ };
+
+ explicit unary_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_view(xpr) {}
+
+ protected:
+ evaluator<ArgType> m_argImpl;
+ const XprType &m_view;
};
+} // end namespace internal
+
+/** \ingroup SparseCore_Module
+ *
+ * \returns a sparse expression of the dense expression \c *this with values smaller than
+ * \a reference * \a epsilon removed.
+ *
+ * This method is typically used when prototyping to convert a quickly assembled dense Matrix \c D to a SparseMatrix \c S:
+ * \code
+ * MatrixXd D(n,m);
+ * SparseMatrix<double> S;
+ * S = D.sparseView(); // suppress numerical zeros (exact)
+ * S = D.sparseView(reference);
+ * S = D.sparseView(reference,epsilon);
+ * \endcode
+ * where \a reference is a meaningful non zero reference value,
+ * and \a epsilon is a tolerance factor defaulting to NumTraits<Scalar>::dummy_precision().
+ *
+ * \sa SparseMatrixBase::pruned(), class SparseView */
+template<typename Derived>
+const SparseView<Derived> MatrixBase<Derived>::sparseView(const Scalar& reference,
+ const typename NumTraits<Scalar>::Real& epsilon) const
+{
+ return SparseView<Derived>(derived(), reference, epsilon);
+}
+
+/** \returns an expression of \c *this with values smaller than
+ * \a reference * \a epsilon removed.
+ *
+ * This method is typically used in conjunction with the product of two sparse matrices
+ * to automatically prune the smallest values as follows:
+ * \code
+ * C = (A*B).pruned(); // suppress numerical zeros (exact)
+ * C = (A*B).pruned(ref);
+ * C = (A*B).pruned(ref,epsilon);
+ * \endcode
+ * where \c ref is a meaningful non zero reference value.
+ */
template<typename Derived>
-const SparseView<Derived> MatrixBase<Derived>::sparseView(const Scalar& m_reference,
- const typename NumTraits<Scalar>::Real& m_epsilon) const
+const SparseView<Derived>
+SparseMatrixBase<Derived>::pruned(const Scalar& reference,
+ const RealScalar& epsilon) const
{
- return SparseView<Derived>(derived(), m_reference, m_epsilon);
+ return SparseView<Derived>(derived(), reference, epsilon);
}
} // end namespace Eigen
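
A sketch tying the two evaluator variants above to user code: a dense argument goes through the IndexBased evaluator, while a sparse expression (e.g. a product passed to pruned()) takes the IteratorBased one. Values are illustrative:

#include <Eigen/SparseCore>
#include <iostream>

int main()
{
  Eigen::MatrixXd D = Eigen::MatrixXd::Zero(3,3);
  D(0,0) = 1.0;
  D(1,1) = 1e-20;
  Eigen::SparseMatrix<double> S = D.sparseView(1.0);  // drops entries much smaller than 1.0
  std::cout << S.nonZeros() << "\n";                  // prints: 1
}
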
diff --git a/eigen/Eigen/src/SparseCore/TriangularSolver.h b/eigen/Eigen/src/SparseCore/TriangularSolver.h
index ccc12af..f9c56ba 100644
--- a/eigen/Eigen/src/SparseCore/TriangularSolver.h
+++ b/eigen/Eigen/src/SparseCore/TriangularSolver.h
@@ -28,16 +28,19 @@ template<typename Lhs, typename Rhs, int Mode>
struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,RowMajor>
{
typedef typename Rhs::Scalar Scalar;
+ typedef evaluator<Lhs> LhsEval;
+ typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
static void run(const Lhs& lhs, Rhs& other)
{
- for(int col=0 ; col<other.cols() ; ++col)
+ LhsEval lhsEval(lhs);
+ for(Index col=0 ; col<other.cols() ; ++col)
{
- for(int i=0; i<lhs.rows(); ++i)
+ for(Index i=0; i<lhs.rows(); ++i)
{
Scalar tmp = other.coeff(i,col);
Scalar lastVal(0);
- int lastIndex = 0;
- for(typename Lhs::InnerIterator it(lhs, i); it; ++it)
+ Index lastIndex = 0;
+ for(LhsIterator it(lhsEval, i); it; ++it)
{
lastVal = it.value();
lastIndex = it.index();
@@ -62,15 +65,18 @@ template<typename Lhs, typename Rhs, int Mode>
struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,RowMajor>
{
typedef typename Rhs::Scalar Scalar;
+ typedef evaluator<Lhs> LhsEval;
+ typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
static void run(const Lhs& lhs, Rhs& other)
{
- for(int col=0 ; col<other.cols() ; ++col)
+ LhsEval lhsEval(lhs);
+ for(Index col=0 ; col<other.cols() ; ++col)
{
- for(int i=lhs.rows()-1 ; i>=0 ; --i)
+ for(Index i=lhs.rows()-1 ; i>=0 ; --i)
{
Scalar tmp = other.coeff(i,col);
Scalar l_ii(0);
- typename Lhs::InnerIterator it(lhs, i);
+ LhsIterator it(lhsEval, i);
while(it && it.index()<i)
++it;
if(!(Mode & UnitDiag))
@@ -86,10 +92,8 @@ struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,RowMajor>
tmp -= it.value() * other.coeff(it.index(),col);
}
- if (Mode & UnitDiag)
- other.coeffRef(i,col) = tmp;
- else
- other.coeffRef(i,col) = tmp/l_ii;
+ if (Mode & UnitDiag) other.coeffRef(i,col) = tmp;
+ else other.coeffRef(i,col) = tmp/l_ii;
}
}
}
@@ -100,16 +104,19 @@ template<typename Lhs, typename Rhs, int Mode>
struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,ColMajor>
{
typedef typename Rhs::Scalar Scalar;
+ typedef evaluator<Lhs> LhsEval;
+ typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
static void run(const Lhs& lhs, Rhs& other)
{
- for(int col=0 ; col<other.cols() ; ++col)
+ LhsEval lhsEval(lhs);
+ for(Index col=0 ; col<other.cols() ; ++col)
{
- for(int i=0; i<lhs.cols(); ++i)
+ for(Index i=0; i<lhs.cols(); ++i)
{
Scalar& tmp = other.coeffRef(i,col);
if (tmp!=Scalar(0)) // optimization when other is actually sparse
{
- typename Lhs::InnerIterator it(lhs, i);
+ LhsIterator it(lhsEval, i);
while(it && it.index()<i)
++it;
if(!(Mode & UnitDiag))
@@ -132,11 +139,14 @@ template<typename Lhs, typename Rhs, int Mode>
struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,ColMajor>
{
typedef typename Rhs::Scalar Scalar;
+ typedef evaluator<Lhs> LhsEval;
+ typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
static void run(const Lhs& lhs, Rhs& other)
{
- for(int col=0 ; col<other.cols() ; ++col)
+ LhsEval lhsEval(lhs);
+ for(Index col=0 ; col<other.cols() ; ++col)
{
- for(int i=lhs.cols()-1; i>=0; --i)
+ for(Index i=lhs.cols()-1; i>=0; --i)
{
Scalar& tmp = other.coeffRef(i,col);
if (tmp!=Scalar(0)) // optimization when other is actually sparse
@@ -144,13 +154,13 @@ struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,ColMajor>
if(!(Mode & UnitDiag))
{
// TODO replace this by a binary search. make sure the binary search is safe for partially sorted elements
- typename Lhs::ReverseInnerIterator it(lhs, i);
+ LhsIterator it(lhsEval, i);
while(it && it.index()!=i)
- --it;
+ ++it;
eigen_assert(it && it.index()==i);
other.coeffRef(i,col) /= it.value();
}
- typename Lhs::InnerIterator it(lhs, i);
+ LhsIterator it(lhsEval, i);
for(; it && it.index()<i; ++it)
other.coeffRef(it.index(), col) -= tmp * it.value();
}
@@ -161,11 +171,13 @@ struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,ColMajor>
} // end namespace internal
-template<typename ExpressionType,int Mode>
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+
+template<typename ExpressionType,unsigned int Mode>
template<typename OtherDerived>
-void SparseTriangularView<ExpressionType,Mode>::solveInPlace(MatrixBase<OtherDerived>& other) const
+void TriangularViewImpl<ExpressionType,Mode,Sparse>::solveInPlace(MatrixBase<OtherDerived>& other) const
{
- eigen_assert(m_matrix.cols() == m_matrix.rows() && m_matrix.cols() == other.rows());
+ eigen_assert(derived().cols() == derived().rows() && derived().cols() == other.rows());
eigen_assert((!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower)));
enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };
@@ -174,21 +186,12 @@ void SparseTriangularView<ExpressionType,Mode>::solveInPlace(MatrixBase<OtherDer
typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
OtherCopy otherCopy(other.derived());
- internal::sparse_solve_triangular_selector<ExpressionType, typename internal::remove_reference<OtherCopy>::type, Mode>::run(m_matrix, otherCopy);
+ internal::sparse_solve_triangular_selector<ExpressionType, typename internal::remove_reference<OtherCopy>::type, Mode>::run(derived().nestedExpression(), otherCopy);
if (copy)
other = otherCopy;
}
-
-template<typename ExpressionType,int Mode>
-template<typename OtherDerived>
-typename internal::plain_matrix_type_column_major<OtherDerived>::type
-SparseTriangularView<ExpressionType,Mode>::solve(const MatrixBase<OtherDerived>& other) const
-{
- typename internal::plain_matrix_type_column_major<OtherDerived>::type res(other);
- solveInPlace(res);
- return res;
-}
+#endif
// pure sparse path
@@ -208,18 +211,18 @@ template<typename Lhs, typename Rhs, int Mode, int UpLo>
struct sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
{
typedef typename Rhs::Scalar Scalar;
- typedef typename promote_index_type<typename traits<Lhs>::Index,
- typename traits<Rhs>::Index>::type Index;
+ typedef typename promote_index_type<typename traits<Lhs>::StorageIndex,
+ typename traits<Rhs>::StorageIndex>::type StorageIndex;
static void run(const Lhs& lhs, Rhs& other)
{
const bool IsLower = (UpLo==Lower);
- AmbiVector<Scalar,Index> tempVector(other.rows()*2);
+ AmbiVector<Scalar,StorageIndex> tempVector(other.rows()*2);
tempVector.setBounds(0,other.rows());
Rhs res(other.rows(), other.cols());
res.reserve(other.nonZeros());
- for(int col=0 ; col<other.cols() ; ++col)
+ for(Index col=0 ; col<other.cols() ; ++col)
{
// FIXME estimate number of non zeros
tempVector.init(.99/*float(other.col(col).nonZeros())/float(other.rows())*/);
@@ -230,7 +233,7 @@ struct sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
tempVector.coeffRef(rhsIt.index()) = rhsIt.value();
}
- for(int i=IsLower?0:lhs.cols()-1;
+ for(Index i=IsLower?0:lhs.cols()-1;
IsLower?i<lhs.cols():i>=0;
i+=IsLower?1:-1)
{
@@ -267,9 +270,9 @@ struct sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
}
- int count = 0;
+ Index count = 0;
// FIXME compute a reference value to filter zeros
- for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector/*,1e-12*/); it; ++it)
+ for (typename AmbiVector<Scalar,StorageIndex>::Iterator it(tempVector/*,1e-12*/); it; ++it)
{
++ count;
// std::cerr << "fill " << it.index() << ", " << col << "\n";
@@ -286,11 +289,12 @@ struct sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
} // end namespace internal
-template<typename ExpressionType,int Mode>
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+template<typename ExpressionType,unsigned int Mode>
template<typename OtherDerived>
-void SparseTriangularView<ExpressionType,Mode>::solveInPlace(SparseMatrixBase<OtherDerived>& other) const
+void TriangularViewImpl<ExpressionType,Mode,Sparse>::solveInPlace(SparseMatrixBase<OtherDerived>& other) const
{
- eigen_assert(m_matrix.cols() == m_matrix.rows() && m_matrix.cols() == other.rows());
+ eigen_assert(derived().cols() == derived().rows() && derived().cols() == other.rows());
eigen_assert( (!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower)));
// enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };
@@ -299,35 +303,12 @@ void SparseTriangularView<ExpressionType,Mode>::solveInPlace(SparseMatrixBase<Ot
// typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
// OtherCopy otherCopy(other.derived());
- internal::sparse_solve_triangular_sparse_selector<ExpressionType, OtherDerived, Mode>::run(m_matrix, other.derived());
+ internal::sparse_solve_triangular_sparse_selector<ExpressionType, OtherDerived, Mode>::run(derived().nestedExpression(), other.derived());
// if (copy)
// other = otherCopy;
}
-
-#ifdef EIGEN2_SUPPORT
-
-// deprecated stuff:
-
-/** \deprecated */
-template<typename Derived>
-template<typename OtherDerived>
-void SparseMatrixBase<Derived>::solveTriangularInPlace(MatrixBase<OtherDerived>& other) const
-{
- this->template triangular<Flags&(Upper|Lower)>().solveInPlace(other);
-}
-
-/** \deprecated */
-template<typename Derived>
-template<typename OtherDerived>
-typename internal::plain_matrix_type_column_major<OtherDerived>::type
-SparseMatrixBase<Derived>::solveTriangular(const MatrixBase<OtherDerived>& other) const
-{
- typename internal::plain_matrix_type_column_major<OtherDerived>::type res(other);
- derived().solveTriangularInPlace(res);
- return res;
-}
-#endif // EIGEN2_SUPPORT
+#endif
} // end namespace Eigen
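
Finally, a sketch of the sparse right-hand-side path above, driven through solveInPlace(SparseMatrixBase&); the inputs are illustrative:

#include <Eigen/SparseCore>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> L(2,2), B(2,1);
  L.insert(0,0) = 1.0;
  L.insert(1,0) = 1.0;
  L.insert(1,1) = 2.0;
  B.insert(1,0) = 4.0;
  L.triangularView<Eigen::Lower>().solveInPlace(B);          // sparse-in, sparse-out
  std::cout << B.coeff(0,0) << " " << B.coeff(1,0) << "\n";  // prints: 0 2
}
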