Diffstat (limited to 'eigen/Eigen/src/SparseCore/CompressedStorage.h')
-rw-r--r-- | eigen/Eigen/src/SparseCore/CompressedStorage.h | 138
1 file changed, 78 insertions(+), 60 deletions(-)
diff --git a/eigen/Eigen/src/SparseCore/CompressedStorage.h b/eigen/Eigen/src/SparseCore/CompressedStorage.h
index 34cad3d..d89fa0d 100644
--- a/eigen/Eigen/src/SparseCore/CompressedStorage.h
+++ b/eigen/Eigen/src/SparseCore/CompressedStorage.h
@@ -1,7 +1,7 @@
 // This file is part of Eigen, a lightweight C++ template library
 // for linear algebra.
 //
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
 //
 // This Source Code Form is subject to the terms of the Mozilla
 // Public License v. 2.0. If a copy of the MPL was not distributed
@@ -18,13 +18,13 @@ namespace internal {
   * Stores a sparse set of values as a list of values and a list of indices.
   *
   */
-template<typename _Scalar,typename _Index>
+template<typename _Scalar,typename _StorageIndex>
 class CompressedStorage
 {
   public:
 
     typedef _Scalar Scalar;
-    typedef _Index Index;
+    typedef _StorageIndex StorageIndex;
 
   protected:
 
@@ -36,7 +36,7 @@
       : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
     {}
 
-    CompressedStorage(size_t size)
+    explicit CompressedStorage(Index size)
       : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
     {
       resize(size);
@@ -51,8 +51,11 @@
     CompressedStorage& operator=(const CompressedStorage& other)
     {
       resize(other.size());
-      internal::smart_copy(other.m_values, other.m_values + m_size, m_values);
-      internal::smart_copy(other.m_indices, other.m_indices + m_size, m_indices);
+      if(other.size()>0)
+      {
+        internal::smart_copy(other.m_values, other.m_values + m_size, m_values);
+        internal::smart_copy(other.m_indices, other.m_indices + m_size, m_indices);
+      }
       return *this;
     }
 
@@ -70,9 +73,9 @@
       delete[] m_indices;
     }
 
-    void reserve(size_t size)
+    void reserve(Index size)
     {
-      size_t newAllocatedSize = m_size + size;
+      Index newAllocatedSize = m_size + size;
       if (newAllocatedSize > m_allocatedSize)
         reallocate(newAllocatedSize);
     }
@@ -83,44 +86,40 @@
       reallocate(m_size);
     }
 
-    void resize(size_t size, double reserveSizeFactor = 0)
+    void resize(Index size, double reserveSizeFactor = 0)
     {
       if (m_allocatedSize<size)
-        reallocate(size + size_t(reserveSizeFactor*double(size)));
+      {
+        Index realloc_size = (std::min<Index>)(NumTraits<StorageIndex>::highest(),  size + Index(reserveSizeFactor*double(size)));
+        if(realloc_size<size)
+          internal::throw_std_bad_alloc();
+        reallocate(realloc_size);
+      }
       m_size = size;
     }
 
     void append(const Scalar& v, Index i)
     {
-      Index id = static_cast<Index>(m_size);
+      Index id = m_size;
       resize(m_size+1, 1);
       m_values[id] = v;
-      m_indices[id] = i;
+      m_indices[id] = internal::convert_index<StorageIndex>(i);
     }
 
-    inline size_t size() const { return m_size; }
-    inline size_t allocatedSize() const { return m_allocatedSize; }
+    inline Index size() const { return m_size; }
+    inline Index allocatedSize() const { return m_allocatedSize; }
     inline void clear() { m_size = 0; }
 
     const Scalar* valuePtr() const { return m_values; }
     Scalar* valuePtr() { return m_values; }
-    const Index* indexPtr() const { return m_indices; }
-    Index* indexPtr() { return m_indices; }
-
-    inline Scalar& value(size_t i) { return m_values[i]; }
-    inline const Scalar& value(size_t i) const { return m_values[i]; }
+    const StorageIndex* indexPtr() const { return m_indices; }
+    StorageIndex* indexPtr() { return m_indices; }
 
-    inline Index& index(size_t i) { return m_indices[i]; }
-    inline const Index& index(size_t i) const { return m_indices[i]; }
+    inline Scalar& value(Index i) { eigen_internal_assert(m_values!=0); return m_values[i]; }
+    inline const Scalar& value(Index i) const { eigen_internal_assert(m_values!=0); return m_values[i]; }
 
-    static CompressedStorage Map(Index* indices, Scalar* values, size_t size)
-    {
-      CompressedStorage res;
-      res.m_indices = indices;
-      res.m_values = values;
-      res.m_allocatedSize = res.m_size = size;
-      return res;
-    }
+    inline StorageIndex& index(Index i) { eigen_internal_assert(m_indices!=0); return m_indices[i]; }
+    inline const StorageIndex& index(Index i) const { eigen_internal_assert(m_indices!=0); return m_indices[i]; }
 
     /** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */
     inline Index searchLowerIndex(Index key) const
@@ -129,17 +128,17 @@
     }
 
     /** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */
-    inline Index searchLowerIndex(size_t start, size_t end, Index key) const
+    inline Index searchLowerIndex(Index start, Index end, Index key) const
     {
       while(end>start)
       {
-        size_t mid = (end+start)>>1;
+        Index mid = (end+start)>>1;
         if (m_indices[mid]<key)
           start = mid+1;
         else
          end = mid;
       }
-      return static_cast<Index>(start);
+      return start;
     }
 
     /** \returns the stored value at index \a key
@@ -152,20 +151,20 @@
         return m_values[m_size-1];
       // ^^ optimization: let's first check if it is the last coefficient
       // (very common in high level algorithms)
-      const size_t id = searchLowerIndex(0,m_size-1,key);
+      const Index id = searchLowerIndex(0,m_size-1,key);
       return ((id<m_size) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
     }
 
     /** Like at(), but the search is performed in the range [start,end) */
-    inline Scalar atInRange(size_t start, size_t end, Index key, const Scalar& defaultValue = Scalar(0)) const
+    inline Scalar atInRange(Index start, Index end, Index key, const Scalar &defaultValue = Scalar(0)) const
     {
       if (start>=end)
-        return Scalar(0);
+        return defaultValue;
       else if (end>start && key==m_indices[end-1])
         return m_values[end-1];
       // ^^ optimization: let's first check if it is the last coefficient
       // (very common in high level algorithms)
-      const size_t id = searchLowerIndex(start,end-1,key);
+      const Index id = searchLowerIndex(start,end-1,key);
       return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
     }
@@ -174,16 +173,35 @@ * such that the keys are sorted.
       */
     inline Scalar& atWithInsertion(Index key, const Scalar& defaultValue = Scalar(0))
     {
-      size_t id = searchLowerIndex(0,m_size,key);
+      Index id = searchLowerIndex(0,m_size,key);
       if (id>=m_size || m_indices[id]!=key)
       {
-        resize(m_size+1,1);
-        for (size_t j=m_size-1; j>id; --j)
+        if (m_allocatedSize<m_size+1)
+        {
+          m_allocatedSize = 2*(m_size+1);
+          internal::scoped_array<Scalar> newValues(m_allocatedSize);
+          internal::scoped_array<StorageIndex> newIndices(m_allocatedSize);
+
+          // copy first chunk
+          internal::smart_copy(m_values,  m_values +id, newValues.ptr());
+          internal::smart_copy(m_indices, m_indices+id, newIndices.ptr());
+
+          // copy the rest
+          if(m_size>id)
+          {
+            internal::smart_copy(m_values +id,  m_values +m_size, newValues.ptr() +id+1);
+            internal::smart_copy(m_indices+id,  m_indices+m_size, newIndices.ptr()+id+1);
+          }
+          std::swap(m_values,newValues.ptr());
+          std::swap(m_indices,newIndices.ptr());
+        }
+        else if(m_size>id)
         {
-          m_indices[j] = m_indices[j-1];
-          m_values[j] = m_values[j-1];
+          internal::smart_memmove(m_values +id,  m_values +m_size, m_values +id+1);
+          internal::smart_memmove(m_indices+id, m_indices+m_size, m_indices+id+1);
         }
-        m_indices[id] = key;
+        m_size++;
+        m_indices[id] = internal::convert_index<StorageIndex>(key);
         m_values[id] = defaultValue;
       }
       return m_values[id];
@@ -191,9 +209,9 @@ }
 
     void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
     {
-      size_t k = 0;
-      size_t n = size();
-      for (size_t i=0; i<n; ++i)
+      Index k = 0;
+      Index n = size();
+      for (Index i=0; i<n; ++i)
       {
         if (!internal::isMuchSmallerThan(value(i), reference, epsilon))
         {
@@ -207,29 +225,29 @@ protected:
 
-    inline void reallocate(size_t size)
+    inline void reallocate(Index size)
     {
-      Scalar* newValues = new Scalar[size];
-      Index* newIndices = new Index[size];
-      size_t copySize = (std::min)(size, m_size);
-      // copy
+      #ifdef EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN
+        EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN
+      #endif
+      eigen_internal_assert(size!=m_allocatedSize);
+      internal::scoped_array<Scalar> newValues(size);
+      internal::scoped_array<StorageIndex> newIndices(size);
+      Index copySize = (std::min)(size, m_size);
       if (copySize>0)
       {
-        internal::smart_copy(m_values, m_values+copySize, newValues);
-        internal::smart_copy(m_indices, m_indices+copySize, newIndices);
+        internal::smart_copy(m_values, m_values+copySize, newValues.ptr());
+        internal::smart_copy(m_indices, m_indices+copySize, newIndices.ptr());
       }
-      // delete old stuff
-      delete[] m_values;
-      delete[] m_indices;
-      m_values = newValues;
-      m_indices = newIndices;
+      std::swap(m_values,newValues.ptr());
+      std::swap(m_indices,newIndices.ptr());
       m_allocatedSize = size;
     }
 
   protected:
     Scalar* m_values;
-    Index* m_indices;
-    size_t m_size;
-    size_t m_allocatedSize;
+    StorageIndex* m_indices;
+    Index m_size;
+    Index m_allocatedSize;
 
 };
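Read as a whole, the patch does three things: it replaces the unsigned size_t/_Index pair with Eigen's signed Index for sizes and arithmetic plus a dedicated StorageIndex for the stored indices, it clamps capacity growth so the container can never outgrow what StorageIndex can address, and it makes reallocation exception-safe by building the new buffers before releasing the old ones. Below is a minimal standalone sketch of the last two patterns under plain C++; the type aliases and the grow_capacity/reallocate helpers are illustrative stand-ins, not Eigen API, and manual try/catch stands in for Eigen's internal::scoped_array.

#include <algorithm>   // std::min, std::copy, std::swap
#include <cstddef>     // std::ptrdiff_t
#include <limits>      // std::numeric_limits
#include <new>         // std::bad_alloc

// Hypothetical stand-ins: Index is the wide signed type used for sizes,
// StorageIndex the (possibly narrower) type stored per entry.
typedef std::ptrdiff_t Index;
typedef int            StorageIndex;

// Clamped growth in the style of the new resize(): request size*(1+factor),
// saturate at the largest value StorageIndex can represent, and fail loudly
// if even the saturated capacity falls short of the request.
Index grow_capacity(Index size, double reserveSizeFactor)
{
  const Index maxSize = std::numeric_limits<StorageIndex>::max();
  Index realloc_size =
      std::min<Index>(maxSize, size + Index(reserveSizeFactor * double(size)));
  if (realloc_size < size)   // the clamp fell below the request
    throw std::bad_alloc();
  return realloc_size;
}

// Swap-based reallocation in the style of the new reallocate(): allocate and
// fill the new arrays first, then swap them in; if an allocation throws, the
// original buffers are still intact and owned by the caller.
void reallocate(double*& values, StorageIndex*& indices,
                Index& allocatedSize, Index usedSize, Index newSize)
{
  double* newValues = new double[newSize];
  StorageIndex* newIndices = 0;
  try { newIndices = new StorageIndex[newSize]; }
  catch (...) { delete[] newValues; throw; }  // scoped_array handles this in Eigen

  const Index copySize = std::min(newSize, usedSize);
  if (copySize > 0)
  {
    std::copy(values,  values  + copySize, newValues);
    std::copy(indices, indices + copySize, newIndices);
  }
  std::swap(values,  newValues);   // commit: members now own the new buffers
  std::swap(indices, newIndices);
  delete[] newValues;              // these now point at the old buffers
  delete[] newIndices;
  allocatedSize = newSize;
}

The same build-copy-swap shape appears in the patched atWithInsertion(), which doubles the allocation and copies the two halves around the insertion point (or uses smart_memmove when capacity suffices) instead of shifting elements one by one.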