author | Stanislaw Halik <sthalik@misaki.pl> | 2019-03-03 21:09:10 +0100 |
---|---|---|
committer | Stanislaw Halik <sthalik@misaki.pl> | 2019-03-03 21:10:13 +0100 |
commit | f0238cfb6997c4acfc2bd200de7295f3fa36968f (patch) | |
tree | b215183760e4f615b9c1dabc1f116383b72a1b55 /eigen/Eigen/src/IterativeLinearSolvers | |
parent | 543edd372a5193d04b3de9f23c176ab439e51b31 (diff) | |
don't index Eigen
Diffstat (limited to 'eigen/Eigen/src/IterativeLinearSolvers')
8 files changed, 0 insertions, 2287 deletions
diff --git a/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h b/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h deleted file mode 100644 index f66c846..0000000 --- a/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +++ /dev/null @@ -1,226 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr> -// -// This Source Code Form is subject to the terms of the Mozilla -// Public License v. 2.0. If a copy of the MPL was not distributed -// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. - -#ifndef EIGEN_BASIC_PRECONDITIONERS_H -#define EIGEN_BASIC_PRECONDITIONERS_H - -namespace Eigen { - -/** \ingroup IterativeLinearSolvers_Module - * \brief A preconditioner based on the digonal entries - * - * This class allows to approximately solve for A.x = b problems assuming A is a diagonal matrix. - * In other words, this preconditioner neglects all off diagonal entries and, in Eigen's language, solves for: - \code - A.diagonal().asDiagonal() . x = b - \endcode - * - * \tparam _Scalar the type of the scalar. - * - * \implsparsesolverconcept - * - * This preconditioner is suitable for both selfadjoint and general problems. - * The diagonal entries are pre-inverted and stored into a dense vector. - * - * \note A variant that has yet to be implemented would attempt to preserve the norm of each column. - * - * \sa class LeastSquareDiagonalPreconditioner, class ConjugateGradient - */ -template <typename _Scalar> -class DiagonalPreconditioner -{ - typedef _Scalar Scalar; - typedef Matrix<Scalar,Dynamic,1> Vector; - public: - typedef typename Vector::StorageIndex StorageIndex; - enum { - ColsAtCompileTime = Dynamic, - MaxColsAtCompileTime = Dynamic - }; - - DiagonalPreconditioner() : m_isInitialized(false) {} - - template<typename MatType> - explicit DiagonalPreconditioner(const MatType& mat) : m_invdiag(mat.cols()) - { - compute(mat); - } - - Index rows() const { return m_invdiag.size(); } - Index cols() const { return m_invdiag.size(); } - - template<typename MatType> - DiagonalPreconditioner& analyzePattern(const MatType& ) - { - return *this; - } - - template<typename MatType> - DiagonalPreconditioner& factorize(const MatType& mat) - { - m_invdiag.resize(mat.cols()); - for(int j=0; j<mat.outerSize(); ++j) - { - typename MatType::InnerIterator it(mat,j); - while(it && it.index()!=j) ++it; - if(it && it.index()==j && it.value()!=Scalar(0)) - m_invdiag(j) = Scalar(1)/it.value(); - else - m_invdiag(j) = Scalar(1); - } - m_isInitialized = true; - return *this; - } - - template<typename MatType> - DiagonalPreconditioner& compute(const MatType& mat) - { - return factorize(mat); - } - - /** \internal */ - template<typename Rhs, typename Dest> - void _solve_impl(const Rhs& b, Dest& x) const - { - x = m_invdiag.array() * b.array() ; - } - - template<typename Rhs> inline const Solve<DiagonalPreconditioner, Rhs> - solve(const MatrixBase<Rhs>& b) const - { - eigen_assert(m_isInitialized && "DiagonalPreconditioner is not initialized."); - eigen_assert(m_invdiag.size()==b.rows() - && "DiagonalPreconditioner::solve(): invalid number of rows of the right hand side matrix b"); - return Solve<DiagonalPreconditioner, Rhs>(*this, b.derived()); - } - - ComputationInfo info() { return Success; } - - protected: - Vector m_invdiag; - bool m_isInitialized; -}; - -/** \ingroup IterativeLinearSolvers_Module - * \brief Jacobi preconditioner for 
LeastSquaresConjugateGradient - * - * This class allows to approximately solve for A' A x = A' b problems assuming A' A is a diagonal matrix. - * In other words, this preconditioner neglects all off diagonal entries and, in Eigen's language, solves for: - \code - (A.adjoint() * A).diagonal().asDiagonal() * x = b - \endcode - * - * \tparam _Scalar the type of the scalar. - * - * \implsparsesolverconcept - * - * The diagonal entries are pre-inverted and stored into a dense vector. - * - * \sa class LeastSquaresConjugateGradient, class DiagonalPreconditioner - */ -template <typename _Scalar> -class LeastSquareDiagonalPreconditioner : public DiagonalPreconditioner<_Scalar> -{ - typedef _Scalar Scalar; - typedef typename NumTraits<Scalar>::Real RealScalar; - typedef DiagonalPreconditioner<_Scalar> Base; - using Base::m_invdiag; - public: - - LeastSquareDiagonalPreconditioner() : Base() {} - - template<typename MatType> - explicit LeastSquareDiagonalPreconditioner(const MatType& mat) : Base() - { - compute(mat); - } - - template<typename MatType> - LeastSquareDiagonalPreconditioner& analyzePattern(const MatType& ) - { - return *this; - } - - template<typename MatType> - LeastSquareDiagonalPreconditioner& factorize(const MatType& mat) - { - // Compute the inverse squared-norm of each column of mat - m_invdiag.resize(mat.cols()); - if(MatType::IsRowMajor) - { - m_invdiag.setZero(); - for(Index j=0; j<mat.outerSize(); ++j) - { - for(typename MatType::InnerIterator it(mat,j); it; ++it) - m_invdiag(it.index()) += numext::abs2(it.value()); - } - for(Index j=0; j<mat.cols(); ++j) - if(numext::real(m_invdiag(j))>RealScalar(0)) - m_invdiag(j) = RealScalar(1)/numext::real(m_invdiag(j)); - } - else - { - for(Index j=0; j<mat.outerSize(); ++j) - { - RealScalar sum = mat.col(j).squaredNorm(); - if(sum>RealScalar(0)) - m_invdiag(j) = RealScalar(1)/sum; - else - m_invdiag(j) = RealScalar(1); - } - } - Base::m_isInitialized = true; - return *this; - } - - template<typename MatType> - LeastSquareDiagonalPreconditioner& compute(const MatType& mat) - { - return factorize(mat); - } - - ComputationInfo info() { return Success; } - - protected: -}; - -/** \ingroup IterativeLinearSolvers_Module - * \brief A naive preconditioner which approximates any matrix as the identity matrix - * - * \implsparsesolverconcept - * - * \sa class DiagonalPreconditioner - */ -class IdentityPreconditioner -{ - public: - - IdentityPreconditioner() {} - - template<typename MatrixType> - explicit IdentityPreconditioner(const MatrixType& ) {} - - template<typename MatrixType> - IdentityPreconditioner& analyzePattern(const MatrixType& ) { return *this; } - - template<typename MatrixType> - IdentityPreconditioner& factorize(const MatrixType& ) { return *this; } - - template<typename MatrixType> - IdentityPreconditioner& compute(const MatrixType& ) { return *this; } - - template<typename Rhs> - inline const Rhs& solve(const Rhs& b) const { return b; } - - ComputationInfo info() { return Success; } -}; - -} // end namespace Eigen - -#endif // EIGEN_BASIC_PRECONDITIONERS_H diff --git a/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h b/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h deleted file mode 100644 index 454f468..0000000 --- a/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +++ /dev/null @@ -1,228 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. 
-// -// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr> -// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr> -// -// This Source Code Form is subject to the terms of the Mozilla -// Public License v. 2.0. If a copy of the MPL was not distributed -// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. - -#ifndef EIGEN_BICGSTAB_H -#define EIGEN_BICGSTAB_H - -namespace Eigen { - -namespace internal { - -/** \internal Low-level bi conjugate gradient stabilized algorithm - * \param mat The matrix A - * \param rhs The right hand side vector b - * \param x On input and initial solution, on output the computed solution. - * \param precond A preconditioner being able to efficiently solve for an - * approximation of Ax=b (regardless of b) - * \param iters On input the max number of iteration, on output the number of performed iterations. - * \param tol_error On input the tolerance error, on output an estimation of the relative error. - * \return false in the case of numerical issue, for example a break down of BiCGSTAB. - */ -template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner> -bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x, - const Preconditioner& precond, Index& iters, - typename Dest::RealScalar& tol_error) -{ - using std::sqrt; - using std::abs; - typedef typename Dest::RealScalar RealScalar; - typedef typename Dest::Scalar Scalar; - typedef Matrix<Scalar,Dynamic,1> VectorType; - RealScalar tol = tol_error; - Index maxIters = iters; - - Index n = mat.cols(); - VectorType r = rhs - mat * x; - VectorType r0 = r; - - RealScalar r0_sqnorm = r0.squaredNorm(); - RealScalar rhs_sqnorm = rhs.squaredNorm(); - if(rhs_sqnorm == 0) - { - x.setZero(); - return true; - } - Scalar rho = 1; - Scalar alpha = 1; - Scalar w = 1; - - VectorType v = VectorType::Zero(n), p = VectorType::Zero(n); - VectorType y(n), z(n); - VectorType kt(n), ks(n); - - VectorType s(n), t(n); - - RealScalar tol2 = tol*tol*rhs_sqnorm; - RealScalar eps2 = NumTraits<Scalar>::epsilon()*NumTraits<Scalar>::epsilon(); - Index i = 0; - Index restarts = 0; - - while ( r.squaredNorm() > tol2 && i<maxIters ) - { - Scalar rho_old = rho; - - rho = r0.dot(r); - if (abs(rho) < eps2*r0_sqnorm) - { - // The new residual vector became too orthogonal to the arbitrarily chosen direction r0 - // Let's restart with a new r0: - r = rhs - mat * x; - r0 = r; - rho = r0_sqnorm = r.squaredNorm(); - if(restarts++ == 0) - i = 0; - } - Scalar beta = (rho/rho_old) * (alpha / w); - p = r + beta * (p - w * v); - - y = precond.solve(p); - - v.noalias() = mat * y; - - alpha = rho / r0.dot(v); - s = r - alpha * v; - - z = precond.solve(s); - t.noalias() = mat * z; - - RealScalar tmp = t.squaredNorm(); - if(tmp>RealScalar(0)) - w = t.dot(s) / tmp; - else - w = Scalar(0); - x += alpha * y + w * z; - r = s - w * t; - ++i; - } - tol_error = sqrt(r.squaredNorm()/rhs_sqnorm); - iters = i; - return true; -} - -} - -template< typename _MatrixType, - typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> > -class BiCGSTAB; - -namespace internal { - -template< typename _MatrixType, typename _Preconditioner> -struct traits<BiCGSTAB<_MatrixType,_Preconditioner> > -{ - typedef _MatrixType MatrixType; - typedef _Preconditioner Preconditioner; -}; - -} - -/** \ingroup IterativeLinearSolvers_Module - * \brief A bi conjugate gradient stabilized solver for sparse square problems - * - * This class allows to solve for A.x = b sparse linear problems using a bi 
conjugate gradient - * stabilized algorithm. The vectors x and b can be either dense or sparse. - * - * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix. - * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner - * - * \implsparsesolverconcept - * - * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() - * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations - * and NumTraits<Scalar>::epsilon() for the tolerance. - * - * The tolerance corresponds to the relative residual error: |Ax-b|/|b| - * - * \b Performance: when using sparse matrices, best performance is achied for a row-major sparse matrix format. - * Moreover, in this case multi-threading can be exploited if the user code is compiled with OpenMP enabled. - * See \ref TopicMultiThreading for details. - * - * This class can be used as the direct solver classes. Here is a typical usage example: - * \include BiCGSTAB_simple.cpp - * - * By default the iterations start with x=0 as an initial guess of the solution. - * One can control the start using the solveWithGuess() method. - * - * BiCGSTAB can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink. - * - * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner - */ -template< typename _MatrixType, typename _Preconditioner> -class BiCGSTAB : public IterativeSolverBase<BiCGSTAB<_MatrixType,_Preconditioner> > -{ - typedef IterativeSolverBase<BiCGSTAB> Base; - using Base::matrix; - using Base::m_error; - using Base::m_iterations; - using Base::m_info; - using Base::m_isInitialized; -public: - typedef _MatrixType MatrixType; - typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::RealScalar RealScalar; - typedef _Preconditioner Preconditioner; - -public: - - /** Default constructor. */ - BiCGSTAB() : Base() {} - - /** Initialize the solver with matrix \a A for further \c Ax=b solving. - * - * This constructor is a shortcut for the default constructor followed - * by a call to compute(). - * - * \warning this class stores a reference to the matrix A as well as some - * precomputed values that depend on it. Therefore, if \a A is changed - * this class becomes invalid. Call compute() to update it with the new - * matrix A, or modify a copy of A. - */ - template<typename MatrixDerived> - explicit BiCGSTAB(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {} - - ~BiCGSTAB() {} - - /** \internal */ - template<typename Rhs,typename Dest> - void _solve_with_guess_impl(const Rhs& b, Dest& x) const - { - bool failed = false; - for(Index j=0; j<b.cols(); ++j) - { - m_iterations = Base::maxIterations(); - m_error = Base::m_tolerance; - - typename Dest::ColXpr xj(x,j); - if(!internal::bicgstab(matrix(), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error)) - failed = true; - } - m_info = failed ? NumericalIssue - : m_error <= Base::m_tolerance ? 
Success - : NoConvergence; - m_isInitialized = true; - } - - /** \internal */ - using Base::_solve_impl; - template<typename Rhs,typename Dest> - void _solve_impl(const MatrixBase<Rhs>& b, Dest& x) const - { - x.resize(this->rows(),b.cols()); - x.setZero(); - _solve_with_guess_impl(b,x); - } - -protected: - -}; - -} // end namespace Eigen - -#endif // EIGEN_BICGSTAB_H diff --git a/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h b/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h deleted file mode 100644 index f7ce471..0000000 --- a/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +++ /dev/null @@ -1,246 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr> -// -// This Source Code Form is subject to the terms of the Mozilla -// Public License v. 2.0. If a copy of the MPL was not distributed -// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. - -#ifndef EIGEN_CONJUGATE_GRADIENT_H -#define EIGEN_CONJUGATE_GRADIENT_H - -namespace Eigen { - -namespace internal { - -/** \internal Low-level conjugate gradient algorithm - * \param mat The matrix A - * \param rhs The right hand side vector b - * \param x On input and initial solution, on output the computed solution. - * \param precond A preconditioner being able to efficiently solve for an - * approximation of Ax=b (regardless of b) - * \param iters On input the max number of iteration, on output the number of performed iterations. - * \param tol_error On input the tolerance error, on output an estimation of the relative error. - */ -template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner> -EIGEN_DONT_INLINE -void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x, - const Preconditioner& precond, Index& iters, - typename Dest::RealScalar& tol_error) -{ - using std::sqrt; - using std::abs; - typedef typename Dest::RealScalar RealScalar; - typedef typename Dest::Scalar Scalar; - typedef Matrix<Scalar,Dynamic,1> VectorType; - - RealScalar tol = tol_error; - Index maxIters = iters; - - Index n = mat.cols(); - - VectorType residual = rhs - mat * x; //initial residual - - RealScalar rhsNorm2 = rhs.squaredNorm(); - if(rhsNorm2 == 0) - { - x.setZero(); - iters = 0; - tol_error = 0; - return; - } - const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)(); - RealScalar threshold = numext::maxi(tol*tol*rhsNorm2,considerAsZero); - RealScalar residualNorm2 = residual.squaredNorm(); - if (residualNorm2 < threshold) - { - iters = 0; - tol_error = sqrt(residualNorm2 / rhsNorm2); - return; - } - - VectorType p(n); - p = precond.solve(residual); // initial search direction - - VectorType z(n), tmp(n); - RealScalar absNew = numext::real(residual.dot(p)); // the square of the absolute value of r scaled by invM - Index i = 0; - while(i < maxIters) - { - tmp.noalias() = mat * p; // the bottleneck of the algorithm - - Scalar alpha = absNew / p.dot(tmp); // the amount we travel on dir - x += alpha * p; // update solution - residual -= alpha * tmp; // update residual - - residualNorm2 = residual.squaredNorm(); - if(residualNorm2 < threshold) - break; - - z = precond.solve(residual); // approximately solve for "A z = residual" - - RealScalar absOld = absNew; - absNew = numext::real(residual.dot(z)); // update the absolute value of r - RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction - 
p = z + beta * p; // update search direction - i++; - } - tol_error = sqrt(residualNorm2 / rhsNorm2); - iters = i; -} - -} - -template< typename _MatrixType, int _UpLo=Lower, - typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> > -class ConjugateGradient; - -namespace internal { - -template< typename _MatrixType, int _UpLo, typename _Preconditioner> -struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> > -{ - typedef _MatrixType MatrixType; - typedef _Preconditioner Preconditioner; -}; - -} - -/** \ingroup IterativeLinearSolvers_Module - * \brief A conjugate gradient solver for sparse (or dense) self-adjoint problems - * - * This class allows to solve for A.x = b linear problems using an iterative conjugate gradient algorithm. - * The matrix A must be selfadjoint. The matrix A and the vectors x and b can be either dense or sparse. - * - * \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix. - * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower, - * \c Upper, or \c Lower|Upper in which the full matrix entries will be considered. - * Default is \c Lower, best performance is \c Lower|Upper. - * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner - * - * \implsparsesolverconcept - * - * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() - * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations - * and NumTraits<Scalar>::epsilon() for the tolerance. - * - * The tolerance corresponds to the relative residual error: |Ax-b|/|b| - * - * \b Performance: Even though the default value of \c _UpLo is \c Lower, significantly higher performance is - * achieved when using a complete matrix and \b Lower|Upper as the \a _UpLo template parameter. Moreover, in this - * case multi-threading can be exploited if the user code is compiled with OpenMP enabled. - * See \ref TopicMultiThreading for details. - * - * This class can be used as the direct solver classes. Here is a typical usage example: - \code - int n = 10000; - VectorXd x(n), b(n); - SparseMatrix<double> A(n,n); - // fill A and b - ConjugateGradient<SparseMatrix<double>, Lower|Upper> cg; - cg.compute(A); - x = cg.solve(b); - std::cout << "#iterations: " << cg.iterations() << std::endl; - std::cout << "estimated error: " << cg.error() << std::endl; - // update b, and solve again - x = cg.solve(b); - \endcode - * - * By default the iterations start with x=0 as an initial guess of the solution. - * One can control the start using the solveWithGuess() method. - * - * ConjugateGradient can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink. 
- * - * \sa class LeastSquaresConjugateGradient, class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner - */ -template< typename _MatrixType, int _UpLo, typename _Preconditioner> -class ConjugateGradient : public IterativeSolverBase<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> > -{ - typedef IterativeSolverBase<ConjugateGradient> Base; - using Base::matrix; - using Base::m_error; - using Base::m_iterations; - using Base::m_info; - using Base::m_isInitialized; -public: - typedef _MatrixType MatrixType; - typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::RealScalar RealScalar; - typedef _Preconditioner Preconditioner; - - enum { - UpLo = _UpLo - }; - -public: - - /** Default constructor. */ - ConjugateGradient() : Base() {} - - /** Initialize the solver with matrix \a A for further \c Ax=b solving. - * - * This constructor is a shortcut for the default constructor followed - * by a call to compute(). - * - * \warning this class stores a reference to the matrix A as well as some - * precomputed values that depend on it. Therefore, if \a A is changed - * this class becomes invalid. Call compute() to update it with the new - * matrix A, or modify a copy of A. - */ - template<typename MatrixDerived> - explicit ConjugateGradient(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {} - - ~ConjugateGradient() {} - - /** \internal */ - template<typename Rhs,typename Dest> - void _solve_with_guess_impl(const Rhs& b, Dest& x) const - { - typedef typename Base::MatrixWrapper MatrixWrapper; - typedef typename Base::ActualMatrixType ActualMatrixType; - enum { - TransposeInput = (!MatrixWrapper::MatrixFree) - && (UpLo==(Lower|Upper)) - && (!MatrixType::IsRowMajor) - && (!NumTraits<Scalar>::IsComplex) - }; - typedef typename internal::conditional<TransposeInput,Transpose<const ActualMatrixType>, ActualMatrixType const&>::type RowMajorWrapper; - EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(MatrixWrapper::MatrixFree,UpLo==(Lower|Upper)),MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY); - typedef typename internal::conditional<UpLo==(Lower|Upper), - RowMajorWrapper, - typename MatrixWrapper::template ConstSelfAdjointViewReturnType<UpLo>::Type - >::type SelfAdjointWrapper; - m_iterations = Base::maxIterations(); - m_error = Base::m_tolerance; - - for(Index j=0; j<b.cols(); ++j) - { - m_iterations = Base::maxIterations(); - m_error = Base::m_tolerance; - - typename Dest::ColXpr xj(x,j); - RowMajorWrapper row_mat(matrix()); - internal::conjugate_gradient(SelfAdjointWrapper(row_mat), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error); - } - - m_isInitialized = true; - m_info = m_error <= Base::m_tolerance ? Success : NoConvergence; - } - - /** \internal */ - using Base::_solve_impl; - template<typename Rhs,typename Dest> - void _solve_impl(const MatrixBase<Rhs>& b, Dest& x) const - { - x.setZero(); - _solve_with_guess_impl(b.derived(),x); - } - -protected: - -}; - -} // end namespace Eigen - -#endif // EIGEN_CONJUGATE_GRADIENT_H diff --git a/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h b/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h deleted file mode 100644 index e45c272..0000000 --- a/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +++ /dev/null @@ -1,400 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. 
-// -// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr> -// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr> -// -// This Source Code Form is subject to the terms of the Mozilla -// Public License v. 2.0. If a copy of the MPL was not distributed -// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. - -#ifndef EIGEN_INCOMPLETE_CHOlESKY_H -#define EIGEN_INCOMPLETE_CHOlESKY_H - -#include <vector> -#include <list> - -namespace Eigen { -/** - * \brief Modified Incomplete Cholesky with dual threshold - * - * References : C-J. Lin and J. J. Moré, Incomplete Cholesky Factorizations with - * Limited memory, SIAM J. Sci. Comput. 21(1), pp. 24-45, 1999 - * - * \tparam Scalar the scalar type of the input matrices - * \tparam _UpLo The triangular part that will be used for the computations. It can be Lower - * or Upper. Default is Lower. - * \tparam _OrderingType The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering<int>, - * unless EIGEN_MPL2_ONLY is defined, in which case the default is NaturalOrdering<int>. - * - * \implsparsesolverconcept - * - * It performs the following incomplete factorization: \f$ S P A P' S \approx L L' \f$ - * where L is a lower triangular factor, S is a diagonal scaling matrix, and P is a - * fill-in reducing permutation as computed by the ordering method. - * - * \b Shifting \b strategy: Let \f$ B = S P A P' S \f$ be the scaled matrix on which the factorization is carried out, - * and \f$ \beta \f$ be the minimum value of the diagonal. If \f$ \beta > 0 \f$ then, the factorization is directly performed - * on the matrix B. Otherwise, the factorization is performed on the shifted matrix \f$ B + (\sigma+|\beta| I \f$ where - * \f$ \sigma \f$ is the initial shift value as returned and set by setInitialShift() method. The default value is \f$ \sigma = 10^{-3} \f$. - * If the factorization fails, then the shift in doubled until it succeed or a maximum of ten attempts. If it still fails, as returned by - * the info() method, then you can either increase the initial shift, or better use another preconditioning technique. - * - */ -template <typename Scalar, int _UpLo = Lower, typename _OrderingType = -#ifndef EIGEN_MPL2_ONLY -AMDOrdering<int> -#else -NaturalOrdering<int> -#endif -> -class IncompleteCholesky : public SparseSolverBase<IncompleteCholesky<Scalar,_UpLo,_OrderingType> > -{ - protected: - typedef SparseSolverBase<IncompleteCholesky<Scalar,_UpLo,_OrderingType> > Base; - using Base::m_isInitialized; - public: - typedef typename NumTraits<Scalar>::Real RealScalar; - typedef _OrderingType OrderingType; - typedef typename OrderingType::PermutationType PermutationType; - typedef typename PermutationType::StorageIndex StorageIndex; - typedef SparseMatrix<Scalar,ColMajor,StorageIndex> FactorType; - typedef Matrix<Scalar,Dynamic,1> VectorSx; - typedef Matrix<RealScalar,Dynamic,1> VectorRx; - typedef Matrix<StorageIndex,Dynamic, 1> VectorIx; - typedef std::vector<std::list<StorageIndex> > VectorList; - enum { UpLo = _UpLo }; - enum { - ColsAtCompileTime = Dynamic, - MaxColsAtCompileTime = Dynamic - }; - public: - - /** Default constructor leaving the object in a partly non-initialized stage. - * - * You must call compute() or the pair analyzePattern()/factorize() to make it valid. 
- * - * \sa IncompleteCholesky(const MatrixType&) - */ - IncompleteCholesky() : m_initialShift(1e-3),m_factorizationIsOk(false) {} - - /** Constructor computing the incomplete factorization for the given matrix \a matrix. - */ - template<typename MatrixType> - IncompleteCholesky(const MatrixType& matrix) : m_initialShift(1e-3),m_factorizationIsOk(false) - { - compute(matrix); - } - - /** \returns number of rows of the factored matrix */ - Index rows() const { return m_L.rows(); } - - /** \returns number of columns of the factored matrix */ - Index cols() const { return m_L.cols(); } - - - /** \brief Reports whether previous computation was successful. - * - * It triggers an assertion if \c *this has not been initialized through the respective constructor, - * or a call to compute() or analyzePattern(). - * - * \returns \c Success if computation was successful, - * \c NumericalIssue if the matrix appears to be negative. - */ - ComputationInfo info() const - { - eigen_assert(m_isInitialized && "IncompleteCholesky is not initialized."); - return m_info; - } - - /** \brief Set the initial shift parameter \f$ \sigma \f$. - */ - void setInitialShift(RealScalar shift) { m_initialShift = shift; } - - /** \brief Computes the fill reducing permutation vector using the sparsity pattern of \a mat - */ - template<typename MatrixType> - void analyzePattern(const MatrixType& mat) - { - OrderingType ord; - PermutationType pinv; - ord(mat.template selfadjointView<UpLo>(), pinv); - if(pinv.size()>0) m_perm = pinv.inverse(); - else m_perm.resize(0); - m_L.resize(mat.rows(), mat.cols()); - m_analysisIsOk = true; - m_isInitialized = true; - m_info = Success; - } - - /** \brief Performs the numerical factorization of the input matrix \a mat - * - * The method analyzePattern() or compute() must have been called beforehand - * with a matrix having the same pattern. - * - * \sa compute(), analyzePattern() - */ - template<typename MatrixType> - void factorize(const MatrixType& mat); - - /** Computes or re-computes the incomplete Cholesky factorization of the input matrix \a mat - * - * It is a shortcut for a sequential call to the analyzePattern() and factorize() methods. 
- * - * \sa analyzePattern(), factorize() - */ - template<typename MatrixType> - void compute(const MatrixType& mat) - { - analyzePattern(mat); - factorize(mat); - } - - // internal - template<typename Rhs, typename Dest> - void _solve_impl(const Rhs& b, Dest& x) const - { - eigen_assert(m_factorizationIsOk && "factorize() should be called first"); - if (m_perm.rows() == b.rows()) x = m_perm * b; - else x = b; - x = m_scale.asDiagonal() * x; - x = m_L.template triangularView<Lower>().solve(x); - x = m_L.adjoint().template triangularView<Upper>().solve(x); - x = m_scale.asDiagonal() * x; - if (m_perm.rows() == b.rows()) - x = m_perm.inverse() * x; - } - - /** \returns the sparse lower triangular factor L */ - const FactorType& matrixL() const { eigen_assert("m_factorizationIsOk"); return m_L; } - - /** \returns a vector representing the scaling factor S */ - const VectorRx& scalingS() const { eigen_assert("m_factorizationIsOk"); return m_scale; } - - /** \returns the fill-in reducing permutation P (can be empty for a natural ordering) */ - const PermutationType& permutationP() const { eigen_assert("m_analysisIsOk"); return m_perm; } - - protected: - FactorType m_L; // The lower part stored in CSC - VectorRx m_scale; // The vector for scaling the matrix - RealScalar m_initialShift; // The initial shift parameter - bool m_analysisIsOk; - bool m_factorizationIsOk; - ComputationInfo m_info; - PermutationType m_perm; - - private: - inline void updateList(Ref<const VectorIx> colPtr, Ref<VectorIx> rowIdx, Ref<VectorSx> vals, const Index& col, const Index& jk, VectorIx& firstElt, VectorList& listCol); -}; - -// Based on the following paper: -// C-J. Lin and J. J. Moré, Incomplete Cholesky Factorizations with -// Limited memory, SIAM J. Sci. Comput. 21(1), pp. 24-45, 1999 -// http://ftp.mcs.anl.gov/pub/tech_reports/reports/P682.pdf -template<typename Scalar, int _UpLo, typename OrderingType> -template<typename _MatrixType> -void IncompleteCholesky<Scalar,_UpLo, OrderingType>::factorize(const _MatrixType& mat) -{ - using std::sqrt; - eigen_assert(m_analysisIsOk && "analyzePattern() should be called first"); - - // Dropping strategy : Keep only the p largest elements per column, where p is the number of elements in the column of the original matrix. 
Other strategies will be added - - // Apply the fill-reducing permutation computed in analyzePattern() - if (m_perm.rows() == mat.rows() ) // To detect the null permutation - { - // The temporary is needed to make sure that the diagonal entry is properly sorted - FactorType tmp(mat.rows(), mat.cols()); - tmp = mat.template selfadjointView<_UpLo>().twistedBy(m_perm); - m_L.template selfadjointView<Lower>() = tmp.template selfadjointView<Lower>(); - } - else - { - m_L.template selfadjointView<Lower>() = mat.template selfadjointView<_UpLo>(); - } - - Index n = m_L.cols(); - Index nnz = m_L.nonZeros(); - Map<VectorSx> vals(m_L.valuePtr(), nnz); //values - Map<VectorIx> rowIdx(m_L.innerIndexPtr(), nnz); //Row indices - Map<VectorIx> colPtr( m_L.outerIndexPtr(), n+1); // Pointer to the beginning of each row - VectorIx firstElt(n-1); // for each j, points to the next entry in vals that will be used in the factorization - VectorList listCol(n); // listCol(j) is a linked list of columns to update column j - VectorSx col_vals(n); // Store a nonzero values in each column - VectorIx col_irow(n); // Row indices of nonzero elements in each column - VectorIx col_pattern(n); - col_pattern.fill(-1); - StorageIndex col_nnz; - - - // Computes the scaling factors - m_scale.resize(n); - m_scale.setZero(); - for (Index j = 0; j < n; j++) - for (Index k = colPtr[j]; k < colPtr[j+1]; k++) - { - m_scale(j) += numext::abs2(vals(k)); - if(rowIdx[k]!=j) - m_scale(rowIdx[k]) += numext::abs2(vals(k)); - } - - m_scale = m_scale.cwiseSqrt().cwiseSqrt(); - - for (Index j = 0; j < n; ++j) - if(m_scale(j)>(std::numeric_limits<RealScalar>::min)()) - m_scale(j) = RealScalar(1)/m_scale(j); - else - m_scale(j) = 1; - - // TODO disable scaling if not needed, i.e., if it is roughly uniform? 
(this will make solve() faster) - - // Scale and compute the shift for the matrix - RealScalar mindiag = NumTraits<RealScalar>::highest(); - for (Index j = 0; j < n; j++) - { - for (Index k = colPtr[j]; k < colPtr[j+1]; k++) - vals[k] *= (m_scale(j)*m_scale(rowIdx[k])); - eigen_internal_assert(rowIdx[colPtr[j]]==j && "IncompleteCholesky: only the lower triangular part must be stored"); - mindiag = numext::mini(numext::real(vals[colPtr[j]]), mindiag); - } - - FactorType L_save = m_L; - - RealScalar shift = 0; - if(mindiag <= RealScalar(0.)) - shift = m_initialShift - mindiag; - - m_info = NumericalIssue; - - // Try to perform the incomplete factorization using the current shift - int iter = 0; - do - { - // Apply the shift to the diagonal elements of the matrix - for (Index j = 0; j < n; j++) - vals[colPtr[j]] += shift; - - // jki version of the Cholesky factorization - Index j=0; - for (; j < n; ++j) - { - // Left-looking factorization of the j-th column - // First, load the j-th column into col_vals - Scalar diag = vals[colPtr[j]]; // It is assumed that only the lower part is stored - col_nnz = 0; - for (Index i = colPtr[j] + 1; i < colPtr[j+1]; i++) - { - StorageIndex l = rowIdx[i]; - col_vals(col_nnz) = vals[i]; - col_irow(col_nnz) = l; - col_pattern(l) = col_nnz; - col_nnz++; - } - { - typename std::list<StorageIndex>::iterator k; - // Browse all previous columns that will update column j - for(k = listCol[j].begin(); k != listCol[j].end(); k++) - { - Index jk = firstElt(*k); // First element to use in the column - eigen_internal_assert(rowIdx[jk]==j); - Scalar v_j_jk = numext::conj(vals[jk]); - - jk += 1; - for (Index i = jk; i < colPtr[*k+1]; i++) - { - StorageIndex l = rowIdx[i]; - if(col_pattern[l]<0) - { - col_vals(col_nnz) = vals[i] * v_j_jk; - col_irow[col_nnz] = l; - col_pattern(l) = col_nnz; - col_nnz++; - } - else - col_vals(col_pattern[l]) -= vals[i] * v_j_jk; - } - updateList(colPtr,rowIdx,vals, *k, jk, firstElt, listCol); - } - } - - // Scale the current column - if(numext::real(diag) <= 0) - { - if(++iter>=10) - return; - - // increase shift - shift = numext::maxi(m_initialShift,RealScalar(2)*shift); - // restore m_L, col_pattern, and listCol - vals = Map<const VectorSx>(L_save.valuePtr(), nnz); - rowIdx = Map<const VectorIx>(L_save.innerIndexPtr(), nnz); - colPtr = Map<const VectorIx>(L_save.outerIndexPtr(), n+1); - col_pattern.fill(-1); - for(Index i=0; i<n; ++i) - listCol[i].clear(); - - break; - } - - RealScalar rdiag = sqrt(numext::real(diag)); - vals[colPtr[j]] = rdiag; - for (Index k = 0; k<col_nnz; ++k) - { - Index i = col_irow[k]; - //Scale - col_vals(k) /= rdiag; - //Update the remaining diagonals with col_vals - vals[colPtr[i]] -= numext::abs2(col_vals(k)); - } - // Select the largest p elements - // p is the original number of elements in the column (without the diagonal) - Index p = colPtr[j+1] - colPtr[j] - 1 ; - Ref<VectorSx> cvals = col_vals.head(col_nnz); - Ref<VectorIx> cirow = col_irow.head(col_nnz); - internal::QuickSplit(cvals,cirow, p); - // Insert the largest p elements in the matrix - Index cpt = 0; - for (Index i = colPtr[j]+1; i < colPtr[j+1]; i++) - { - vals[i] = col_vals(cpt); - rowIdx[i] = col_irow(cpt); - // restore col_pattern: - col_pattern(col_irow(cpt)) = -1; - cpt++; - } - // Get the first smallest row index and put it after the diagonal element - Index jk = colPtr(j)+1; - updateList(colPtr,rowIdx,vals,j,jk,firstElt,listCol); - } - - if(j==n) - { - m_factorizationIsOk = true; - m_info = Success; - } - } while(m_info!=Success); -} - 
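For orientation, the incomplete Cholesky factorization defined above is normally consumed as a preconditioner rather than called directly. The following is a minimal, hypothetical sketch of that pattern (the matrix contents are placeholders, and the shift value simply echoes the default mentioned in the class documentation):

```cpp
#include <Eigen/Sparse>
#include <iostream>

// Minimal sketch (not part of the deleted sources above): use the
// IncompleteCholesky factorization as the preconditioner of ConjugateGradient.
// A real use case would fill A with a symmetric positive definite problem.
int main()
{
  const int n = 100;
  Eigen::SparseMatrix<double> A(n, n);
  A.setIdentity();                               // placeholder SPD matrix
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  Eigen::ConjugateGradient<Eigen::SparseMatrix<double>,
                           Eigen::Lower | Eigen::Upper,
                           Eigen::IncompleteCholesky<double> > cg;
  cg.preconditioner().setInitialShift(1e-3);     // sigma of the shifting strategy
  cg.compute(A);
  Eigen::VectorXd x = cg.solve(b);
  std::cout << "#iterations: " << cg.iterations()
            << "  estimated error: " << cg.error() << "\n";
}
```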
-template<typename Scalar, int _UpLo, typename OrderingType> -inline void IncompleteCholesky<Scalar,_UpLo, OrderingType>::updateList(Ref<const VectorIx> colPtr, Ref<VectorIx> rowIdx, Ref<VectorSx> vals, const Index& col, const Index& jk, VectorIx& firstElt, VectorList& listCol) -{ - if (jk < colPtr(col+1) ) - { - Index p = colPtr(col+1) - jk; - Index minpos; - rowIdx.segment(jk,p).minCoeff(&minpos); - minpos += jk; - if (rowIdx(minpos) != rowIdx(jk)) - { - //Swap - std::swap(rowIdx(jk),rowIdx(minpos)); - std::swap(vals(jk),vals(minpos)); - } - firstElt(col) = internal::convert_index<StorageIndex,Index>(jk); - listCol[rowIdx(jk)].push_back(internal::convert_index<StorageIndex,Index>(col)); - } -} - -} // end namespace Eigen - -#endif diff --git a/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h b/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h deleted file mode 100644 index 338e6f1..0000000 --- a/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +++ /dev/null @@ -1,462 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr> -// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr> -// -// This Source Code Form is subject to the terms of the Mozilla -// Public License v. 2.0. If a copy of the MPL was not distributed -// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. - -#ifndef EIGEN_INCOMPLETE_LUT_H -#define EIGEN_INCOMPLETE_LUT_H - - -namespace Eigen { - -namespace internal { - -/** \internal - * Compute a quick-sort split of a vector - * On output, the vector row is permuted such that its elements satisfy - * abs(row(i)) >= abs(row(ncut)) if i<ncut - * abs(row(i)) <= abs(row(ncut)) if i>ncut - * \param row The vector of values - * \param ind The array of index for the elements in @p row - * \param ncut The number of largest elements to keep - **/ -template <typename VectorV, typename VectorI> -Index QuickSplit(VectorV &row, VectorI &ind, Index ncut) -{ - typedef typename VectorV::RealScalar RealScalar; - using std::swap; - using std::abs; - Index mid; - Index n = row.size(); /* length of the vector */ - Index first, last ; - - ncut--; /* to fit the zero-based indices */ - first = 0; - last = n-1; - if (ncut < first || ncut > last ) return 0; - - do { - mid = first; - RealScalar abskey = abs(row(mid)); - for (Index j = first + 1; j <= last; j++) { - if ( abs(row(j)) > abskey) { - ++mid; - swap(row(mid), row(j)); - swap(ind(mid), ind(j)); - } - } - /* Interchange for the pivot element */ - swap(row(mid), row(first)); - swap(ind(mid), ind(first)); - - if (mid > ncut) last = mid - 1; - else if (mid < ncut ) first = mid + 1; - } while (mid != ncut ); - - return 0; /* mid is equal to ncut */ -} - -}// end namespace internal - -/** \ingroup IterativeLinearSolvers_Module - * \class IncompleteLUT - * \brief Incomplete LU factorization with dual-threshold strategy - * - * \implsparsesolverconcept - * - * During the numerical factorization, two dropping rules are used : - * 1) any element whose magnitude is less than some tolerance is dropped. - * This tolerance is obtained by multiplying the input tolerance @p droptol - * by the average magnitude of all the original elements in the current row. - * 2) After the elimination of the row, only the @p fill largest elements in - * the L part and the @p fill largest elements in the U part are kept - * (in addition to the diagonal element ). 
Note that @p fill is computed from - * the input parameter @p fillfactor which is used the ratio to control the fill_in - * relatively to the initial number of nonzero elements. - * - * The two extreme cases are when @p droptol=0 (to keep all the @p fill*2 largest elements) - * and when @p fill=n/2 with @p droptol being different to zero. - * - * References : Yousef Saad, ILUT: A dual threshold incomplete LU factorization, - * Numerical Linear Algebra with Applications, 1(4), pp 387-402, 1994. - * - * NOTE : The following implementation is derived from the ILUT implementation - * in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota - * released under the terms of the GNU LGPL: - * http://www-users.cs.umn.edu/~saad/software/SPARSKIT/README - * However, Yousef Saad gave us permission to relicense his ILUT code to MPL2. - * See the Eigen mailing list archive, thread: ILUT, date: July 8, 2012: - * http://listengine.tuxfamily.org/lists.tuxfamily.org/eigen/2012/07/msg00064.html - * alternatively, on GMANE: - * http://comments.gmane.org/gmane.comp.lib.eigen/3302 - */ -template <typename _Scalar, typename _StorageIndex = int> -class IncompleteLUT : public SparseSolverBase<IncompleteLUT<_Scalar, _StorageIndex> > -{ - protected: - typedef SparseSolverBase<IncompleteLUT> Base; - using Base::m_isInitialized; - public: - typedef _Scalar Scalar; - typedef _StorageIndex StorageIndex; - typedef typename NumTraits<Scalar>::Real RealScalar; - typedef Matrix<Scalar,Dynamic,1> Vector; - typedef Matrix<StorageIndex,Dynamic,1> VectorI; - typedef SparseMatrix<Scalar,RowMajor,StorageIndex> FactorType; - - enum { - ColsAtCompileTime = Dynamic, - MaxColsAtCompileTime = Dynamic - }; - - public: - - IncompleteLUT() - : m_droptol(NumTraits<Scalar>::dummy_precision()), m_fillfactor(10), - m_analysisIsOk(false), m_factorizationIsOk(false) - {} - - template<typename MatrixType> - explicit IncompleteLUT(const MatrixType& mat, const RealScalar& droptol=NumTraits<Scalar>::dummy_precision(), int fillfactor = 10) - : m_droptol(droptol),m_fillfactor(fillfactor), - m_analysisIsOk(false),m_factorizationIsOk(false) - { - eigen_assert(fillfactor != 0); - compute(mat); - } - - Index rows() const { return m_lu.rows(); } - - Index cols() const { return m_lu.cols(); } - - /** \brief Reports whether previous computation was successful. - * - * \returns \c Success if computation was succesful, - * \c NumericalIssue if the matrix.appears to be negative. 
- */ - ComputationInfo info() const - { - eigen_assert(m_isInitialized && "IncompleteLUT is not initialized."); - return m_info; - } - - template<typename MatrixType> - void analyzePattern(const MatrixType& amat); - - template<typename MatrixType> - void factorize(const MatrixType& amat); - - /** - * Compute an incomplete LU factorization with dual threshold on the matrix mat - * No pivoting is done in this version - * - **/ - template<typename MatrixType> - IncompleteLUT& compute(const MatrixType& amat) - { - analyzePattern(amat); - factorize(amat); - return *this; - } - - void setDroptol(const RealScalar& droptol); - void setFillfactor(int fillfactor); - - template<typename Rhs, typename Dest> - void _solve_impl(const Rhs& b, Dest& x) const - { - x = m_Pinv * b; - x = m_lu.template triangularView<UnitLower>().solve(x); - x = m_lu.template triangularView<Upper>().solve(x); - x = m_P * x; - } - -protected: - - /** keeps off-diagonal entries; drops diagonal entries */ - struct keep_diag { - inline bool operator() (const Index& row, const Index& col, const Scalar&) const - { - return row!=col; - } - }; - -protected: - - FactorType m_lu; - RealScalar m_droptol; - int m_fillfactor; - bool m_analysisIsOk; - bool m_factorizationIsOk; - ComputationInfo m_info; - PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_P; // Fill-reducing permutation - PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_Pinv; // Inverse permutation -}; - -/** - * Set control parameter droptol - * \param droptol Drop any element whose magnitude is less than this tolerance - **/ -template<typename Scalar, typename StorageIndex> -void IncompleteLUT<Scalar,StorageIndex>::setDroptol(const RealScalar& droptol) -{ - this->m_droptol = droptol; -} - -/** - * Set control parameter fillfactor - * \param fillfactor This is used to compute the number @p fill_in of largest elements to keep on each row. - **/ -template<typename Scalar, typename StorageIndex> -void IncompleteLUT<Scalar,StorageIndex>::setFillfactor(int fillfactor) -{ - this->m_fillfactor = fillfactor; -} - -template <typename Scalar, typename StorageIndex> -template<typename _MatrixType> -void IncompleteLUT<Scalar,StorageIndex>::analyzePattern(const _MatrixType& amat) -{ - // Compute the Fill-reducing permutation - // Since ILUT does not perform any numerical pivoting, - // it is highly preferable to keep the diagonal through symmetric permutations. -#ifndef EIGEN_MPL2_ONLY - // To this end, let's symmetrize the pattern and perform AMD on it. - SparseMatrix<Scalar,ColMajor, StorageIndex> mat1 = amat; - SparseMatrix<Scalar,ColMajor, StorageIndex> mat2 = amat.transpose(); - // FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice. - // on the other hand for a really non-symmetric pattern, mat2*mat1 should be prefered... - SparseMatrix<Scalar,ColMajor, StorageIndex> AtA = mat2 + mat1; - AMDOrdering<StorageIndex> ordering; - ordering(AtA,m_P); - m_Pinv = m_P.inverse(); // cache the inverse permutation -#else - // If AMD is not available, (MPL2-only), then let's use the slower COLAMD routine. 
- SparseMatrix<Scalar,ColMajor, StorageIndex> mat1 = amat; - COLAMDOrdering<StorageIndex> ordering; - ordering(mat1,m_Pinv); - m_P = m_Pinv.inverse(); -#endif - - m_analysisIsOk = true; - m_factorizationIsOk = false; - m_isInitialized = true; -} - -template <typename Scalar, typename StorageIndex> -template<typename _MatrixType> -void IncompleteLUT<Scalar,StorageIndex>::factorize(const _MatrixType& amat) -{ - using std::sqrt; - using std::swap; - using std::abs; - using internal::convert_index; - - eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix"); - Index n = amat.cols(); // Size of the matrix - m_lu.resize(n,n); - // Declare Working vectors and variables - Vector u(n) ; // real values of the row -- maximum size is n -- - VectorI ju(n); // column position of the values in u -- maximum size is n - VectorI jr(n); // Indicate the position of the nonzero elements in the vector u -- A zero location is indicated by -1 - - // Apply the fill-reducing permutation - eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); - SparseMatrix<Scalar,RowMajor, StorageIndex> mat; - mat = amat.twistedBy(m_Pinv); - - // Initialization - jr.fill(-1); - ju.fill(0); - u.fill(0); - - // number of largest elements to keep in each row: - Index fill_in = (amat.nonZeros()*m_fillfactor)/n + 1; - if (fill_in > n) fill_in = n; - - // number of largest nonzero elements to keep in the L and the U part of the current row: - Index nnzL = fill_in/2; - Index nnzU = nnzL; - m_lu.reserve(n * (nnzL + nnzU + 1)); - - // global loop over the rows of the sparse matrix - for (Index ii = 0; ii < n; ii++) - { - // 1 - copy the lower and the upper part of the row i of mat in the working vector u - - Index sizeu = 1; // number of nonzero elements in the upper part of the current row - Index sizel = 0; // number of nonzero elements in the lower part of the current row - ju(ii) = convert_index<StorageIndex>(ii); - u(ii) = 0; - jr(ii) = convert_index<StorageIndex>(ii); - RealScalar rownorm = 0; - - typename FactorType::InnerIterator j_it(mat, ii); // Iterate through the current row ii - for (; j_it; ++j_it) - { - Index k = j_it.index(); - if (k < ii) - { - // copy the lower part - ju(sizel) = convert_index<StorageIndex>(k); - u(sizel) = j_it.value(); - jr(k) = convert_index<StorageIndex>(sizel); - ++sizel; - } - else if (k == ii) - { - u(ii) = j_it.value(); - } - else - { - // copy the upper part - Index jpos = ii + sizeu; - ju(jpos) = convert_index<StorageIndex>(k); - u(jpos) = j_it.value(); - jr(k) = convert_index<StorageIndex>(jpos); - ++sizeu; - } - rownorm += numext::abs2(j_it.value()); - } - - // 2 - detect possible zero row - if(rownorm==0) - { - m_info = NumericalIssue; - return; - } - // Take the 2-norm of the current row as a relative tolerance - rownorm = sqrt(rownorm); - - // 3 - eliminate the previous nonzero rows - Index jj = 0; - Index len = 0; - while (jj < sizel) - { - // In order to eliminate in the correct order, - // we must select first the smallest column index among ju(jj:sizel) - Index k; - Index minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment - k += jj; - if (minrow != ju(jj)) - { - // swap the two locations - Index j = ju(jj); - swap(ju(jj), ju(k)); - jr(minrow) = convert_index<StorageIndex>(jj); - jr(j) = convert_index<StorageIndex>(k); - swap(u(jj), u(k)); - } - // Reset this location - jr(minrow) = -1; - - // Start elimination - typename FactorType::InnerIterator ki_it(m_lu, minrow); - while (ki_it && ki_it.index() 
< minrow) ++ki_it; - eigen_internal_assert(ki_it && ki_it.col()==minrow); - Scalar fact = u(jj) / ki_it.value(); - - // drop too small elements - if(abs(fact) <= m_droptol) - { - jj++; - continue; - } - - // linear combination of the current row ii and the row minrow - ++ki_it; - for (; ki_it; ++ki_it) - { - Scalar prod = fact * ki_it.value(); - Index j = ki_it.index(); - Index jpos = jr(j); - if (jpos == -1) // fill-in element - { - Index newpos; - if (j >= ii) // dealing with the upper part - { - newpos = ii + sizeu; - sizeu++; - eigen_internal_assert(sizeu<=n); - } - else // dealing with the lower part - { - newpos = sizel; - sizel++; - eigen_internal_assert(sizel<=ii); - } - ju(newpos) = convert_index<StorageIndex>(j); - u(newpos) = -prod; - jr(j) = convert_index<StorageIndex>(newpos); - } - else - u(jpos) -= prod; - } - // store the pivot element - u(len) = fact; - ju(len) = convert_index<StorageIndex>(minrow); - ++len; - - jj++; - } // end of the elimination on the row ii - - // reset the upper part of the pointer jr to zero - for(Index k = 0; k <sizeu; k++) jr(ju(ii+k)) = -1; - - // 4 - partially sort and insert the elements in the m_lu matrix - - // sort the L-part of the row - sizel = len; - len = (std::min)(sizel, nnzL); - typename Vector::SegmentReturnType ul(u.segment(0, sizel)); - typename VectorI::SegmentReturnType jul(ju.segment(0, sizel)); - internal::QuickSplit(ul, jul, len); - - // store the largest m_fill elements of the L part - m_lu.startVec(ii); - for(Index k = 0; k < len; k++) - m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k); - - // store the diagonal element - // apply a shifting rule to avoid zero pivots (we are doing an incomplete factorization) - if (u(ii) == Scalar(0)) - u(ii) = sqrt(m_droptol) * rownorm; - m_lu.insertBackByOuterInnerUnordered(ii, ii) = u(ii); - - // sort the U-part of the row - // apply the dropping rule first - len = 0; - for(Index k = 1; k < sizeu; k++) - { - if(abs(u(ii+k)) > m_droptol * rownorm ) - { - ++len; - u(ii + len) = u(ii + k); - ju(ii + len) = ju(ii + k); - } - } - sizeu = len + 1; // +1 to take into account the diagonal element - len = (std::min)(sizeu, nnzU); - typename Vector::SegmentReturnType uu(u.segment(ii+1, sizeu-1)); - typename VectorI::SegmentReturnType juu(ju.segment(ii+1, sizeu-1)); - internal::QuickSplit(uu, juu, len); - - // store the largest elements of the U part - for(Index k = ii + 1; k < ii + len; k++) - m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k); - } - m_lu.finalize(); - m_lu.makeCompressed(); - - m_factorizationIsOk = true; - m_info = Success; -} - -} // end namespace Eigen - -#endif // EIGEN_INCOMPLETE_LUT_H diff --git a/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h b/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h deleted file mode 100644 index 7c2326e..0000000 --- a/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +++ /dev/null @@ -1,394 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr> -// -// This Source Code Form is subject to the terms of the Mozilla -// Public License v. 2.0. If a copy of the MPL was not distributed -// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
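Similarly, a hedged sketch of how the dual-threshold ILUT factorization above is typically paired with BiCGSTAB for general square sparse systems; the droptol and fillfactor values, like the matrix contents, are illustrative only:

```cpp
#include <Eigen/Sparse>
#include <iostream>

// Minimal sketch (not part of the deleted sources above): IncompleteLUT as
// the preconditioner of BiCGSTAB. Parameter values and data are placeholders.
int main()
{
  const int n = 100;
  Eigen::SparseMatrix<double> A(n, n);
  A.setIdentity();                                // placeholder square matrix
  Eigen::VectorXd b = Eigen::VectorXd::Random(n);

  Eigen::BiCGSTAB<Eigen::SparseMatrix<double>,
                  Eigen::IncompleteLUT<double> > solver;
  solver.preconditioner().setDroptol(1e-4);       // dropping threshold
  solver.preconditioner().setFillfactor(7);       // fill-in ratio per row
  solver.compute(A);
  Eigen::VectorXd x = solver.solve(b);
  std::cout << "#iterations: " << solver.iterations()
            << "  estimated error: " << solver.error() << "\n";
}
```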
- -#ifndef EIGEN_ITERATIVE_SOLVER_BASE_H -#define EIGEN_ITERATIVE_SOLVER_BASE_H - -namespace Eigen { - -namespace internal { - -template<typename MatrixType> -struct is_ref_compatible_impl -{ -private: - template <typename T0> - struct any_conversion - { - template <typename T> any_conversion(const volatile T&); - template <typename T> any_conversion(T&); - }; - struct yes {int a[1];}; - struct no {int a[2];}; - - template<typename T> - static yes test(const Ref<const T>&, int); - template<typename T> - static no test(any_conversion<T>, ...); - -public: - static MatrixType ms_from; - enum { value = sizeof(test<MatrixType>(ms_from, 0))==sizeof(yes) }; -}; - -template<typename MatrixType> -struct is_ref_compatible -{ - enum { value = is_ref_compatible_impl<typename remove_all<MatrixType>::type>::value }; -}; - -template<typename MatrixType, bool MatrixFree = !internal::is_ref_compatible<MatrixType>::value> -class generic_matrix_wrapper; - -// We have an explicit matrix at hand, compatible with Ref<> -template<typename MatrixType> -class generic_matrix_wrapper<MatrixType,false> -{ -public: - typedef Ref<const MatrixType> ActualMatrixType; - template<int UpLo> struct ConstSelfAdjointViewReturnType { - typedef typename ActualMatrixType::template ConstSelfAdjointViewReturnType<UpLo>::Type Type; - }; - - enum { - MatrixFree = false - }; - - generic_matrix_wrapper() - : m_dummy(0,0), m_matrix(m_dummy) - {} - - template<typename InputType> - generic_matrix_wrapper(const InputType &mat) - : m_matrix(mat) - {} - - const ActualMatrixType& matrix() const - { - return m_matrix; - } - - template<typename MatrixDerived> - void grab(const EigenBase<MatrixDerived> &mat) - { - m_matrix.~Ref<const MatrixType>(); - ::new (&m_matrix) Ref<const MatrixType>(mat.derived()); - } - - void grab(const Ref<const MatrixType> &mat) - { - if(&(mat.derived()) != &m_matrix) - { - m_matrix.~Ref<const MatrixType>(); - ::new (&m_matrix) Ref<const MatrixType>(mat); - } - } - -protected: - MatrixType m_dummy; // used to default initialize the Ref<> object - ActualMatrixType m_matrix; -}; - -// MatrixType is not compatible with Ref<> -> matrix-free wrapper -template<typename MatrixType> -class generic_matrix_wrapper<MatrixType,true> -{ -public: - typedef MatrixType ActualMatrixType; - template<int UpLo> struct ConstSelfAdjointViewReturnType - { - typedef ActualMatrixType Type; - }; - - enum { - MatrixFree = true - }; - - generic_matrix_wrapper() - : mp_matrix(0) - {} - - generic_matrix_wrapper(const MatrixType &mat) - : mp_matrix(&mat) - {} - - const ActualMatrixType& matrix() const - { - return *mp_matrix; - } - - void grab(const MatrixType &mat) - { - mp_matrix = &mat; - } - -protected: - const ActualMatrixType *mp_matrix; -}; - -} - -/** \ingroup IterativeLinearSolvers_Module - * \brief Base class for linear iterative solvers - * - * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner - */ -template< typename Derived> -class IterativeSolverBase : public SparseSolverBase<Derived> -{ -protected: - typedef SparseSolverBase<Derived> Base; - using Base::m_isInitialized; - -public: - typedef typename internal::traits<Derived>::MatrixType MatrixType; - typedef typename internal::traits<Derived>::Preconditioner Preconditioner; - typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::StorageIndex StorageIndex; - typedef typename MatrixType::RealScalar RealScalar; - - enum { - ColsAtCompileTime = MatrixType::ColsAtCompileTime, - MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime - }; - 
-public: - - using Base::derived; - - /** Default constructor. */ - IterativeSolverBase() - { - init(); - } - - /** Initialize the solver with matrix \a A for further \c Ax=b solving. - * - * This constructor is a shortcut for the default constructor followed - * by a call to compute(). - * - * \warning this class stores a reference to the matrix A as well as some - * precomputed values that depend on it. Therefore, if \a A is changed - * this class becomes invalid. Call compute() to update it with the new - * matrix A, or modify a copy of A. - */ - template<typename MatrixDerived> - explicit IterativeSolverBase(const EigenBase<MatrixDerived>& A) - : m_matrixWrapper(A.derived()) - { - init(); - compute(matrix()); - } - - ~IterativeSolverBase() {} - - /** Initializes the iterative solver for the sparsity pattern of the matrix \a A for further solving \c Ax=b problems. - * - * Currently, this function mostly calls analyzePattern on the preconditioner. In the future - * we might, for instance, implement column reordering for faster matrix vector products. - */ - template<typename MatrixDerived> - Derived& analyzePattern(const EigenBase<MatrixDerived>& A) - { - grab(A.derived()); - m_preconditioner.analyzePattern(matrix()); - m_isInitialized = true; - m_analysisIsOk = true; - m_info = m_preconditioner.info(); - return derived(); - } - - /** Initializes the iterative solver with the numerical values of the matrix \a A for further solving \c Ax=b problems. - * - * Currently, this function mostly calls factorize on the preconditioner. - * - * \warning this class stores a reference to the matrix A as well as some - * precomputed values that depend on it. Therefore, if \a A is changed - * this class becomes invalid. Call compute() to update it with the new - * matrix A, or modify a copy of A. - */ - template<typename MatrixDerived> - Derived& factorize(const EigenBase<MatrixDerived>& A) - { - eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); - grab(A.derived()); - m_preconditioner.factorize(matrix()); - m_factorizationIsOk = true; - m_info = m_preconditioner.info(); - return derived(); - } - - /** Initializes the iterative solver with the matrix \a A for further solving \c Ax=b problems. - * - * Currently, this function mostly initializes/computes the preconditioner. In the future - * we might, for instance, implement column reordering for faster matrix vector products. - * - * \warning this class stores a reference to the matrix A as well as some - * precomputed values that depend on it. Therefore, if \a A is changed - * this class becomes invalid. Call compute() to update it with the new - * matrix A, or modify a copy of A. - */ - template<typename MatrixDerived> - Derived& compute(const EigenBase<MatrixDerived>& A) - { - grab(A.derived()); - m_preconditioner.compute(matrix()); - m_isInitialized = true; - m_analysisIsOk = true; - m_factorizationIsOk = true; - m_info = m_preconditioner.info(); - return derived(); - } - - /** \internal */ - Index rows() const { return matrix().rows(); } - - /** \internal */ - Index cols() const { return matrix().cols(); } - - /** \returns the tolerance threshold used by the stopping criteria. - * \sa setTolerance() - */ - RealScalar tolerance() const { return m_tolerance; } - - /** Sets the tolerance threshold used by the stopping criteria. - * - * This value is used as an upper bound to the relative residual error: |Ax-b|/|b|. 
- * The default value is the machine precision given by NumTraits<Scalar>::epsilon()
- */
- Derived& setTolerance(const RealScalar& tolerance)
- {
- m_tolerance = tolerance;
- return derived();
- }
-
- /** \returns a read-write reference to the preconditioner for custom configuration. */
- Preconditioner& preconditioner() { return m_preconditioner; }
-
- /** \returns a read-only reference to the preconditioner. */
- const Preconditioner& preconditioner() const { return m_preconditioner; }
-
- /** \returns the max number of iterations.
- * It is either the value set by setMaxIterations or, by default,
- * twice the number of columns of the matrix.
- */
- Index maxIterations() const
- {
- return (m_maxIterations<0) ? 2*matrix().cols() : m_maxIterations;
- }
-
- /** Sets the max number of iterations.
- * Default is twice the number of columns of the matrix.
- */
- Derived& setMaxIterations(Index maxIters)
- {
- m_maxIterations = maxIters;
- return derived();
- }
-
- /** \returns the number of iterations performed during the last solve */
- Index iterations() const
- {
- eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
- return m_iterations;
- }
-
- /** \returns the tolerance error reached during the last solve.
- * It is a close approximation of the true relative residual error |Ax-b|/|b|.
- */
- RealScalar error() const
- {
- eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
- return m_error;
- }
-
- /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
- * and \a x0 as an initial solution.
- *
- * \sa solve(), compute()
- */
- template<typename Rhs,typename Guess>
- inline const SolveWithGuess<Derived, Rhs, Guess>
- solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
- {
- eigen_assert(m_isInitialized && "Solver is not initialized.");
- eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b");
- return SolveWithGuess<Derived, Rhs, Guess>(derived(), b.derived(), x0);
- }
-
- /** \returns Success if the iterations converged, and NoConvergence otherwise. */
- ComputationInfo info() const
- {
- eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized.");
- return m_info;
- }
-
- /** \internal */
- template<typename Rhs, typename DestDerived>
- void _solve_impl(const Rhs& b, SparseMatrixBase<DestDerived> &aDest) const
- {
- eigen_assert(rows()==b.rows());
-
- Index rhsCols = b.cols();
- Index size = b.rows();
- DestDerived& dest(aDest.derived());
- typedef typename DestDerived::Scalar DestScalar;
- Eigen::Matrix<DestScalar,Dynamic,1> tb(size);
- Eigen::Matrix<DestScalar,Dynamic,1> tx(cols());
- // We do not directly fill dest because sparse expressions have to be free of aliasing issues.
- // For non-square least-square problems, b and dest might not have the same size whereas they might alias each other.
- typename DestDerived::PlainObject tmp(cols(),rhsCols);
- for(Index k=0; k<rhsCols; ++k)
- {
- tb = b.col(k);
- tx = derived().solve(tb);
- tmp.col(k) = tx.sparseView(0);
- }
- dest.swap(tmp);
- }
-
-protected:
- void init()
- {
- m_isInitialized = false;
- m_analysisIsOk = false;
- m_factorizationIsOk = false;
- m_maxIterations = -1;
- m_tolerance = NumTraits<Scalar>::epsilon();
- }
-
- typedef internal::generic_matrix_wrapper<MatrixType> MatrixWrapper;
- typedef typename MatrixWrapper::ActualMatrixType ActualMatrixType;
-
- const ActualMatrixType& matrix() const
- {
- return m_matrixWrapper.matrix();
- }
-
- template<typename InputType>
- void grab(const InputType &A)
- {
- m_matrixWrapper.grab(A);
- }
-
- MatrixWrapper m_matrixWrapper;
- Preconditioner m_preconditioner;
-
- Index m_maxIterations;
- RealScalar m_tolerance;
-
- mutable RealScalar m_error;
- mutable Index m_iterations;
- mutable ComputationInfo m_info;
- mutable bool m_analysisIsOk, m_factorizationIsOk;
-};
-
-} // end namespace Eigen
-
-#endif // EIGEN_ITERATIVE_SOLVER_BASE_H
diff --git a/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h b/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h
deleted file mode 100644
index 0aea0e0..0000000
--- a/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h
+++ /dev/null
@@ -1,216 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H
-#define EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H
-
-namespace Eigen {
-
-namespace internal {
-
-/** \internal Low-level conjugate gradient algorithm for least-square problems
- * \param mat The matrix A
- * \param rhs The right hand side vector b
- * \param x On input an initial solution, on output the computed solution.
- * \param precond A preconditioner able to efficiently solve for an
- * approximation of A'Ax=b (regardless of b)
- * \param iters On input the max number of iterations, on output the number of performed iterations.
- * \param tol_error On input the tolerance error, on output an estimation of the relative error.
- */
-template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
-EIGEN_DONT_INLINE
-void least_square_conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
- const Preconditioner& precond, Index& iters,
- typename Dest::RealScalar& tol_error)
-{
- using std::sqrt;
- using std::abs;
- typedef typename Dest::RealScalar RealScalar;
- typedef typename Dest::Scalar Scalar;
- typedef Matrix<Scalar,Dynamic,1> VectorType;
-
- RealScalar tol = tol_error;
- Index maxIters = iters;
-
- Index m = mat.rows(), n = mat.cols();
-
- VectorType residual = rhs - mat * x;
- VectorType normal_residual = mat.adjoint() * residual;
-
- RealScalar rhsNorm2 = (mat.adjoint()*rhs).squaredNorm();
- if(rhsNorm2 == 0)
- {
- x.setZero();
- iters = 0;
- tol_error = 0;
- return;
- }
- RealScalar threshold = tol*tol*rhsNorm2;
- RealScalar residualNorm2 = normal_residual.squaredNorm();
- if (residualNorm2 < threshold)
- {
- iters = 0;
- tol_error = sqrt(residualNorm2 / rhsNorm2);
- return;
- }
-
- VectorType p(n);
- p = precond.solve(normal_residual); // initial search direction
-
- VectorType z(n), tmp(m);
- RealScalar absNew = numext::real(normal_residual.dot(p)); // the square of the absolute value of r scaled by invM
- Index i = 0;
- while(i < maxIters)
- {
- tmp.noalias() = mat * p;
-
- Scalar alpha = absNew / tmp.squaredNorm(); // the amount we travel on dir
- x += alpha * p; // update solution
- residual -= alpha * tmp; // update residual
- normal_residual = mat.adjoint() * residual; // update residual of the normal equation
-
- residualNorm2 = normal_residual.squaredNorm();
- if(residualNorm2 < threshold)
- break;
-
- z = precond.solve(normal_residual); // approximately solve for "A'A z = normal_residual"
-
- RealScalar absOld = absNew;
- absNew = numext::real(normal_residual.dot(z)); // update the absolute value of r
- RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction
- p = z + beta * p; // update search direction
- i++;
- }
- tol_error = sqrt(residualNorm2 / rhsNorm2);
- iters = i;
-}
-
-}
-
-template< typename _MatrixType,
- typename _Preconditioner = LeastSquareDiagonalPreconditioner<typename _MatrixType::Scalar> >
-class LeastSquaresConjugateGradient;
-
-namespace internal {
-
-template< typename _MatrixType, typename _Preconditioner>
-struct traits<LeastSquaresConjugateGradient<_MatrixType,_Preconditioner> >
-{
- typedef _MatrixType MatrixType;
- typedef _Preconditioner Preconditioner;
-};
-
-}
-
-/** \ingroup IterativeLinearSolvers_Module
- * \brief A conjugate gradient solver for sparse (or dense) least-square problems
- *
- * This class allows solving A x = b linear problems using an iterative conjugate gradient algorithm.
- * The matrix A can be non-symmetric and rectangular, but the matrix A' A should be positive-definite to guarantee stability.
- * Otherwise, the SparseLU or SparseQR classes might be preferable.
- * The matrix A and the vectors x and b can be either dense or sparse.
- *
- * \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix.
- * \tparam _Preconditioner the type of the preconditioner. Default is LeastSquareDiagonalPreconditioner
- *
- * \implsparsesolverconcept
- *
- * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
- * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
- * and NumTraits<Scalar>::epsilon() for the tolerance.
- *
- * This class can be used in the same way as the direct solver classes. Here is a typical usage example:
- \code
- int m=1000000, n = 10000;
- VectorXd x(n), b(m);
- SparseMatrix<double> A(m,n);
- // fill A and b
- LeastSquaresConjugateGradient<SparseMatrix<double> > lscg;
- lscg.compute(A);
- x = lscg.solve(b);
- std::cout << "#iterations: " << lscg.iterations() << std::endl;
- std::cout << "estimated error: " << lscg.error() << std::endl;
- // update b, and solve again
- x = lscg.solve(b);
- \endcode
- *
- * By default the iterations start with x=0 as an initial guess of the solution.
- * One can control the start using the solveWithGuess() method.
- *
- * \sa class ConjugateGradient, SparseLU, SparseQR
- */
-template< typename _MatrixType, typename _Preconditioner>
-class LeastSquaresConjugateGradient : public IterativeSolverBase<LeastSquaresConjugateGradient<_MatrixType,_Preconditioner> >
-{
- typedef IterativeSolverBase<LeastSquaresConjugateGradient> Base;
- using Base::matrix;
- using Base::m_error;
- using Base::m_iterations;
- using Base::m_info;
- using Base::m_isInitialized;
-public:
- typedef _MatrixType MatrixType;
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::RealScalar RealScalar;
- typedef _Preconditioner Preconditioner;
-
-public:
-
- /** Default constructor. */
- LeastSquaresConjugateGradient() : Base() {}
-
- /** Initialize the solver with matrix \a A for further \c Ax=b solving.
- *
- * This constructor is a shortcut for the default constructor followed
- * by a call to compute().
- *
- * \warning this class stores a reference to the matrix A as well as some
- * precomputed values that depend on it. Therefore, if \a A is changed
- * this class becomes invalid. Call compute() to update it with the new
- * matrix A, or modify a copy of A.
- */
- template<typename MatrixDerived>
- explicit LeastSquaresConjugateGradient(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}
-
- ~LeastSquaresConjugateGradient() {}
-
- /** \internal */
- template<typename Rhs,typename Dest>
- void _solve_with_guess_impl(const Rhs& b, Dest& x) const
- {
- m_iterations = Base::maxIterations();
- m_error = Base::m_tolerance;
-
- for(Index j=0; j<b.cols(); ++j)
- {
- m_iterations = Base::maxIterations();
- m_error = Base::m_tolerance;
-
- typename Dest::ColXpr xj(x,j);
- internal::least_square_conjugate_gradient(matrix(), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error);
- }
-
- m_isInitialized = true;
- m_info = m_error <= Base::m_tolerance ? Success : NoConvergence;
- }
-
- /** \internal */
- using Base::_solve_impl;
- template<typename Rhs,typename Dest>
- void _solve_impl(const MatrixBase<Rhs>& b, Dest& x) const
- {
- x.setZero();
- _solve_with_guess_impl(b.derived(),x);
- }
-
-};
-
-} // end namespace Eigen
-
-#endif // EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H
diff --git a/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h b/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h
deleted file mode 100644
index 0ace451..0000000
--- a/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h
+++ /dev/null
@@ -1,115 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_SOLVEWITHGUESS_H
-#define EIGEN_SOLVEWITHGUESS_H
-
-namespace Eigen {
-
-template<typename Decomposition, typename RhsType, typename GuessType> class SolveWithGuess;
-
-/** \class SolveWithGuess
- * \ingroup IterativeLinearSolvers_Module
- *
- * \brief Pseudo expression representing a solving operation
- *
- * \tparam Decomposition the type of the matrix or decomposition object
- * \tparam RhsType the type of the right-hand side
- *
- * This class represents an expression of A.solve(B)
- * and most of the time this is the only way it is used.
- *
- */
-namespace internal {
-
-
-template<typename Decomposition, typename RhsType, typename GuessType>
-struct traits<SolveWithGuess<Decomposition, RhsType, GuessType> >
- : traits<Solve<Decomposition,RhsType> >
-{};
-
-}
-
-
-template<typename Decomposition, typename RhsType, typename GuessType>
-class SolveWithGuess : public internal::generic_xpr_base<SolveWithGuess<Decomposition,RhsType,GuessType>, MatrixXpr, typename internal::traits<RhsType>::StorageKind>::type
-{
-public:
- typedef typename internal::traits<SolveWithGuess>::Scalar Scalar;
- typedef typename internal::traits<SolveWithGuess>::PlainObject PlainObject;
- typedef typename internal::generic_xpr_base<SolveWithGuess<Decomposition,RhsType,GuessType>, MatrixXpr, typename internal::traits<RhsType>::StorageKind>::type Base;
- typedef typename internal::ref_selector<SolveWithGuess>::type Nested;
-
- SolveWithGuess(const Decomposition &dec, const RhsType &rhs, const GuessType &guess)
- : m_dec(dec), m_rhs(rhs), m_guess(guess)
- {}
-
- EIGEN_DEVICE_FUNC Index rows() const { return m_dec.cols(); }
- EIGEN_DEVICE_FUNC Index cols() const { return m_rhs.cols(); }
-
- EIGEN_DEVICE_FUNC const Decomposition& dec() const { return m_dec; }
- EIGEN_DEVICE_FUNC const RhsType& rhs() const { return m_rhs; }
- EIGEN_DEVICE_FUNC const GuessType& guess() const { return m_guess; }
-
-protected:
- const Decomposition &m_dec;
- const RhsType &m_rhs;
- const GuessType &m_guess;
-
-private:
- Scalar coeff(Index row, Index col) const;
- Scalar coeff(Index i) const;
-};
-
-namespace internal {
-
-// Evaluator of SolveWithGuess -> eval into a temporary
-template<typename Decomposition, typename RhsType, typename GuessType>
-struct evaluator<SolveWithGuess<Decomposition,RhsType, GuessType> >
- : public evaluator<typename SolveWithGuess<Decomposition,RhsType,GuessType>::PlainObject>
-{
- typedef SolveWithGuess<Decomposition,RhsType,GuessType> SolveType;
- typedef typename SolveType::PlainObject PlainObject;
- typedef evaluator<PlainObject> Base;
-
- evaluator(const SolveType& solve)
- : m_result(solve.rows(), solve.cols())
- {
- ::new (static_cast<Base*>(this)) Base(m_result);
- m_result = solve.guess();
- solve.dec()._solve_with_guess_impl(solve.rhs(), m_result);
- }
-
-protected:
- PlainObject m_result;
-};
-
-// Specialization for "dst = dec.solveWithGuess(rhs)"
-// NOTE we need to specialize it for Dense2Dense to avoid ambiguous specialization error and a Sparse2Sparse specialization must exist somewhere
-template<typename DstXprType, typename DecType, typename RhsType, typename GuessType, typename Scalar>
-struct Assignment<DstXprType, SolveWithGuess<DecType,RhsType,GuessType>, internal::assign_op<Scalar,Scalar>, Dense2Dense>
-{
- typedef SolveWithGuess<DecType,RhsType,GuessType> SrcXprType;
- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
- {
- Index dstRows = src.rows();
- Index dstCols = src.cols();
- if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
- dst.resize(dstRows, dstCols);
-
- dst = src.guess();
- src.dec()._solve_with_guess_impl(src.rhs(), dst/*, src.guess()*/);
- }
-};
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_SOLVEWITHGUESS_H
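For reference, below is a minimal sketch of how the solver API removed above is typically used, assuming an upstream Eigen installation supplies these headers; the problem size, matrix entries, tolerance, and iteration count are arbitrary illustration values, not taken from this repository.

#include <iostream>
#include <vector>
#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>

int main()
{
  const int m = 6, n = 3;  // small rectangular least-squares problem, values chosen for illustration
  std::vector<Eigen::Triplet<double>> coeffs;
  for (int i = 0; i < m; ++i)
    for (int j = 0; j < n; ++j)
      coeffs.emplace_back(i, j, 1.0 / (i + j + 1.0));  // arbitrary full-column-rank entries
  Eigen::SparseMatrix<double> A(m, n);
  A.setFromTriplets(coeffs.begin(), coeffs.end());
  Eigen::VectorXd b = Eigen::VectorXd::LinSpaced(m, 1.0, 6.0);

  Eigen::LeastSquaresConjugateGradient<Eigen::SparseMatrix<double> > lscg;
  lscg.setTolerance(1e-10);      // bound on the relative residual of the normal equations
  lscg.setMaxIterations(2 * n);  // twice the number of columns, as in the documented default
  lscg.compute(A);               // stores a reference to A and builds the preconditioner

  Eigen::VectorXd x  = lscg.solve(b);              // iterations start from x = 0
  Eigen::VectorXd x2 = lscg.solveWithGuess(b, x);  // warm start from the previous solution

  std::cout << "#iterations: " << lscg.iterations()
            << "  estimated error: " << lscg.error()
            << "  converged: " << (lscg.info() == Eigen::Success) << "\n";
}

The solveWithGuess() call is what the SolveWithGuess expression above exists for: it lets the iteration start from a previous solution instead of zero, which typically cuts the iteration count when the right-hand side changes only slightly between solves.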