Updated Eigen to the latest version.
parent d3d50e6858
commit 17f61b2774

@@ -121,6 +121,13 @@ class ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> >
       return m_expression;
     }
+
+    /** Forwards the resizing request to the nested expression
+      * \sa DenseBase::resize(Index) */
+    void resize(Index newSize) { m_expression.const_cast_derived().resize(newSize); }
+    /** Forwards the resizing request to the nested expression
+      * \sa DenseBase::resize(Index,Index)*/
+    void resize(Index nbRows, Index nbCols) { m_expression.const_cast_derived().resize(nbRows,nbCols); }
 
   protected:
     NestedExpressionType m_expression;
 };

@@ -231,6 +238,13 @@ class MatrixWrapper : public MatrixBase<MatrixWrapper<ExpressionType> >
       return m_expression;
     }
+
+    /** Forwards the resizing request to the nested expression
+      * \sa DenseBase::resize(Index) */
+    void resize(Index newSize) { m_expression.const_cast_derived().resize(newSize); }
+    /** Forwards the resizing request to the nested expression
+      * \sa DenseBase::resize(Index,Index)*/
+    void resize(Index nbRows, Index nbCols) { m_expression.const_cast_derived().resize(nbRows,nbCols); }
 
   protected:
     NestedExpressionType m_expression;
 };

@@ -65,6 +65,8 @@ struct CommaInitializer
     template<typename OtherDerived>
     CommaInitializer& operator,(const DenseBase<OtherDerived>& other)
     {
+      if(other.cols()==0 || other.rows()==0)
+        return *this;
       if (m_col==m_xpr.cols())
       {
         m_row+=m_currentBlockRows;

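The guard added here lets the comma initializer skip empty blocks instead of tripping over them, which helps when assembling a matrix from pieces whose sizes are only known at run time. A minimal usage sketch (sizes are illustrative, not from the commit):

#include <Eigen/Dense>
using namespace Eigen;

int main()
{
  MatrixXd left  = MatrixXd::Ones(2, 2);
  MatrixXd mid(2, 0);                    // empty block: 2 rows, 0 columns
  MatrixXd right = MatrixXd::Zero(2, 3);

  MatrixXd m(2, 5);
  m << left, mid, right;                 // the empty block is now ignored
  return 0;
}
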
@@ -20,6 +20,7 @@ class DiagonalBase : public EigenBase<Derived>
   public:
     typedef typename internal::traits<Derived>::DiagonalVectorType DiagonalVectorType;
     typedef typename DiagonalVectorType::Scalar Scalar;
+    typedef typename DiagonalVectorType::RealScalar RealScalar;
     typedef typename internal::traits<Derived>::StorageKind StorageKind;
     typedef typename internal::traits<Derived>::Index Index;
 

@@ -65,6 +66,17 @@ class DiagonalBase : public EigenBase<Derived>
       return diagonal().cwiseInverse();
     }
+
+    inline const DiagonalWrapper<const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DiagonalVectorType> >
+    operator*(const Scalar& scalar) const
+    {
+      return diagonal() * scalar;
+    }
+    friend inline const DiagonalWrapper<const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DiagonalVectorType> >
+    operator*(const Scalar& scalar, const DiagonalBase& other)
+    {
+      return other.diagonal() * scalar;
+    }
 
     #ifdef EIGEN2_SUPPORT
     template<typename OtherDerived>
     bool isApprox(const DiagonalBase<OtherDerived>& other, typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision()) const

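These overloads let a diagonal expression be scaled from either side while staying in the lazy diagonal representation. A quick sketch of both forms:

#include <Eigen/Dense>
using namespace Eigen;

int main()
{
  Vector3d d(1.0, 2.0, 3.0);
  Matrix3d a = d.asDiagonal() * 2.0;  // diagonal * scalar
  Matrix3d b = 2.0 * d.asDiagonal();  // scalar * diagonal, via the friend overload
  return 0;
}
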
@@ -447,7 +447,7 @@ struct functor_traits<scalar_log_op<Scalar> >
   * indeed it seems better to declare m_other as a Packet and do the pset1() once
   * in the constructor. However, in practice:
   *  - GCC does not like m_other as a Packet and generate a load every time it needs it
-  *  - on the other hand GCC is able to moves the pset1() away the loop :)
+  *  - on the other hand GCC is able to moves the pset1() outside the loop :)
   *  - simpler code ;)
   * (ICC and gcc 4.4 seems to perform well in both cases, the issue is visible with y = a*x + b*y)
   */

@@ -478,33 +478,6 @@ template<typename Scalar1,typename Scalar2>
 struct functor_traits<scalar_multiple2_op<Scalar1,Scalar2> >
 { enum { Cost = NumTraits<Scalar1>::MulCost, PacketAccess = false }; };
 
-template<typename Scalar, bool IsInteger>
-struct scalar_quotient1_impl {
-  typedef typename packet_traits<Scalar>::type Packet;
-  // FIXME default copy constructors seems bugged with std::complex<>
-  EIGEN_STRONG_INLINE scalar_quotient1_impl(const scalar_quotient1_impl& other) : m_other(other.m_other) { }
-  EIGEN_STRONG_INLINE scalar_quotient1_impl(const Scalar& other) : m_other(static_cast<Scalar>(1) / other) {}
-  EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a * m_other; }
-  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
-  { return internal::pmul(a, pset1<Packet>(m_other)); }
-  const Scalar m_other;
-};
-template<typename Scalar>
-struct functor_traits<scalar_quotient1_impl<Scalar,false> >
-{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; };
-
-template<typename Scalar>
-struct scalar_quotient1_impl<Scalar,true> {
-  // FIXME default copy constructors seems bugged with std::complex<>
-  EIGEN_STRONG_INLINE scalar_quotient1_impl(const scalar_quotient1_impl& other) : m_other(other.m_other) { }
-  EIGEN_STRONG_INLINE scalar_quotient1_impl(const Scalar& other) : m_other(other) {}
-  EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a / m_other; }
-  typename add_const_on_value_type<typename NumTraits<Scalar>::Nested>::type m_other;
-};
-template<typename Scalar>
-struct functor_traits<scalar_quotient1_impl<Scalar,true> >
-{ enum { Cost = 2 * NumTraits<Scalar>::MulCost, PacketAccess = false }; };
-
 /** \internal
   * \brief Template functor to divide a scalar by a fixed other one
   *

@@ -514,14 +487,19 @@ struct functor_traits<scalar_quotient1_impl<Scalar,true> >
   * \sa class CwiseUnaryOp, MatrixBase::operator/
   */
 template<typename Scalar>
-struct scalar_quotient1_op : scalar_quotient1_impl<Scalar, NumTraits<Scalar>::IsInteger > {
-  EIGEN_STRONG_INLINE scalar_quotient1_op(const Scalar& other)
-    : scalar_quotient1_impl<Scalar, NumTraits<Scalar>::IsInteger >(other) {}
+struct scalar_quotient1_op {
+  typedef typename packet_traits<Scalar>::type Packet;
+  // FIXME default copy constructors seems bugged with std::complex<>
+  EIGEN_STRONG_INLINE scalar_quotient1_op(const scalar_quotient1_op& other) : m_other(other.m_other) { }
+  EIGEN_STRONG_INLINE scalar_quotient1_op(const Scalar& other) : m_other(other) {}
+  EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a / m_other; }
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+  { return internal::pdiv(a, pset1<Packet>(m_other)); }
+  typename add_const_on_value_type<typename NumTraits<Scalar>::Nested>::type m_other;
 };
 template<typename Scalar>
 struct functor_traits<scalar_quotient1_op<Scalar> >
-  : functor_traits<scalar_quotient1_impl<Scalar, NumTraits<Scalar>::IsInteger> >
-{};
+{ enum { Cost = 2 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasDiv }; };
 
 // nullary functors
 

@@ -237,7 +237,7 @@ template<typename Derived> class MatrixBase
 
     // huuuge hack. make Eigen2's matrix.part<Diagonal>() work in eigen3. Problem: Diagonal is now a class template instead
     // of an integer constant. Solution: overload the part() method template wrt template parameters list.
-    template<template<typename T, int n> class U>
+    template<template<typename T, int N> class U>
    const DiagonalWrapper<ConstDiagonalReturnType> part() const
    { return diagonal().asDiagonal(); }
    #endif // EIGEN2_SUPPORT

@@ -131,7 +131,6 @@ MatrixBase<Derived>::blueNorm() const
   abig = internal::sqrt(abig);
   if(abig > overfl)
   {
-    eigen_assert(false && "overflow");
     return rbig;
   }
   if(amed > RealScalar(0))

@@ -511,6 +511,7 @@ template<typename Derived1, typename Derived2, bool ClearOpposite>
 struct triangular_assignment_selector<Derived1, Derived2, StrictlyUpper, Dynamic, ClearOpposite>
 {
   typedef typename Derived1::Index Index;
+  typedef typename Derived1::Scalar Scalar;
   static inline void run(Derived1 &dst, const Derived2 &src)
   {
     for(Index j = 0; j < dst.cols(); ++j)

@@ -520,7 +521,7 @@ struct triangular_assignment_selector<Derived1, Derived2, StrictlyUpper, Dynamic
         dst.copyCoeff(i, j, src);
       if (ClearOpposite)
         for(Index i = maxi; i < dst.rows(); ++i)
-          dst.coeffRef(i, j) = 0;
+          dst.coeffRef(i, j) = Scalar(0);
     }
   }
 };

@@ -81,14 +81,13 @@ EIGEN_DONT_INLINE static void run(
   const Index peels = 2;
   const Index LhsPacketAlignedMask = LhsPacketSize-1;
   const Index ResPacketAlignedMask = ResPacketSize-1;
-  const Index PeelAlignedMask = ResPacketSize*peels-1;
   const Index size = rows;
 
   // How many coeffs of the result do we have to skip to be aligned.
   // Here we assume data are at least aligned on the base scalar type.
   Index alignedStart = internal::first_aligned(res,size);
   Index alignedSize = ResPacketSize>1 ? alignedStart + ((size-alignedStart) & ~ResPacketAlignedMask) : 0;
-  const Index peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart;
+  const Index peeledSize = alignedSize - RhsPacketSize*peels - RhsPacketSize + 1;
 
   const Index alignmentStep = LhsPacketSize>1 ? (LhsPacketSize - lhsStride % LhsPacketSize) & LhsPacketAlignedMask : 0;
   Index alignmentPattern = alignmentStep==0 ? AllAligned

@@ -177,6 +176,8 @@ EIGEN_DONT_INLINE static void run(
         _EIGEN_ACCUMULATE_PACKETS(d,du,d);
       break;
     case FirstAligned:
+    {
+      Index j = alignedStart;
       if(peels>1)
       {
         LhsPacket A00, A01, A02, A03, A10, A11, A12, A13;

@@ -186,7 +187,7 @@ EIGEN_DONT_INLINE static void run(
         A02 = pload<LhsPacket>(&lhs2[alignedStart-2]);
         A03 = pload<LhsPacket>(&lhs3[alignedStart-3]);
 
-        for (Index j = alignedStart; j<peeledSize; j+=peels*ResPacketSize)
+        for (; j<peeledSize; j+=peels*ResPacketSize)
         {
           A11 = pload<LhsPacket>(&lhs1[j-1+LhsPacketSize]);  palign<1>(A01,A11);
           A12 = pload<LhsPacket>(&lhs2[j-2+LhsPacketSize]);  palign<2>(A02,A12);

@@ -210,9 +211,10 @@ EIGEN_DONT_INLINE static void run(
           pstore(&res[j+ResPacketSize],T1);
         }
       }
-      for (Index j = peeledSize; j<alignedSize; j+=ResPacketSize)
+      for (; j<alignedSize; j+=ResPacketSize)
         _EIGEN_ACCUMULATE_PACKETS(d,du,du);
       break;
+    }
     default:
       for (Index j = alignedStart; j<alignedSize; j+=ResPacketSize)
         _EIGEN_ACCUMULATE_PACKETS(du,du,du);

@@ -332,7 +334,6 @@ EIGEN_DONT_INLINE static void run(
   const Index peels = 2;
   const Index RhsPacketAlignedMask = RhsPacketSize-1;
   const Index LhsPacketAlignedMask = LhsPacketSize-1;
-  const Index PeelAlignedMask = RhsPacketSize*peels-1;
   const Index depth = cols;
 
   // How many coeffs of the result do we have to skip to be aligned.

@@ -340,7 +341,7 @@ EIGEN_DONT_INLINE static void run(
   // if that's not the case then vectorization is discarded, see below.
   Index alignedStart = internal::first_aligned(rhs, depth);
   Index alignedSize = RhsPacketSize>1 ? alignedStart + ((depth-alignedStart) & ~RhsPacketAlignedMask) : 0;
-  const Index peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart;
+  const Index peeledSize = alignedSize - RhsPacketSize*peels - RhsPacketSize + 1;
 
   const Index alignmentStep = LhsPacketSize>1 ? (LhsPacketSize - lhsStride % LhsPacketSize) & LhsPacketAlignedMask : 0;
   Index alignmentPattern = alignmentStep==0 ? AllAligned

@@ -430,10 +431,12 @@ EIGEN_DONT_INLINE static void run(
       _EIGEN_ACCUMULATE_PACKETS(d,du,d);
       break;
     case FirstAligned:
+    {
+      Index j = alignedStart;
       if (peels>1)
       {
         /* Here we proccess 4 rows with with two peeled iterations to hide
-         * tghe overhead of unaligned loads. Moreover unaligned loads are handled
+         * the overhead of unaligned loads. Moreover unaligned loads are handled
          * using special shift/move operations between the two aligned packets
          * overlaping the desired unaligned packet. This is *much* more efficient
          * than basic unaligned loads.

@@ -443,7 +446,7 @@ EIGEN_DONT_INLINE static void run(
         A02 = pload<LhsPacket>(&lhs2[alignedStart-2]);
         A03 = pload<LhsPacket>(&lhs3[alignedStart-3]);
 
-        for (Index j = alignedStart; j<peeledSize; j+=peels*RhsPacketSize)
+        for (; j<peeledSize; j+=peels*RhsPacketSize)
         {
           RhsPacket b = pload<RhsPacket>(&rhs[j]);
           A11 = pload<LhsPacket>(&lhs1[j-1+LhsPacketSize]);  palign<1>(A01,A11);

@@ -465,9 +468,10 @@ EIGEN_DONT_INLINE static void run(
           ptmp3 = pcj.pmadd(A13, b, ptmp3);
         }
       }
-      for (Index j = peeledSize; j<alignedSize; j+=RhsPacketSize)
+      for (; j<alignedSize; j+=RhsPacketSize)
         _EIGEN_ACCUMULATE_PACKETS(d,du,du);
       break;
+    }
     default:
       for (Index j = alignedStart; j<alignedSize; j+=RhsPacketSize)
         _EIGEN_ACCUMULATE_PACKETS(du,du,du);

@@ -57,11 +57,11 @@ template <typename Index, int Mode, \
 struct product_triangular_matrix_matrix<Scalar,Index, Mode, LhsIsTriangular, \
          LhsStorageOrder,ConjugateLhs, RhsStorageOrder,ConjugateRhs,ColMajor,Specialized> { \
   static inline void run(Index _rows, Index _cols, Index _depth, const Scalar* _lhs, Index lhsStride,\
-    const Scalar* _rhs, Index rhsStride, Scalar* res, Index resStride, Scalar alpha) { \
+    const Scalar* _rhs, Index rhsStride, Scalar* res, Index resStride, Scalar alpha, level3_blocking<Scalar,Scalar>& blocking) { \
       product_triangular_matrix_matrix_trmm<Scalar,Index,Mode, \
         LhsIsTriangular,LhsStorageOrder,ConjugateLhs, \
         RhsStorageOrder, ConjugateRhs, ColMajor>::run( \
-        _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha); \
+        _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \
   } \
 };
 

@@ -96,7 +96,7 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,true, \
     const EIGTYPE* _lhs, Index lhsStride, \
     const EIGTYPE* _rhs, Index rhsStride, \
     EIGTYPE* res, Index resStride, \
-    EIGTYPE alpha) \
+    EIGTYPE alpha, level3_blocking<EIGTYPE,EIGTYPE>& blocking) \
   { \
    Index diagSize = (std::min)(_rows,_depth); \
    Index rows = IsLower ? _rows : diagSize; \

@@ -115,16 +115,16 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,true, \
      /* Most likely no benefit to call TRMM or GEMM from MKL*/ \
      product_triangular_matrix_matrix<EIGTYPE,Index,Mode,true, \
        LhsStorageOrder,ConjugateLhs, RhsStorageOrder, ConjugateRhs, ColMajor, BuiltIn>::run( \
-        _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha); \
+        _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \
      /*std::cout << "TRMM_L: A is not square! Go to Eigen TRMM implementation!\n";*/ \
    } else { \
      /* Make sense to call GEMM */ \
      Map<const MatrixLhs, 0, OuterStride<> > lhsMap(_lhs,rows,depth,OuterStride<>(lhsStride)); \
      MatrixLhs aa_tmp=lhsMap.template triangularView<Mode>(); \
      MKL_INT aStride = aa_tmp.outerStride(); \
-     gemm_blocking_space<ColMajor,EIGTYPE,EIGTYPE,Dynamic,Dynamic,Dynamic> blocking(_rows,_cols,_depth); \
+     gemm_blocking_space<ColMajor,EIGTYPE,EIGTYPE,Dynamic,Dynamic,Dynamic> gemm_blocking(_rows,_cols,_depth); \
      general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor>::run( \
-       rows, cols, depth, aa_tmp.data(), aStride, _rhs, rhsStride, res, resStride, alpha, blocking, 0); \
+       rows, cols, depth, aa_tmp.data(), aStride, _rhs, rhsStride, res, resStride, alpha, gemm_blocking, 0); \
 \
      /*std::cout << "TRMM_L: A is not square! Go to MKL GEMM implementation! " << nthr<<" \n";*/ \
    } \

@@ -210,7 +210,7 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,false, \
     const EIGTYPE* _lhs, Index lhsStride, \
     const EIGTYPE* _rhs, Index rhsStride, \
     EIGTYPE* res, Index resStride, \
-    EIGTYPE alpha) \
+    EIGTYPE alpha, level3_blocking<EIGTYPE,EIGTYPE>& blocking) \
   { \
    Index diagSize = (std::min)(_cols,_depth); \
    Index rows = _rows; \

@@ -229,16 +229,16 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,false, \
      /* Most likely no benefit to call TRMM or GEMM from MKL*/ \
      product_triangular_matrix_matrix<EIGTYPE,Index,Mode,false, \
        LhsStorageOrder,ConjugateLhs, RhsStorageOrder, ConjugateRhs, ColMajor, BuiltIn>::run( \
-        _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha); \
+        _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \
      /*std::cout << "TRMM_R: A is not square! Go to Eigen TRMM implementation!\n";*/ \
    } else { \
      /* Make sense to call GEMM */ \
      Map<const MatrixRhs, 0, OuterStride<> > rhsMap(_rhs,depth,cols, OuterStride<>(rhsStride)); \
      MatrixRhs aa_tmp=rhsMap.template triangularView<Mode>(); \
      MKL_INT aStride = aa_tmp.outerStride(); \
-     gemm_blocking_space<ColMajor,EIGTYPE,EIGTYPE,Dynamic,Dynamic,Dynamic> blocking(_rows,_cols,_depth); \
+     gemm_blocking_space<ColMajor,EIGTYPE,EIGTYPE,Dynamic,Dynamic,Dynamic> gemm_blocking(_rows,_cols,_depth); \
      general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor>::run( \
-       rows, cols, depth, _lhs, lhsStride, aa_tmp.data(), aStride, res, resStride, alpha, blocking, 0); \
+       rows, cols, depth, _lhs, lhsStride, aa_tmp.data(), aStride, res, resStride, alpha, gemm_blocking, 0); \
 \
      /*std::cout << "TRMM_R: A is not square! Go to MKL GEMM implementation! " << nthr<<" \n";*/ \
    } \

@@ -82,11 +82,11 @@ struct triangular_matrix_vector_product_trmv<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,
    LowUp = IsLower ? Lower : Upper \
  }; \
  static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const EIGTYPE* _lhs, Index lhsStride, \
-                                   const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha, level3_blocking<EIGTYPE,EIGTYPE>& blocking) \
+                                   const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha) \
  { \
    if (ConjLhs || IsZeroDiag) { \
      triangular_matrix_vector_product<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,ColMajor,BuiltIn>::run( \
-       _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha, blocking); \
+       _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \
      return; \
    }\
    Index size = (std::min)(_rows,_cols); \

@@ -167,11 +167,11 @@ struct triangular_matrix_vector_product_trmv<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,
    LowUp = IsLower ? Lower : Upper \
  }; \
  static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const EIGTYPE* _lhs, Index lhsStride, \
-                                   const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha, level3_blocking<EIGTYPE,EIGTYPE>& blocking) \
+                                   const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha) \
  { \
    if (IsZeroDiag) { \
      triangular_matrix_vector_product<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,RowMajor,BuiltIn>::run( \
-       _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha, blocking); \
+       _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \
      return; \
    }\
    Index size = (std::min)(_rows,_cols); \

@@ -13,7 +13,7 @@
 
 #define EIGEN_WORLD_VERSION 3
 #define EIGEN_MAJOR_VERSION 1
-#define EIGEN_MINOR_VERSION 1
+#define EIGEN_MINOR_VERSION 2
 
 #define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
                                       (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \

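Downstream code can key on the bumped patch level through the version macro defined right below the version numbers, for example:

#include <Eigen/Core>

#if EIGEN_VERSION_AT_LEAST(3,1,2)
  // rely on the 3.1.2 fixes collected in this commit
#endif
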
@@ -204,7 +204,7 @@ inline void* aligned_malloc(size_t size)
     if(posix_memalign(&result, 16, size)) result = 0;
   #elif EIGEN_HAS_MM_MALLOC
     result = _mm_malloc(size, 16);
-  #elif (defined _MSC_VER)
+  #elif defined(_MSC_VER) && (!defined(_WIN32_WCE))
     result = _aligned_malloc(size, 16);
   #else
     result = handmade_aligned_malloc(size);

@@ -745,7 +745,7 @@ public:
         __asm__ __volatile__ ("cpuid": "=a" (abcd[0]), "=b" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id) );
 #  endif
 # elif defined(_MSC_VER)
-#  if (_MSC_VER > 1500)
+#  if (_MSC_VER > 1500) && ( defined(_M_IX86) || defined(_M_X64) )
 #   define EIGEN_CPUID(abcd,func,id) __cpuidex((int*)abcd,func,id)
 #  endif
 # endif

@@ -301,9 +301,9 @@ template<typename T, int n=1, typename PlainObject = typename eval<T>::type> str
     // it's important that this value can still be squared without integer overflowing.
     DynamicAsInteger = 10000,
     ScalarReadCost = NumTraits<typename traits<T>::Scalar>::ReadCost,
-    ScalarReadCostAsInteger = ScalarReadCost == Dynamic ? DynamicAsInteger : ScalarReadCost,
+    ScalarReadCostAsInteger = ScalarReadCost == Dynamic ? int(DynamicAsInteger) : int(ScalarReadCost),
     CoeffReadCost = traits<T>::CoeffReadCost,
-    CoeffReadCostAsInteger = CoeffReadCost == Dynamic ? DynamicAsInteger : CoeffReadCost,
+    CoeffReadCostAsInteger = CoeffReadCost == Dynamic ? int(DynamicAsInteger) : int(CoeffReadCost),
     NAsInteger = n == Dynamic ? int(DynamicAsInteger) : n,
     CostEvalAsInteger   = (NAsInteger+1) * ScalarReadCostAsInteger + CoeffReadCostAsInteger,
     CostNoEvalAsInteger = NAsInteger * CoeffReadCostAsInteger

@@ -336,6 +336,7 @@ void ComplexSchur<MatrixType>::reduceToTriangularForm(bool computeU)
   Index iu = m_matT.cols() - 1;
   Index il;
   Index iter = 0; // number of iterations we are working on the (iu,iu) element
+  Index totalIter = 0; // number of iterations for whole matrix
 
   while(true)
   {

@@ -350,9 +351,10 @@ void ComplexSchur<MatrixType>::reduceToTriangularForm(bool computeU)
     // if iu is zero then we are done; the whole matrix is triangularized
     if(iu==0) break;
 
-    // if we spent too many iterations on the current element, we give up
+    // if we spent too many iterations, we give up
     iter++;
-    if(iter > m_maxIterations * m_matT.cols()) break;
+    totalIter++;
+    if(totalIter > m_maxIterations * m_matT.cols()) break;
 
     // find il, the top row of the active submatrix
     il = iu-1;

@@ -382,7 +384,7 @@ void ComplexSchur<MatrixType>::reduceToTriangularForm(bool computeU)
     }
   }
 
-  if(iter <= m_maxIterations * m_matT.cols())
+  if(totalIter <= m_maxIterations * m_matT.cols())
     m_info = Success;
   else
     m_info = NoConvergence;

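With the counter split, convergence is judged on the work done for the whole matrix rather than on the per-eigenvalue counter, and callers observe the outcome the usual way through info(). A small check (random matrix purely for illustration):

#include <Eigen/Eigenvalues>
using namespace Eigen;

int main()
{
  MatrixXcd A = MatrixXcd::Random(8, 8);
  ComplexSchur<MatrixXcd> schur(A);
  if (schur.info() != Success)
    return 1;                    // NoConvergence is now based on totalIter
  MatrixXcd T = schur.matrixT(); // upper triangular factor
  return 0;
}
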
@@ -220,7 +220,8 @@ RealSchur<MatrixType>& RealSchur<MatrixType>::compute(const MatrixType& matrix,
   // Rows il,...,iu is the part we are working on (the active window).
   // Rows iu+1,...,end are already brought in triangular form.
   Index iu = m_matT.cols() - 1;
-  Index iter = 0; // iteration count
+  Index iter = 0;      // iteration count for current eigenvalue
+  Index totalIter = 0; // iteration count for whole matrix
   Scalar exshift(0);   // sum of exceptional shifts
   Scalar norm = computeNormOfT();
 

@@ -251,14 +252,15 @@ RealSchur<MatrixType>& RealSchur<MatrixType>::compute(const MatrixType& matrix,
         Vector3s firstHouseholderVector(0,0,0), shiftInfo;
         computeShift(iu, iter, exshift, shiftInfo);
         iter = iter + 1;
-        if (iter > m_maxIterations * m_matT.cols()) break;
+        totalIter = totalIter + 1;
+        if (totalIter > m_maxIterations * matrix.cols()) break;
         Index im;
         initFrancisQRStep(il, iu, shiftInfo, im, firstHouseholderVector);
         performFrancisQRStep(il, im, iu, computeU, firstHouseholderVector, workspace);
       }
     }
   }
-  if(iter <= m_maxIterations * m_matT.cols())
+  if(totalIter <= m_maxIterations * matrix.cols())
     m_info = Success;
   else
     m_info = NoConvergence;

@@ -743,7 +743,16 @@ static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index sta
   // RealScalar e2 = abs2(subdiag[end-1]);
   // RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2));
   // This explain the following, somewhat more complicated, version:
-  RealScalar mu = diag[end] - (e / (td + (td>0 ? 1 : -1))) * (e / hypot(td,e));
+  RealScalar mu = diag[end];
+  if(td==0)
+    mu -= abs(e);
+  else
+  {
+    RealScalar e2 = abs2(subdiag[end-1]);
+    RealScalar h = hypot(td,e);
+    if(e2==0)  mu -= (e / (td + (td>0 ? 1 : -1))) * (e / h);
+    else       mu -= e2 / (td + (td>0 ? h : -h));
+  }
 
   RealScalar x = diag[start] - mu;
   RealScalar z = subdiag[start];

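Both the old one-liner and the new branchy version target the same Wilkinson shift; the rewrite only reorders the arithmetic so that the td == 0 and e2 == 0 corner cases cannot divide by zero or lose precision. With td = (diag[end-1] - diag[end])/2 and e = subdiag[end-1], as in the surrounding code, the shift being computed is

\[ \mu \;=\; \mathrm{diag}[\mathrm{end}] \;-\; \frac{e^2}{t_d + \operatorname{sign}(t_d)\,\sqrt{t_d^2 + e^2}} \]

which matches the commented-out reference formula kept above the code.
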
@@ -79,7 +79,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
   ~AlignedBox() {}
 
   /** \returns the dimension in which the box holds */
-  inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size()-1 : Index(AmbientDimAtCompileTime); }
+  inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size() : Index(AmbientDimAtCompileTime); }
 
   /** \deprecated use isEmpty */
   inline bool isNull() const { return isEmpty(); }

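For a dynamically sized box the min corner stores exactly one coefficient per ambient dimension, so size() is already the dimension; the old size()-1 reads like a leftover from homogeneous storage. A quick check, assuming the dynamic-dimension constructor that takes the ambient dimension:

#include <Eigen/Geometry>
using namespace Eigen;

int main()
{
  AlignedBox<double, Dynamic> box(3); // ambient dimension 3
  return box.dim() == 3 ? 0 : 1;      // previously this returned 2
}
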
@@ -108,6 +108,7 @@ class PardisoImpl
     typedef Matrix<Scalar,Dynamic,1> VectorType;
     typedef Matrix<Index, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
     typedef Matrix<Index, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
+    typedef Array<Index,64,1,DontAlign> ParameterType;
     enum {
       ScalarIsComplex = NumTraits<Scalar>::IsComplex
     };

@@ -142,7 +143,7 @@ class PardisoImpl
     /** \warning for advanced usage only.
       * \returns a reference to the parameter array controlling PARDISO.
       * See the PARDISO manual to know how to use it. */
-    Array<Index,64,1>& pardisoParameterArray()
+    ParameterType& pardisoParameterArray()
     {
       return m_iparm;
     }

@@ -295,7 +296,7 @@ class PardisoImpl
     bool m_initialized, m_analysisIsOk, m_factorizationIsOk;
     Index m_type, m_msglvl;
     mutable void *m_pt[64];
-    mutable Array<Index,64,1> m_iparm;
+    mutable ParameterType m_iparm;
     mutable IntColVectorType m_perm;
     Index m_size;
 

@@ -126,11 +126,15 @@ class sparse_diagonal_product_inner_iterator_selector
               SparseInnerVectorSet<Rhs,1>,
               typename Lhs::DiagonalVectorType>::InnerIterator Base;
+    typedef typename Lhs::Index Index;
+    Index m_outer;
   public:
     inline sparse_diagonal_product_inner_iterator_selector(
               const SparseDiagonalProductType& expr, Index outer)
-      : Base(expr.rhs().innerVector(outer) .cwiseProduct(expr.lhs().diagonal()), 0)
+      : Base(expr.rhs().innerVector(outer) .cwiseProduct(expr.lhs().diagonal()), 0), m_outer(outer)
     {}
 
+    inline Index outer() const { return m_outer; }
+    inline Index col() const { return m_outer; }
 };
 
 template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>

@@ -160,11 +164,15 @@ class sparse_diagonal_product_inner_iterator_selector
               SparseInnerVectorSet<Lhs,1>,
               Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator Base;
+    typedef typename Lhs::Index Index;
+    Index m_outer;
   public:
     inline sparse_diagonal_product_inner_iterator_selector(
               const SparseDiagonalProductType& expr, Index outer)
-      : Base(expr.lhs().innerVector(outer) .cwiseProduct(expr.rhs().diagonal().transpose()), 0)
+      : Base(expr.lhs().innerVector(outer) .cwiseProduct(expr.rhs().diagonal().transpose()), 0), m_outer(outer)
     {}
 
+    inline Index outer() const { return m_outer; }
+    inline Index row() const { return m_outer; }
 };
 
 } // end namespace internal

@@ -572,6 +572,16 @@ class SparseMatrix
       *this = other.derived();
     }
 
+    /** \brief Copy constructor with in-place evaluation */
+    template<typename OtherDerived>
+    SparseMatrix(const ReturnByValue<OtherDerived>& other)
+      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+    {
+      check_template_parameters();
+      initAssignment(other);
+      other.evalTo(*this);
+    }
+
     /** Swaps the content of two sparse matrices of the same type.
       * This is a fast operation that simply swaps the underlying pointers and parameters. */
     inline void swap(SparseMatrix& other)

@@ -613,7 +623,10 @@ class SparseMatrix
 
     template<typename OtherDerived>
     inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
-    { return Base::operator=(other.derived()); }
+    {
+      initAssignment(other);
+      return Base::operator=(other.derived());
+    }
 
     template<typename OtherDerived>
     inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)

@@ -623,7 +636,6 @@ class SparseMatrix
     template<typename OtherDerived>
     EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
     {
-      initAssignment(other.derived());
       const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
       if (needToTranspose)
       {

@@ -635,40 +647,45 @@ class SparseMatrix
         typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
         OtherCopy otherCopy(other.derived());
 
-        Eigen::Map<Matrix<Index, Dynamic, 1> > (m_outerIndex,outerSize()).setZero();
+        SparseMatrix dest(other.rows(),other.cols());
+        Eigen::Map<Matrix<Index, Dynamic, 1> > (dest.m_outerIndex,dest.outerSize()).setZero();
 
         // pass 1
         // FIXME the above copy could be merged with that pass
         for (Index j=0; j<otherCopy.outerSize(); ++j)
           for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
-            ++m_outerIndex[it.index()];
+            ++dest.m_outerIndex[it.index()];
 
         // prefix sum
         Index count = 0;
-        VectorXi positions(outerSize());
-        for (Index j=0; j<outerSize(); ++j)
+        VectorXi positions(dest.outerSize());
+        for (Index j=0; j<dest.outerSize(); ++j)
         {
-          Index tmp = m_outerIndex[j];
-          m_outerIndex[j] = count;
+          Index tmp = dest.m_outerIndex[j];
+          dest.m_outerIndex[j] = count;
           positions[j] = count;
           count += tmp;
         }
-        m_outerIndex[outerSize()] = count;
+        dest.m_outerIndex[dest.outerSize()] = count;
         // alloc
-        m_data.resize(count);
+        dest.m_data.resize(count);
         // pass 2
         for (Index j=0; j<otherCopy.outerSize(); ++j)
        {
          for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
          {
            Index pos = positions[it.index()]++;
-            m_data.index(pos) = j;
-            m_data.value(pos) = it.value();
+            dest.m_data.index(pos) = j;
+            dest.m_data.value(pos) = it.value();
          }
        }
+        this->swap(dest);
         return *this;
       }
       else
       {
+        if(other.isRValue())
+          initAssignment(other.derived());
         // there is no special optimization
         return Base::operator=(other.derived());
       }

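The assignment keeps the classic two-pass bucket transpose (count, prefix sum, scatter) but now builds into a temporary dest and swaps it in, so the destination's own arrays are never both source and target of the copy. The same pattern on raw compressed arrays, as a standalone sketch (a simplified compressed-column to compressed-row conversion, not Eigen's actual code):

#include <cstddef>
#include <vector>

// Transpose a compressed-column matrix into compressed-row form using the
// same count / prefix-sum / scatter passes as the diff above.
void transpose_csc(std::size_t rows,
                   const std::vector<int>& colPtr,   // size cols+1
                   const std::vector<int>& rowIdx,   // size nnz
                   const std::vector<double>& vals,  // size nnz
                   std::vector<int>& outPtr,         // size rows+1
                   std::vector<int>& outIdx,
                   std::vector<double>& outVals)
{
  const std::size_t cols = colPtr.size() - 1;
  const std::size_t nnz  = rowIdx.size();
  outPtr.assign(rows + 1, 0);

  // pass 1: count the entries that land in each output row
  for (std::size_t k = 0; k < nnz; ++k)
    ++outPtr[rowIdx[k] + 1];

  // prefix sum turns counts into start offsets
  for (std::size_t r = 0; r < rows; ++r)
    outPtr[r + 1] += outPtr[r];

  // pass 2: scatter, advancing a per-row cursor (the 'positions' vector above)
  std::vector<int> pos(outPtr.begin(), outPtr.end() - 1);
  outIdx.resize(nnz);
  outVals.resize(nnz);
  for (std::size_t j = 0; j < cols; ++j)
    for (int k = colPtr[j]; k < colPtr[j + 1]; ++k)
    {
      const int p = pos[rowIdx[k]]++;
      outIdx[p]  = static_cast<int>(j);
      outVals[p] = vals[k];
    }
}
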
@@ -888,6 +905,7 @@ protected:
         m_data.value(p) = m_data.value(p-1);
         --p;
       }
       eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exist, you must call coeffRef to this end");
 
+      m_innerNonZeros[outer]++;
 

@@ -113,9 +113,10 @@ template<typename T,int Rows> struct sparse_eval<T,Rows,1> {
 
 template<typename T,int Rows,int Cols> struct sparse_eval {
     typedef typename traits<T>::Scalar _Scalar;
-    enum { _Flags = traits<T>::Flags };
+    typedef typename traits<T>::Index _Index;
+    enum { _Options = ((traits<T>::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
   public:
-    typedef SparseMatrix<_Scalar, _Flags> type;
+    typedef SparseMatrix<_Scalar, _Options, _Index> type;
 };
 
 template<typename T> struct sparse_eval<T,1,1> {

@@ -496,8 +496,8 @@ class SuperLU : public SuperLUBase<_MatrixType,SuperLU<_MatrixType> >
 
     SuperLU(const MatrixType& matrix) : Base()
     {
-      Base::init();
-      compute(matrix);
+      init();
+      Base::compute(matrix);
     }
 
     ~SuperLU()

@@ -833,7 +833,7 @@ class SuperILU : public SuperLUBase<_MatrixType,SuperILU<_MatrixType> >
     SuperILU(const MatrixType& matrix) : Base()
     {
       init();
-      compute(matrix);
+      Base::compute(matrix);
     }
 
     ~SuperILU()

@@ -33,7 +33,8 @@ EIGEN_MAKE_CWISE_BINARY_OP(min,internal::scalar_min_op)
   *
   * \sa max()
   */
-EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar>, const Derived, const ConstantReturnType>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar>, const Derived,
+                                        const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> >
 (min)(const Scalar &other) const
 {
   return (min)(Derived::PlainObject::Constant(rows(), cols(), other));

@@ -52,7 +53,8 @@ EIGEN_MAKE_CWISE_BINARY_OP(max,internal::scalar_max_op)
   *
   * \sa min()
   */
-EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar>, const Derived, const ConstantReturnType>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar>, const Derived,
+                                        const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> >
 (max)(const Scalar &other) const
 {
   return (max)(Derived::PlainObject::Constant(rows(), cols(), other));

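The widened return type only changes the expression template; the user-facing calls read the same as before, e.g. through the array API:

#include <Eigen/Dense>
using namespace Eigen;

int main()
{
  ArrayXf a = ArrayXf::Random(5);
  ArrayXf clamped = a.min(0.5f).max(-0.5f); // coefficient-wise clamp to [-0.5, 0.5]
  return 0;
}
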
@@ -200,3 +200,4 @@ EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator<=, std::less_equal)
 EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator>,  std::greater)
 EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator>=, std::greater_equal)
 
+

@@ -1,4 +1,4 @@
-Current Eigen Version 3.1.1 (05.Oct.2012)
+Current Eigen Version 3.1.2 (05.11.2012)
 To update the lib:
  - download Eigen
  - unzip it somewhere

@@ -195,24 +195,24 @@ template <typename MatrixType>
 int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(long double normTminusI)
 {
 #if   LDBL_MANT_DIG == 53         // double precision
-  const double maxNormForPade[] = { 1.6206284795015624e-2 /* degree = 3 */ , 5.3873532631381171e-2,
-            1.1352802267628681e-1, 1.8662860613541288e-1, 2.642960831111435e-1 };
+  const long double maxNormForPade[] = { 1.6206284795015624e-2L /* degree = 3 */ , 5.3873532631381171e-2L,
+            1.1352802267628681e-1L, 1.8662860613541288e-1L, 2.642960831111435e-1L };
 #elif LDBL_MANT_DIG <= 64         // extended precision
-  const double maxNormForPade[] = { 5.48256690357782863103e-3 /* degree = 3 */, 2.34559162387971167321e-2,
-            5.84603923897347449857e-2, 1.08486423756725170223e-1, 1.68385767881294446649e-1,
-            2.32777776523703892094e-1 };
+  const long double maxNormForPade[] = { 5.48256690357782863103e-3L /* degree = 3 */, 2.34559162387971167321e-2L,
+            5.84603923897347449857e-2L, 1.08486423756725170223e-1L, 1.68385767881294446649e-1L,
+            2.32777776523703892094e-1L };
 #elif LDBL_MANT_DIG <= 106        // double-double
-  const double maxNormForPade[] = { 8.58970550342939562202529664318890e-5 /* degree = 3 */,
-            9.34074328446359654039446552677759e-4, 4.26117194647672175773064114582860e-3,
-            1.21546224740281848743149666560464e-2, 2.61100544998339436713088248557444e-2,
-            4.66170074627052749243018566390567e-2, 7.32585144444135027565872014932387e-2,
-            1.05026503471351080481093652651105e-1 };
+  const long double maxNormForPade[] = { 8.58970550342939562202529664318890e-5L /* degree = 3 */,
+            9.34074328446359654039446552677759e-4L, 4.26117194647672175773064114582860e-3L,
+            1.21546224740281848743149666560464e-2L, 2.61100544998339436713088248557444e-2L,
+            4.66170074627052749243018566390567e-2L, 7.32585144444135027565872014932387e-2L,
+            1.05026503471351080481093652651105e-1L };
 #else                             // quadruple precision
-  const double maxNormForPade[] = { 4.7419931187193005048501568167858103e-5 /* degree = 3 */,
-            5.8853168473544560470387769480192666e-4, 2.9216120366601315391789493628113520e-3,
-            8.8415758124319434347116734705174308e-3, 1.9850836029449446668518049562565291e-2,
-            3.6688019729653446926585242192447447e-2, 5.9290962294020186998954055264528393e-2,
-            8.6998436081634343903250580992127677e-2, 1.1880960220216759245467951592883642e-1 };
+  const long double maxNormForPade[] = { 4.7419931187193005048501568167858103e-5L /* degree = 3 */,
+            5.8853168473544560470387769480192666e-4L, 2.9216120366601315391789493628113520e-3L,
+            8.8415758124319434347116734705174308e-3L, 1.9850836029449446668518049562565291e-2L,
+            3.6688019729653446926585242192447447e-2L, 5.9290962294020186998954055264528393e-2L,
+            8.6998436081634343903250580992127677e-2L, 1.1880960220216759245467951592883642e-1L };
 #endif
   for (int degree = 3; degree <= maxPadeDegree; ++degree)
     if (normTminusI <= maxNormForPade[degree - minPadeDegree])

@@ -423,8 +423,8 @@ void MatrixLogarithmAtomic<MatrixType>::computePade11(MatrixType& result, const
   * This class holds the argument to the matrix function until it is
   * assigned or evaluated for some other reason (so the argument
   * should not be changed in the meantime). It is the return type of
-  * matrixBase::matrixLogarithm() and most of the time this is the
-  * only way it is used.
+  * matrixBase::log() and most of the time this is the only way it
+  * is used.
   */
 template<typename Derived> class MatrixLogarithmReturnValue
 : public ReturnByValue<MatrixLogarithmReturnValue<Derived> >