Updated Eigen - from 3.0.2 to 3.0.5. Hopefully it is harmless.

This commit is contained in:
Paolo Cignoni 2012-06-14 13:31:05 +00:00
parent cb639a12d7
commit 52e7aa16f9
41 changed files with 265 additions and 166 deletions

View File

@ -167,7 +167,7 @@
 #include <intrin.h>
 #endif
-#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(EIGEN_NO_EXCEPTIONS)
+#if defined(_CPPUNWIND) || defined(__EXCEPTIONS)
 #define EIGEN_EXCEPTIONS
 #endif
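The new, simpler condition keys only on the compiler's own exception macros. A tiny standalone probe (illustrative, not part of this diff) shows what it detects:

#include <cstdio>

int main()
{
#if defined(_CPPUNWIND) || defined(__EXCEPTIONS)
    std::printf("exceptions on: EIGEN_EXCEPTIONS would be defined\n"); // MSVC / GCC-family
#else
    std::printf("exceptions off (e.g. compiled with -fno-exceptions)\n");
#endif
}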

View File

@ -13,9 +13,9 @@ namespace Eigen {
  *
  *
  *
- * This module provides SVD decomposition for (currently) real matrices.
+ * This module provides SVD decomposition for matrices (both real and complex).
  * This decomposition is accessible via the following MatrixBase method:
- *  - MatrixBase::svd()
+ *  - MatrixBase::jacobiSvd()
  *
  * \code
  * #include <Eigen/SVD>

View File

@ -158,10 +158,19 @@ template<typename _MatrixType, int _UpLo> class LDLT
     }
     /** \returns a solution x of \f$ A x = b \f$ using the current decomposition of A.
+      *
+      * This function also supports in-place solves using the syntax <tt>x = decompositionObject.solve(x)</tt> .
       *
       * \note_about_checking_solutions
       *
-      * \sa solveInPlace(), MatrixBase::ldlt()
+      * More precisely, this method solves \f$ A x = b \f$ using the decomposition \f$ A = P^T L D L^* P \f$
+      * by solving the systems \f$ P^T y_1 = b \f$, \f$ L y_2 = y_1 \f$, \f$ D y_3 = y_2 \f$,
+      * \f$ L^* y_4 = y_3 \f$ and \f$ P x = y_4 \f$ in succession. If the matrix \f$ A \f$ is singular, then
+      * \f$ D \f$ will also be singular (all the other matrices are invertible). In that case, the
+      * least-square solution of \f$ D y_3 = y_2 \f$ is computed. This does not mean that this function
+      * computes the least-square solution of \f$ A x = b \f$ if \f$ A \f$ is singular.
+      *
+      * \sa MatrixBase::ldlt()
       */
     template<typename Rhs>
     inline const internal::solve_retval<LDLT, Rhs>
@ -376,7 +385,21 @@ struct solve_retval<LDLT<_MatrixType,_UpLo>, Rhs>
   dec().matrixL().solveInPlace(dst);
   // dst = D^-1 (L^-1 P b)
-  dst = dec().vectorD().asDiagonal().inverse() * dst;
+  // more precisely, use pseudo-inverse of D (see bug 241)
+  using std::abs;
+  using std::max;
+  typedef typename LDLTType::MatrixType MatrixType;
+  typedef typename LDLTType::Scalar Scalar;
+  typedef typename LDLTType::RealScalar RealScalar;
+  const Diagonal<const MatrixType> vectorD = dec().vectorD();
+  RealScalar tolerance = (max)(vectorD.array().abs().maxCoeff() * NumTraits<Scalar>::epsilon(),
+                               RealScalar(1) / NumTraits<RealScalar>::highest()); // motivated by LAPACK's xGELSS
+  for (Index i = 0; i < vectorD.size(); ++i) {
+    if(abs(vectorD(i)) > tolerance)
+      dst.row(i) /= vectorD(i);
+    else
+      dst.row(i).setZero();
+  }
   // dst = L^-T (D^-1 L^-1 P b)
   dec().matrixU().solveInPlace(dst);
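The loop above replaces a plain inverse of D with a thresholded pseudo-inverse. A standalone sketch of the same rule (plain doubles; names are local stand-ins, not Eigen's):

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <limits>
#include <vector>

int main()
{
    std::vector<double> D = {4.0, 1e-300, -2.0}; // diagonal of D; middle entry is ~singular
    std::vector<double> y = {8.0, 5.0, 6.0};     // the vector y_2 from the comment above

    double maxAbs = 0.0;
    for (double d : D) maxAbs = (std::max)(maxAbs, std::fabs(d));
    // same shape as the tolerance in the diff: epsilon-scaled, floored away from zero
    const double tol = (std::max)(maxAbs * std::numeric_limits<double>::epsilon(),
                                  1.0 / std::numeric_limits<double>::max());

    for (std::size_t i = 0; i < D.size(); ++i)
        y[i] = (std::fabs(D[i]) > tol) ? y[i] / D[i] : 0.0; // zero out null directions

    for (double v : y) std::printf("%g\n", v); // prints 2, 0, -3
}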

View File

@ -68,10 +68,8 @@ class Array
     friend struct internal::conservative_resize_like_impl;
     using Base::m_storage;
   public:
-    enum { NeedsToAlign = (!(Options&DontAlign))
-                          && SizeAtCompileTime!=Dynamic && ((static_cast<int>(sizeof(Scalar))*SizeAtCompileTime)%16)==0 };
-    EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
     using Base::base;
     using Base::coeff;

View File

@ -94,7 +94,7 @@ struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel, HasDirectAccess>
     MaskPacketAccessBit = (InnerSize == Dynamic || (InnerSize % packet_traits<Scalar>::size) == 0)
                           && (InnerStrideAtCompileTime == 1)
                           ? PacketAccessBit : 0,
-    MaskAlignedBit = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && ((OuterStrideAtCompileTime % packet_traits<Scalar>::size) == 0)) ? AlignedBit : 0,
+    MaskAlignedBit = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % 16) == 0)) ? AlignedBit : 0,
     FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0,
     FlagsLvalueBit = is_lvalue<XprType>::value ? LvalueBit : 0,
     FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0,

View File

@ -94,7 +94,7 @@ struct isMuchSmallerThan_scalar_selector<Derived, true>
  *
  * \note The fuzzy compares are done multiplicatively. Two vectors \f$ v \f$ and \f$ w \f$
  * are considered to be approximately equal within precision \f$ p \f$ if
- * \f[ \Vert v - w \Vert \leqslant p\,\(min)(\Vert v\Vert, \Vert w\Vert). \f]
+ * \f[ \Vert v - w \Vert \leqslant p\,\min(\Vert v\Vert, \Vert w\Vert). \f]
  * For matrices, the comparison is done using the Hilbert-Schmidt norm (aka Frobenius norm
  * L2 norm).
  *

View File

@ -34,7 +34,7 @@
  * \tparam PlainObjectType the equivalent matrix type of the mapped data
  * \tparam MapOptions specifies whether the pointer is \c #Aligned, or \c #Unaligned.
  *                    The default is \c #Unaligned.
- * \tparam StrideType optionnally specifies strides. By default, Map assumes the memory layout
+ * \tparam StrideType optionally specifies strides. By default, Map assumes the memory layout
  *                    of an ordinary, contiguous array. This can be overridden by specifying strides.
  *                    The type passed here must be a specialization of the Stride template, see examples below.
  *
@ -72,9 +72,9 @@
  * Example: \include Map_placement_new.cpp
  * Output: \verbinclude Map_placement_new.out
  *
- * This class is the return type of Matrix::Map() but can also be used directly.
+ * This class is the return type of PlainObjectBase::Map() but can also be used directly.
  *
- * \sa Matrix::Map(), \ref TopicStorageOrders
+ * \sa PlainObjectBase::Map(), \ref TopicStorageOrders
  */
 namespace internal {
@ -102,7 +102,7 @@ struct traits<Map<PlainObjectType, MapOptions, StrideType> >
              || HasNoOuterStride
              || ( OuterStrideAtCompileTime!=Dynamic
                   && ((static_cast<int>(sizeof(Scalar))*OuterStrideAtCompileTime)%16)==0 ) ),
-    Flags0 = TraitsBase::Flags,
+    Flags0 = TraitsBase::Flags & (~NestByRefBit),
     Flags1 = IsAligned ? (int(Flags0) | AlignedBit) : (int(Flags0) & ~AlignedBit),
     Flags2 = (bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime))
            ? int(Flags1) : int(Flags1 & ~LinearAccessBit),
@ -120,7 +120,6 @@ template<typename PlainObjectType, int MapOptions, typename StrideType> class Ma
   public:
     typedef MapBase<Map> Base;
     EIGEN_DENSE_PUBLIC_INTERFACE(Map)
     typedef typename Base::PointerType PointerType;
@ -181,7 +180,6 @@ template<typename PlainObjectType, int MapOptions, typename StrideType> class Ma
       PlainObjectType::Base::_check_template_params();
     }
     EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
   protected:

View File

@ -170,7 +170,7 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
     EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(internal::traits<Derived>::Flags&PacketAccessBit,
                         internal::inner_stride_at_compile_time<Derived>::ret==1),
                         PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
-    eigen_assert(EIGEN_IMPLIES(internal::traits<Derived>::Flags&AlignedBit, (size_t(m_data) % (sizeof(Scalar)*internal::packet_traits<Scalar>::size)) == 0)
+    eigen_assert(EIGEN_IMPLIES(internal::traits<Derived>::Flags&AlignedBit, (size_t(m_data) % 16) == 0)
                  && "data is not aligned");
   }

View File

@ -153,10 +153,6 @@ class Matrix
     typedef typename Base::PlainObject PlainObject;
-    enum { NeedsToAlign = (!(Options&DontAlign))
-                          && SizeAtCompileTime!=Dynamic && ((static_cast<int>(sizeof(Scalar))*SizeAtCompileTime)%16)==0 };
-    EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
     using Base::base;
     using Base::coeffRef;

View File

@ -250,7 +250,8 @@ template<typename Derived> class MatrixBase
     // huuuge hack. make Eigen2's matrix.part<Diagonal>() work in eigen3. Problem: Diagonal is now a class template instead
     // of an integer constant. Solution: overload the part() method template wrt template parameters list.
-    template<template<typename T, int n> class U>
+    // Note: replacing next line by "template<template<typename T, int n> class U>" produces a mysterious error C2082 in MSVC.
+    template<template<typename, int> class U>
     const DiagonalWrapper<ConstDiagonalReturnType> part() const
     { return diagonal().asDiagonal(); }
     #endif // EIGEN2_SUPPORT

View File

@ -34,6 +34,19 @@
 namespace internal {
+template<typename Index>
+EIGEN_ALWAYS_INLINE void check_rows_cols_for_overflow(Index rows, Index cols)
+{
+  // http://hg.mozilla.org/mozilla-central/file/6c8a909977d3/xpcom/ds/CheckedInt.h#l242
+  // we assume Index is signed
+  Index max_index = (size_t(1) << (8 * sizeof(Index) - 1)) - 1; // assume Index is signed
+  bool error = (rows < 0 || cols < 0) ? true
+             : (rows == 0 || cols == 0) ? false
+             : (rows > max_index / cols);
+  if (error)
+    throw_std_bad_alloc();
+}
 template <typename Derived, typename OtherDerived = Derived, bool IsVector = static_cast<bool>(Derived::IsVectorAtCompileTime)> struct conservative_resize_like_impl;
 template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers> struct matrix_swap_impl;
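A toy driver (not part of Eigen) showing what the division-based test above rejects, assuming a 32-bit signed index type:

#include <cstddef>
#include <cstdio>

typedef int Index; // assumption: Index is a 32-bit signed type here

static bool rows_cols_overflow(Index rows, Index cols)
{
    const Index max_index = (std::size_t(1) << (8 * sizeof(Index) - 1)) - 1; // 2^31 - 1
    if (rows < 0 || cols < 0) return true;
    if (rows == 0 || cols == 0) return false;
    return rows > max_index / cols; // rows*cols > max_index, tested without overflowing
}

int main()
{
    std::printf("%d\n", rows_cols_overflow(46341, 46341)); // 1: 46341^2 exceeds 2^31-1
    std::printf("%d\n", rows_cols_overflow(46340, 46340)); // 0: 46340^2 still fits
}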
@ -85,13 +98,11 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
     template<typename StrideType> struct StridedAlignedMapType { typedef Eigen::Map<Derived, Aligned, StrideType> type; };
     template<typename StrideType> struct StridedConstAlignedMapType { typedef Eigen::Map<const Derived, Aligned, StrideType> type; };
   protected:
     DenseStorage<Scalar, Base::MaxSizeAtCompileTime, Base::RowsAtCompileTime, Base::ColsAtCompileTime, Options> m_storage;
   public:
-    enum { NeedsToAlign = (!(Options&DontAlign))
-           && SizeAtCompileTime!=Dynamic && ((static_cast<int>(sizeof(Scalar))*SizeAtCompileTime)%16)==0 };
+    enum { NeedsToAlign = SizeAtCompileTime != Dynamic && (internal::traits<Derived>::Flags & AlignedBit) != 0 };
     EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
     Base& base() { return *static_cast<Base*>(this); }
@ -200,11 +211,13 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
     EIGEN_STRONG_INLINE void resize(Index rows, Index cols)
     {
       #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
+        internal::check_rows_cols_for_overflow(rows, cols);
         Index size = rows*cols;
         bool size_changed = size != this->size();
         m_storage.resize(size, rows, cols);
         if(size_changed) EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
       #else
+        internal::check_rows_cols_for_overflow(rows, cols);
         m_storage.resize(rows*cols, rows, cols);
       #endif
     }
@ -273,6 +286,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
     EIGEN_STRONG_INLINE void resizeLike(const EigenBase<OtherDerived>& _other)
     {
       const OtherDerived& other = _other.derived();
+      internal::check_rows_cols_for_overflow(other.rows(), other.cols());
       const Index othersize = other.rows()*other.cols();
       if(RowsAtCompileTime == 1)
       {
@ -417,6 +431,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
       : m_storage(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
     {
       _check_template_params();
+      internal::check_rows_cols_for_overflow(other.derived().rows(), other.derived().cols());
       Base::operator=(other.derived());
     }
@ -425,9 +440,6 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
      * while the AlignedMap() functions return aligned Map objects and thus should be called only with 16-byte-aligned
      * \a data pointers.
      *
-     * These methods do not allow to specify strides. If you need to specify strides, you have to
-     * use the Map class directly.
-     *
      * \see class Map
      */
     //@{
@ -584,6 +596,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
     {
       eigen_assert(rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows)
                    && cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols));
+      internal::check_rows_cols_for_overflow(rows, cols);
       m_storage.resize(rows*cols,rows,cols);
       EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
     }
@ -641,6 +654,7 @@ struct internal::conservative_resize_like_impl
     if ( ( Derived::IsRowMajor && _this.cols() == cols) ||  // row-major and we change only the number of rows
          (!Derived::IsRowMajor && _this.rows() == rows) )   // column-major and we change only the number of columns
     {
+      internal::check_rows_cols_for_overflow(rows, cols);
       _this.derived().m_storage.conservativeResize(rows*cols,rows,cols);
     }
     else
else else

View File

@ -256,16 +256,16 @@ class ScaledProduct
       : Base(prod.lhs(),prod.rhs()), m_prod(prod), m_alpha(x) {}
     template<typename Dest>
-    inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst,m_alpha); }
+    inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst, Scalar(1)); }
     template<typename Dest>
-    inline void addTo(Dest& dst) const { scaleAndAddTo(dst,m_alpha); }
+    inline void addTo(Dest& dst) const { scaleAndAddTo(dst, Scalar(1)); }
     template<typename Dest>
-    inline void subTo(Dest& dst) const { scaleAndAddTo(dst,-m_alpha); }
+    inline void subTo(Dest& dst) const { scaleAndAddTo(dst, Scalar(-1)); }
     template<typename Dest>
-    inline void scaleAndAddTo(Dest& dst,Scalar alpha) const { m_prod.derived().scaleAndAddTo(dst,alpha); }
+    inline void scaleAndAddTo(Dest& dst,Scalar alpha) const { m_prod.derived().scaleAndAddTo(dst,alpha * m_alpha); }
     const Scalar& alpha() const { return m_alpha; }
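The point of the change: m_alpha is now folded into scaleAndAddTo exactly once, so nested scalings such as s1*(s2*prod) compose instead of being double-applied. A plain-scalar sketch (stand-in types, not Eigen's expression classes):

#include <cstdio>

struct Prod {
    double value;
    void scaleAndAddTo(double& dst, double alpha) const { dst += alpha * value; }
};

struct Scaled {
    const Prod& prod;
    double alpha; // plays the role of m_alpha
    void evalTo(double& dst) const { dst = 0; scaleAndAddTo(dst, 1.0); }                    // Scalar(1)
    void scaleAndAddTo(double& dst, double a) const { prod.scaleAndAddTo(dst, a * alpha); } // alpha * m_alpha
};

int main()
{
    Prod p = {3.0};
    Scaled s = {p, 2.0};
    double d;
    s.evalTo(d);
    std::printf("%g\n", d); // 6: the factor 2 is applied exactly once
}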

View File

@ -180,7 +180,7 @@ void TriangularView<MatrixType,Mode>::solveInPlace(const MatrixBase<OtherDerived
   eigen_assert(cols() == rows());
   eigen_assert( (Side==OnTheLeft && cols() == other.rows()) || (Side==OnTheRight && cols() == other.cols()) );
   eigen_assert(!(Mode & ZeroDiag));
-  eigen_assert(Mode & (Upper|Lower));
+  eigen_assert((Mode & (Upper|Lower)) != 0);
   enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit && OtherDerived::IsVectorAtCompileTime };
   typedef typename internal::conditional<copy,

View File

@ -27,8 +27,8 @@
 namespace internal {
-static uint32x4_t p4ui_CONJ_XOR = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 };
-static uint32x2_t p2ui_CONJ_XOR = { 0x00000000, 0x80000000 };
+static uint32x4_t p4ui_CONJ_XOR = EIGEN_INIT_NEON_PACKET4(0x00000000, 0x80000000, 0x00000000, 0x80000000);
+static uint32x2_t p2ui_CONJ_XOR = EIGEN_INIT_NEON_PACKET2(0x00000000, 0x80000000);
 //---------- float ----------
 struct Packet2cf

View File

@ -52,6 +52,16 @@ typedef uint32x4_t Packet4ui;
 #define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
   const Packet4i p4i_##NAME = pset1<Packet4i>(X)
+#if defined(__llvm__) && !defined(__clang__)
+  //Special treatment for Apple's llvm-gcc, its NEON packet types are unions
+  #define EIGEN_INIT_NEON_PACKET2(X, Y)       {{X, Y}}
+  #define EIGEN_INIT_NEON_PACKET4(X, Y, Z, W) {{X, Y, Z, W}}
+#else
+  //Default initializer for packets
+  #define EIGEN_INIT_NEON_PACKET2(X, Y)       {X, Y}
+  #define EIGEN_INIT_NEON_PACKET4(X, Y, Z, W) {X, Y, Z, W}
+#endif
 #ifndef __pld
 #define __pld(x) asm volatile ( "   pld [%[addr]]\n" :: [addr] "r" (x) : "cc" );
 #endif
@ -84,7 +94,7 @@ template<> struct packet_traits<int> : default_packet_traits
   };
 };
-#if EIGEN_GNUC_AT_MOST(4,4)
+#if EIGEN_GNUC_AT_MOST(4,4) && !defined(__llvm__)
 // workaround gcc 4.2, 4.3 and 4.4 compilation issue
 EIGEN_STRONG_INLINE float32x4_t vld1q_f32(const float* x) { return ::vld1q_f32((const float32_t*)x); }
 EIGEN_STRONG_INLINE float32x2_t vld1_f32 (const float* x) { return ::vld1_f32 ((const float32_t*)x); }
@ -100,12 +110,12 @@ template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from) {
 template<> EIGEN_STRONG_INLINE Packet4f plset<float>(const float& a)
 {
-  Packet4f countdown = { 0, 1, 2, 3 };
+  Packet4f countdown = EIGEN_INIT_NEON_PACKET4(0, 1, 2, 3);
   return vaddq_f32(pset1<Packet4f>(a), countdown);
 }
 template<> EIGEN_STRONG_INLINE Packet4i plset<int>(const int& a)
 {
-  Packet4i countdown = { 0, 1, 2, 3 };
+  Packet4i countdown = EIGEN_INIT_NEON_PACKET4(0, 1, 2, 3);
   return vaddq_s32(pset1<Packet4i>(a), countdown);
 }
@ -395,25 +405,29 @@ template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
   return s[0];
 }
-template<int Offset>
-struct palign_impl<Offset,Packet4f>
-{
-  EIGEN_STRONG_INLINE static void run(Packet4f& first, const Packet4f& second)
-  {
-    if (Offset!=0)
-      first = vextq_f32(first, second, Offset);
-  }
-};
-template<int Offset>
-struct palign_impl<Offset,Packet4i>
-{
-  EIGEN_STRONG_INLINE static void run(Packet4i& first, const Packet4i& second)
-  {
-    if (Offset!=0)
-      first = vextq_s32(first, second, Offset);
-  }
-};
+// this PALIGN_NEON business is to work around a bug in LLVM Clang 3.0 causing incorrect compilation errors,
+// see bug 347 and this LLVM bug: http://llvm.org/bugs/show_bug.cgi?id=11074
+#define PALIGN_NEON(Offset,Type,Command) \
+template<>\
+struct palign_impl<Offset,Type>\
+{\
+    EIGEN_STRONG_INLINE static void run(Type& first, const Type& second)\
+    {\
+        if (Offset!=0)\
+            first = Command(first, second, Offset);\
+    }\
+};
+PALIGN_NEON(0,Packet4f,vextq_f32)
+PALIGN_NEON(1,Packet4f,vextq_f32)
+PALIGN_NEON(2,Packet4f,vextq_f32)
+PALIGN_NEON(3,Packet4f,vextq_f32)
+PALIGN_NEON(0,Packet4i,vextq_s32)
+PALIGN_NEON(1,Packet4i,vextq_s32)
+PALIGN_NEON(2,Packet4i,vextq_s32)
+PALIGN_NEON(3,Packet4i,vextq_s32)
+#undef PALIGN_NEON
 } // end namespace internal

View File

@ -30,19 +30,16 @@ namespace internal {
 template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs=false, bool _ConjRhs=false>
 class gebp_traits;
+inline std::ptrdiff_t manage_caching_sizes_second_if_negative(std::ptrdiff_t a, std::ptrdiff_t b)
+{
+  return a<=0 ? b : a;
+}
 /** \internal */
 inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1=0, std::ptrdiff_t* l2=0)
 {
-  static std::ptrdiff_t m_l1CacheSize = 0;
-  static std::ptrdiff_t m_l2CacheSize = 0;
-  if(m_l1CacheSize==0)
-  {
-    m_l1CacheSize = queryL1CacheSize();
-    m_l2CacheSize = queryTopLevelCacheSize();
-    if(m_l1CacheSize<=0) m_l1CacheSize = 8 * 1024;
-    if(m_l2CacheSize<=0) m_l2CacheSize = 1 * 1024 * 1024;
-  }
+  static std::ptrdiff_t m_l1CacheSize = manage_caching_sizes_second_if_negative(queryL1CacheSize(),8 * 1024);
+  static std::ptrdiff_t m_l2CacheSize = manage_caching_sizes_second_if_negative(queryTopLevelCacheSize(),1*1024*1024);
   if(action==SetAction)
   {
@ -118,14 +115,14 @@ inline void computeProductBlockingSizes(std::ptrdiff_t& k, std::ptrdiff_t& m, st
 // FIXME (a bit overkill maybe ?)
 template<typename CJ, typename A, typename B, typename C, typename T> struct gebp_madd_selector {
-  EIGEN_STRONG_INLINE EIGEN_ALWAYS_INLINE_ATTRIB static void run(const CJ& cj, A& a, B& b, C& c, T& /*t*/)
+  EIGEN_ALWAYS_INLINE static void run(const CJ& cj, A& a, B& b, C& c, T& /*t*/)
   {
     c = cj.pmadd(a,b,c);
   }
 };
 template<typename CJ, typename T> struct gebp_madd_selector<CJ,T,T,T,T> {
-  EIGEN_STRONG_INLINE EIGEN_ALWAYS_INLINE_ATTRIB static void run(const CJ& cj, T& a, T& b, T& c, T& t)
+  EIGEN_ALWAYS_INLINE static void run(const CJ& cj, T& a, T& b, T& c, T& t)
   {
     t = b; t = cj.pmul(a,t); c = padd(c,t);
   }
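The cache-size rewrite above relies on a function-local static being initialized exactly once, on first call, which lets the lazy-init branch disappear. A minimal sketch with stand-in names (not Eigen's):

#include <cstdio>

static long queryL1Stub() { return -1; }                     // stand-in: query failed
static long secondIfNegative(long a, long b) { return a <= 0 ? b : a; }

static long l1CacheSize()
{
    static long l1 = secondIfNegative(queryL1Stub(), 8 * 1024); // initialized once
    return l1;
}

int main() { std::printf("%ld\n", l1CacheSize()); } // 8192: the fallback value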

View File

@ -1,3 +1,4 @@
 // This file is part of Eigen, a lightweight C++ template library
 // for linear algebra.
 //
@ -28,7 +29,7 @@
 #define EIGEN_WORLD_VERSION 3
 #define EIGEN_MAJOR_VERSION 0
-#define EIGEN_MINOR_VERSION 2
+#define EIGEN_MINOR_VERSION 5
 #define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
                                       (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
@ -45,7 +46,7 @@
 #define EIGEN_GNUC_AT_MOST(x,y) 0
 #endif
-#if EIGEN_GNUC_AT_MOST(4,3)
+#if EIGEN_GNUC_AT_MOST(4,3) && !defined(__clang__)
 // see bug 89
 #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 0
 #else
@ -130,31 +131,34 @@
 #define EIGEN_MAKESTRING2(a) #a
 #define EIGEN_MAKESTRING(a) EIGEN_MAKESTRING2(a)
-// EIGEN_ALWAYS_INLINE_ATTRIB should be use in the declaration of function
-// which should be inlined even in debug mode.
-// FIXME with the always_inline attribute,
-// gcc 3.4.x reports the following compilation error:
-//   Eval.h:91: sorry, unimplemented: inlining failed in call to 'const Eigen::Eval<Derived> Eigen::MatrixBase<Scalar, Derived>::eval() const'
-//    : function body not available
-#if EIGEN_GNUC_AT_LEAST(4,0)
-#define EIGEN_ALWAYS_INLINE_ATTRIB __attribute__((always_inline))
-#else
-#define EIGEN_ALWAYS_INLINE_ATTRIB
-#endif
 #if EIGEN_GNUC_AT_LEAST(4,1) && !defined(__clang__) && !defined(__INTEL_COMPILER)
 #define EIGEN_FLATTEN_ATTRIB __attribute__((flatten))
 #else
 #define EIGEN_FLATTEN_ATTRIB
 #endif
-// EIGEN_FORCE_INLINE means "inline as much as possible"
+// EIGEN_STRONG_INLINE is a stronger version of the inline, using __forceinline on MSVC,
+// but it still doesn't use GCC's always_inline. This is useful in (common) situations where MSVC needs forceinline
+// but GCC is still doing fine with just inline.
 #if (defined _MSC_VER) || (defined __INTEL_COMPILER)
 #define EIGEN_STRONG_INLINE __forceinline
 #else
 #define EIGEN_STRONG_INLINE inline
 #endif
+// EIGEN_ALWAYS_INLINE is the strongest, it has the effect of making the function inline and adding every possible
+// attribute to maximize inlining. This should only be used when really necessary: in particular,
+// it uses __attribute__((always_inline)) on GCC, which most of the time is useless and can severely harm compile times.
+// FIXME with the always_inline attribute,
+// gcc 3.4.x reports the following compilation error:
+//   Eval.h:91: sorry, unimplemented: inlining failed in call to 'const Eigen::Eval<Derived> Eigen::MatrixBase<Scalar, Derived>::eval() const'
+//    : function body not available
+#if EIGEN_GNUC_AT_LEAST(4,0)
+#define EIGEN_ALWAYS_INLINE __attribute__((always_inline)) inline
+#else
+#define EIGEN_ALWAYS_INLINE EIGEN_STRONG_INLINE
+#endif
 #if (defined __GNUC__)
 #define EIGEN_DONT_INLINE __attribute__((noinline))
 #elif (defined _MSC_VER)
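The net effect of the macro reshuffle: EIGEN_STRONG_INLINE stays __forceinline/inline, while the new EIGEN_ALWAYS_INLINE adds GCC's always_inline attribute on top. A standalone illustration with local stand-in names (not Eigen's macros):

#include <cstdio>

#if defined(__GNUC__)
#define MY_ALWAYS_INLINE __attribute__((always_inline)) inline // inlined even at -O0
#elif defined(_MSC_VER)
#define MY_ALWAYS_INLINE __forceinline
#else
#define MY_ALWAYS_INLINE inline
#endif

MY_ALWAYS_INLINE int twice(int x) { return 2 * x; }

int main() { std::printf("%d\n", twice(21)); } // 42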

View File

@ -82,6 +82,16 @@
 namespace internal {
+inline void throw_std_bad_alloc()
+{
+  #ifdef EIGEN_EXCEPTIONS
+    throw std::bad_alloc();
+  #else
+    std::size_t huge = -1;
+    new int[huge];
+  #endif
+}
 /*****************************************************************************
 ***  Implementation of handmade aligned functions                         ***
 *****************************************************************************/
@ -192,7 +202,7 @@ inline void check_that_malloc_is_allowed()
 #endif
 /** \internal Allocates \a size bytes. The returned pointer is guaranteed to have 16 bytes alignment.
-  * On allocation error, the returned pointer is null, and if exceptions are enabled then a std::bad_alloc is thrown.
+  * On allocation error, the returned pointer is null, and std::bad_alloc is thrown.
   */
 inline void* aligned_malloc(size_t size)
 {
@ -213,10 +223,9 @@ inline void* aligned_malloc(size_t size)
     result = handmade_aligned_malloc(size);
   #endif
-  #ifdef EIGEN_EXCEPTIONS
-    if(result == 0)
-      throw std::bad_alloc();
-  #endif
+  if(!result && size)
+    throw_std_bad_alloc();
   return result;
 }
@ -241,7 +250,7 @@ inline void aligned_free(void *ptr)
 /**
   * \internal
   * \brief Reallocates an aligned block of memory.
-  * \throws std::bad_alloc if EIGEN_EXCEPTIONS are defined.
+  * \throws std::bad_alloc on allocation failure
   **/
 inline void* aligned_realloc(void *ptr, size_t new_size, size_t old_size)
 {
@ -269,10 +278,9 @@ inline void* aligned_realloc(void *ptr, size_t new_size, size_t old_size)
     result = handmade_aligned_realloc(ptr,new_size,old_size);
   #endif
-  #ifdef EIGEN_EXCEPTIONS
-    if (result==0 && new_size!=0)
-      throw std::bad_alloc();
-  #endif
+  if (!result && new_size)
+    throw_std_bad_alloc();
   return result;
 }
@ -281,7 +289,7 @@ inline void* aligned_realloc(void *ptr, size_t new_size, size_t old_size)
 *****************************************************************************/
 /** \internal Allocates \a size bytes. If Align is true, then the returned ptr is 16-byte-aligned.
-  * On allocation error, the returned pointer is null, and if exceptions are enabled then a std::bad_alloc is thrown.
+  * On allocation error, the returned pointer is null, and a std::bad_alloc is thrown.
   */
 template<bool Align> inline void* conditional_aligned_malloc(size_t size)
 {
@ -293,9 +301,8 @@ template<> inline void* conditional_aligned_malloc<false>(size_t size)
   check_that_malloc_is_allowed();
   void *result = std::malloc(size);
-  #ifdef EIGEN_EXCEPTIONS
-    if(!result) throw std::bad_alloc();
-  #endif
+  if(!result && size)
+    throw_std_bad_alloc();
   return result;
 }
@ -347,18 +354,27 @@ template<typename T> inline void destruct_elements_of_array(T *ptr, size_t size)
 ***   Implementation of aligned new/delete-like functions                 ***
 *****************************************************************************/
+template<typename T>
+EIGEN_ALWAYS_INLINE void check_size_for_overflow(size_t size)
+{
+  if(size > size_t(-1) / sizeof(T))
+    throw_std_bad_alloc();
+}
 /** \internal Allocates \a size objects of type T. The returned pointer is guaranteed to have 16 bytes alignment.
-  * On allocation error, the returned pointer is undefined, but if exceptions are enabled then a std::bad_alloc is thrown.
+  * On allocation error, the returned pointer is undefined, but a std::bad_alloc is thrown.
   * The default constructor of T is called.
   */
 template<typename T> inline T* aligned_new(size_t size)
 {
+  check_size_for_overflow<T>(size);
   T *result = reinterpret_cast<T*>(aligned_malloc(sizeof(T)*size));
   return construct_elements_of_array(result, size);
 }
 template<typename T, bool Align> inline T* conditional_aligned_new(size_t size)
 {
+  check_size_for_overflow<T>(size);
   T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
   return construct_elements_of_array(result, size);
 }
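The guard rejects any element count whose byte size sizeof(T)*size would wrap around size_t. A standalone sketch of the same test:

#include <cstddef>
#include <cstdio>

template<typename T>
static bool size_overflows(std::size_t size)
{
    return size > std::size_t(-1) / sizeof(T); // size_t(-1) is SIZE_MAX
}

int main()
{
    std::printf("%d\n", size_overflows<double>(std::size_t(-1) / 4)); // 1: 8*count wraps
    std::printf("%d\n", size_overflows<double>(1024));                // 0: fits comfortably
}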
@ -383,6 +399,8 @@ template<typename T, bool Align> inline void conditional_aligned_delete(T *ptr,
 template<typename T, bool Align> inline T* conditional_aligned_realloc_new(T* pts, size_t new_size, size_t old_size)
 {
+  check_size_for_overflow<T>(new_size);
+  check_size_for_overflow<T>(old_size);
   if(new_size < old_size)
     destruct_elements_of_array(pts+new_size, old_size-new_size);
   T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
@ -394,6 +412,7 @@ template<typename T, bool Align> inline T* conditional_aligned_realloc_new(T* pt
 template<typename T, bool Align> inline T* conditional_aligned_new_auto(size_t size)
 {
+  check_size_for_overflow<T>(size);
   T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
   if(NumTraits<T>::RequireInitialization)
     construct_elements_of_array(result, size);
@ -402,6 +421,8 @@ template<typename T, bool Align> inline T* conditional_aligned_new_auto(size_t s
 template<typename T, bool Align> inline T* conditional_aligned_realloc_new_auto(T* pts, size_t new_size, size_t old_size)
 {
+  check_size_for_overflow<T>(new_size);
+  check_size_for_overflow<T>(old_size);
   if(NumTraits<T>::RequireInitialization && (new_size < old_size))
     destruct_elements_of_array(pts+new_size, old_size-new_size);
   T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
@ -536,6 +557,7 @@ template<typename T> class aligned_stack_memory_handler
 #endif
 #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
+  Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \
   TYPE* NAME = (BUFFER)!=0 ? (BUFFER) \
              : reinterpret_cast<TYPE*>( \
                  (sizeof(TYPE)*SIZE<=EIGEN_STACK_ALLOCATION_LIMIT) ? EIGEN_ALIGNED_ALLOCA(sizeof(TYPE)*SIZE) \
@ -545,6 +567,7 @@ template<typename T> class aligned_stack_memory_handler
 #else
 #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
+  Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \
   TYPE* NAME = (BUFFER)!=0 ? BUFFER : reinterpret_cast<TYPE*>(Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE)); \
   Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,true)
@ -669,6 +692,7 @@ public:
   pointer allocate( size_type num, const void* hint = 0 )
   {
     EIGEN_UNUSED_VARIABLE(hint);
+    internal::check_size_for_overflow<T>(num);
     return static_cast<pointer>( internal::aligned_malloc( num * sizeof(T) ) );
   }

View File

@ -125,10 +125,9 @@ class compute_matrix_flags
     aligned_bit =
     (
       ((Options&DontAlign)==0)
-      && packet_traits<Scalar>::Vectorizable
       && (
 #if EIGEN_ALIGN_STATICALLY
-          ((!is_dynamic_size_storage) && (((MaxCols*MaxRows) % packet_traits<Scalar>::size) == 0))
+          ((!is_dynamic_size_storage) && (((MaxCols*MaxRows*int(sizeof(Scalar))) % 16) == 0))
 #else
           0
 #endif

View File

@ -51,19 +51,19 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==
   { if (AmbientDimAtCompileTime!=Dynamic) setNull(); }
   /** Constructs a null box with \a _dim the dimension of the ambient space. */
-  inline explicit AlignedBox(int _dim) : m_(min)(_dim), m_(max)(_dim)
+  inline explicit AlignedBox(int _dim) : m_min(_dim), m_max(_dim)
   { setNull(); }
   /** Constructs a box with extremities \a _min and \a _max. */
-  inline AlignedBox(const VectorType& _min, const VectorType& _max) : m_(min)(_min), m_(max)(_max) {}
+  inline AlignedBox(const VectorType& _min, const VectorType& _max) : m_min(_min), m_max(_max) {}
   /** Constructs a box containing a single point \a p. */
-  inline explicit AlignedBox(const VectorType& p) : m_(min)(p), m_(max)(p) {}
+  inline explicit AlignedBox(const VectorType& p) : m_min(p), m_max(p) {}
   ~AlignedBox() {}
   /** \returns the dimension in which the box holds */
-  inline int dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size()-1 : AmbientDimAtCompileTime; }
+  inline int dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size()-1 : int(AmbientDimAtCompileTime); }
   /** \returns true if the box is null, i.e, empty. */
   inline bool isNull() const { return (m_min.cwise() > m_max).any(); }
@ -71,8 +71,8 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==
   /** Makes \c *this a null/empty box. */
   inline void setNull()
   {
-    m_min.setConstant( std::numeric_limits<Scalar>::(max)());
-    m_max.setConstant(-std::numeric_limits<Scalar>::(max)());
+    m_min.setConstant( (std::numeric_limits<Scalar>::max)());
+    m_max.setConstant(-(std::numeric_limits<Scalar>::max)());
   }
   /** \returns the minimal corner */
@ -90,19 +90,19 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==
   /** \returns true if the box \a b is entirely inside the box \c *this. */
   inline bool contains(const AlignedBox& b) const
-  { return (m_min.cwise()<=b.(min)()).all() && (b.(max)().cwise()<=m_max).all(); }
+  { return (m_min.cwise()<=(b.min)()).all() && ((b.max)().cwise()<=m_max).all(); }
   /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */
   inline AlignedBox& extend(const VectorType& p)
-  { m_min = m_min.cwise().(min)(p); m_max = m_max.cwise().(max)(p); return *this; }
+  { m_min = (m_min.cwise().min)(p); m_max = (m_max.cwise().max)(p); return *this; }
   /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. */
   inline AlignedBox& extend(const AlignedBox& b)
-  { m_min = m_min.cwise().(min)(b.m_min); m_max = m_max.cwise().(max)(b.m_max); return *this; }
+  { m_min = (m_min.cwise().min)(b.m_min); m_max = (m_max.cwise().max)(b.m_max); return *this; }
   /** Clamps \c *this by the box \a b and returns a reference to \c *this. */
   inline AlignedBox& clamp(const AlignedBox& b)
-  { m_min = m_min.cwise().(max)(b.m_min); m_max = m_max.cwise().(min)(b.m_max); return *this; }
+  { m_min = (m_min.cwise().max)(b.m_min); m_max = (m_max.cwise().min)(b.m_max); return *this; }
   /** Translate \c *this by the vector \a t and returns a reference to \c *this. */
   inline AlignedBox& translate(const VectorType& t)
@ -138,8 +138,8 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==
   template<typename OtherScalarType>
   inline explicit AlignedBox(const AlignedBox<OtherScalarType,AmbientDimAtCompileTime>& other)
   {
-    m_min = other.(min)().template cast<Scalar>();
-    m_max = other.(max)().template cast<Scalar>();
+    m_min = (other.min)().template cast<Scalar>();
+    m_max = (other.max)().template cast<Scalar>();
   }
   /** \returns \c true if \c *this is approximately equal to \a other, within the precision
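All the extra parentheses in this file guard against function-like min/max macros (most famously from <windows.h>). Parenthesizing the name suppresses expansion, since a function-like macro only expands when the next token is '('. A sketch simulating the clash:

#include <cstdio>
#include <limits>
#define max(a, b) ((a) > (b) ? (a) : (b)) // simulate <windows.h> without NOMINMAX

int main()
{
    // double m = std::numeric_limits<double>::max();   // breaks: 'max(' starts a macro call
    double m = (std::numeric_limits<double>::max)();    // ')' after 'max' blocks expansion
    std::printf("%g\n", m);
}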

View File

@ -291,7 +291,7 @@ template<typename _MatrixType> class EigenSolver
   ComputationInfo info() const
   {
-    eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
+    eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
     return m_realSchur.info();
   }
@ -339,7 +339,7 @@ typename EigenSolver<MatrixType>::EigenvectorsType EigenSolver<MatrixType>::eige
   EigenvectorsType matV(n,n);
   for (Index j=0; j<n; ++j)
   {
-    if (internal::isMuchSmallerThan(internal::imag(m_eivalues.coeff(j)), internal::real(m_eivalues.coeff(j))))
+    if (internal::isMuchSmallerThan(internal::imag(m_eivalues.coeff(j)), internal::real(m_eivalues.coeff(j))) || j+1==n)
     {
       // we have a real eigen value
       matV.col(j) = m_eivec.col(j).template cast<ComplexScalar>();
@ -570,10 +570,13 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
       }
     }
+    // We handled a pair of complex conjugate eigenvalues, so need to skip them both
+    n--;
   }
   else
   {
-    eigen_assert("Internal bug in EigenSolver"); // this should not happen
+    eigen_assert(0 && "Internal bug in EigenSolver"); // this should not happen
   }
 }
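The assert fix matters because a string literal is a non-null pointer, so eigen_assert("...") can never fire; anding with 0 makes the condition false while keeping the message in the diagnostic. In standard C++:

#include <cassert>

int main()
{
    assert("this never fails");          // always passes: the literal converts to a non-null pointer
    // assert(0 && "Internal bug ...");  // would abort and print the message text
    return 0;
}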

View File

@ -307,7 +307,8 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
   /** \brief Maximum number of iterations.
     *
-    * Maximum number of iterations allowed for an eigenvalue to converge.
+    * The algorithm terminates if it does not converge within m_maxIterations * n iterations, where n
+    * denotes the size of the matrix. This value is currently set to 30 (copied from LAPACK).
     */
   static const int m_maxIterations = 30;
@ -407,7 +408,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
   Index end = n-1;
   Index start = 0;
-  Index iter = 0; // number of iterations we are working on one element
+  Index iter = 0; // total number of iterations
   while (end>0)
   {
@ -418,15 +419,14 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
     // find the largest unreduced block
     while (end>0 && m_subdiag[end-1]==0)
     {
-      iter = 0;
       end--;
     }
     if (end<=0)
       break;
-    // if we spent too many iterations on the current element, we give up
+    // if we spent too many iterations, we give up
     iter++;
-    if(iter > m_maxIterations) break;
+    if(iter > m_maxIterations * n) break;
     start = end - 1;
     while (start>0 && m_subdiag[start-1]!=0)
@ -435,7 +435,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
     internal::tridiagonal_qr_step<MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor>(diag.data(), m_subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n);
   }
-  if (iter <= m_maxIterations)
+  if (iter <= m_maxIterations * n)
     m_info = Success;
   else
     m_info = NoConvergence;
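Caller-side sketch (assuming the Eigen 3 headers are on the include path): since the solver can now stop after 30*n total iterations, checking info() is the supported way to detect non-convergence.

#include <Eigen/Eigenvalues>
#include <cstdio>

int main()
{
    Eigen::Matrix3d A;
    A << 2, 1, 0,
         1, 2, 1,
         0, 1, 2; // symmetric tridiagonal
    Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> es(A);
    if (es.info() != Eigen::Success) return 1;  // NoConvergence is reported here
    std::printf("%g\n", es.eigenvalues()(0));   // ~0.585786, i.e. 2 - sqrt(2)
}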

View File

@ -225,7 +225,7 @@ public:
     normal() = mat * normal();
   else
   {
-    eigen_assert("invalid traits value in Hyperplane::transform()");
+    eigen_assert(0 && "invalid traits value in Hyperplane::transform()");
   }
   return *this;
 }

View File

@ -182,8 +182,7 @@ public:
   template<typename NewScalarType>
   inline typename internal::cast_return_type<Derived,Quaternion<NewScalarType> >::type cast() const
   {
-    return typename internal::cast_return_type<Derived,Quaternion<NewScalarType> >::type(
-      coeffs().template cast<NewScalarType>());
+    return typename internal::cast_return_type<Derived,Quaternion<NewScalarType> >::type(derived());
   }
   #ifdef EIGEN_QUATERNIONBASE_PLUGIN
@ -225,22 +224,25 @@ struct traits<Quaternion<_Scalar,_Options> >
   typedef _Scalar Scalar;
   typedef Matrix<_Scalar,4,1,_Options> Coefficients;
   enum{
-    IsAligned = bool(EIGEN_ALIGN) && ((int(_Options)&Aligned)==Aligned),
+    IsAligned = internal::traits<Coefficients>::Flags & AlignedBit,
     Flags = IsAligned ? (AlignedBit | LvalueBit) : LvalueBit
   };
 };
 }
template<typename _Scalar, int _Options> template<typename _Scalar, int _Options>
class Quaternion : public QuaternionBase<Quaternion<_Scalar,_Options> >{ class Quaternion : public QuaternionBase<Quaternion<_Scalar,_Options> >
{
typedef QuaternionBase<Quaternion<_Scalar,_Options> > Base; typedef QuaternionBase<Quaternion<_Scalar,_Options> > Base;
enum { IsAligned = internal::traits<Quaternion>::IsAligned };
public: public:
typedef _Scalar Scalar; typedef _Scalar Scalar;
EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Quaternion) EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Quaternion)
using Base::operator*=; using Base::operator*=;
typedef typename internal::traits<Quaternion<Scalar,_Options> >::Coefficients Coefficients; typedef typename internal::traits<Quaternion>::Coefficients Coefficients;
typedef typename Base::AngleAxisType AngleAxisType; typedef typename Base::AngleAxisType AngleAxisType;
/** Default constructor leaving the quaternion uninitialized. */ /** Default constructor leaving the quaternion uninitialized. */
@ -271,9 +273,16 @@ public:
   template<typename Derived>
   explicit inline Quaternion(const MatrixBase<Derived>& other) { *this = other; }
+  /** Explicit copy constructor with scalar conversion */
+  template<typename OtherScalar, int OtherOptions>
+  explicit inline Quaternion(const Quaternion<OtherScalar, OtherOptions>& other)
+  { m_coeffs = other.coeffs().template cast<Scalar>(); }
   inline Coefficients& coeffs() { return m_coeffs;}
   inline const Coefficients& coeffs() const { return m_coeffs;}
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(IsAligned)
 protected:
   Coefficients m_coeffs;
@ -686,9 +695,8 @@ QuaternionBase<Derived>::slerp(Scalar t, const QuaternionBase<OtherDerived>& oth
     scale0 = internal::sin( ( Scalar(1) - t ) * theta) / sinTheta;
     scale1 = internal::sin( ( t * theta) ) / sinTheta;
-    if (d<0)
-      scale1 = -scale1;
   }
+  if(d<0) scale1 = -scale1;
   return Quaternion<Scalar>(scale0 * coeffs() + scale1 * other.coeffs());
 }
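The slerp change moves the d<0 sign flip out of the general-angle branch, so the nearly-parallel fallback honors it too. A plain-double sketch of the corrected control flow (2-component vectors stand in for quaternions; not Eigen's code):

#include <cmath>
#include <cstdio>

static void slerp2(const double a[2], const double b[2], double t, double out[2])
{
    const double d = a[0]*b[0] + a[1]*b[1];
    double scale0, scale1;
    if (std::fabs(d) >= 1.0 - 1e-12) {   // nearly (anti)parallel: linear fallback
        scale0 = 1.0 - t;
        scale1 = t;
    } else {
        const double theta = std::acos(std::fabs(d));
        const double sinTheta = std::sin(theta);
        scale0 = std::sin((1.0 - t) * theta) / sinTheta;
        scale1 = std::sin(t * theta) / sinTheta;
    }
    if (d < 0) scale1 = -scale1;         // now applied on BOTH branches
    out[0] = scale0*a[0] + scale1*b[0];
    out[1] = scale0*a[1] + scale1*b[1];
}

int main()
{
    const double a[2] = {1, 0}, b[2] = {-1, 1e-13}; // nearly antipodal inputs
    double r[2];
    slerp2(a, b, 0.5, r);
    std::printf("%g %g\n", r[0], r[1]); // ~1 -5e-14: interpolant stays on a's side
}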

View File

@ -89,7 +89,7 @@ public:
   /** Concatenates two rotations */
   inline Rotation2D& operator*=(const Rotation2D& other)
-  { return m_angle += other.m_angle; return *this; }
+  { m_angle += other.m_angle; return *this; }
   /** Applies the rotation to a 2D vector */
   Vector2 operator* (const Vector2& vec) const

View File

@ -443,7 +443,6 @@ FullPivLU<MatrixType>& FullPivLU<MatrixType>::compute(const MatrixType& matrix)
   m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case)
   m_maxpivot = RealScalar(0);
-  RealScalar cutoff(0);
   for(Index k = 0; k < size; ++k)
   {
@ -458,14 +457,7 @@ FullPivLU<MatrixType>& FullPivLU<MatrixType>::compute(const MatrixType& matrix)
     row_of_biggest_in_corner += k; // correct the values! since they were computed in the corner,
     col_of_biggest_in_corner += k; // need to add k to them.
-    // when k==0, biggest_in_corner is the biggest coeff absolute value in the original matrix
-    if(k == 0) cutoff = biggest_in_corner * NumTraits<Scalar>::epsilon();
-    // if the pivot (hence the corner) is "zero", terminate to avoid generating nan/inf values.
-    // Notice that using an exact comparison (biggest_in_corner==0) here, as Golub-van Loan do in
-    // their pseudo-code, results in numerical instability! The cutoff here has been validated
-    // by running the unit test 'lu' with many repetitions.
-    if(biggest_in_corner < cutoff)
+    if(biggest_in_corner==RealScalar(0))
     {
       // before exiting, make sure to initialize the still uninitialized transpositions
       // in a sane state without destroying what we already have.

View File

@ -55,7 +55,7 @@ struct compute_inverse_size4<Architecture::SSE, float, MatrixType, ResultType>
   static void run(const MatrixType& matrix, ResultType& result)
   {
-    EIGEN_ALIGN16 const int _Sign_PNNP[4] = { 0x00000000, 0x80000000, 0x80000000, 0x00000000 };
+    EIGEN_ALIGN16 const unsigned int _Sign_PNNP[4] = { 0x00000000, 0x80000000, 0x80000000, 0x00000000 };
     // Load the full matrix into registers
     __m128 _L1 = matrix.template packet<MatrixAlignment>( 0);

View File

@ -590,6 +590,9 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
   // only worsening the precision of U and V as we accumulate more rotations
   const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();
+  // limit for very small denormal numbers to be considered zero in order to avoid infinite loops (see bug 286)
+  const RealScalar considerAsZero = RealScalar(2) * std::numeric_limits<RealScalar>::denorm_min();
   /*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */
   if(!internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreColsThanRows>::run(*this, matrix)
@ -617,10 +620,11 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
       {
         // if this 2x2 sub-matrix is not diagonal already...
         // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't
-        // keep us iterating forever.
+        // keep us iterating forever. Similarly, small denormal numbers are considered zero.
         using std::max;
-        if((max)(internal::abs(m_workMatrix.coeff(p,q)),internal::abs(m_workMatrix.coeff(q,p)))
-            > (max)(internal::abs(m_workMatrix.coeff(p,p)),internal::abs(m_workMatrix.coeff(q,q)))*precision)
+        RealScalar threshold = (max)(considerAsZero, precision * (max)(internal::abs(m_workMatrix.coeff(p,p)),
+                                                                       internal::abs(m_workMatrix.coeff(q,q))));
+        if((max)(internal::abs(m_workMatrix.coeff(p,q)),internal::abs(m_workMatrix.coeff(q,p))) > threshold)
         {
           finished = false;
@ -704,6 +708,13 @@ struct solve_retval<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
 };
 } // end namespace internal
+/** \svd_module
+  *
+  * \return the singular value decomposition of \c *this computed by two-sided
+  * Jacobi transformations.
+  *
+  * \sa class JacobiSVD
+  */
 template<typename Derived>
 JacobiSVD<typename MatrixBase<Derived>::PlainObject>
 MatrixBase<Derived>::jacobiSvd(unsigned int computationOptions) const
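A short usage sketch for the newly documented entry point (assuming the Eigen 3 headers are available): jacobiSvd() with thin U and V also solves least-squares systems.

#include <Eigen/Dense>
#include <cstdio>

int main()
{
    Eigen::MatrixXd A(3, 2);
    A << 1, 0,
         0, 1,
         1, 1;
    Eigen::Vector3d b(1, 1, 2);
    Eigen::JacobiSVD<Eigen::MatrixXd> svd = A.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV);
    Eigen::Vector2d x = svd.solve(b);
    std::printf("%g %g\n", x(0), x(1)); // 1 1: A*x reproduces b exactly here
}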

View File

@ -171,7 +171,7 @@ void SparseTriangularView<ExpressionType,Mode>::solveInPlace(MatrixBase<OtherDer
   eigen_assert(m_matrix.cols() == m_matrix.rows());
   eigen_assert(m_matrix.cols() == other.rows());
   eigen_assert(!(Mode & ZeroDiag));
-  eigen_assert(Mode & (Upper|Lower));
+  eigen_assert((Mode & (Upper|Lower)) != 0);
   enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };
@ -298,7 +298,7 @@ void SparseTriangularView<ExpressionType,Mode>::solveInPlace(SparseMatrixBase<Ot
eigen_assert(m_matrix.cols() == m_matrix.rows()); eigen_assert(m_matrix.cols() == m_matrix.rows());
eigen_assert(m_matrix.cols() == other.rows()); eigen_assert(m_matrix.cols() == other.rows());
eigen_assert(!(Mode & ZeroDiag)); eigen_assert(!(Mode & ZeroDiag));
eigen_assert(Mode & (Upper|Lower)); eigen_assert((Mode & (Upper|Lower)) != 0);
// enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit }; // enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };
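
The added `!= 0` does not change the condition; it keeps the assert's expression a genuine `bool`, which silences "forcing value to bool" warnings (e.g. MSVC C4800) on the bitmask test. A reduced illustration with stand-in flag values:

    #include <cassert>

    // Stand-in flag values for illustration; Eigen's actual enumerators differ.
    enum { Lower = 0x1, Upper = 0x2, ZeroDiag = 0x20 };

    void checkTriangularMode(int Mode)
    {
      assert(!(Mode & ZeroDiag));            // a zero diagonal cannot be solved against
      assert((Mode & (Upper | Lower)) != 0); // same test as before, but already boolean
    }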

eigenlib/howto.txt (new file, +7 lines)

@@ -0,0 +1,7 @@
+Current Eigen Version 3.05 (10.02.2012)
+To update the lib:
+- download Eigen
+- unzip it somewhere
+- copy the two folders 'Eigen' and 'unsupported' here
+- update this file
+- commit


@@ -331,7 +331,7 @@ class FFT
       // if the vector is strided, then we need to copy it to a packed temporary
       Matrix<src_type,1,Dynamic> tmp;
       if ( resize_input ) {
-        size_t ncopy = std::min(src.size(),src.size() + resize_input);
+        size_t ncopy = (std::min)(src.size(),src.size() + resize_input);
         tmp.setZero(src.size() + resize_input);
         if ( realfft && HasFlag(HalfSpectrum) ) {
           // pad at the Nyquist bin
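
All the `(std::min)` / `(std::max)` parenthesizations in this commit serve one purpose: on Windows, `<windows.h>` defines function-like `min`/`max` macros unless `NOMINMAX` is set, and a macro only expands when its name is immediately followed by `(`. A self-contained demonstration (the `max` macro below stands in for the one from `<windows.h>`):

    #include <algorithm>
    #include <limits>

    // Stand-in for the macro that <windows.h> injects when NOMINMAX is not defined.
    #define max(a, b) (((a) > (b)) ? (a) : (b))

    int clampNonNegative(int x)
    {
      // return std::max(0, x);        // macro expands: 'std::(((0) > (x)) ...' -- invalid
      return (std::max)(0, x);         // 'max' is followed by ')', so the macro stays quiet
    }

    double largestDouble()
    {
      // std::numeric_limits<double>::max() would be parsed as the two-argument macro and fail
      return (std::numeric_limits<double>::max)();
    }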


@@ -35,7 +35,8 @@
 namespace Eigen {

-/** \defgroup MPRealSupport_Module MPFRC++ Support module
+/** \ingroup Unsupported_modules
+  * \defgroup MPRealSupport_Module MPFRC++ Support module
   *
   * \code
   * #include <Eigen/MPRealSupport>


@@ -231,7 +231,7 @@ private:
 template<typename BVH, typename Minimizer>
 typename Minimizer::Scalar BVMinimize(const BVH &tree, Minimizer &minimizer)
 {
-  return internal::minimize_helper(tree, minimizer, tree.getRootIndex(), std::numeric_limits<typename Minimizer::Scalar>::max());
+  return internal::minimize_helper(tree, minimizer, tree.getRootIndex(), (std::numeric_limits<typename Minimizer::Scalar>::max)());
 }

 /** Given two BVH's, runs the query on their cartesian product encapsulated by \a minimizer.

@@ -264,7 +264,7 @@ typename Minimizer::Scalar BVMinimize(const BVH1 &tree1, const BVH2 &tree2, Mini
   ObjIter2 oBegin2 = ObjIter2(), oEnd2 = ObjIter2(), oCur2 = ObjIter2();
   std::priority_queue<QueueElement, std::vector<QueueElement>, std::greater<QueueElement> > todo; //smallest is at the top
-  Scalar minimum = std::numeric_limits<Scalar>::max();
+  Scalar minimum = (std::numeric_limits<Scalar>::max)();
   todo.push(std::make_pair(Scalar(), std::make_pair(tree1.getRootIndex(), tree2.getRootIndex())));
   while(!todo.empty()) {


@@ -259,7 +259,7 @@ void MatrixExponential<MatrixType>::computeUV(float)
     pade5(m_M);
   } else {
     const float maxnorm = 3.925724783138660f;
-    m_squarings = max(0, (int)ceil(log2(m_l1norm / maxnorm)));
+    m_squarings = (max)(0, (int)ceil(log2(m_l1norm / maxnorm)));
     MatrixType A = m_M / pow(Scalar(2), Scalar(static_cast<RealScalar>(m_squarings)));
     pade7(A);
   }

@@ -281,7 +281,7 @@ void MatrixExponential<MatrixType>::computeUV(double)
     pade9(m_M);
   } else {
     const double maxnorm = 5.371920351148152;
-    m_squarings = max(0, (int)ceil(log2(m_l1norm / maxnorm)));
+    m_squarings = (max)(0, (int)ceil(log2(m_l1norm / maxnorm)));
     MatrixType A = m_M / pow(Scalar(2), Scalar(m_squarings));
     pade13(A);
   }
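
The scaling step picks the smallest number of halvings that brings the norm under the Padé approximant's validity bound; the parentheses again dodge the `max` macro. A sketch of the computation with illustrative names (assumes l1norm > 0 and C++11's std::log2):

    #include <algorithm>
    #include <cmath>

    // Number of squarings s such that l1norm / 2^s <= maxnorm,
    // clamped at zero when the norm is already small enough.
    inline int chooseSquarings(double l1norm, double maxnorm)
    {
      return (std::max)(0, static_cast<int>(std::ceil(std::log2(l1norm / maxnorm))));
    }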


@@ -1,13 +1,22 @@
 namespace Eigen {

 /** \mainpage Eigen's unsupported modules

 This is the API documentation for Eigen's unsupported modules.

 These modules are contributions from various users. They are provided "as is", without any support.

+Click on the \e Modules tab at the top of this page to get a list of all unsupported modules.
+
 Don't miss the <a href="..//index.html">official Eigen documentation</a>.

+\defgroup Unsupported_modules
+
+The unsupported modules are contributions from various users. They are
+provided "as is", without any support. Nevertheless, some of them are
+subject to be included in Eigen in the future.
+
 */

 }


@@ -90,13 +90,13 @@ struct BallPointStuff //this class provides functions to be both an intersector
   }

   double minimumOnVolume(const BoxType &r) { ++calls; return r.squaredExteriorDistance(p); }
-  double minimumOnObject(const BallType &b) { ++calls; return std::max(0., (b.center - p).squaredNorm() - SQR(b.radius)); }
+  double minimumOnObject(const BallType &b) { ++calls; return (std::max)(0., (b.center - p).squaredNorm() - SQR(b.radius)); }
   double minimumOnVolumeVolume(const BoxType &r1, const BoxType &r2) { ++calls; return r1.squaredExteriorDistance(r2); }
-  double minimumOnVolumeObject(const BoxType &r, const BallType &b) { ++calls; return SQR(std::max(0., r.exteriorDistance(b.center) - b.radius)); }
-  double minimumOnObjectVolume(const BallType &b, const BoxType &r) { ++calls; return SQR(std::max(0., r.exteriorDistance(b.center) - b.radius)); }
-  double minimumOnObjectObject(const BallType &b1, const BallType &b2){ ++calls; return SQR(std::max(0., (b1.center - b2.center).norm() - b1.radius - b2.radius)); }
+  double minimumOnVolumeObject(const BoxType &r, const BallType &b) { ++calls; return SQR((std::max)(0., r.exteriorDistance(b.center) - b.radius)); }
+  double minimumOnObjectVolume(const BallType &b, const BoxType &r) { ++calls; return SQR((std::max)(0., r.exteriorDistance(b.center) - b.radius)); }
+  double minimumOnObjectObject(const BallType &b1, const BallType &b2){ ++calls; return SQR((std::max)(0., (b1.center - b2.center).norm() - b1.radius - b2.radius)); }
   double minimumOnVolumeObject(const BoxType &r, const VectorType &v) { ++calls; return r.squaredExteriorDistance(v); }
-  double minimumOnObjectObject(const BallType &b, const VectorType &v){ ++calls; return SQR(std::max(0., (b.center - v).norm() - b.radius)); }
+  double minimumOnObjectObject(const BallType &b, const VectorType &v){ ++calls; return SQR((std::max)(0., (b.center - v).norm() - b.radius)); }

   VectorType p;
   int calls;
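
The clamped expressions above compute squared exterior distances; clamping at zero makes overlapping shapes report distance zero rather than a negative artifact. A minimal sketch of the pattern:

    #include <algorithm>

    // Squared distance from a point 'centerDistance' away from a ball's
    // center to the ball's surface; zero if the point is inside the ball.
    inline double squaredExteriorDistanceToBall(double centerDistance, double radius)
    {
      const double d = (std::max)(0., centerDistance - radius);
      return d * d;
    }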


@@ -36,7 +36,7 @@ double binom(int n, int k)
 template <typename Derived, typename OtherDerived>
 double relerr(const MatrixBase<Derived>& A, const MatrixBase<OtherDerived>& B)
 {
-  return std::sqrt((A - B).cwiseAbs2().sum() / std::min(A.cwiseAbs2().sum(), B.cwiseAbs2().sum()));
+  return std::sqrt((A - B).cwiseAbs2().sum() / (std::min)(A.cwiseAbs2().sum(), B.cwiseAbs2().sum()));
 }

 template <typename T>


@@ -67,7 +67,7 @@ template<typename SparseMatrixType> void sparse_extra(const SparseMatrixType& re
   typedef typename SparseMatrixType::Scalar Scalar;
   enum { Flags = SparseMatrixType::Flags };

-  double density = std::max(8./(rows*cols), 0.01);
+  double density = (std::max)(8./(rows*cols), 0.01);
   typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
   typedef Matrix<Scalar,Dynamic,1> DenseVector;
   Scalar eps = 1e-6;
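
The density formula recurring in these tests targets roughly eight nonzeros in total while never dropping below 1% fill, so small matrices still receive a few entries. As a standalone sketch:

    #include <algorithm>

    // Aim for ~8 nonzeros overall, but at least 1% density.
    inline double testDensity(int rows, int cols)
    {
      return (std::max)(8. / (static_cast<double>(rows) * cols), 0.01);
    }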


@@ -33,7 +33,7 @@ template<typename Scalar> void sparse_ldlt(int rows, int cols)
 {
   static bool odd = true;
   odd = !odd;
-  double density = std::max(8./(rows*cols), 0.01);
+  double density = (std::max)(8./(rows*cols), 0.01);
   typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
   typedef Matrix<Scalar,Dynamic,1> DenseVector;


@@ -31,7 +31,7 @@
 template<typename Scalar> void sparse_llt(int rows, int cols)
 {
-  double density = std::max(8./(rows*cols), 0.01);
+  double density = (std::max)(8./(rows*cols), 0.01);
   typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
   typedef Matrix<Scalar,Dynamic,1> DenseVector;


@@ -35,7 +35,7 @@
 template<typename Scalar> void sparse_lu(int rows, int cols)
 {
-  double density = std::max(8./(rows*cols), 0.01);
+  double density = (std::max)(8./(rows*cols), 0.01);
   typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
   typedef Matrix<Scalar,Dynamic,1> DenseVector;