Diffstat (limited to 'indra/llmath/llmath.h')
 indra/llmath/llmath.h | 143 (mode -rw-r--r-- -> -rwxr-xr-x)
 1 file changed, 81 insertions(+), 62 deletions(-)
diff --git a/indra/llmath/llmath.h b/indra/llmath/llmath.h
old mode 100644
new mode 100755
index 798f1154d0..93b9f22b25
--- a/indra/llmath/llmath.h
+++ b/indra/llmath/llmath.h
@@ -1,4 +1,4 @@
-/**
+/**
* @file llmath.h
* @brief Useful math constants and macros.
*
@@ -29,7 +29,8 @@
#include <cmath>
#include <cstdlib>
-#include <complex>
+#include <vector>
+#include <limits>
#include "lldefs.h"
//#include "llstl.h" // *TODO: Remove when LLString is gone
//#include "llstring.h" // *TODO: Remove when LLString is gone
@@ -55,32 +56,11 @@
#endif
// Single Precision Floating Point Routines
-#ifndef sqrtf
-#define sqrtf(x) ((F32)sqrt((F64)(x)))
-#endif
-#ifndef fsqrtf
-#define fsqrtf(x) sqrtf(x)
-#endif
-
-#ifndef cosf
-#define cosf(x) ((F32)cos((F64)(x)))
-#endif
-#ifndef sinf
-#define sinf(x) ((F32)sin((F64)(x)))
-#endif
-#ifndef tanf
+// (There used to be more defined here, but they appeared to be redundant and
+// were breaking some other includes. Removed by Falcon, reviewed by Andrew, 11/25/09)
+/*#ifndef tanf
#define tanf(x) ((F32)tan((F64)(x)))
-#endif
-#ifndef acosf
-#define acosf(x) ((F32)acos((F64)(x)))
-#endif
-
-#ifndef powf
-#define powf(x,y) ((F32)pow((F64)(x),(F64)(y)))
-#endif
-#ifndef expf
-#define expf(x) ((F32)exp((F64)(x)))
-#endif
+#endif*/
const F32 GRAVITY = -9.8f;
@@ -93,20 +73,26 @@ const F32 F_E = 2.71828182845904523536f;
const F32 F_SQRT2 = 1.4142135623730950488016887242097f;
const F32 F_SQRT3 = 1.73205080756888288657986402541f;
const F32 OO_SQRT2 = 0.7071067811865475244008443621049f;
+const F32 OO_SQRT3 = 0.577350269189625764509f;
const F32 DEG_TO_RAD = 0.017453292519943295769236907684886f;
const F32 RAD_TO_DEG = 57.295779513082320876798154814105f;
const F32 F_APPROXIMATELY_ZERO = 0.00001f;
+const F32 F_LN10 = 2.3025850929940456840179914546844f;
+const F32 OO_LN10 = 0.43429448190325182765112891891661;
const F32 F_LN2 = 0.69314718056f;
const F32 OO_LN2 = 1.4426950408889634073599246810019f;
const F32 F_ALMOST_ZERO = 0.0001f;
const F32 F_ALMOST_ONE = 1.0f - F_ALMOST_ZERO;
+const F32 GIMBAL_THRESHOLD = 0.000436f; // sets the gimballock threshold 0.025 away from +/-90 degrees
+// formula: GIMBAL_THRESHOLD = sin(DEG_TO_RAD * gimbal_threshold_angle);
+
// BUG: Eliminate in favor of F_APPROXIMATELY_ZERO above?
const F32 FP_MAG_THRESHOLD = 0.0000001f;
// TODO: Replace with logic like is_approx_equal
-inline BOOL is_approx_zero( F32 f ) { return (-F_APPROXIMATELY_ZERO < f) && (f < F_APPROXIMATELY_ZERO); }
+inline bool is_approx_zero( F32 f ) { return (-F_APPROXIMATELY_ZERO < f) && (f < F_APPROXIMATELY_ZERO); }
// These functions work by interpreting sign+exp+mantissa as an unsigned
// integer.
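
A quick check of the GIMBAL_THRESHOLD value added above, using the formula given in
its comment (a sketch; the threshold angle of 0.025 degrees is taken from the comment,
and for small angles sin(x) is approximately x in radians):

    #include <cmath>
    #include <cstdio>

    int main()
    {
        const float DEG_TO_RAD = 0.017453292519943295769236907684886f;
        const float gimbal_threshold_angle = 0.025f;   // degrees away from +/-90
        std::printf("%f\n", std::sin(DEG_TO_RAD * gimbal_threshold_angle));
        // prints ~0.000436, matching GIMBAL_THRESHOLD
        return 0;
    }
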
@@ -132,13 +118,19 @@ inline BOOL is_approx_zero( F32 f ) { return (-F_APPROXIMATELY_ZERO < f) && (f <
// WARNING: Infinity is comparable with F32_MAX and negative
// infinity is comparable with F32_MIN
-inline BOOL is_approx_equal(F32 x, F32 y)
+// handles negative and positive zeros
+inline bool is_zero(F32 x)
+{
+ return (*(U32*)(&x) & 0x7fffffff) == 0;
+}
+
+inline bool is_approx_equal(F32 x, F32 y)
{
const S32 COMPARE_MANTISSA_UP_TO_BIT = 0x02;
return (std::abs((S32) ((U32&)x - (U32&)y) ) < COMPARE_MANTISSA_UP_TO_BIT);
}
-inline BOOL is_approx_equal(F64 x, F64 y)
+inline bool is_approx_equal(F64 x, F64 y)
{
const S64 COMPARE_MANTISSA_UP_TO_BIT = 0x02;
return (std::abs((S32) ((U64&)x - (U64&)y) ) < COMPARE_MANTISSA_UP_TO_BIT);
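
Usage sketch for the bit-pattern helpers above (assumes the F32/U32 typedefs provided
by lldefs.h; two floats compare approximately equal when their raw bit patterns differ
by less than 2):

    F32 a = 1.0f;
    F32 b = std::nextafterf(a, 2.0f);     // exactly one representable step above a (<cmath>)
    bool approx = is_approx_equal(a, b);  // true: integer bit patterns differ by 1
    bool zero   = is_zero(-0.0f);         // true: the sign bit is masked off
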
@@ -200,7 +192,7 @@ inline S32 llfloor( F32 f )
}
return result;
#else
- return (S32)floorf(f);
+ return (S32)floor(f);
#endif
}
@@ -214,16 +206,16 @@ inline S32 llceil( F32 f )
#ifndef BOGUS_ROUND
// Use this round. Does an arithmetic round (0.5 always rounds up)
-inline S32 llround(const F32 val)
+inline S32 ll_round(const F32 val)
{
return llfloor(val + 0.5f);
}
#else // BOGUS_ROUND
-// Old llround implementation - does banker's round (toward nearest even in the case of a 0.5.
+// Old ll_round implementation - does banker's round (toward nearest even in the case of a 0.5.
// Not using this because we don't have a consistent implementation on both platforms, use
// llfloor(val + 0.5f), which is consistent on all platforms.
-inline S32 llround(const F32 val)
+inline S32 ll_round(const F32 val)
{
#if LL_WINDOWS
// Note: assumes that the floating point control word is set to rounding mode (the default)
@@ -262,12 +254,12 @@ inline int round_int(double x)
}
#endif // BOGUS_ROUND
-inline F32 llround( F32 val, F32 nearest )
+inline F32 ll_round( F32 val, F32 nearest )
{
return F32(floor(val * (1.0f / nearest) + 0.5f)) * nearest;
}
-inline F64 llround( F64 val, F64 nearest )
+inline F64 ll_round( F64 val, F64 nearest )
{
return F64(floor(val * (1.0 / nearest) + 0.5)) * nearest;
}
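
Hypothetical usage of the renamed ll_round overloads (the integer form is an arithmetic
round where 0.5 always rounds up; the val/nearest form snaps to the nearest multiple):

    S32 a = ll_round(2.5f);            // 3, via llfloor(3.0f)
    S32 b = ll_round(-2.5f);           // -2, via llfloor(-2.0f)
    F32 c = ll_round(3.127f, 0.05f);   // 3.15f, nearest multiple of 0.05
    F64 d = ll_round(1.0 / 3.0, 0.01); // 0.33
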
@@ -317,25 +309,6 @@ const S32 LL_SHIFT_AMOUNT = 16; //16.16 fixed point represe
#define LL_MAN_INDEX 1
#endif
-/* Deprecated: use llround(), lltrunc(), or llfloor() instead
-// ================================================================================================
-// Real2Int
-// ================================================================================================
-inline S32 F64toS32(F64 val)
-{
- val = val + LL_DOUBLE_TO_FIX_MAGIC;
- return ((S32*)&val)[LL_MAN_INDEX] >> LL_SHIFT_AMOUNT;
-}
-
-// ================================================================================================
-// Real2Int
-// ================================================================================================
-inline S32 F32toS32(F32 val)
-{
- return F64toS32 ((F64)val);
-}
-*/
-
////////////////////////////////////////////////
//
// Fast exp and log
@@ -359,9 +332,7 @@ static union
#define LL_EXP_A (1048576 * OO_LN2) // use 1512775 for integer
#define LL_EXP_C (60801) // this value of C good for -4 < y < 4
-#define LL_FAST_EXP(y) (LLECO.n.i = llround(F32(LL_EXP_A*(y))) + (1072693248 - LL_EXP_C), LLECO.d)
-
-
+#define LL_FAST_EXP(y) (LLECO.n.i = ll_round(F32(LL_EXP_A*(y))) + (1072693248 - LL_EXP_C), LLECO.d)
inline F32 llfastpow(const F32 x, const F32 y)
{
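
LL_FAST_EXP is the classic Schraudolph approximation: it writes A*y + (1023*2^20 - C)
into the high 32 bits of a double so that the stored exponent and mantissa decode to
roughly e^y. A standalone sketch of the same trick (assumes little-endian IEEE-754
doubles, mirroring the header's union; the names here are illustrative and not part of
the header):

    #include <cmath>
    #include <cstdio>

    static union { double d; struct { int j, i; } n; } eco;  // i = high word on little-endian

    inline double fast_exp(double y)
    {
        const double A = 1048576 / 0.69314718056;   // 2^20 / ln(2), ~1512775
        const int    C = 60801;                     // correction term, good for -4 < y < 4
        eco.n.i = (int)std::lround(A * y) + (1072693248 - C);  // 1072693248 == 1023 * 2^20
        return eco.d;
    }

    int main()
    {
        for (double y = -3.0; y <= 3.0; y += 1.5)
            std::printf("y=%+.1f  fast=%.4f  exact=%.4f\n", y, fast_exp(y), std::exp(y));
        return 0;
    }
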
@@ -378,11 +349,11 @@ inline F32 snap_to_sig_figs(F32 foo, S32 sig_figs)
bar *= 10.f;
}
- foo = (F32)llround(foo * bar);
+ F32 sign = (foo > 0.f) ? 1.f : -1.f;
+ F32 new_foo = F32( S64(foo * bar + sign * 0.5f));
+ new_foo /= bar;
- // shift back
- foo /= bar;
- return foo;
+ return new_foo;
}
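
The snap_to_sig_figs change above replaces the ll_round call with an explicit
sign-aware truncation. A minimal sketch of just that rounding step (hypothetical
helper, not part of the header), showing why the sign factor matters for negative
inputs:

    // Adding +/-0.5 before truncating rounds halves away from zero for both signs.
    inline long long round_away_from_zero(float x)
    {
        float sign = (x > 0.f) ? 1.f : -1.f;
        return (long long)(x + sign * 0.5f);
    }

    // round_away_from_zero( 2.5f) ==  3
    // round_away_from_zero(-2.5f) == -3, while (long long)(-2.5f + 0.5f) == -2
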
inline F32 lerp(F32 a, F32 b, F32 u)
@@ -516,4 +487,52 @@ inline F32 llgaussian(F32 x, F32 o)
return 1.f/(F_SQRT_TWO_PI*o)*powf(F_E, -(x*x)/(2*o*o));
}
+//helper function for removing outliers
+template <class VEC_TYPE>
+inline void ll_remove_outliers(std::vector<VEC_TYPE>& data, F32 k)
+{
+ if (data.size() < 100)
+ { //not enough samples
+ return;
+ }
+
+ VEC_TYPE Q1 = data[data.size()/4];
+ VEC_TYPE Q3 = data[data.size()-data.size()/4-1];
+
+ if ((F32)(Q3-Q1) < 1.f)
+ {
+ // not enough variation to detect outliers
+ return;
+ }
+
+
+ VEC_TYPE min = (VEC_TYPE) ((F32) Q1-k * (F32) (Q3-Q1));
+ VEC_TYPE max = (VEC_TYPE) ((F32) Q3+k * (F32) (Q3-Q1));
+
+ U32 i = 0;
+ while (i < data.size() && data[i] < min)
+ {
+ i++;
+ }
+
+ S32 j = data.size()-1;
+ while (j > 0 && data[j] > max)
+ {
+ j--;
+ }
+
+ if (j < data.size()-1)
+ {
+ data.erase(data.begin()+j, data.end());
+ }
+
+ if (i > 0)
+ {
+ data.erase(data.begin(), data.begin()+i);
+ }
+}
+
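
Hypothetical usage of the new ll_remove_outliers helper. It reads Q1 and Q3 by index,
so it assumes the data is already sorted and has at least 100 samples, then trims
anything outside [Q1 - k*(Q3-Q1), Q3 + k*(Q3-Q1)]:

    #include <algorithm>
    #include <vector>

    std::vector<F32> samples = gather_timings();   // gather_timings() is illustrative only
    std::sort(samples.begin(), samples.end());     // must be sorted before the quartile test
    ll_remove_outliers(samples, 1.5f);             // k = 1.5f is the conventional Tukey fence
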
+// Include simd math header
+#include "llsimdmath.h"
+
#endif