Diffstat (limited to 'indra/llmath')
-rwxr-xr-x  indra/llmath/llmath.h       36
-rwxr-xr-x  indra/llmath/llquantize.h    4
-rwxr-xr-x  indra/llmath/llvolume.cpp    2
-rwxr-xr-x  indra/llmath/v4color.cpp     8
-rwxr-xr-x  indra/llmath/v4coloru.h     22
5 files changed, 24 insertions, 48 deletions
diff --git a/indra/llmath/llmath.h b/indra/llmath/llmath.h
index a8b27ad189..93b9f22b25 100755
--- a/indra/llmath/llmath.h
+++ b/indra/llmath/llmath.h
@@ -206,16 +206,16 @@ inline S32 llceil( F32 f )
#ifndef BOGUS_ROUND
// Use this round. Does an arithmetic round (0.5 always rounds up)
-inline S32 llround(const F32 val)
+inline S32 ll_round(const F32 val)
{
return llfloor(val + 0.5f);
}
#else // BOGUS_ROUND
-// Old llround implementation - does banker's round (toward nearest even in the case of a 0.5).
+// Old ll_round implementation - does banker's round (toward nearest even in the case of a 0.5).
// Not using this because we don't have a consistent implementation on both platforms, use
// llfloor(val + 0.5f), which is consistent on all platforms.
-inline S32 llround(const F32 val)
+inline S32 ll_round(const F32 val)
{
#if LL_WINDOWS
// Note: assumes that the floating point control word is set to rounding mode (the default)
@@ -254,12 +254,12 @@ inline int round_int(double x)
}
#endif // BOGUS_ROUND
-inline F32 llround( F32 val, F32 nearest )
+inline F32 ll_round( F32 val, F32 nearest )
{
return F32(floor(val * (1.0f / nearest) + 0.5f)) * nearest;
}
-inline F64 llround( F64 val, F64 nearest )
+inline F64 ll_round( F64 val, F64 nearest )
{
return F64(floor(val * (1.0 / nearest) + 0.5)) * nearest;
}
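The renamed helper keeps the behaviour the comments above describe: ll_round(F32) is an arithmetic round, so an exact .5 always goes up, unlike the round-to-even of the disabled BOGUS_ROUND path, and the two-argument overloads snap a value to the nearest multiple of `nearest`. A minimal standalone sketch of both behaviours, using only <cmath> rather than the LL types:

    #include <cmath>
    #include <cstdio>

    // Arithmetic round, as ll_round(F32) does: an exact .5 always rounds up.
    static int round_half_up(float val) { return (int)std::floor(val + 0.5f); }

    // Snap to the nearest multiple of 'nearest', as ll_round(F32, F32) does.
    static float round_to_nearest(float val, float nearest)
    {
        return std::floor(val * (1.0f / nearest) + 0.5f) * nearest;
    }

    int main()
    {
        printf("%d %d\n", round_half_up(2.5f), round_half_up(3.5f));   // 3 4   (.5 always up)
        printf("%.1f %.1f\n", std::rint(2.5), std::rint(3.5));         // 2.0 4.0 (round-to-even, the "bogus" behaviour)
        printf("%.2f\n", round_to_nearest(3.1416f, 0.05f));            // 3.15
        return 0;
    }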
@@ -309,25 +309,6 @@ const S32 LL_SHIFT_AMOUNT = 16; //16.16 fixed point representation
#define LL_MAN_INDEX 1
#endif
-/* Deprecated: use llround(), lltrunc(), or llfloor() instead
-// ================================================================================================
-// Real2Int
-// ================================================================================================
-inline S32 F64toS32(F64 val)
-{
- val = val + LL_DOUBLE_TO_FIX_MAGIC;
- return ((S32*)&val)[LL_MAN_INDEX] >> LL_SHIFT_AMOUNT;
-}
-
-// ================================================================================================
-// Real2Int
-// ================================================================================================
-inline S32 F32toS32(F32 val)
-{
- return F64toS32 ((F64)val);
-}
-*/
-
////////////////////////////////////////////////
//
// Fast exp and log
@@ -351,9 +332,7 @@ static union
#define LL_EXP_A (1048576 * OO_LN2) // use 1512775 for integer
#define LL_EXP_C (60801) // this value of C good for -4 < y < 4
-#define LL_FAST_EXP(y) (LLECO.n.i = llround(F32(LL_EXP_A*(y))) + (1072693248 - LL_EXP_C), LLECO.d)
-
-
+#define LL_FAST_EXP(y) (LLECO.n.i = ll_round(F32(LL_EXP_A*(y))) + (1072693248 - LL_EXP_C), LLECO.d)
inline F32 llfastpow(const F32 x, const F32 y)
{
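LL_FAST_EXP also picks up the rename; it is a Schraudolph-style exp() approximation that writes a scaled exponent straight into the high 32 bits of an IEEE-754 double through the LLECO union. A hedged, self-contained sketch of the same trick, assuming a little-endian layout (the real header chooses the word via LL_MAN_INDEX):

    #include <cstdint>
    #include <cstdio>
    #include <cmath>

    // Illustrative stand-in for LL_FAST_EXP, not the header's macro itself.
    static double fast_exp(double y)
    {
        union { double d; int32_t i[2]; } u;       // type-punning, like the LLECO union
        const double  OO_LN2 = 1.4426950408889634; // 1 / ln(2)
        const double  EXP_A  = 1048576.0 * OO_LN2; // matches LL_EXP_A (2^20 / ln 2)
        const int32_t EXP_C  = 60801;              // matches LL_EXP_C, good for -4 < y < 4
        u.i[0] = 0;                                // low (mantissa) word; assumes little-endian
        u.i[1] = (int32_t)std::floor(EXP_A * y + 0.5)   // arithmetic round, as ll_round does
                 + (1072693248 - EXP_C);           // 1072693248 is the high word of 1.0
        return u.d;
    }

    int main()
    {
        for (double y = -2.0; y <= 2.0; y += 1.0)
            printf("y=%5.1f  fast=%9.5f  exp=%9.5f\n", y, fast_exp(y), std::exp(y));
        return 0;
    }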
@@ -370,9 +349,6 @@ inline F32 snap_to_sig_figs(F32 foo, S32 sig_figs)
bar *= 10.f;
}
- //F32 new_foo = (F32)llround(foo * bar);
- // the llround() implementation sucks. Don't us it.
-
F32 sign = (foo > 0.f) ? 1.f : -1.f;
F32 new_foo = F32( S64(foo * bar + sign * 0.5f));
new_foo /= bar;
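The comment removed here explained why snap_to_sig_figs() never used the old llround(): the function instead nudges by half a unit toward the value's own sign and truncates through S64, which sends ties away from zero. A small standalone illustration of that idiom (the names are mine, not the header's):

    #include <cstdint>
    #include <cstdio>

    // Round-to-nearest with ties away from zero, the idiom snap_to_sig_figs() keeps:
    // add +/-0.5 in the direction of the sign, then truncate toward zero.
    static float round_scaled(float foo, float bar)
    {
        float sign = (foo > 0.f) ? 1.f : -1.f;
        return (float)(int64_t)(foo * bar + sign * 0.5f) / bar;
    }

    int main()
    {
        printf("%.2f\n", round_scaled( 3.14159f, 100.f));  //  3.14
        printf("%.2f\n", round_scaled(-3.14159f, 100.f));  // -3.14
        return 0;
    }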
diff --git a/indra/llmath/llquantize.h b/indra/llmath/llquantize.h
index 1595dbecf8..10c950abbb 100755
--- a/indra/llmath/llquantize.h
+++ b/indra/llmath/llquantize.h
@@ -52,7 +52,7 @@ inline U16 F32_to_U16_ROUND(F32 val, F32 lower, F32 upper)
val /= (upper - lower);
// round the value. Return the U16
- return (U16)(llround(val*U16MAX));
+ return (U16)(ll_round(val*U16MAX));
}
@@ -92,7 +92,7 @@ inline U8 F32_to_U8_ROUND(F32 val, F32 lower, F32 upper)
val /= (upper - lower);
// return the rounded U8
- return (U8)(llround(val*U8MAX));
+ return (U8)(ll_round(val*U8MAX));
}
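Both quantizers share one pattern: bring the value into [0, 1] relative to [lower, upper], scale to the full integer range, and arithmetic-round. A hedged sketch of the round trip; the clamp and the subtraction of lower are assumptions about code these hunks do not show, and U16MAX is taken to be 65535:

    #include <cstdint>
    #include <cmath>
    #include <algorithm>

    // Illustrative encode/decode pair, not the real llquantize.h interface.
    inline uint16_t quantize_u16(float val, float lower, float upper)
    {
        val = std::clamp(val, lower, upper);               // assumed: the real code clamps first
        val = (val - lower) / (upper - lower);             // normalize to [0, 1]
        return (uint16_t)std::floor(val * 65535.f + 0.5f); // ll_round(val * U16MAX)
    }

    inline float dequantize_u16(uint16_t q, float lower, float upper)
    {
        return lower + (float)q / 65535.f * (upper - lower);
    }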
diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp
index 49cd970392..dedd90eab2 100755
--- a/indra/llmath/llvolume.cpp
+++ b/indra/llmath/llvolume.cpp
@@ -558,7 +558,7 @@ void LLProfile::genNGon(const LLProfileParams& params, S32 sides, F32 offset, F3
// Scale to have size "match" scale. Compensates to get object to generally fill bounding box.
- S32 total_sides = llround(sides / ang_scale); // Total number of sides all around
+ S32 total_sides = ll_round(sides / ang_scale); // Total number of sides all around
if (total_sides < 8)
{
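Only the rounding call changes here; the side count for the full revolution is still sides divided by the angular scale, rounded, with small results caught by the total_sides < 8 check shown above. A worked example with illustrative values:

    #include <cstdio>
    #include <cmath>

    int main()
    {
        float sides = 6.f, ang_scale = 0.25f;                          // illustrative values only
        int total_sides = (int)std::floor(sides / ang_scale + 0.5f);   // ll_round(sides / ang_scale)
        printf("total_sides = %d\n", total_sides);                     // 24; a value under 8 would trip the check
        return 0;
    }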
diff --git a/indra/llmath/v4color.cpp b/indra/llmath/v4color.cpp
index cd2be7c8fd..79a64b24f2 100755
--- a/indra/llmath/v4color.cpp
+++ b/indra/llmath/v4color.cpp
@@ -125,10 +125,10 @@ LLColor4 LLColor4::cyan6(0.2f, 0.6f, 0.6f, 1.0f);
LLColor4::operator const LLColor4U() const
{
return LLColor4U(
- (U8)llclampb(llround(mV[VRED]*255.f)),
- (U8)llclampb(llround(mV[VGREEN]*255.f)),
- (U8)llclampb(llround(mV[VBLUE]*255.f)),
- (U8)llclampb(llround(mV[VALPHA]*255.f)));
+ (U8)llclampb(ll_round(mV[VRED]*255.f)),
+ (U8)llclampb(ll_round(mV[VGREEN]*255.f)),
+ (U8)llclampb(ll_round(mV[VBLUE]*255.f)),
+ (U8)llclampb(ll_round(mV[VALPHA]*255.f)));
}
LLColor4::LLColor4(const LLColor3 &vec, F32 a)
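The float-to-byte conversion follows the same round-then-clamp shape as the other call sites in this diff: scale a [0, 1] channel by 255, arithmetic-round, and clamp into byte range. A standalone sketch; llclampb is assumed to clamp to [0, 255]:

    #include <cstdint>
    #include <cmath>
    #include <algorithm>

    // Illustrative float->byte channel conversion mirroring the operator above.
    inline uint8_t channel_to_u8(float c)
    {
        int v = (int)std::floor(c * 255.f + 0.5f);   // ll_round(c * 255.f)
        return (uint8_t)std::clamp(v, 0, 255);       // stands in for llclampb (assumed [0, 255])
    }
    // e.g. channel_to_u8(0.5f) == 128, channel_to_u8(1.2f) == 255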
diff --git a/indra/llmath/v4coloru.h b/indra/llmath/v4coloru.h
index 12da7e2dd7..fddad34978 100755
--- a/indra/llmath/v4coloru.h
+++ b/indra/llmath/v4coloru.h
@@ -353,10 +353,10 @@ inline LLColor4U LLColor4U::multAll(const F32 k)
{
// Round to nearest
return LLColor4U(
- (U8)llround(mV[VX] * k),
- (U8)llround(mV[VY] * k),
- (U8)llround(mV[VZ] * k),
- (U8)llround(mV[VW] * k));
+ (U8)ll_round(mV[VX] * k),
+ (U8)ll_round(mV[VY] * k),
+ (U8)ll_round(mV[VZ] * k),
+ (U8)ll_round(mV[VW] * k));
}
/*
inline LLColor4U operator*(const LLColor4U &a, U8 k)
@@ -471,7 +471,7 @@ void LLColor4U::setVecScaleClamp(const LLColor4& color)
color_scale_factor /= max_color;
}
const S32 MAX_COLOR = 255;
- S32 r = llround(color.mV[0] * color_scale_factor);
+ S32 r = ll_round(color.mV[0] * color_scale_factor);
if (r > MAX_COLOR)
{
r = MAX_COLOR;
@@ -482,7 +482,7 @@ void LLColor4U::setVecScaleClamp(const LLColor4& color)
}
mV[0] = r;
- S32 g = llround(color.mV[1] * color_scale_factor);
+ S32 g = ll_round(color.mV[1] * color_scale_factor);
if (g > MAX_COLOR)
{
g = MAX_COLOR;
@@ -493,7 +493,7 @@ void LLColor4U::setVecScaleClamp(const LLColor4& color)
}
mV[1] = g;
- S32 b = llround(color.mV[2] * color_scale_factor);
+ S32 b = ll_round(color.mV[2] * color_scale_factor);
if (b > MAX_COLOR)
{
b = MAX_COLOR;
@@ -505,7 +505,7 @@ void LLColor4U::setVecScaleClamp(const LLColor4& color)
mV[2] = b;
// Alpha shouldn't be scaled, just clamped...
- S32 a = llround(color.mV[3] * MAX_COLOR);
+ S32 a = ll_round(color.mV[3] * MAX_COLOR);
if (a > MAX_COLOR)
{
a = MAX_COLOR;
@@ -527,7 +527,7 @@ void LLColor4U::setVecScaleClamp(const LLColor3& color)
}
const S32 MAX_COLOR = 255;
- S32 r = llround(color.mV[0] * color_scale_factor);
+ S32 r = ll_round(color.mV[0] * color_scale_factor);
if (r > MAX_COLOR)
{
r = MAX_COLOR;
@@ -539,7 +539,7 @@ void LLColor4U::setVecScaleClamp(const LLColor3& color)
}
mV[0] = r;
- S32 g = llround(color.mV[1] * color_scale_factor);
+ S32 g = ll_round(color.mV[1] * color_scale_factor);
if (g > MAX_COLOR)
{
g = MAX_COLOR;
@@ -551,7 +551,7 @@ void LLColor4U::setVecScaleClamp(const LLColor3& color)
}
mV[1] = g;
- S32 b = llround(color.mV[2] * color_scale_factor);
+ S32 b = ll_round(color.mV[2] * color_scale_factor);
if (b > MAX_COLOR)
{
b = MAX_COLOR;