Diffstat (limited to 'indra/llrender/llimagegl.cpp')
-rw-r--r-- indra/llrender/llimagegl.cpp | 93
1 file changed, 79 insertions(+), 14 deletions(-)
diff --git a/indra/llrender/llimagegl.cpp b/indra/llrender/llimagegl.cpp
index ff47c57c70..2579bad0b6 100644
--- a/indra/llrender/llimagegl.cpp
+++ b/indra/llrender/llimagegl.cpp
@@ -1645,7 +1645,7 @@ void LLImageGL::calcAlphaChannelOffsetAndStride()
}
}
-void LLImageGL::analyzeAlpha(const void* data_in, S32 w, S32 h)
+void LLImageGL::analyzeAlpha(const void* data_in, U32 w, U32 h)
{
if(!mNeedsAlphaAndPickMask)
{
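For context on the addressing in the next hunk: the loops read only the alpha bytes of an interleaved pixel buffer, starting mAlphaOffset bytes into the data and stepping mAlphaStride bytes per pixel. A minimal standalone sketch of that access pattern, assuming 8-bit interleaved data; the names and signature are illustrative, not the viewer's API:

#include <cstdint>
#include <cstddef>

// Sum the alpha channel of an interleaved 8-bit pixel buffer.
// For tightly packed RGBA8 data, alpha_offset = 3 and alpha_stride = 4.
uint64_t sum_alpha(const uint8_t* data, size_t pixel_count,
                   size_t alpha_offset, size_t alpha_stride)
{
    const uint8_t* current = data + alpha_offset;
    uint64_t total = 0;
    for (size_t i = 0; i < pixel_count; ++i)
    {
        total += *current;         // one alpha sample
        current += alpha_stride;   // advance to the next pixel's alpha byte
    }
    return total;
}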
@@ -1653,26 +1653,91 @@ void LLImageGL::analyzeAlpha(const void* data_in, S32 w, S32 h)
}
U32 length = w * h;
- const GLubyte* current = ((const GLubyte*) data_in) + mAlphaOffset ;
- S32 sample[16];
- memset(sample, 0, sizeof(S32)*16);
-
- for (U32 i = 0; i < length; i++)
+ U32 alphatotal = 0;
+ U32 sample[16];
+ memset(sample, 0, sizeof(U32)*16);
+
+ // generate histogram of quantized alpha.
+ // also add in the histogram of a 2x2 box-sampled version. The idea is
+ // that this skews the data toward the mid-range (and thus increases the
+ // chances of the texture not being used as a mask) for high-frequency
+ // alpha maps, which suffer the worst from aliasing when used as alpha
+ // masks.
+ if (w >= 2 && h >= 2)
+ {
+ llassert(w%2 == 0);
+ llassert(h%2 == 0);
+ const GLubyte* rowstart = ((const GLubyte*) data_in) + mAlphaOffset;
+ for (U32 y = 0; y < h; y+=2)
+ {
+ const GLubyte* current = rowstart;
+ for (U32 x = 0; x < w; x+=2)
+ {
+ const U32 s1 = current[0];
+ alphatotal += s1;
+ const U32 s2 = current[w * mAlphaStride];
+ alphatotal += s2;
+ current += mAlphaStride;
+ const U32 s3 = current[0];
+ alphatotal += s3;
+ const U32 s4 = current[w * mAlphaStride];
+ alphatotal += s4;
+ current += mAlphaStride;
+
+ ++sample[s1/16];
+ ++sample[s2/16];
+ ++sample[s3/16];
+ ++sample[s4/16];
+
+ const U32 asum = (s1+s2+s3+s4);
+ alphatotal += asum;
+ sample[asum/(16*4)] += 4;
+ }
+
+ rowstart += 2 * w * mAlphaStride;
+ }
+ length *= 2; // we sampled everything twice, essentially
+ }
+ else
{
- ++sample[*current/16];
- current += mAlphaStride ;
+ const GLubyte* current = ((const GLubyte*) data_in) + mAlphaOffset;
+ for (U32 i = 0; i < length; i++)
+ {
+ const U32 s1 = *current;
+ alphatotal += s1;
+ ++sample[s1/16];
+ current += mAlphaStride;
+ }
}
- U32 total = 0;
+
+ // if more than 1/16th of alpha samples are mid-range, this
+ // shouldn't be treated as a 1-bit mask.
+ // also, if all of the alpha samples are clumped on one half
+ // of the range (but not at an absolute extreme), then consider
+ // this to be an intentional effect and don't treat it as a mask.
+
+ U32 midrangetotal = 0;
for (U32 i = 4; i < 11; i++)
{
- total += sample[i];
+ midrangetotal += sample[i];
+ }
+ U32 lowerhalftotal = 0;
+ for (U32 i = 0; i < 8; i++)
+ {
+ lowerhalftotal += sample[i];
+ }
+ U32 upperhalftotal = 0;
+ for (U32 i = 8; i < 16; i++)
+ {
+ upperhalftotal += sample[i];
}
- if (total > length/16)
+ if (midrangetotal > length/16 || // lots of midrange, or
+ (lowerhalftotal == length && alphatotal != 0) || // all close to transparent but not all totally transparent, or
+ (upperhalftotal == length && alphatotal != 255*length)) // all close to opaque but not all totally opaque
{
- mIsMask = FALSE;
+ mIsMask = FALSE; // not suitable for masking
}
else
{
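Taken together, the heuristic in this hunk can be restated as a small standalone function. This is a condensed sketch assuming a tightly packed 8-bit alpha plane (stride 1, offset 0) and omitting the 2x2 box-sampling pass; looks_like_mask is a hypothetical name, not viewer code:

#include <cstdint>
#include <cstddef>

// Decide whether an 8-bit alpha plane looks like a 1-bit cutout mask,
// condensed from the histogram heuristic above (2x2 box-sample pass omitted).
bool looks_like_mask(const uint8_t* alpha, size_t length)
{
    uint32_t sample[16] = {0};      // histogram of alpha quantized to 16 bins
    uint64_t alphatotal = 0;
    for (size_t i = 0; i < length; ++i)
    {
        alphatotal += alpha[i];
        ++sample[alpha[i] / 16];
    }

    uint64_t midrangetotal = 0;     // bins 4..10: far from both extremes
    for (int i = 4; i < 11; ++i) { midrangetotal += sample[i]; }
    uint64_t lowerhalftotal = 0;
    for (int i = 0; i < 8; ++i)  { lowerhalftotal += sample[i]; }
    uint64_t upperhalftotal = 0;
    for (int i = 8; i < 16; ++i) { upperhalftotal += sample[i]; }

    if (midrangetotal > length / 16 ||                             // lots of midrange, or
        (lowerhalftotal == length && alphatotal != 0) ||           // all nearly transparent, but not all zero, or
        (upperhalftotal == length && alphatotal != 255u * length)) // all nearly opaque, but not all 255
    {
        return false;   // gradient-like alpha: unsuitable for masking
    }
    return true;        // alpha clusters at the extremes: treat as a mask
}

Bin i covers alpha values 16*i through 16*i+15, so the mid-range test spans roughly alpha 64 through 175; the two clump tests catch textures whose alpha is deliberately shaded near one extreme without ever reaching it.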
@@ -1748,7 +1813,7 @@ BOOL LLImageGL::getMask(const LLVector2 &tc)
{
LL_WARNS_ONCE("render") << "Ugh, non-finite u/v in mask pick" << LL_ENDL;
u = v = 0.f;
- llassert(false);
+ //llassert(false);
}
if (LL_UNLIKELY(u < 0.f || u > 1.f ||
@@ -1756,7 +1821,7 @@ BOOL LLImageGL::getMask(const LLVector2 &tc)
{
LL_WARNS_ONCE("render") << "Ugh, u/v out of range in image mask pick" << LL_ENDL;
u = v = 0.f;
- llassert(false);
+ //llassert(false);
}
S32 x = llfloor(u * mPickMaskWidth);
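The two getMask() hunks above downgrade hard asserts on bad texture coordinates to warn-and-recover, so a release build resets the coordinates instead of tripping a debug assertion. A rough standalone sketch of that defensive pattern, with std::isfinite and fprintf standing in for the viewer's llfinite and LL_WARNS_ONCE, and a plain bool array standing in for the packed pick mask:

#include <cmath>
#include <cstdio>

// Warn-and-recover handling of bad texture coordinates, in the spirit of
// getMask() above; not the viewer's implementation.
bool sample_mask(const bool* pick_mask, int width, int height, float u, float v)
{
    if (!std::isfinite(u) || !std::isfinite(v))
    {
        std::fprintf(stderr, "non-finite u/v in mask pick\n");
        u = v = 0.f;    // recover with a safe default instead of asserting
    }
    if (u < 0.f || u > 1.f || v < 0.f || v > 1.f)
    {
        std::fprintf(stderr, "u/v out of range in mask pick\n");
        u = v = 0.f;
    }
    int x = (int)std::floor(u * (float)width);    // llfloor equivalent
    int y = (int)std::floor(v * (float)height);
    if (x >= width)  { x = width - 1; }           // guard the u == 1.0 edge
    if (y >= height) { y = height - 1; }
    return pick_mask[y * width + x];
}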