author    Tofu Linden <tofu.linden@lindenlab.com>    2010-08-24 18:44:39 +0100
committer Tofu Linden <tofu.linden@lindenlab.com>    2010-08-24 18:44:39 +0100
commit    98cc2365034a93c69704daa69efb389799cc9627 (patch)
tree      4c3ec75b78a26a736f18a2153af025040ae05a4b /indra/llrender/llimagegl.cpp
parent    6ba23344c95157793af9e4154933ae8df61630e8 (diff)
Backed out changeset a62bf7c0af21
Backing out this merge that I pushed (prematurely) to the wrong place.
Diffstat (limited to 'indra/llrender/llimagegl.cpp')
-rw-r--r--  indra/llrender/llimagegl.cpp | 101
1 files changed, 16 insertions, 85 deletions
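For orientation before the hunks below: the change being backed out had replaced the simple mid-range test in LLImageGL::analyzeAlpha() with a quantized-alpha histogram that also folds in a 2x2 box-sampled pass (the removed comments note this was meant to skew high-frequency alpha maps toward the mid-range so they are less likely to be treated as 1-bit masks). This backout restores the simpler form. As a rough standalone sketch of that restored heuristic only (the helper name and the assumption of a plain, contiguous 8-bit alpha buffer are illustrative; the real method walks strided, interleaved pixel data via mAlphaOffset/mAlphaStride):

    #include <cstdint>
    #include <cstddef>

    // Quantize each 8-bit alpha sample into one of 16 buckets; if more than
    // 1/16th of the samples land in the mid-range buckets (4..10), the texture
    // has too much partial transparency to render well as a 1-bit alpha mask.
    bool looksLikeAlphaMask(const uint8_t* alpha, size_t count)
    {
        uint32_t bucket[16] = {0};
        for (size_t i = 0; i < count; ++i)
        {
            ++bucket[alpha[i] / 16];
        }

        uint32_t midrange = 0;
        for (int b = 4; b < 11; ++b)
        {
            midrange += bucket[b];
        }

        return midrange <= count / 16; // few mid-range texels => mask-friendly
    }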
diff --git a/indra/llrender/llimagegl.cpp b/indra/llrender/llimagegl.cpp
index 90fcf6ab3f..7860b32f34 100644
--- a/indra/llrender/llimagegl.cpp
+++ b/indra/llrender/llimagegl.cpp
@@ -144,12 +144,12 @@ void LLImageGL::checkTexSize(bool forced) const
if (gDebugSession)
{
gFailLog << "wrong texture size and discard level!" <<
- mWidth << " Height: " << mHeight << " Current Level: " << (S32)mCurrentDiscardLevel << std::endl;
+ mWidth << " Height: " << mHeight << " Current Level: " << mCurrentDiscardLevel << std::endl;
}
else
{
llerrs << "wrong texture size and discard level: width: " <<
- mWidth << " Height: " << mHeight << " Current Level: " << (S32)mCurrentDiscardLevel << llendl ;
+ mWidth << " Height: " << mHeight << " Current Level: " << mCurrentDiscardLevel << llendl ;
}
}
@@ -1051,12 +1051,8 @@ BOOL LLImageGL::setSubImageFromFrameBuffer(S32 fb_x, S32 fb_y, S32 x_pos, S32 y_
{
if (gGL.getTexUnit(0)->bind(this, false, true))
{
- if(gGLManager.mDebugGPU)
- {
- llinfos << "Calling glCopyTexSubImage2D(...)" << llendl ;
- checkTexSize(true) ;
- llcallstacks << fb_x << " : " << fb_y << " : " << x_pos << " : " << y_pos << " : " << width << " : " << height << llcallstacksendl ;
- }
+ checkTexSize(true) ;
+ llcallstacks << fb_x << " : " << fb_y << " : " << x_pos << " : " << y_pos << " : " << width << " : " << height << llcallstacksendl ;
glCopyTexSubImage2D(GL_TEXTURE_2D, 0, fb_x, fb_y, x_pos, y_pos, width, height);
mGLTextureCreated = true;
@@ -1652,7 +1648,7 @@ void LLImageGL::calcAlphaChannelOffsetAndStride()
}
}
-void LLImageGL::analyzeAlpha(const void* data_in, U32 w, U32 h)
+void LLImageGL::analyzeAlpha(const void* data_in, S32 w, S32 h)
{
if(!mNeedsAlphaAndPickMask)
{
@@ -1660,91 +1656,26 @@ void LLImageGL::analyzeAlpha(const void* data_in, U32 w, U32 h)
}
U32 length = w * h;
- U32 alphatotal = 0;
+ const GLubyte* current = ((const GLubyte*) data_in) + mAlphaOffset ;
- U32 sample[16];
- memset(sample, 0, sizeof(U32)*16);
-
- // generate histogram of quantized alpha.
- // also add-in the histogram of a 2x2 box-sampled version. The idea is
- // this will mid-skew the data (and thus increase the chances of not
- // being used as a mask) from high-frequency alpha maps which
- // suffer the worst from aliasing when used as alpha masks.
- if (w >= 2 && h >= 2)
- {
- llassert(w%2 == 0);
- llassert(h%2 == 0);
- const GLubyte* rowstart = ((const GLubyte*) data_in) + mAlphaOffset;
- for (U32 y = 0; y < h; y+=2)
- {
- const GLubyte* current = rowstart;
- for (U32 x = 0; x < w; x+=2)
- {
- const U32 s1 = current[0];
- alphatotal += s1;
- const U32 s2 = current[w * mAlphaStride];
- alphatotal += s2;
- current += mAlphaStride;
- const U32 s3 = current[0];
- alphatotal += s3;
- const U32 s4 = current[w * mAlphaStride];
- alphatotal += s4;
- current += mAlphaStride;
-
- ++sample[s1/16];
- ++sample[s2/16];
- ++sample[s3/16];
- ++sample[s4/16];
-
- const U32 asum = (s1+s2+s3+s4);
- alphatotal += asum;
- sample[asum/(16*4)] += 4;
- }
-
- rowstart += 2 * w * mAlphaStride;
- }
- length *= 2; // we sampled everything twice, essentially
- }
- else
+ S32 sample[16];
+ memset(sample, 0, sizeof(S32)*16);
+
+ for (U32 i = 0; i < length; i++)
{
- const GLubyte* current = ((const GLubyte*) data_in) + mAlphaOffset;
- for (U32 i = 0; i < length; i++)
- {
- const U32 s1 = *current;
- alphatotal += s1;
- ++sample[s1/16];
- current += mAlphaStride;
- }
+ ++sample[*current/16];
+ current += mAlphaStride ;
}
-
- // if more than 1/16th of alpha samples are mid-range, this
- // shouldn't be treated as a 1-bit mask
- // also, if all of the alpha samples are clumped on one half
- // of the range (but not at an absolute extreme), then consider
- // this to be an intentional effect and don't treat as a mask.
-
- U32 midrangetotal = 0;
+ U32 total = 0;
for (U32 i = 4; i < 11; i++)
{
- midrangetotal += sample[i];
- }
- U32 lowerhalftotal = 0;
- for (U32 i = 0; i < 8; i++)
- {
- lowerhalftotal += sample[i];
- }
- U32 upperhalftotal = 0;
- for (U32 i = 8; i < 16; i++)
- {
- upperhalftotal += sample[i];
+ total += sample[i];
}
- if (midrangetotal > length/16 || // lots of midrange, or
- (lowerhalftotal == length && alphatotal != 0) || // all close to transparent but not all totally transparent, or
- (upperhalftotal == length && alphatotal != 255*length)) // all close to opaque but not all totally opaque
+ if (total > length/16)
{
- mIsMask = FALSE; // not suitable for masking
+ mIsMask = FALSE;
}
else
{