| author | Tofu Linden <tofu.linden@lindenlab.com> | 2010-02-19 09:45:30 +0000 |
|---|---|---|
| committer | Tofu Linden <tofu.linden@lindenlab.com> | 2010-02-19 09:45:30 +0000 |
| commit | 93b74186a37b8baf995be8400794ccc56250e40e | |
| tree | 57cda4e655edde49898067b54d2ea35e0fa78ce0 | |
| parent | fa95084dd3e5af99d7a0944e8e3323b91ea85ee1 | |
EXT-5553 improve alpha mask ('fast alpha') heuristic
rev'd by davep
| -rw-r--r-- | indra/llrender/llimagegl.cpp | 62 |
| -rw-r--r-- | indra/llrender/llimagegl.h | 2 |
2 files changed, 52 insertions, 12 deletions
diff --git a/indra/llrender/llimagegl.cpp b/indra/llrender/llimagegl.cpp
index 3d8bd21609..2ab6e327b7 100644
--- a/indra/llrender/llimagegl.cpp
+++ b/indra/llrender/llimagegl.cpp
@@ -1639,7 +1639,7 @@ void LLImageGL::calcAlphaChannelOffsetAndStride()
 	}
 }
 
-void LLImageGL::analyzeAlpha(const void* data_in, S32 w, S32 h)
+void LLImageGL::analyzeAlpha(const void* data_in, U32 w, U32 h)
 {
 	if(!mNeedsAlphaAndPickMask)
 	{
@@ -1647,24 +1647,64 @@ void LLImageGL::analyzeAlpha(const void* data_in, S32 w, S32 h)
 	}
 
 	U32 length = w * h;
 
-	const GLubyte* current = ((const GLubyte*) data_in) + mAlphaOffset ;
-	S32 sample[16];
-	memset(sample, 0, sizeof(S32)*16);
-
-	for (U32 i = 0; i < length; i++)
+	U32 sample[16];
+	memset(sample, 0, sizeof(U32)*16);
+
+	// generate histogram of quantized alpha.
+	// also add-in the histogram of a 2x2 box-sampled version.  The idea is
+	// this will mid-skew the data (and thus increase the chances of not
+	// being used as a mask) from high-frequency alpha maps which
+	// suffer the worst from aliasing when used as alpha masks.
+	if (w >= 2 && h >= 2)
+	{
+		llassert(w%2 == 0);
+		llassert(h%2 == 0);
+		const GLubyte* rowstart = ((const GLubyte*) data_in) + mAlphaOffset;
+		for (U32 y = 0; y < h; y+=2)
+		{
+			const GLubyte* current = rowstart;
+			for (U32 x = 0; x < w; x+=2)
+			{
+				U32 s1 = current[0];
+				U32 s2 = current[w * mAlphaStride];
+				current += mAlphaStride;
+				U32 s3 = current[0];
+				U32 s4 = current[w * mAlphaStride];
+				current += mAlphaStride;
+
+				++sample[s1/16];
+				++sample[s2/16];
+				++sample[s3/16];
+				++sample[s4/16];
+
+				sample[(s1+s2+s3+s4)/(16 * 4)] += 4;
+			}
+
+			rowstart += 2 * w * mAlphaStride;
+		}
+		length += length;
+	}
+	else
 	{
-		++sample[*current/16];
-		current += mAlphaStride ;
+		const GLubyte* current = ((const GLubyte*) data_in) + mAlphaOffset;
+		for (U32 i = 0; i < length; i++)
+		{
+			++sample[*current/16];
+			current += mAlphaStride;
+		}
 	}
+
+	// if more than 1/16th of alpha samples are mid-range, this
+	// shouldn't be treated as a 1-bit mask
 
-	U32 total = 0;
+	U32 midrangetotal = 0;
 	for (U32 i = 4; i < 11; i++)
 	{
-		total += sample[i];
+		midrangetotal += sample[i];
 	}
 
-	if (total > length/16)
+	if (midrangetotal > length/16)
 	{
 		mIsMask = FALSE;
 	}
diff --git a/indra/llrender/llimagegl.h b/indra/llrender/llimagegl.h
index f0870c3fc4..1b303307f6 100644
--- a/indra/llrender/llimagegl.h
+++ b/indra/llrender/llimagegl.h
@@ -91,7 +91,7 @@ public:
 protected:
 	virtual ~LLImageGL();
 
-	void analyzeAlpha(const void* data_in, S32 w, S32 h);
+	void analyzeAlpha(const void* data_in, U32 w, U32 h);
 	void calcAlphaChannelOffsetAndStride();
 
 public:
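For reference, here is a minimal, self-contained sketch of the heuristic the patch introduces, outside the `LLImageGL` class. It assumes a tightly packed 8-bit alpha plane (stride 1, no offset) rather than walking the texture via `mAlphaOffset`/`mAlphaStride`, returns a `bool` instead of setting `mIsMask`, and the function name `looksLikeAlphaMask` is made up for illustration; it is not the viewer's actual API.

```cpp
// Hypothetical sketch of the 'fast alpha' mask heuristic from the patch above.
// Assumes a tightly packed 8-bit alpha plane; the real code uses
// mAlphaOffset/mAlphaStride and stores its verdict in mIsMask.
#include <cstdint>
#include <cstring>

bool looksLikeAlphaMask(const uint8_t* alpha, uint32_t w, uint32_t h)
{
    uint32_t sample[16];
    std::memset(sample, 0, sizeof(sample));

    uint32_t length = w * h;

    if (w >= 2 && h >= 2 && w % 2 == 0 && h % 2 == 0)
    {
        // Histogram of quantized alpha, plus a 2x2 box-sampled histogram.
        // Box-sampling skews high-frequency alpha toward the mid-range,
        // making such textures less likely to be classified as masks.
        for (uint32_t y = 0; y < h; y += 2)
        {
            for (uint32_t x = 0; x < w; x += 2)
            {
                uint32_t s1 = alpha[y * w + x];
                uint32_t s2 = alpha[y * w + x + 1];
                uint32_t s3 = alpha[(y + 1) * w + x];
                uint32_t s4 = alpha[(y + 1) * w + x + 1];

                ++sample[s1 / 16];
                ++sample[s2 / 16];
                ++sample[s3 / 16];
                ++sample[s4 / 16];

                // one extra vote per texel for the 2x2 box average
                sample[(s1 + s2 + s3 + s4) / (16 * 4)] += 4;
            }
        }
        length += length; // twice as many samples as texels were counted
    }
    else
    {
        // Fallback: plain histogram of quantized alpha.
        for (uint32_t i = 0; i < length; ++i)
        {
            ++sample[alpha[i] / 16];
        }
    }

    // If more than 1/16th of the samples land in the mid-range buckets,
    // the texture is too soft to render well as a 1-bit mask.
    uint32_t midrangetotal = 0;
    for (uint32_t i = 4; i < 11; ++i)
    {
        midrangetotal += sample[i];
    }
    return midrangetotal <= length / 16;
}
```

The doubling of `length` mirrors the patch: in the 2x2 path every texel contributes two samples (one raw, one box-averaged), so the 1/16 mid-range threshold has to be taken against the doubled sample count.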
