diff options
| author | Rye Mutt <rye@alchemyviewer.org> | 2023-09-28 16:05:37 -0400 | 
|---|---|---|
| committer | GitHub <noreply@github.com> | 2023-09-28 15:05:37 -0500 | 
| commit | a4c2eab969b1971389408de5a3927f87d74a6d6d (patch) | |
| tree | 0336e86ccab4476ae1e9f0851752ce4e516be6b0 | |
| parent | b49632f4278078b0d10129a8c6742e145946cfff (diff) | |
Fix integer overflow when framebuffers are extremely high resolution resulting in INT_MAX texture bias (#393)
| -rw-r--r-- | indra/llrender/llimagegl.cpp | 24 | ||||
| -rw-r--r-- | indra/llrender/llimagegl.h | 8 | ||||
| -rw-r--r-- | indra/newview/llviewertexture.cpp | 5 | 
3 files changed, 20 insertions(+), 17 deletions(-)
| diff --git a/indra/llrender/llimagegl.cpp b/indra/llrender/llimagegl.cpp index b77f98d65e..c6fd824c4e 100644 --- a/indra/llrender/llimagegl.cpp +++ b/indra/llrender/llimagegl.cpp @@ -58,7 +58,7 @@ U32 wpo2(U32 i);  // texture memory accounting (for OS X)  static LLMutex sTexMemMutex; -static std::unordered_map<U32, U32> sTextureAllocs; +static std::unordered_map<U32, U64> sTextureAllocs;  static U64 sTextureBytes = 0;  // track a texture alloc on the currently bound texture. @@ -67,7 +67,7 @@ static void alloc_tex_image(U32 width, U32 height, U32 pixformat)  {      U32 texUnit = gGL.getCurrentTexUnitIndex();      U32 texName = gGL.getTexUnit(texUnit)->getCurrTexture(); -    S32 size = LLImageGL::dataFormatBytes(pixformat, width, height); +    U64 size = LLImageGL::dataFormatBytes(pixformat, width, height);      llassert(size >= 0); @@ -296,7 +296,7 @@ S32 LLImageGL::dataFormatBits(S32 dataformat)  }  //static -S32 LLImageGL::dataFormatBytes(S32 dataformat, S32 width, S32 height) +S64 LLImageGL::dataFormatBytes(S32 dataformat, S32 width, S32 height)  {      switch (dataformat)      { @@ -312,8 +312,8 @@ S32 LLImageGL::dataFormatBytes(S32 dataformat, S32 width, S32 height)      default:          break;      } -	S32 bytes ((width*height*dataFormatBits(dataformat)+7)>>3); -	S32 aligned = (bytes+3)&~3; +	S64 bytes (((S64)width * (S64)height * (S64)dataFormatBits(dataformat)+7)>>3); +	S64 aligned = (bytes+3)&~3;  	return aligned;  } @@ -518,7 +518,7 @@ void LLImageGL::init(BOOL usemipmaps)  	// so that it is obvious by visual inspection if we forgot to  	// init a field. 
-	mTextureMemory = (S32Bytes)0; +	mTextureMemory = S64Bytes(0);  	mLastBindTime = 0.f;  	mPickMask = NULL; @@ -1744,7 +1744,7 @@ BOOL LLImageGL::createGLTexture(S32 discard_level, const U8* data_in, BOOL data_      } -    mTextureMemory = (S32Bytes)getMipBytes(mCurrentDiscardLevel); +    mTextureMemory = (S64Bytes)getMipBytes(mCurrentDiscardLevel);      mTexelsInGLTexture = getWidth() * getHeight();      // mark this as bound at this point, so we don't throw it out immediately @@ -1938,9 +1938,9 @@ void LLImageGL::destroyGLTexture()  	if (mTexName != 0)  	{ -		if(mTextureMemory != S32Bytes(0)) +		if(mTextureMemory != S64Bytes(0))  		{ -			mTextureMemory = (S32Bytes)0; +			mTextureMemory = (S64Bytes)0;  		}  		LLImageGL::deleteTextures(1, &mTexName); @@ -2036,7 +2036,7 @@ S32 LLImageGL::getWidth(S32 discard_level) const  	return width;  } -S32 LLImageGL::getBytes(S32 discard_level) const +S64 LLImageGL::getBytes(S32 discard_level) const  {  	if (discard_level < 0)  	{ @@ -2049,7 +2049,7 @@ S32 LLImageGL::getBytes(S32 discard_level) const  	return dataFormatBytes(mFormatPrimary, w, h);  } -S32 LLImageGL::getMipBytes(S32 discard_level) const +S64 LLImageGL::getMipBytes(S32 discard_level) const  {  	if (discard_level < 0)  	{ @@ -2057,7 +2057,7 @@ S32 LLImageGL::getMipBytes(S32 discard_level) const  	}  	S32 w = mWidth>>discard_level;  	S32 h = mHeight>>discard_level; -	S32 res = dataFormatBytes(mFormatPrimary, w, h); +	S64 res = dataFormatBytes(mFormatPrimary, w, h);  	if (mUseMipMaps)  	{  		while (w > 1 && h > 1) diff --git a/indra/llrender/llimagegl.h b/indra/llrender/llimagegl.h index 243aeaea25..a9a6b93cb3 100644 --- a/indra/llrender/llimagegl.h +++ b/indra/llrender/llimagegl.h @@ -65,7 +65,7 @@ public:  	// Size calculation  	static S32 dataFormatBits(S32 dataformat); -	static S32 dataFormatBytes(S32 dataformat, S32 width, S32 height); +	static S64 dataFormatBytes(S32 dataformat, S32 width, S32 height);  	static S32 dataFormatComponents(S32 dataformat);  	BOOL 
updateBindStats() const ; @@ -145,8 +145,8 @@ public:  	S32	 getWidth(S32 discard_level = -1) const;  	S32	 getHeight(S32 discard_level = -1) const;  	U8	 getComponents() const { return mComponents; } -	S32  getBytes(S32 discard_level = -1) const; -	S32  getMipBytes(S32 discard_level = -1) const; +	S64  getBytes(S32 discard_level = -1) const; +	S64  getMipBytes(S32 discard_level = -1) const;  	BOOL getBoundRecently() const;  	BOOL isJustBound() const;  	BOOL getHasExplicitFormat() const { return mHasExplicitFormat; } @@ -208,7 +208,7 @@ public:  public:  	// Various GL/Rendering options -	S32Bytes mTextureMemory; +	S64Bytes mTextureMemory;  	mutable F32  mLastBindTime;	// last time this was bound, by discard level  private: diff --git a/indra/newview/llviewertexture.cpp b/indra/newview/llviewertexture.cpp index 9fc092d4b9..9336d99555 100644 --- a/indra/newview/llviewertexture.cpp +++ b/indra/newview/llviewertexture.cpp @@ -534,9 +534,12 @@ void LLViewerTexture::updateClass()      static LLCachedControl<U32> max_vram_budget(gSavedSettings, "RenderMaxVRAMBudget", 0); +	F64 texture_bytes_alloc = LLImageGL::getTextureBytesAllocated() / 1024.0 / 512.0; +	F64 vertex_bytes_alloc = LLVertexBuffer::getBytesAllocated() / 1024.0 / 512.0; +      // get an estimate of how much video memory we're using       // NOTE: our metrics miss about half the vram we use, so this biases high but turns out to typically be within 5% of the real number -    F32 used = (LLImageGL::getTextureBytesAllocated() + LLVertexBuffer::getBytesAllocated()) / 1024 / 512; +	F32 used = (F32)ll_round(texture_bytes_alloc + vertex_bytes_alloc);      F32 budget = max_vram_budget == 0 ? gGLManager.mVRAM : max_vram_budget; | 
