Diffstat (limited to 'indra/llrender')
-rwxr-xr-x [-rw-r--r--]  indra/llrender/llimagegl.cpp        149
-rwxr-xr-x [-rw-r--r--]  indra/llrender/llimagegl.h           36
-rw-r--r--               indra/llrender/llrender.cpp          10
-rw-r--r--               indra/llrender/llvertexbuffer.cpp   130
-rw-r--r--               indra/llrender/llvertexbuffer.h      24
5 files changed, 116 insertions, 233 deletions
diff --git a/indra/llrender/llimagegl.cpp b/indra/llrender/llimagegl.cpp
index 78591ddd38..ab744fb7ff 100644..100755
--- a/indra/llrender/llimagegl.cpp
+++ b/indra/llrender/llimagegl.cpp
@@ -43,7 +43,6 @@
const F32 MIN_TEXTURE_LIFETIME = 10.f;
//statics
-LLGLuint LLImageGL::sCurrentBoundTextures[MAX_GL_TEXTURE_UNITS] = { 0 };
U32 LLImageGL::sUniqueCount = 0;
U32 LLImageGL::sBindCount = 0;
@@ -65,19 +64,10 @@ std::set<LLImageGL*> LLImageGL::sImageList;
//****************************************************************************************************
//-----------------------
//debug use
-BOOL gAuditTexture = FALSE ;
-#define MAX_TEXTURE_LOG_SIZE 22 //2048 * 2048
-std::vector<S32> LLImageGL::sTextureLoadedCounter(MAX_TEXTURE_LOG_SIZE + 1) ;
-std::vector<S32> LLImageGL::sTextureBoundCounter(MAX_TEXTURE_LOG_SIZE + 1) ;
-std::vector<S32> LLImageGL::sTextureCurBoundCounter(MAX_TEXTURE_LOG_SIZE + 1) ;
S32 LLImageGL::sCurTexSizeBar = -1 ;
S32 LLImageGL::sCurTexPickSize = -1 ;
-LLPointer<LLImageGL> LLImageGL::sHighlightTexturep = NULL;
-S32 LLImageGL::sMaxCatagories = 1 ;
+S32 LLImageGL::sMaxCategories = 1 ;
-std::vector<S32> LLImageGL::sTextureMemByCategory;
-std::vector<S32> LLImageGL::sTextureMemByCategoryBound ;
-std::vector<S32> LLImageGL::sTextureCurMemByCategoryBound ;
//------------------------
//****************************************************************************************************
//End for texture auditing use only
@@ -175,49 +165,11 @@ BOOL is_little_endian()
//static
void LLImageGL::initClass(S32 num_catagories)
{
- sMaxCatagories = num_catagories ;
-
- sTextureMemByCategory.resize(sMaxCatagories);
- sTextureMemByCategoryBound.resize(sMaxCatagories) ;
- sTextureCurMemByCategoryBound.resize(sMaxCatagories) ;
}
//static
void LLImageGL::cleanupClass()
{
- sTextureMemByCategory.clear() ;
- sTextureMemByCategoryBound.clear() ;
- sTextureCurMemByCategoryBound.clear() ;
-}
-
-//static
-void LLImageGL::setHighlightTexture(S32 category)
-{
- const S32 dim = 128;
- sHighlightTexturep = new LLImageGL() ;
- LLPointer<LLImageRaw> image_raw = new LLImageRaw(dim,dim,3);
- U8* data = image_raw->getData();
- for (S32 i = 0; i<dim; i++)
- {
- for (S32 j = 0; j<dim; j++)
- {
- const S32 border = 2;
- if (i<border || j<border || i>=(dim-border) || j>=(dim-border))
- {
- *data++ = 0xff;
- *data++ = 0xff;
- *data++ = 0xff;
- }
- else
- {
- *data++ = 0xff;
- *data++ = 0xff;
- *data++ = 0x00;
- }
- }
- }
- sHighlightTexturep->createGLTexture(0, image_raw, 0, TRUE, category);
- image_raw = NULL;
}
//static
@@ -285,31 +237,11 @@ void LLImageGL::updateStats(F32 current_time)
sLastFrameTime = current_time;
sBoundTextureMemoryInBytes = sCurBoundTextureMemory;
sCurBoundTextureMemory = 0;
-
- if(gAuditTexture)
- {
- for(U32 i = 0 ; i < sTextureCurBoundCounter.size() ; i++)
- {
- sTextureBoundCounter[i] = sTextureCurBoundCounter[i] ;
- sTextureCurBoundCounter[i] = 0 ;
- }
- for(U32 i = 0 ; i < sTextureCurMemByCategoryBound.size() ; i++)
- {
- sTextureMemByCategoryBound[i] = sTextureCurMemByCategoryBound[i] ;
- sTextureCurMemByCategoryBound[i] = 0 ;
- }
- }
}
//static
S32 LLImageGL::updateBoundTexMem(const S32 mem, const S32 ncomponents, S32 category)
{
- if(gAuditTexture && ncomponents > 0 && category > -1)
- {
- sTextureCurBoundCounter[getTextureCounterIndex(mem / ncomponents)]++ ;
- sTextureCurMemByCategoryBound[category] += mem ;
- }
-
LLImageGL::sCurBoundTextureMemory += mem ;
return LLImageGL::sCurBoundTextureMemory;
}
@@ -1284,7 +1216,7 @@ BOOL LLImageGL::createGLTexture(S32 discard_level, const LLImageRaw* imageraw, S
return TRUE ;
}
- setCategory(category) ;
+ setCategory(category);
const U8* rawdata = imageraw->getData();
return createGLTexture(discard_level, rawdata, FALSE, usename);
}
@@ -1362,11 +1294,6 @@ BOOL LLImageGL::createGLTexture(S32 discard_level, const U8* data_in, BOOL data_
{
sGlobalTextureMemoryInBytes -= mTextureMemory;
- if(gAuditTexture)
- {
- decTextureCounter(mTextureMemory, mComponents, mCategory) ;
- }
-
LLImageGL::deleteTextures(1, &old_name);
stop_glerror();
@@ -1376,10 +1303,6 @@ BOOL LLImageGL::createGLTexture(S32 discard_level, const U8* data_in, BOOL data_
sGlobalTextureMemoryInBytes += mTextureMemory;
mTexelsInGLTexture = getWidth() * getHeight() ;
- if(gAuditTexture)
- {
- incTextureCounter(mTextureMemory, mComponents, mCategory) ;
- }
// mark this as bound at this point, so we don't throw it out immediately
mLastBindTime = sLastFrameTime;
return TRUE;
@@ -1536,10 +1459,6 @@ void LLImageGL::destroyGLTexture()
{
if(mTextureMemory)
{
- if(gAuditTexture)
- {
- decTextureCounter(mTextureMemory, mComponents, mCategory) ;
- }
sGlobalTextureMemoryInBytes -= mTextureMemory;
mTextureMemory = 0;
}
@@ -1969,70 +1888,6 @@ BOOL LLImageGL::getMask(const LLVector2 &tc)
return res;
}
-void LLImageGL::setCategory(S32 category)
-{
-#if 0 //turn this off temporarily because it is not in use now.
- if(!gAuditTexture)
- {
- return ;
- }
- if(mCategory != category)
- {
- if(mCategory > -1)
- {
- sTextureMemByCategory[mCategory] -= mTextureMemory ;
- }
- if(category > -1 && category < sMaxCatagories)
- {
- sTextureMemByCategory[category] += mTextureMemory ;
- mCategory = category;
- }
- else
- {
- mCategory = -1 ;
- }
- }
-#endif
-}
-
-//for debug use
-//val is a "power of two" number
-S32 LLImageGL::getTextureCounterIndex(U32 val)
-{
- //index range is [0, MAX_TEXTURE_LOG_SIZE].
- if(val < 2)
- {
- return 0 ;
- }
- else if(val >= (1 << MAX_TEXTURE_LOG_SIZE))
- {
- return MAX_TEXTURE_LOG_SIZE ;
- }
- else
- {
- S32 ret = 0 ;
- while(val >>= 1)
- {
- ++ret;
- }
- return ret ;
- }
-}
-
-//static
-void LLImageGL::incTextureCounter(U32 val, S32 ncomponents, S32 category)
-{
- sTextureLoadedCounter[getTextureCounterIndex(val)]++ ;
- sTextureMemByCategory[category] += (S32)val * ncomponents ;
-}
-
-//static
-void LLImageGL::decTextureCounter(U32 val, S32 ncomponents, S32 category)
-{
- sTextureLoadedCounter[getTextureCounterIndex(val)]-- ;
- sTextureMemByCategory[category] += (S32)val * ncomponents ;
-}
-
void LLImageGL::setCurTexSizebar(S32 index, BOOL set_pick_size)
{
sCurTexSizeBar = index ;
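
The audit counters deleted above were indexed by a floor(log2) bucket of the per-component texel count, clamped to [0, MAX_TEXTURE_LOG_SIZE] where MAX_TEXTURE_LOG_SIZE is 22 (2048 * 2048 texels). For reference, a minimal standalone sketch of that bucketing, equivalent to the removed LLImageGL::getTextureCounterIndex(); the free-function form and its name are illustrative only:

static const S32 MAX_TEXTURE_LOG_SIZE = 22;    // 2048 * 2048 texels, as in the deleted #define

// Map a per-component texel count to a histogram bucket in [0, MAX_TEXTURE_LOG_SIZE].
static S32 texture_counter_index(U32 val)
{
    if (val < 2)
    {
        return 0;                              // anything below 2 texels lands in bucket 0
    }
    if (val >= (1U << MAX_TEXTURE_LOG_SIZE))
    {
        return MAX_TEXTURE_LOG_SIZE;           // clamp oversized textures to the last bucket
    }
    S32 index = 0;
    while (val >>= 1)                          // floor(log2(val))
    {
        ++index;
    }
    return index;
}
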
diff --git a/indra/llrender/llimagegl.h b/indra/llrender/llimagegl.h
index 2cfb15b0d9..2060be914b 100644..100755
--- a/indra/llrender/llimagegl.h
+++ b/indra/llrender/llimagegl.h
@@ -102,8 +102,8 @@ public:
static void setManualImage(U32 target, S32 miplevel, S32 intformat, S32 width, S32 height, U32 pixformat, U32 pixtype, const void *pixels);
BOOL createGLTexture() ;
- BOOL createGLTexture(S32 discard_level, const LLImageRaw* imageraw, S32 usename = 0, BOOL to_create = TRUE,
- S32 category = sMaxCatagories - 1);
+ BOOL createGLTexture(S32 discard_level, const LLImageRaw* imageraw, S32 usename = 0, BOOL to_create = TRUE,
+ S32 category = sMaxCategories-1);
BOOL createGLTexture(S32 discard_level, const U8* data, BOOL data_hasmips = FALSE, S32 usename = 0);
void setImage(const LLImageRaw* imageraw);
void setImage(const U8* data_in, BOOL data_hasmips = FALSE);
@@ -234,8 +234,6 @@ public:
static S32 sCount;
static F32 sLastFrameTime;
-
- static LLGLuint sCurrentBoundTextures[MAX_GL_TEXTURE_UNITS]; // Currently bound texture ID
// Global memory statistics
static S32 sGlobalTextureMemoryInBytes; // Tracks main memory texmem
@@ -257,9 +255,10 @@ public:
public:
static void initClass(S32 num_catagories) ;
static void cleanupClass() ;
-private:
- static S32 sMaxCatagories ;
+private:
+ static S32 sMaxCategories;
+
//the flag to allow to call readBackRaw(...).
//can be removed if we do not use that function at all.
static BOOL sAllowReadBackRaw ;
@@ -269,39 +268,22 @@ private:
//****************************************************************************************************
private:
S32 mCategory ;
-public:
- void setCategory(S32 category) ;
- S32 getCategory()const {return mCategory ;}
-
+public:
+ void setCategory(S32 category) {mCategory = category;}
+ S32 getCategory()const {return mCategory;}
+
//for debug use: show texture size distribution
//----------------------------------------
- static LLPointer<LLImageGL> sHighlightTexturep; //default texture to replace normal textures
- static std::vector<S32> sTextureLoadedCounter ;
- static std::vector<S32> sTextureBoundCounter ;
- static std::vector<S32> sTextureCurBoundCounter ;
static S32 sCurTexSizeBar ;
static S32 sCurTexPickSize ;
- static void setHighlightTexture(S32 category) ;
- static S32 getTextureCounterIndex(U32 val) ;
- static void incTextureCounter(U32 val, S32 ncomponents, S32 category) ;
- static void decTextureCounter(U32 val, S32 ncomponents, S32 category) ;
static void setCurTexSizebar(S32 index, BOOL set_pick_size = TRUE) ;
static void resetCurTexSizebar();
- //----------------------------------------
- //for debug use: show texture category distribution
- //----------------------------------------
-
- static std::vector<S32> sTextureMemByCategory;
- static std::vector<S32> sTextureMemByCategoryBound ;
- static std::vector<S32> sTextureCurMemByCategoryBound ;
- //----------------------------------------
//****************************************************************************************************
//End of definitions for texture auditing use only
//****************************************************************************************************
};
-extern BOOL gAuditTexture;
#endif // LL_LLIMAGEGL_H
diff --git a/indra/llrender/llrender.cpp b/indra/llrender/llrender.cpp
index 03a9884c2b..93bac4c779 100644
--- a/indra/llrender/llrender.cpp
+++ b/indra/llrender/llrender.cpp
@@ -246,14 +246,6 @@ bool LLTexUnit::bind(LLTexture* texture, bool for_rendering, bool forceBind)
}
//in audit, replace the selected texture by the default one.
- if(gAuditTexture && for_rendering && LLImageGL::sCurTexPickSize > 0)
- {
- if(texture->getWidth() * texture->getHeight() == LLImageGL::sCurTexPickSize)
- {
- gl_tex->updateBindStats(gl_tex->mTextureMemory);
- return bind(LLImageGL::sHighlightTexturep.get());
- }
- }
if ((mCurrTexture != gl_tex->getTexName()) || forceBind)
{
activate();
@@ -997,7 +989,7 @@ void LLLightState::setSpotDirection(const LLVector3& direction)
const glh::matrix4f& mat = gGL.getModelviewMatrix();
mat.mult_matrix_dir(dir);
- mSpotDirection.set(direction);
+ mSpotDirection.set(dir.v);
}
}
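
The llrender.cpp hunk above is a behavior fix: setSpotDirection() rotated the incoming direction into eye space with the current modelview matrix, but then stored the untransformed input. A condensed sketch of the corrected data flow, assuming the glh helpers visible in the surrounding context (mult_matrix_dir applies only the rotational part of the matrix) and omitting the dirty-check bookkeeping the real function performs:

void LLLightState::setSpotDirection(const LLVector3& direction)
{
    // Rotate the spot direction into eye space using the current modelview matrix.
    glh::vec3f dir(direction.mV);
    const glh::matrix4f& mat = gGL.getModelviewMatrix();
    mat.mult_matrix_dir(dir);        // direction transform: rotation only, no translation

    // Store the transformed vector (dir.v), not the untransformed input.
    mSpotDirection.set(dir.v);
}
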
diff --git a/indra/llrender/llvertexbuffer.cpp b/indra/llrender/llvertexbuffer.cpp
index 20a450fbfb..eb302392bb 100644
--- a/indra/llrender/llvertexbuffer.cpp
+++ b/indra/llrender/llvertexbuffer.cpp
@@ -148,7 +148,7 @@ U32 wpo2(U32 i)
return r;
}
-U8* LLVBOPool::allocate(U32& name, U32 size)
+volatile U8* LLVBOPool::allocate(U32& name, U32 size)
{
llassert(nhpo2(size) == size);
@@ -159,20 +159,25 @@ U8* LLVBOPool::allocate(U32& name, U32 size)
mFreeList.resize(i+1);
}
- U8* ret = NULL;
+ volatile U8* ret = NULL;
if (mFreeList[i].empty())
{
//make a new buffer
glGenBuffersARB(1, &name);
glBindBufferARB(mType, name);
- glBufferDataARB(mType, size, 0, mUsage);
LLVertexBuffer::sAllocatedBytes += size;
- if (LLVertexBuffer::sDisableVBOMapping)
+ if (LLVertexBuffer::sDisableVBOMapping || mUsage != GL_DYNAMIC_DRAW_ARB)
{
+ glBufferDataARB(mType, size, 0, mUsage);
ret = (U8*) ll_aligned_malloc_16(size);
}
+ else
+ { //always use a true hint of static draw when allocating non-client-backed buffers
+ glBufferDataARB(mType, size, 0, GL_STATIC_DRAW_ARB);
+ }
+
glBindBufferARB(mType, 0);
}
else
@@ -188,7 +193,7 @@ U8* LLVBOPool::allocate(U32& name, U32 size)
return ret;
}
-void LLVBOPool::release(U32 name, U8* buffer, U32 size)
+void LLVBOPool::release(U32 name, volatile U8* buffer, U32 size)
{
llassert(nhpo2(size) == size);
@@ -201,8 +206,15 @@ void LLVBOPool::release(U32 name, U8* buffer, U32 size)
rec.mClientData = buffer;
sBytesPooled += size;
-
- mFreeList[i].push_back(rec);
+
+ if (!LLVertexBuffer::sDisableVBOMapping && mUsage == GL_DYNAMIC_DRAW_ARB)
+ {
+ glDeleteBuffersARB(1, &rec.mGLName);
+ }
+ else
+ {
+ mFreeList[i].push_back(rec);
+ }
}
void LLVBOPool::cleanup()
@@ -221,7 +233,7 @@ void LLVBOPool::cleanup()
if (r.mClientData)
{
- ll_aligned_free_16(r.mClientData);
+ ll_aligned_free_16((void*) r.mClientData);
}
l.pop_front();
@@ -536,7 +548,7 @@ void LLVertexBuffer::validateRange(U32 start, U32 end, U32 count, U32 indices_of
void LLVertexBuffer::drawRange(U32 mode, U32 start, U32 end, U32 count, U32 indices_offset) const
{
validateRange(start, end, count, indices_offset);
-
+ mMappable = FALSE;
gGL.syncMatrices();
llassert(mNumVerts >= 0);
@@ -591,7 +603,7 @@ void LLVertexBuffer::drawRange(U32 mode, U32 start, U32 end, U32 count, U32 indi
void LLVertexBuffer::draw(U32 mode, U32 count, U32 indices_offset) const
{
llassert(!LLGLSLShader::sNoFixedFunction || LLGLSLShader::sCurBoundShaderPtr != NULL);
-
+ mMappable = FALSE;
gGL.syncMatrices();
llassert(mNumIndices >= 0);
@@ -637,7 +649,7 @@ void LLVertexBuffer::draw(U32 mode, U32 count, U32 indices_offset) const
void LLVertexBuffer::drawArrays(U32 mode, U32 first, U32 count) const
{
llassert(!LLGLSLShader::sNoFixedFunction || LLGLSLShader::sCurBoundShaderPtr != NULL);
-
+ mMappable = FALSE;
gGL.syncMatrices();
llassert(mNumVerts >= 0);
@@ -787,9 +799,26 @@ LLVertexBuffer::LLVertexBuffer(U32 typemask, S32 usage) :
if (mUsage && mUsage != GL_STREAM_DRAW_ARB)
{ //only stream_draw and dynamic_draw are supported when using VBOs, dynamic draw is the default
- mUsage = GL_DYNAMIC_DRAW_ARB;
+ if (sDisableVBOMapping)
+ { //always use stream draw if VBO mapping is disabled
+ mUsage = GL_STREAM_DRAW_ARB;
+ }
+ else
+ {
+ mUsage = GL_DYNAMIC_DRAW_ARB;
+ }
}
-
+
+
+ if (mUsage == GL_DYNAMIC_DRAW_ARB && !sDisableVBOMapping)
+ {
+ mMappable = TRUE;
+ }
+ else
+ {
+ mMappable = FALSE;
+ }
+
//zero out offsets
for (U32 i = 0; i < TYPE_MAX; i++)
{
@@ -1042,7 +1071,7 @@ void LLVertexBuffer::destroyGLBuffer()
}
else
{
- FREE_MEM(sPrivatePoolp, mMappedData) ;
+ FREE_MEM(sPrivatePoolp, (void*) mMappedData) ;
mMappedData = NULL;
mEmpty = TRUE;
}
@@ -1063,7 +1092,7 @@ void LLVertexBuffer::destroyGLIndices()
}
else
{
- FREE_MEM(sPrivatePoolp, mMappedIndexData) ;
+ FREE_MEM(sPrivatePoolp, (void*) mMappedIndexData) ;
mMappedIndexData = NULL;
mEmpty = TRUE;
}
@@ -1282,8 +1311,11 @@ bool expand_region(LLVertexBuffer::MappedRegion& region, S32 index, S32 count)
return true;
}
+static LLFastTimer::DeclareTimer FTM_VBO_MAP_BUFFER_RANGE("VBO Map Range");
+static LLFastTimer::DeclareTimer FTM_VBO_MAP_BUFFER("VBO Map");
+
// Map for data access
-U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_range)
+volatile U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_range)
{
bindGLBuffer(true);
LLMemType mt2(LLMemType::MTYPE_VERTEX_MAP_BUFFER);
@@ -1298,7 +1330,7 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran
if (useVBOs())
{
- if (sDisableVBOMapping || gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
+ if (!mMappable || gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
{
if (count == -1)
{
@@ -1323,7 +1355,7 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran
if (!mapped)
{
//not already mapped, map new region
- MappedRegion region(type, !sDisableVBOMapping && map_range ? -1 : index, count);
+ MappedRegion region(type, mMappable && map_range ? -1 : index, count);
mMappedVertexRegions.push_back(region);
}
}
@@ -1340,19 +1372,20 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran
sMappedCount++;
stop_glerror();
- if(sDisableVBOMapping)
+ if(!mMappable)
{
map_range = false;
}
else
{
- U8* src = NULL;
+ volatile U8* src = NULL;
waitFence();
if (gGLManager.mHasMapBufferRange)
{
if (map_range)
{
#ifdef GL_ARB_map_buffer_range
+ LLFastTimer t(FTM_VBO_MAP_BUFFER_RANGE);
S32 offset = mOffsets[type] + sTypeSize[type]*index;
S32 length = (sTypeSize[type]*count+0xF) & ~0xF;
src = (U8*) glMapBufferRange(GL_ARRAY_BUFFER_ARB, offset, length,
@@ -1376,6 +1409,7 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran
}
}
+ LLFastTimer t(FTM_VBO_MAP_BUFFER);
src = (U8*) glMapBufferRange(GL_ARRAY_BUFFER_ARB, 0, mSize,
GL_MAP_WRITE_BIT |
GL_MAP_FLUSH_EXPLICIT_BIT);
@@ -1403,7 +1437,7 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran
llassert(src != NULL);
- mMappedData = LL_NEXT_ALIGNED_ADDRESS<U8>(src);
+ mMappedData = LL_NEXT_ALIGNED_ADDRESS<volatile U8>(src);
mAlignedOffset = mMappedData - src;
stop_glerror();
@@ -1416,7 +1450,7 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran
//check the availability of memory
LLMemory::logMemoryInfo(TRUE) ;
- if(!sDisableVBOMapping)
+ if(mMappable)
{
//--------------------
//print out more debug info before crash
@@ -1448,7 +1482,7 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran
map_range = false;
}
- if (map_range && gGLManager.mHasMapBufferRange && !sDisableVBOMapping)
+ if (map_range && gGLManager.mHasMapBufferRange && mMappable)
{
return mMappedData;
}
@@ -1458,7 +1492,11 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran
}
}
-U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
+
+static LLFastTimer::DeclareTimer FTM_VBO_MAP_INDEX_RANGE("IBO Map Range");
+static LLFastTimer::DeclareTimer FTM_VBO_MAP_INDEX("IBO Map");
+
+volatile U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
{
LLMemType mt2(LLMemType::MTYPE_VERTEX_MAP_BUFFER);
bindGLIndices(true);
@@ -1473,7 +1511,7 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
if (useVBOs())
{
- if (sDisableVBOMapping || gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
+ if (!mMappable || gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
{
if (count == -1)
{
@@ -1495,7 +1533,7 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
if (!mapped)
{
//not already mapped, map new region
- MappedRegion region(TYPE_INDEX, !sDisableVBOMapping && map_range ? -1 : index, count);
+ MappedRegion region(TYPE_INDEX, mMappable && map_range ? -1 : index, count);
mMappedIndexRegions.push_back(region);
}
}
@@ -1524,19 +1562,20 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
}
}
- if(sDisableVBOMapping)
+ if(!mMappable)
{
map_range = false;
}
else
{
- U8* src = NULL;
+ volatile U8* src = NULL;
waitFence();
if (gGLManager.mHasMapBufferRange)
{
if (map_range)
{
#ifdef GL_ARB_map_buffer_range
+ LLFastTimer t(FTM_VBO_MAP_INDEX_RANGE);
S32 offset = sizeof(U16)*index;
S32 length = sizeof(U16)*count;
src = (U8*) glMapBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length,
@@ -1548,6 +1587,7 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
else
{
#ifdef GL_ARB_map_buffer_range
+ LLFastTimer t(FTM_VBO_MAP_INDEX);
src = (U8*) glMapBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, sizeof(U16)*mNumIndices,
GL_MAP_WRITE_BIT |
GL_MAP_FLUSH_EXPLICIT_BIT);
@@ -1569,6 +1609,7 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
}
else
{
+ LLFastTimer t(FTM_VBO_MAP_INDEX);
map_range = false;
src = (U8*) glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
}
@@ -1587,7 +1628,7 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
log_glerror();
LLMemory::logMemoryInfo(TRUE) ;
- if(!sDisableVBOMapping)
+ if(mMappable)
{
GLint buff;
glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB, &buff);
@@ -1609,7 +1650,7 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
map_range = false;
}
- if (map_range && gGLManager.mHasMapBufferRange && !sDisableVBOMapping)
+ if (map_range && gGLManager.mHasMapBufferRange && mMappable)
{
return mMappedIndexData;
}
@@ -1619,6 +1660,13 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
}
}
+static LLFastTimer::DeclareTimer FTM_VBO_UNMAP("VBO Unmap");
+static LLFastTimer::DeclareTimer FTM_VBO_FLUSH_RANGE("Flush VBO Range");
+
+
+static LLFastTimer::DeclareTimer FTM_IBO_UNMAP("IBO Unmap");
+static LLFastTimer::DeclareTimer FTM_IBO_FLUSH_RANGE("Flush IBO Range");
+
void LLVertexBuffer::unmapBuffer()
{
LLMemType mt2(LLMemType::MTYPE_VERTEX_UNMAP_BUFFER);
@@ -1631,10 +1679,11 @@ void LLVertexBuffer::unmapBuffer()
if (mMappedData && mVertexLocked)
{
+ LLFastTimer t(FTM_VBO_UNMAP);
bindGLBuffer(true);
updated_all = mIndexLocked; //both vertex and index buffers done updating
- if(sDisableVBOMapping)
+ if(!mMappable)
{
if (!mMappedVertexRegions.empty())
{
@@ -1644,7 +1693,7 @@ void LLVertexBuffer::unmapBuffer()
const MappedRegion& region = mMappedVertexRegions[i];
S32 offset = region.mIndex >= 0 ? mOffsets[region.mType]+sTypeSize[region.mType]*region.mIndex : 0;
S32 length = sTypeSize[region.mType]*region.mCount;
- glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, offset, length, mMappedData+offset);
+ glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, offset, length, (U8*) mMappedData+offset);
stop_glerror();
}
@@ -1653,7 +1702,7 @@ void LLVertexBuffer::unmapBuffer()
else
{
stop_glerror();
- glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, 0, getSize(), mMappedData);
+ glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, 0, getSize(), (U8*) mMappedData);
stop_glerror();
}
}
@@ -1671,6 +1720,7 @@ void LLVertexBuffer::unmapBuffer()
S32 length = sTypeSize[region.mType]*region.mCount;
if (gGLManager.mHasMapBufferRange)
{
+ LLFastTimer t(FTM_VBO_FLUSH_RANGE);
#ifdef GL_ARB_map_buffer_range
glFlushMappedBufferRange(GL_ARRAY_BUFFER_ARB, offset, length);
#endif
@@ -1698,8 +1748,9 @@ void LLVertexBuffer::unmapBuffer()
if (mMappedIndexData && mIndexLocked)
{
+ LLFastTimer t(FTM_IBO_UNMAP);
bindGLIndices();
- if(sDisableVBOMapping)
+ if(!mMappable)
{
if (!mMappedIndexRegions.empty())
{
@@ -1708,7 +1759,7 @@ void LLVertexBuffer::unmapBuffer()
const MappedRegion& region = mMappedIndexRegions[i];
S32 offset = region.mIndex >= 0 ? sizeof(U16)*region.mIndex : 0;
S32 length = sizeof(U16)*region.mCount;
- glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length, mMappedIndexData+offset);
+ glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length, (U8*) mMappedIndexData+offset);
stop_glerror();
}
@@ -1717,7 +1768,7 @@ void LLVertexBuffer::unmapBuffer()
else
{
stop_glerror();
- glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, getIndicesSize(), mMappedIndexData);
+ glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, getIndicesSize(), (U8*) mMappedIndexData);
stop_glerror();
}
}
@@ -1734,6 +1785,7 @@ void LLVertexBuffer::unmapBuffer()
S32 length = sizeof(U16)*region.mCount;
if (gGLManager.mHasMapBufferRange)
{
+ LLFastTimer t(FTM_IBO_FLUSH_RANGE);
#ifdef GL_ARB_map_buffer_range
glFlushMappedBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length);
#endif
@@ -1778,7 +1830,7 @@ template <class T,S32 type> struct VertexBufferStrider
{
if (type == LLVertexBuffer::TYPE_INDEX)
{
- U8* ptr = vbo.mapIndexBuffer(index, count, map_range);
+ volatile U8* ptr = vbo.mapIndexBuffer(index, count, map_range);
if (ptr == NULL)
{
@@ -1794,7 +1846,7 @@ template <class T,S32 type> struct VertexBufferStrider
{
S32 stride = LLVertexBuffer::sTypeSize[type];
- U8* ptr = vbo.mapVertexBuffer(type, index, count, map_range);
+ volatile U8* ptr = vbo.mapVertexBuffer(type, index, count, map_range);
if (ptr == NULL)
{
@@ -2109,7 +2161,7 @@ void LLVertexBuffer::setupVertexBuffer(U32 data_mask)
{
LLMemType mt2(LLMemType::MTYPE_VERTEX_SETUP_VERTEX_BUFFER);
stop_glerror();
- U8* base = useVBOs() ? (U8*) mAlignedOffset : mMappedData;
+ volatile U8* base = useVBOs() ? (U8*) mAlignedOffset : mMappedData;
/*if ((data_mask & mTypeMask) != data_mask)
{
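
Taken together, the llvertexbuffer.cpp changes introduce a per-buffer mMappable flag: only GL_DYNAMIC_DRAW_ARB buffers with VBO mapping enabled are written through glMapBufferRange; everything else is double-buffered in client memory and uploaded with glBufferSubDataARB at unmap time. A condensed, illustrative sketch of the two upload paths (assumption: reduced to a free function, with the region bookkeeping, timers, and error handling omitted; U8/S32 are the viewer typedefs):

static void flush_vertex_range(bool mappable, volatile U8* client_copy,
                               S32 offset, S32 length)
{
    if (!mappable)
    {
        // Client-backed path: the dirty range lives in the system-memory copy,
        // so upload just that range into the currently bound VBO.
        glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, offset, length,
                           (U8*) client_copy + offset);
    }
    else
    {
        // Mapped path: the range was written directly into a mapping created with
        // GL_MAP_FLUSH_EXPLICIT_BIT, so flush it explicitly and unmap.
        glFlushMappedBufferRange(GL_ARRAY_BUFFER_ARB, offset, length);
        glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
    }
}

Note that draw(), drawArrays(), and drawRange() now also clear mMappable once a buffer has been used for rendering, so later writes to that buffer take the glBufferSubDataARB path.
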
diff --git a/indra/llrender/llvertexbuffer.h b/indra/llrender/llvertexbuffer.h
index 3e6f6a959a..e1cbfd3b61 100644
--- a/indra/llrender/llvertexbuffer.h
+++ b/indra/llrender/llvertexbuffer.h
@@ -60,10 +60,10 @@ public:
U32 mType;
//size MUST be a power of 2
- U8* allocate(U32& name, U32 size);
+ volatile U8* allocate(U32& name, U32 size);
//size MUST be the size provided to allocate that returned the given name
- void release(U32 name, U8* buffer, U32 size);
+ void release(U32 name, volatile U8* buffer, U32 size);
//destroy all records in mFreeList
void cleanup();
@@ -72,7 +72,7 @@ public:
{
public:
U32 mGLName;
- U8* mClientData;
+ volatile U8* mClientData;
};
typedef std::list<Record> record_list_t;
@@ -208,8 +208,8 @@ public:
LLVertexBuffer(U32 typemask, S32 usage);
// map for data access
- U8* mapVertexBuffer(S32 type, S32 index, S32 count, bool map_range);
- U8* mapIndexBuffer(S32 index, S32 count, bool map_range);
+ volatile U8* mapVertexBuffer(S32 type, S32 index, S32 count, bool map_range);
+ volatile U8* mapIndexBuffer(S32 index, S32 count, bool map_range);
// set for rendering
virtual void setBuffer(U32 data_mask); // calls setupVertexBuffer() if data_mask is not 0
@@ -244,16 +244,17 @@ public:
S32 getNumVerts() const { return mNumVerts; }
S32 getNumIndices() const { return mNumIndices; }
- U8* getIndicesPointer() const { return useVBOs() ? (U8*) mAlignedIndexOffset : mMappedIndexData; }
- U8* getVerticesPointer() const { return useVBOs() ? (U8*) mAlignedOffset : mMappedData; }
+ volatile U8* getIndicesPointer() const { return useVBOs() ? (U8*) mAlignedIndexOffset : mMappedIndexData; }
+ volatile U8* getVerticesPointer() const { return useVBOs() ? (U8*) mAlignedOffset : mMappedData; }
U32 getTypeMask() const { return mTypeMask; }
bool hasDataType(S32 type) const { return ((1 << type) & getTypeMask()); }
S32 getSize() const;
S32 getIndicesSize() const { return mIndicesSize; }
- U8* getMappedData() const { return mMappedData; }
- U8* getMappedIndices() const { return mMappedIndexData; }
+ volatile U8* getMappedData() const { return mMappedData; }
+ volatile U8* getMappedIndices() const { return mMappedIndexData; }
S32 getOffset(S32 type) const { return mOffsets[type]; }
S32 getUsage() const { return mUsage; }
+ BOOL isWriteable() const { return (mMappable || mUsage == GL_STREAM_DRAW_ARB) ? TRUE : FALSE; }
void draw(U32 mode, U32 count, U32 indices_offset) const;
void drawArrays(U32 mode, U32 offset, U32 count) const;
@@ -278,12 +279,13 @@ protected:
U32 mGLIndices; // GL IBO handle
U32 mGLArray; // GL VAO handle
- U8* mMappedData; // pointer to currently mapped data (NULL if unmapped)
- U8* mMappedIndexData; // pointer to currently mapped indices (NULL if unmapped)
+ volatile U8* mMappedData; // pointer to currently mapped data (NULL if unmapped)
+ volatile U8* mMappedIndexData; // pointer to currently mapped indices (NULL if unmapped)
BOOL mVertexLocked; // if TRUE, vertex buffer is being or has been written to in client memory
BOOL mIndexLocked; // if TRUE, index buffer is being or has been written to in client memory
BOOL mFinal; // if TRUE, buffer can not be mapped again
BOOL mEmpty; // if TRUE, client buffer is empty (or NULL). Old values have been discarded.
+ mutable BOOL mMappable; // if TRUE, use memory mapping to upload data (otherwise doublebuffer and use glBufferSubData)
S32 mOffsets[TYPE_MAX];
std::vector<MappedRegion> mMappedVertexRegions;
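
The volatile client pointers on LLVBOPool::allocate()/release() and Record::mClientData pair with the new allocation policy in llvertexbuffer.cpp: client-backed buffers keep the caller's usage hint and get a 16-byte-aligned system-memory copy, while mappable dynamic buffers get no client copy and have their GL data store created with a static-draw hint. A condensed sketch of that policy, assuming the helpers named in the diff (ll_aligned_malloc_16, LLVertexBuffer::sDisableVBOMapping) and ignoring the free-list reuse path; the free-function form is illustrative:

static volatile U8* allocate_vbo_storage(U32 type, U32 usage, U32 size, U32& name)
{
    glGenBuffersARB(1, &name);
    glBindBufferARB(type, name);

    volatile U8* client_copy = NULL;
    if (LLVertexBuffer::sDisableVBOMapping || usage != GL_DYNAMIC_DRAW_ARB)
    {
        // Client-backed buffer: keep the requested usage hint and allocate an
        // aligned client copy that later feeds glBufferSubDataARB.
        glBufferDataARB(type, size, 0, usage);
        client_copy = (U8*) ll_aligned_malloc_16(size);
    }
    else
    {
        // Mappable dynamic buffer: no client copy; its data store is created with
        // a static-draw hint and written through glMapBufferRange.
        glBufferDataARB(type, size, 0, GL_STATIC_DRAW_ARB);
    }

    glBindBufferARB(type, 0);
    return client_copy;
}

release() follows the same split: mappable dynamic buffers are deleted outright with glDeleteBuffersARB instead of being returned to the free list, while client-backed records are pooled for reuse.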