Diffstat (limited to 'indra/llrender/llvertexbuffer.cpp')
-rw-r--r--  indra/llrender/llvertexbuffer.cpp  357
1 file changed, 200 insertions(+), 157 deletions(-)
diff --git a/indra/llrender/llvertexbuffer.cpp b/indra/llrender/llvertexbuffer.cpp
index 20a450fbfb..a7151afeb1 100644
--- a/indra/llrender/llvertexbuffer.cpp
+++ b/indra/llrender/llvertexbuffer.cpp
@@ -53,31 +53,31 @@ U32 nhpo2(U32 v)
//============================================================================
//static
-LLVBOPool LLVertexBuffer::sStreamVBOPool;
-LLVBOPool LLVertexBuffer::sDynamicVBOPool;
-LLVBOPool LLVertexBuffer::sStreamIBOPool;
-LLVBOPool LLVertexBuffer::sDynamicIBOPool;
+LLVBOPool LLVertexBuffer::sStreamVBOPool(GL_STREAM_DRAW_ARB, GL_ARRAY_BUFFER_ARB);
+LLVBOPool LLVertexBuffer::sDynamicVBOPool(GL_DYNAMIC_DRAW_ARB, GL_ARRAY_BUFFER_ARB);
+LLVBOPool LLVertexBuffer::sStreamIBOPool(GL_STREAM_DRAW_ARB, GL_ELEMENT_ARRAY_BUFFER_ARB);
+LLVBOPool LLVertexBuffer::sDynamicIBOPool(GL_DYNAMIC_DRAW_ARB, GL_ELEMENT_ARRAY_BUFFER_ARB);
U32 LLVBOPool::sBytesPooled = 0;
-LLPrivateMemoryPool* LLVertexBuffer::sPrivatePoolp = NULL ;
+LLPrivateMemoryPool* LLVertexBuffer::sPrivatePoolp = NULL;
U32 LLVertexBuffer::sBindCount = 0;
U32 LLVertexBuffer::sSetCount = 0;
S32 LLVertexBuffer::sCount = 0;
S32 LLVertexBuffer::sGLCount = 0;
S32 LLVertexBuffer::sMappedCount = 0;
-BOOL LLVertexBuffer::sDisableVBOMapping = FALSE ;
-BOOL LLVertexBuffer::sEnableVBOs = TRUE;
+bool LLVertexBuffer::sDisableVBOMapping = false;
+bool LLVertexBuffer::sEnableVBOs = true;
U32 LLVertexBuffer::sGLRenderBuffer = 0;
U32 LLVertexBuffer::sGLRenderArray = 0;
U32 LLVertexBuffer::sGLRenderIndices = 0;
U32 LLVertexBuffer::sLastMask = 0;
-BOOL LLVertexBuffer::sVBOActive = FALSE;
-BOOL LLVertexBuffer::sIBOActive = FALSE;
+bool LLVertexBuffer::sVBOActive = false;
+bool LLVertexBuffer::sIBOActive = false;
U32 LLVertexBuffer::sAllocatedBytes = 0;
-BOOL LLVertexBuffer::sMapped = FALSE;
-BOOL LLVertexBuffer::sUseStreamDraw = TRUE;
-BOOL LLVertexBuffer::sUseVAO = FALSE;
-BOOL LLVertexBuffer::sPreferStreamDraw = FALSE;
+bool LLVertexBuffer::sMapped = false;
+bool LLVertexBuffer::sUseStreamDraw = true;
+bool LLVertexBuffer::sUseVAO = false;
+bool LLVertexBuffer::sPreferStreamDraw = false;
const U32 FENCE_WAIT_TIME_NANOSECONDS = 10000; //1 ms
@@ -148,7 +148,7 @@ U32 wpo2(U32 i)
return r;
}
-U8* LLVBOPool::allocate(U32& name, U32 size)
+volatile U8* LLVBOPool::allocate(U32& name, U32 size)
{
llassert(nhpo2(size) == size);
@@ -159,20 +159,25 @@ U8* LLVBOPool::allocate(U32& name, U32 size)
mFreeList.resize(i+1);
}
- U8* ret = NULL;
+ volatile U8* ret = NULL;
if (mFreeList[i].empty())
{
//make a new buffer
glGenBuffersARB(1, &name);
glBindBufferARB(mType, name);
- glBufferDataARB(mType, size, 0, mUsage);
LLVertexBuffer::sAllocatedBytes += size;
- if (LLVertexBuffer::sDisableVBOMapping)
+ if (LLVertexBuffer::sDisableVBOMapping || mUsage != GL_DYNAMIC_DRAW_ARB)
{
+ glBufferDataARB(mType, size, 0, mUsage);
ret = (U8*) ll_aligned_malloc_16(size);
}
+ else
+ { //always use a true hint of static draw when allocating non-client-backed buffers
+ glBufferDataARB(mType, size, 0, GL_STATIC_DRAW_ARB);
+ }
+
glBindBufferARB(mType, 0);
}
else
@@ -188,7 +193,7 @@ U8* LLVBOPool::allocate(U32& name, U32 size)
return ret;
}
-void LLVBOPool::release(U32 name, U8* buffer, U32 size)
+void LLVBOPool::release(U32 name, volatile U8* buffer, U32 size)
{
llassert(nhpo2(size) == size);
@@ -199,10 +204,16 @@ void LLVBOPool::release(U32 name, U8* buffer, U32 size)
Record rec;
rec.mGLName = name;
rec.mClientData = buffer;
-
- sBytesPooled += size;
-
- mFreeList[i].push_back(rec);
+
+ if (buffer == NULL)
+ {
+ glDeleteBuffersARB(1, &rec.mGLName);
+ }
+ else
+ {
+ sBytesPooled += size;
+ mFreeList[i].push_back(rec);
+ }
}
void LLVBOPool::cleanup()
@@ -221,7 +232,7 @@ void LLVBOPool::cleanup()
if (r.mClientData)
{
- ll_aligned_free_16(r.mClientData);
+ ll_aligned_free_16((void*) r.mClientData);
}
l.pop_front();
@@ -271,7 +282,7 @@ void LLVertexBuffer::setupClientArrays(U32 data_mask)
{
if (sLastMask != data_mask)
{
- BOOL error = FALSE;
+ bool error = false;
if (LLGLSLShader::sNoFixedFunction)
{
@@ -332,7 +343,7 @@ void LLVertexBuffer::setupClientArrays(U32 data_mask)
{
if (gDebugSession)
{
- error = TRUE;
+ error = true;
gFailLog << "Bad client state! " << array[i] << " disabled." << std::endl;
}
else
@@ -352,7 +363,7 @@ void LLVertexBuffer::setupClientArrays(U32 data_mask)
{ //needs to be disabled, make sure it was (DEBUG TEMPORARY)
if (gDebugSession)
{
- error = TRUE;
+ error = true;
gFailLog << "Bad client state! " << array[i] << " enabled." << std::endl;
}
else
@@ -536,7 +547,7 @@ void LLVertexBuffer::validateRange(U32 start, U32 end, U32 count, U32 indices_of
void LLVertexBuffer::drawRange(U32 mode, U32 start, U32 end, U32 count, U32 indices_offset) const
{
validateRange(start, end, count, indices_offset);
-
+ mMappable = false;
gGL.syncMatrices();
llassert(mNumVerts >= 0);
@@ -591,7 +602,7 @@ void LLVertexBuffer::drawRange(U32 mode, U32 start, U32 end, U32 count, U32 indi
void LLVertexBuffer::draw(U32 mode, U32 count, U32 indices_offset) const
{
llassert(!LLGLSLShader::sNoFixedFunction || LLGLSLShader::sCurBoundShaderPtr != NULL);
-
+ mMappable = false;
gGL.syncMatrices();
llassert(mNumIndices >= 0);
@@ -637,7 +648,7 @@ void LLVertexBuffer::draw(U32 mode, U32 count, U32 indices_offset) const
void LLVertexBuffer::drawArrays(U32 mode, U32 first, U32 count) const
{
llassert(!LLGLSLShader::sNoFixedFunction || LLGLSLShader::sCurBoundShaderPtr != NULL);
-
+ mMappable = false;
gGL.syncMatrices();
llassert(mNumVerts >= 0);
@@ -677,23 +688,13 @@ void LLVertexBuffer::drawArrays(U32 mode, U32 first, U32 count) const
//static
void LLVertexBuffer::initClass(bool use_vbo, bool no_vbo_mapping)
{
- sEnableVBOs = use_vbo && gGLManager.mHasVertexBufferObject ;
- sDisableVBOMapping = sEnableVBOs && no_vbo_mapping ;
+ sEnableVBOs = use_vbo && gGLManager.mHasVertexBufferObject;
+ sDisableVBOMapping = sEnableVBOs && no_vbo_mapping;
- if(!sPrivatePoolp)
+ if (!sPrivatePoolp)
{
- sPrivatePoolp = LLPrivateMemoryPoolManager::getInstance()->newPool(LLPrivateMemoryPool::STATIC) ;
+ sPrivatePoolp = LLPrivateMemoryPoolManager::getInstance()->newPool(LLPrivateMemoryPool::STATIC);
}
-
- sStreamVBOPool.mType = GL_ARRAY_BUFFER_ARB;
- sStreamVBOPool.mUsage= GL_STREAM_DRAW_ARB;
- sStreamIBOPool.mType = GL_ELEMENT_ARRAY_BUFFER_ARB;
- sStreamIBOPool.mUsage= GL_STREAM_DRAW_ARB;
-
- sDynamicVBOPool.mType = GL_ARRAY_BUFFER_ARB;
- sDynamicVBOPool.mUsage= GL_DYNAMIC_DRAW_ARB;
- sDynamicIBOPool.mType = GL_ELEMENT_ARRAY_BUFFER_ARB;
- sDynamicIBOPool.mUsage= GL_DYNAMIC_DRAW_ARB;
}
//static
@@ -706,18 +707,18 @@ void LLVertexBuffer::unbind()
#endif
sGLRenderArray = 0;
sGLRenderIndices = 0;
- sIBOActive = FALSE;
+ sIBOActive = false;
}
if (sVBOActive)
{
glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
- sVBOActive = FALSE;
+ sVBOActive = false;
}
if (sIBOActive)
{
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
- sIBOActive = FALSE;
+ sIBOActive = false;
}
sGLRenderBuffer = 0;
@@ -746,62 +747,87 @@ void LLVertexBuffer::cleanupClass()
//----------------------------------------------------------------------------
+S32 LLVertexBuffer::determineUsage(S32 usage)
+{
+ S32 ret_usage = usage;
+
+ if (!sEnableVBOs)
+ {
+ ret_usage = 0;
+ }
+
+ if (usage == GL_STREAM_DRAW_ARB && !sUseStreamDraw)
+ {
+ ret_usage = 0;
+ }
+
+ if (usage == GL_DYNAMIC_DRAW_ARB && sPreferStreamDraw)
+ {
+ ret_usage = GL_STREAM_DRAW_ARB;
+ }
+
+ if (usage == 0 && LLRender::sGLCoreProfile)
+ { //MUST use VBOs for all rendering
+ ret_usage = GL_STREAM_DRAW_ARB;
+ }
+
+ if (usage && usage != GL_STREAM_DRAW_ARB)
+ { //only stream_draw and dynamic_draw are supported when using VBOs, dynamic draw is the default
+ if (sDisableVBOMapping)
+ { //always use stream draw if VBO mapping is disabled
+ ret_usage = GL_STREAM_DRAW_ARB;
+ }
+ else
+ {
+ ret_usage = GL_DYNAMIC_DRAW_ARB;
+ }
+ }
+
+ return ret_usage;
+}
+
LLVertexBuffer::LLVertexBuffer(U32 typemask, S32 usage) :
LLRefCount(),
mNumVerts(0),
mNumIndices(0),
- mUsage(usage),
+ mAlignedOffset(0),
+ mAlignedIndexOffset(0),
+ mSize(0),
+ mIndicesSize(0),
+ mTypeMask(typemask),
+ mUsage(LLVertexBuffer::determineUsage(usage)),
mGLBuffer(0),
+ mGLIndices(0),
mGLArray(0),
- mGLIndices(0),
mMappedData(NULL),
- mMappedIndexData(NULL),
- mVertexLocked(FALSE),
- mIndexLocked(FALSE),
- mFinal(FALSE),
- mEmpty(TRUE),
+ mMappedIndexData(NULL),
+ mMappedDataUsingVBOs(false),
+ mMappedIndexDataUsingVBOs(false),
+ mVertexLocked(false),
+ mIndexLocked(false),
+ mFinal(false),
+ mEmpty(true),
+ mMappable(false),
mFence(NULL)
{
LLMemType mt2(LLMemType::MTYPE_VERTEX_CONSTRUCTOR);
- mFence = NULL;
- if (!sEnableVBOs)
- {
- mUsage = 0 ;
- }
- if (mUsage == GL_STREAM_DRAW_ARB && !sUseStreamDraw)
+ if (mUsage == GL_DYNAMIC_DRAW_ARB && !sDisableVBOMapping)
{
- mUsage = 0;
+ mMappable = true;
}
-
- if (mUsage == GL_DYNAMIC_DRAW_ARB && sPreferStreamDraw)
+ else
{
- mUsage = GL_STREAM_DRAW_ARB;
+ mMappable = false;
}
- if (mUsage == 0 && LLRender::sGLCoreProfile)
- { //MUST use VBOs for all rendering
- mUsage = GL_STREAM_DRAW_ARB;
- }
-
- if (mUsage && mUsage != GL_STREAM_DRAW_ARB)
- { //only stream_draw and dynamic_draw are supported when using VBOs, dynamic draw is the default
- mUsage = GL_DYNAMIC_DRAW_ARB;
- }
-
//zero out offsets
for (U32 i = 0; i < TYPE_MAX; i++)
{
mOffsets[i] = 0;
}
- mTypeMask = typemask;
- mSize = 0;
- mIndicesSize = 0;
- mAlignedOffset = 0;
- mAlignedIndexOffset = 0;
-
sCount++;
}
@@ -982,9 +1008,11 @@ void LLVertexBuffer::createGLBuffer(U32 size)
return;
}
- mEmpty = TRUE;
+ mEmpty = true;
- if (useVBOs())
+ mMappedDataUsingVBOs = useVBOs();
+
+ if (mMappedDataUsingVBOs)
{
genBuffer(size);
}
@@ -1011,12 +1039,14 @@ void LLVertexBuffer::createGLIndices(U32 size)
return;
}
- mEmpty = TRUE;
+ mEmpty = true;
//pad by 16 bytes for aligned copies
size += 16;
- if (useVBOs())
+ mMappedIndexDataUsingVBOs = useVBOs();
+
+ if (mMappedIndexDataUsingVBOs)
{
//pad by another 16 bytes for VBO pointer adjustment
size += 16;
@@ -1036,15 +1066,15 @@ void LLVertexBuffer::destroyGLBuffer()
LLMemType mt2(LLMemType::MTYPE_VERTEX_DESTROY_BUFFER);
if (mGLBuffer)
{
- if (useVBOs())
+ if (mMappedDataUsingVBOs)
{
releaseBuffer();
}
else
{
- FREE_MEM(sPrivatePoolp, mMappedData) ;
+ FREE_MEM(sPrivatePoolp, (void*) mMappedData);
mMappedData = NULL;
- mEmpty = TRUE;
+ mEmpty = true;
}
}
@@ -1057,15 +1087,15 @@ void LLVertexBuffer::destroyGLIndices()
LLMemType mt2(LLMemType::MTYPE_VERTEX_DESTROY_INDICES);
if (mGLIndices)
{
- if (useVBOs())
+ if (mMappedIndexDataUsingVBOs)
{
releaseIndices();
}
else
{
- FREE_MEM(sPrivatePoolp, mMappedIndexData) ;
+ FREE_MEM(sPrivatePoolp, (void*) mMappedIndexData);
mMappedIndexData = NULL;
- mEmpty = TRUE;
+ mEmpty = true;
}
}
@@ -1250,16 +1280,10 @@ void LLVertexBuffer::resizeBuffer(S32 newnverts, S32 newnindices)
}
}
-BOOL LLVertexBuffer::useVBOs() const
+bool LLVertexBuffer::useVBOs() const
{
//it's generally ineffective to use VBO for things that are streaming on apple
-
- if (!mUsage)
- {
- return FALSE;
- }
-
- return TRUE;
+ return (mUsage != 0);
}
//----------------------------------------------------------------------------
@@ -1282,8 +1306,11 @@ bool expand_region(LLVertexBuffer::MappedRegion& region, S32 index, S32 count)
return true;
}
+static LLFastTimer::DeclareTimer FTM_VBO_MAP_BUFFER_RANGE("VBO Map Range");
+static LLFastTimer::DeclareTimer FTM_VBO_MAP_BUFFER("VBO Map");
+
// Map for data access
-U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_range)
+volatile U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_range)
{
bindGLBuffer(true);
LLMemType mt2(LLMemType::MTYPE_VERTEX_MAP_BUFFER);
@@ -1298,7 +1325,7 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran
if (useVBOs())
{
- if (sDisableVBOMapping || gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
+ if (!mMappable || gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
{
if (count == -1)
{
@@ -1323,7 +1350,7 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran
if (!mapped)
{
//not already mapped, map new region
- MappedRegion region(type, !sDisableVBOMapping && map_range ? -1 : index, count);
+ MappedRegion region(type, mMappable && map_range ? -1 : index, count);
mMappedVertexRegions.push_back(region);
}
}
@@ -1336,23 +1363,24 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran
if (!mVertexLocked)
{
LLMemType mt_v(LLMemType::MTYPE_VERTEX_MAP_BUFFER_VERTICES);
- mVertexLocked = TRUE;
+ mVertexLocked = true;
sMappedCount++;
stop_glerror();
- if(sDisableVBOMapping)
+ if(!mMappable)
{
map_range = false;
}
else
{
- U8* src = NULL;
+ volatile U8* src = NULL;
waitFence();
if (gGLManager.mHasMapBufferRange)
{
if (map_range)
{
#ifdef GL_ARB_map_buffer_range
+ LLFastTimer t(FTM_VBO_MAP_BUFFER_RANGE);
S32 offset = mOffsets[type] + sTypeSize[type]*index;
S32 length = (sTypeSize[type]*count+0xF) & ~0xF;
src = (U8*) glMapBufferRange(GL_ARRAY_BUFFER_ARB, offset, length,
@@ -1376,6 +1404,7 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran
}
}
+ LLFastTimer t(FTM_VBO_MAP_BUFFER);
src = (U8*) glMapBufferRange(GL_ARRAY_BUFFER_ARB, 0, mSize,
GL_MAP_WRITE_BIT |
GL_MAP_FLUSH_EXPLICIT_BIT);
@@ -1403,7 +1432,7 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran
llassert(src != NULL);
- mMappedData = LL_NEXT_ALIGNED_ADDRESS<U8>(src);
+ mMappedData = LL_NEXT_ALIGNED_ADDRESS<volatile U8>(src);
mAlignedOffset = mMappedData - src;
stop_glerror();
@@ -1413,10 +1442,10 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran
{
log_glerror();
- //check the availability of memory
- LLMemory::logMemoryInfo(TRUE) ;
+ //check the availability of memory
+ LLMemory::logMemoryInfo(true) ;
- if(!sDisableVBOMapping)
+ if(mMappable)
{
//--------------------
//print out more debug info before crash
@@ -1448,7 +1477,7 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran
map_range = false;
}
- if (map_range && gGLManager.mHasMapBufferRange && !sDisableVBOMapping)
+ if (map_range && gGLManager.mHasMapBufferRange && mMappable)
{
return mMappedData;
}
@@ -1458,7 +1487,11 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran
}
}
-U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
+
+static LLFastTimer::DeclareTimer FTM_VBO_MAP_INDEX_RANGE("IBO Map Range");
+static LLFastTimer::DeclareTimer FTM_VBO_MAP_INDEX("IBO Map");
+
+volatile U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
{
LLMemType mt2(LLMemType::MTYPE_VERTEX_MAP_BUFFER);
bindGLIndices(true);
@@ -1473,7 +1506,7 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
if (useVBOs())
{
- if (sDisableVBOMapping || gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
+ if (!mMappable || gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
{
if (count == -1)
{
@@ -1495,7 +1528,7 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
if (!mapped)
{
//not already mapped, map new region
- MappedRegion region(TYPE_INDEX, !sDisableVBOMapping && map_range ? -1 : index, count);
+ MappedRegion region(TYPE_INDEX, mMappable && map_range ? -1 : index, count);
mMappedIndexRegions.push_back(region);
}
}
@@ -1509,7 +1542,7 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
{
LLMemType mt_v(LLMemType::MTYPE_VERTEX_MAP_BUFFER_INDICES);
- mIndexLocked = TRUE;
+ mIndexLocked = true;
sMappedCount++;
stop_glerror();
@@ -1524,19 +1557,20 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
}
}
- if(sDisableVBOMapping)
+ if(!mMappable)
{
map_range = false;
}
else
{
- U8* src = NULL;
+ volatile U8* src = NULL;
waitFence();
if (gGLManager.mHasMapBufferRange)
{
if (map_range)
{
#ifdef GL_ARB_map_buffer_range
+ LLFastTimer t(FTM_VBO_MAP_INDEX_RANGE);
S32 offset = sizeof(U16)*index;
S32 length = sizeof(U16)*count;
src = (U8*) glMapBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length,
@@ -1548,6 +1582,7 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
else
{
#ifdef GL_ARB_map_buffer_range
+ LLFastTimer t(FTM_VBO_MAP_INDEX);
src = (U8*) glMapBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, sizeof(U16)*mNumIndices,
GL_MAP_WRITE_BIT |
GL_MAP_FLUSH_EXPLICIT_BIT);
@@ -1569,6 +1604,7 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
}
else
{
+ LLFastTimer t(FTM_VBO_MAP_INDEX);
map_range = false;
src = (U8*) glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
}
@@ -1585,9 +1621,9 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
if (!mMappedIndexData)
{
log_glerror();
- LLMemory::logMemoryInfo(TRUE) ;
+ LLMemory::logMemoryInfo(true) ;
- if(!sDisableVBOMapping)
+ if(mMappable)
{
GLint buff;
glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB, &buff);
@@ -1609,7 +1645,7 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
map_range = false;
}
- if (map_range && gGLManager.mHasMapBufferRange && !sDisableVBOMapping)
+ if (map_range && gGLManager.mHasMapBufferRange && mMappable)
{
return mMappedIndexData;
}
@@ -1619,6 +1655,13 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
}
}
+static LLFastTimer::DeclareTimer FTM_VBO_UNMAP("VBO Unmap");
+static LLFastTimer::DeclareTimer FTM_VBO_FLUSH_RANGE("Flush VBO Range");
+
+
+static LLFastTimer::DeclareTimer FTM_IBO_UNMAP("IBO Unmap");
+static LLFastTimer::DeclareTimer FTM_IBO_FLUSH_RANGE("Flush IBO Range");
+
void LLVertexBuffer::unmapBuffer()
{
LLMemType mt2(LLMemType::MTYPE_VERTEX_UNMAP_BUFFER);
@@ -1631,10 +1674,11 @@ void LLVertexBuffer::unmapBuffer()
if (mMappedData && mVertexLocked)
{
+ LLFastTimer t(FTM_VBO_UNMAP);
bindGLBuffer(true);
updated_all = mIndexLocked; //both vertex and index buffers done updating
- if(sDisableVBOMapping)
+ if(!mMappable)
{
if (!mMappedVertexRegions.empty())
{
@@ -1644,7 +1688,7 @@ void LLVertexBuffer::unmapBuffer()
const MappedRegion& region = mMappedVertexRegions[i];
S32 offset = region.mIndex >= 0 ? mOffsets[region.mType]+sTypeSize[region.mType]*region.mIndex : 0;
S32 length = sTypeSize[region.mType]*region.mCount;
- glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, offset, length, mMappedData+offset);
+ glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, offset, length, (U8*) mMappedData+offset);
stop_glerror();
}
@@ -1653,7 +1697,7 @@ void LLVertexBuffer::unmapBuffer()
else
{
stop_glerror();
- glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, 0, getSize(), mMappedData);
+ glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, 0, getSize(), (U8*) mMappedData);
stop_glerror();
}
}
@@ -1671,6 +1715,7 @@ void LLVertexBuffer::unmapBuffer()
S32 length = sTypeSize[region.mType]*region.mCount;
if (gGLManager.mHasMapBufferRange)
{
+ LLFastTimer t(FTM_VBO_FLUSH_RANGE);
#ifdef GL_ARB_map_buffer_range
glFlushMappedBufferRange(GL_ARRAY_BUFFER_ARB, offset, length);
#endif
@@ -1692,14 +1737,15 @@ void LLVertexBuffer::unmapBuffer()
mMappedData = NULL;
}
- mVertexLocked = FALSE ;
+ mVertexLocked = false;
sMappedCount--;
}
if (mMappedIndexData && mIndexLocked)
{
+ LLFastTimer t(FTM_IBO_UNMAP);
bindGLIndices();
- if(sDisableVBOMapping)
+ if(!mMappable)
{
if (!mMappedIndexRegions.empty())
{
@@ -1708,7 +1754,7 @@ void LLVertexBuffer::unmapBuffer()
const MappedRegion& region = mMappedIndexRegions[i];
S32 offset = region.mIndex >= 0 ? sizeof(U16)*region.mIndex : 0;
S32 length = sizeof(U16)*region.mCount;
- glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length, mMappedIndexData+offset);
+ glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length, (U8*) mMappedIndexData+offset);
stop_glerror();
}
@@ -1717,7 +1763,7 @@ void LLVertexBuffer::unmapBuffer()
else
{
stop_glerror();
- glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, getIndicesSize(), mMappedIndexData);
+ glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, getIndicesSize(), (U8*) mMappedIndexData);
stop_glerror();
}
}
@@ -1734,6 +1780,7 @@ void LLVertexBuffer::unmapBuffer()
S32 length = sizeof(U16)*region.mCount;
if (gGLManager.mHasMapBufferRange)
{
+ LLFastTimer t(FTM_IBO_FLUSH_RANGE);
#ifdef GL_ARB_map_buffer_range
glFlushMappedBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length);
#endif
@@ -1757,13 +1804,13 @@ void LLVertexBuffer::unmapBuffer()
mMappedIndexData = NULL ;
}
- mIndexLocked = FALSE ;
+ mIndexLocked = false ;
sMappedCount--;
}
if(updated_all)
{
- mEmpty = FALSE;
+ mEmpty = false;
}
}
@@ -1778,39 +1825,39 @@ template <class T,S32 type> struct VertexBufferStrider
{
if (type == LLVertexBuffer::TYPE_INDEX)
{
- U8* ptr = vbo.mapIndexBuffer(index, count, map_range);
+ volatile U8* ptr = vbo.mapIndexBuffer(index, count, map_range);
if (ptr == NULL)
{
llwarns << "mapIndexBuffer failed!" << llendl;
- return FALSE;
+ return false;
}
strider = (T*)ptr;
strider.setStride(0);
- return TRUE;
+ return true;
}
else if (vbo.hasDataType(type))
{
S32 stride = LLVertexBuffer::sTypeSize[type];
- U8* ptr = vbo.mapVertexBuffer(type, index, count, map_range);
+ volatile U8* ptr = vbo.mapVertexBuffer(type, index, count, map_range);
if (ptr == NULL)
{
llwarns << "mapVertexBuffer failed!" << llendl;
- return FALSE;
+ return false;
}
strider = (T*)ptr;
strider.setStride(stride);
- return TRUE;
+ return true;
}
else
{
llerrs << "VertexBufferStrider could not find valid vertex data." << llendl;
}
- return FALSE;
+ return false;
}
};
@@ -1909,7 +1956,7 @@ bool LLVertexBuffer::bindGLBuffer(bool force_bind)
glBindBufferARB(GL_ARRAY_BUFFER_ARB, mGLBuffer);
sGLRenderBuffer = mGLBuffer;
sBindCount++;
- sVBOActive = TRUE;
+ sVBOActive = true;
if (mGLArray)
{
@@ -1941,7 +1988,7 @@ bool LLVertexBuffer::bindGLIndices(bool force_bind)
sGLRenderIndices = mGLIndices;
stop_glerror();
sBindCount++;
- sIBOActive = TRUE;
+ sIBOActive = true;
ret = true;
}
@@ -1963,7 +2010,7 @@ void LLVertexBuffer::setBuffer(U32 data_mask)
LLMemType mt2(LLMemType::MTYPE_VERTEX_SET_BUFFER);
//set up pointers if the data mask is different ...
- BOOL setup = (sLastMask != data_mask);
+ bool setup = (sLastMask != data_mask);
if (gDebugGL && data_mask != 0)
{ //make sure data requirements are fulfilled
@@ -1997,21 +2044,17 @@ void LLVertexBuffer::setBuffer(U32 data_mask)
if (mGLArray)
{
bindGLArray();
- setup = FALSE; //do NOT perform pointer setup if using VAO
+ setup = false; //do NOT perform pointer setup if using VAO
}
else
{
- if (bindGLBuffer())
- {
- setup = TRUE;
- }
- if (bindGLIndices())
- {
- setup = TRUE;
- }
+ const bool bindBuffer = bindGLBuffer();
+ const bool bindIndices = bindGLIndices();
+
+ setup = setup || bindBuffer || bindIndices;
}
- BOOL error = FALSE;
+ bool error = false;
if (gDebugGL && !mGLArray)
{
GLint buff;
@@ -2020,7 +2063,7 @@ void LLVertexBuffer::setBuffer(U32 data_mask)
{
if (gDebugSession)
{
- error = TRUE;
+ error = true;
gFailLog << "Invalid GL vertex buffer bound: " << buff << std::endl;
}
else
@@ -2036,7 +2079,7 @@ void LLVertexBuffer::setBuffer(U32 data_mask)
{
if (gDebugSession)
{
- error = TRUE;
+ error = true;
gFailLog << "Invalid GL index buffer bound: " << buff << std::endl;
}
else
@@ -2058,7 +2101,7 @@ void LLVertexBuffer::setBuffer(U32 data_mask)
#endif
sGLRenderArray = 0;
sGLRenderIndices = 0;
- sIBOActive = FALSE;
+ sIBOActive = false;
}
if (mGLBuffer)
@@ -2067,13 +2110,13 @@ void LLVertexBuffer::setBuffer(U32 data_mask)
{
glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
sBindCount++;
- sVBOActive = FALSE;
- setup = TRUE; // ... or a VBO is deactivated
+ sVBOActive = false;
+ setup = true; // ... or a VBO is deactivated
}
if (sGLRenderBuffer != mGLBuffer)
{
sGLRenderBuffer = mGLBuffer;
- setup = TRUE; // ... or a client memory pointer changed
+ setup = true; // ... or a client memory pointer changed
}
}
if (mGLIndices)
@@ -2082,7 +2125,7 @@ void LLVertexBuffer::setBuffer(U32 data_mask)
{
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
sBindCount++;
- sIBOActive = FALSE;
+ sIBOActive = false;
}
sGLRenderIndices = mGLIndices;
@@ -2109,7 +2152,7 @@ void LLVertexBuffer::setupVertexBuffer(U32 data_mask)
{
LLMemType mt2(LLMemType::MTYPE_VERTEX_SETUP_VERTEX_BUFFER);
stop_glerror();
- U8* base = useVBOs() ? (U8*) mAlignedOffset : mMappedData;
+ volatile U8* base = useVBOs() ? (U8*) mAlignedOffset : mMappedData;
/*if ((data_mask & mTypeMask) != data_mask)
{
@@ -2170,19 +2213,19 @@ void LLVertexBuffer::setupVertexBuffer(U32 data_mask)
{
S32 loc = TYPE_WEIGHT;
void* ptr = (void*)(base + mOffsets[TYPE_WEIGHT]);
- glVertexAttribPointerARB(loc, 1, GL_FLOAT, FALSE, LLVertexBuffer::sTypeSize[TYPE_WEIGHT], ptr);
+ glVertexAttribPointerARB(loc, 1, GL_FLOAT, GL_FALSE, LLVertexBuffer::sTypeSize[TYPE_WEIGHT], ptr);
}
if (data_mask & MAP_WEIGHT4)
{
S32 loc = TYPE_WEIGHT4;
void* ptr = (void*)(base+mOffsets[TYPE_WEIGHT4]);
- glVertexAttribPointerARB(loc, 4, GL_FLOAT, FALSE, LLVertexBuffer::sTypeSize[TYPE_WEIGHT4], ptr);
+ glVertexAttribPointerARB(loc, 4, GL_FLOAT, GL_FALSE, LLVertexBuffer::sTypeSize[TYPE_WEIGHT4], ptr);
}
if (data_mask & MAP_CLOTHWEIGHT)
{
S32 loc = TYPE_CLOTHWEIGHT;
void* ptr = (void*)(base + mOffsets[TYPE_CLOTHWEIGHT]);
- glVertexAttribPointerARB(loc, 4, GL_FLOAT, TRUE, LLVertexBuffer::sTypeSize[TYPE_CLOTHWEIGHT], ptr);
+ glVertexAttribPointerARB(loc, 4, GL_FLOAT, GL_TRUE, LLVertexBuffer::sTypeSize[TYPE_CLOTHWEIGHT], ptr);
}
if (data_mask & MAP_TEXTURE_INDEX)
{