From 1fd46831f0ea7309d83c1fa2eecc611b3bada719 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Sun, 29 May 2011 00:40:57 -0500 Subject: SH-1682 Work in progress on using texture indexes to improve batch size (wow, super fast so far) --- indra/llrender/llvertexbuffer.cpp | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'indra/llrender/llvertexbuffer.cpp') diff --git a/indra/llrender/llvertexbuffer.cpp b/indra/llrender/llvertexbuffer.cpp index 8c9171ccf4..f715a8e9ba 100644 --- a/indra/llrender/llvertexbuffer.cpp +++ b/indra/llrender/llvertexbuffer.cpp @@ -1497,7 +1497,14 @@ void LLVertexBuffer::setupVertexBuffer(U32 data_mask) const } if (data_mask & MAP_VERTEX) { - glVertexPointer(3,GL_FLOAT, LLVertexBuffer::sTypeSize[TYPE_VERTEX], (void*)(base + 0)); + if (data_mask & MAP_TEXTURE_INDEX) + { + glVertexPointer(4,GL_FLOAT, LLVertexBuffer::sTypeSize[TYPE_VERTEX], (void*)(base + 0)); + } + else + { + glVertexPointer(3,GL_FLOAT, LLVertexBuffer::sTypeSize[TYPE_VERTEX], (void*)(base + 0)); + } } llglassertok(); -- cgit v1.2.3 From 6992dbc1e32e1d8b803291aa1b87862fd6640c2a Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Wed, 1 Jun 2011 23:46:04 -0500 Subject: SH-1682 Use GL_ARB_map_buffer_range to mitigate impact of mapping larger vertex buffer objects. Limit number of textures per batch to 6 (prevents frame stalls on NVIDIA). --- indra/llrender/llvertexbuffer.cpp | 345 ++++++++++++++++++++++++++++---------- 1 file changed, 258 insertions(+), 87 deletions(-) (limited to 'indra/llrender/llvertexbuffer.cpp') diff --git a/indra/llrender/llvertexbuffer.cpp b/indra/llrender/llvertexbuffer.cpp index f715a8e9ba..27cc88462a 100644 --- a/indra/llrender/llvertexbuffer.cpp +++ b/indra/llrender/llvertexbuffer.cpp @@ -934,8 +934,26 @@ void LLVertexBuffer::allocateClientIndexBuffer() } } +bool expand_region(LLVertexBuffer::MappedRegion& region, S32 index, S32 count) +{ + S32 end = index+count; + S32 region_end = region.mIndex+region.mCount; + + if (end < region.mIndex || + index > region_end) + { //gap exists, do not merge + return false; + } + + S32 new_end = llmax(end, region_end); + S32 new_index = llmin(index, region.mIndex); + region.mIndex = new_index; + region.mCount = new_end-new_index; + return true; +} + // Map for data access -U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 access) +U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_range) { LLMemType mt2(LLMemType::MTYPE_VERTEX_MAP_BUFFER); if (mFinal) @@ -947,8 +965,44 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 access) llerrs << "LLVertexBuffer::mapVertexBuffer() called on unallocated buffer." << llendl; } - if (!mVertexLocked && useVBOs()) + if (useVBOs()) { + + if (!sDisableVBOMapping && gGLManager.mHasMapBufferRange) + { + if (count == -1) + { + count = mNumVerts; + } + + bool mapped = false; + //see if range is already mapped + for (U32 i = 0; i < mMappedVertexRegions.size(); ++i) + { + MappedRegion& region = mMappedVertexRegions[i]; + if (region.mType == type) + { + if (expand_region(region, index, count)) + { + mapped = true; + } + } + } + + if (!mapped) + { + //not already mapped, map new region + MappedRegion region(type, map_range ? -1 : index, count); + mMappedVertexRegions.push_back(region); + } + } + + if (mVertexLocked && map_range) + { + llerrs << "Attempted to map a specific range of a buffer that was already mapped." 
<< llendl; + } + + if (!mVertexLocked) { LLMemType mt_v(LLMemType::MTYPE_VERTEX_MAP_BUFFER_VERTICES); setBuffer(0, type); @@ -957,61 +1011,91 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 access) if(sDisableVBOMapping) { + map_range = false; allocateClientVertexBuffer() ; } else { - U8* src = (U8*) glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB); + U8* src = NULL; + if (gGLManager.mHasMapBufferRange) + { + if (map_range) + { + S32 offset = mOffsets[type] + sTypeSize[type]*index; + S32 length = (sTypeSize[type]*count+0xF) & ~0xF; + src = (U8*) glMapBufferRange(GL_ARRAY_BUFFER_ARB, offset, length, GL_MAP_WRITE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT | GL_MAP_INVALIDATE_RANGE_BIT); + } + else + { + src = (U8*) glMapBufferRange(GL_ARRAY_BUFFER_ARB, 0, mSize, GL_MAP_WRITE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT); + } + } + else + { + map_range = false; + src = (U8*) glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB); + } + mMappedData = LL_NEXT_ALIGNED_ADDRESS(src); mAlignedOffset = mMappedData - src; stop_glerror(); } - } - - - if (!mMappedData) - { - log_glerror(); - - //check the availability of memory - U32 avail_phy_mem, avail_vir_mem; - LLMemoryInfo::getAvailableMemoryKB(avail_phy_mem, avail_vir_mem) ; - llinfos << "Available physical mwmory(KB): " << avail_phy_mem << llendl ; - llinfos << "Available virtual memory(KB): " << avail_vir_mem << llendl; - - if(!sDisableVBOMapping) - { - //-------------------- - //print out more debug info before crash - llinfos << "vertex buffer size: (num verts : num indices) = " << getNumVerts() << " : " << getNumIndices() << llendl ; - GLint size ; - glGetBufferParameterivARB(GL_ARRAY_BUFFER_ARB, GL_BUFFER_SIZE_ARB, &size) ; - llinfos << "GL_ARRAY_BUFFER_ARB size is " << size << llendl ; - //-------------------- + + if (!mMappedData) + { + log_glerror(); + + //check the availability of memory + U32 avail_phy_mem, avail_vir_mem; + LLMemoryInfo::getAvailableMemoryKB(avail_phy_mem, avail_vir_mem) ; + llinfos << "Available physical mwmory(KB): " << avail_phy_mem << llendl ; + llinfos << "Available virtual memory(KB): " << avail_vir_mem << llendl; + + if(!sDisableVBOMapping) + { + //-------------------- + //print out more debug info before crash + llinfos << "vertex buffer size: (num verts : num indices) = " << getNumVerts() << " : " << getNumIndices() << llendl ; + GLint size ; + glGetBufferParameterivARB(GL_ARRAY_BUFFER_ARB, GL_BUFFER_SIZE_ARB, &size) ; + llinfos << "GL_ARRAY_BUFFER_ARB size is " << size << llendl ; + //-------------------- + + GLint buff; + glGetIntegerv(GL_ARRAY_BUFFER_BINDING_ARB, &buff); + if ((GLuint)buff != mGLBuffer) + { + llerrs << "Invalid GL vertex buffer bound: " << buff << llendl; + } - GLint buff; - glGetIntegerv(GL_ARRAY_BUFFER_BINDING_ARB, &buff); - if ((GLuint)buff != mGLBuffer) + + llerrs << "glMapBuffer returned NULL (no vertex data)" << llendl; + } + else { - llerrs << "Invalid GL vertex buffer bound: " << buff << llendl; + llerrs << "memory allocation for vertex data failed." << llendl ; } - - - llerrs << "glMapBuffer returned NULL (no vertex data)" << llendl; - } - else - { - llerrs << "memory allocation for vertex data failed." 
<< llendl ; } + sMappedCount++; } - sMappedCount++; + } + else + { + map_range = false; } - return mMappedData; + if (map_range) + { + return mMappedData; + } + else + { + return mMappedData+mOffsets[type]+sTypeSize[type]*index; + } } -U8* LLVertexBuffer::mapIndexBuffer(S32 access) +U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range) { LLMemType mt2(LLMemType::MTYPE_VERTEX_MAP_BUFFER); if (mFinal) @@ -1023,8 +1107,40 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 access) llerrs << "LLVertexBuffer::mapIndexBuffer() called on unallocated buffer." << llendl; } - if (!mIndexLocked && useVBOs()) + if (useVBOs()) { + if (!sDisableVBOMapping && gGLManager.mHasMapBufferRange) + { + if (count == -1) + { + count = mNumIndices; + } + + bool mapped = false; + //see if range is already mapped + for (U32 i = 0; i < mMappedIndexRegions.size(); ++i) + { + MappedRegion& region = mMappedIndexRegions[i]; + if (expand_region(region, index, count)) + { + mapped = true; + } + } + + if (!mapped) + { + //not already mapped, map new region + MappedRegion region(TYPE_INDEX, map_range ? -1 : index, count); + mMappedIndexRegions.push_back(region); + } + } + + if (mIndexLocked && map_range) + { + llerrs << "Attempted to map a specific range of a buffer that was already mapped." << llendl; + } + + if (!mIndexLocked) { LLMemType mt_v(LLMemType::MTYPE_VERTEX_MAP_BUFFER_INDICES); @@ -1034,12 +1150,32 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 access) if(sDisableVBOMapping) { + map_range = false; allocateClientIndexBuffer() ; } else { - U8* src = (U8*) glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB); - mMappedIndexData = LL_NEXT_ALIGNED_ADDRESS(src); + U8* src = NULL; + if (gGLManager.mHasMapBufferRange) + { + if (map_range) + { + S32 offset = sizeof(U16)*index; + S32 length = sizeof(U16)*count; + src = (U8*) glMapBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length, GL_MAP_WRITE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT | GL_MAP_INVALIDATE_RANGE_BIT); + } + else + { + src = (U8*) glMapBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, sizeof(U16)*mNumIndices, GL_MAP_WRITE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT); + } + } + else + { + map_range = false; + src = (U8*) glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB); + } + + mMappedIndexData = src; //LL_NEXT_ALIGNED_ADDRESS(src); mAlignedIndexOffset = mMappedIndexData - src; stop_glerror(); } @@ -1068,19 +1204,31 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 access) sMappedCount++; } + else + { + map_range = false; + } - return mMappedIndexData ; + if (map_range) + { + return mMappedIndexData; + } + else + { + return mMappedIndexData + sizeof(U16)*index; + } } void LLVertexBuffer::unmapBuffer(S32 type) { LLMemType mt2(LLMemType::MTYPE_VERTEX_UNMAP_BUFFER); - if (!useVBOs()) + if (!useVBOs() || type == -2) { return ; //nothing to unmap } bool updated_all = false ; + if (mMappedData && mVertexLocked && type != TYPE_INDEX) { updated_all = (mIndexLocked && type < 0) ; //both vertex and index buffers done updating @@ -1093,6 +1241,23 @@ void LLVertexBuffer::unmapBuffer(S32 type) } else { + if (gGLManager.mHasMapBufferRange) + { + if (!mMappedVertexRegions.empty()) + { + stop_glerror(); + for (U32 i = 0; i < mMappedVertexRegions.size(); ++i) + { + const MappedRegion& region = mMappedVertexRegions[i]; + S32 offset = region.mIndex >= 0 ? 
mOffsets[region.mType]+sTypeSize[region.mType]*region.mIndex : 0; + S32 length = sTypeSize[region.mType]*region.mCount; + glFlushMappedBufferRange(GL_ARRAY_BUFFER_ARB, offset, length); + stop_glerror(); + } + + mMappedVertexRegions.clear(); + } + } stop_glerror(); glUnmapBufferARB(GL_ARRAY_BUFFER_ARB); stop_glerror(); @@ -1103,8 +1268,8 @@ void LLVertexBuffer::unmapBuffer(S32 type) mVertexLocked = FALSE ; sMappedCount--; } - - if(mMappedIndexData && mIndexLocked && (type < 0 || type == TYPE_INDEX)) + + if (mMappedIndexData && mIndexLocked && (type < 0 || type == TYPE_INDEX)) { if(sDisableVBOMapping) { @@ -1114,6 +1279,23 @@ void LLVertexBuffer::unmapBuffer(S32 type) } else { + + if (gGLManager.mHasMapBufferRange) + { + if (!mMappedIndexRegions.empty()) + { + for (U32 i = 0; i < mMappedIndexRegions.size(); ++i) + { + const MappedRegion& region = mMappedIndexRegions[i]; + S32 offset = region.mIndex >= 0 ? sizeof(U16)*region.mIndex : 0; + S32 length = sizeof(U16)*region.mCount; + glFlushMappedBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length); + stop_glerror(); + } + + mMappedIndexRegions.clear(); + } + } stop_glerror(); glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB); stop_glerror(); @@ -1152,19 +1334,21 @@ template struct VertexBufferStrider typedef LLStrider strider_t; static bool get(LLVertexBuffer& vbo, strider_t& strider, - S32 index) + S32 index, S32 count, bool map_range) { if (type == LLVertexBuffer::TYPE_INDEX) { S32 stride = sizeof(T); - if (vbo.mapIndexBuffer() == NULL) + U8* ptr = vbo.mapIndexBuffer(index, count, map_range); + + if (ptr == NULL) { llwarns << "mapIndexBuffer failed!" << llendl; return FALSE; } - strider = (T*)(vbo.getMappedIndices() + index*stride); + strider = (T*)ptr; strider.setStride(0); return TRUE; } @@ -1172,13 +1356,15 @@ template struct VertexBufferStrider { S32 stride = LLVertexBuffer::sTypeSize[type]; - if (vbo.mapVertexBuffer(type) == NULL) + U8* ptr = vbo.mapVertexBuffer(type, index, count, map_range); + + if (ptr == NULL) { llwarns << "mapVertexBuffer failed!" 
<< llendl; return FALSE; } - strider = (T*)(vbo.getMappedData() + vbo.getOffset(type)+index*stride); + strider = (T*)ptr; strider.setStride(stride); return TRUE; } @@ -1190,55 +1376,48 @@ template struct VertexBufferStrider } }; -bool LLVertexBuffer::getVertexStrider(LLStrider& strider, S32 index) +bool LLVertexBuffer::getVertexStrider(LLStrider& strider, S32 index, S32 count, bool map_range) { - return VertexBufferStrider::get(*this, strider, index); + return VertexBufferStrider::get(*this, strider, index, count, map_range); } -bool LLVertexBuffer::getIndexStrider(LLStrider& strider, S32 index) +bool LLVertexBuffer::getIndexStrider(LLStrider& strider, S32 index, S32 count, bool map_range) { - return VertexBufferStrider::get(*this, strider, index); + return VertexBufferStrider::get(*this, strider, index, count, map_range); } -bool LLVertexBuffer::getTexCoord0Strider(LLStrider& strider, S32 index) +bool LLVertexBuffer::getTexCoord0Strider(LLStrider& strider, S32 index, S32 count, bool map_range) { - return VertexBufferStrider::get(*this, strider, index); + return VertexBufferStrider::get(*this, strider, index, count, map_range); } -bool LLVertexBuffer::getTexCoord1Strider(LLStrider& strider, S32 index) +bool LLVertexBuffer::getTexCoord1Strider(LLStrider& strider, S32 index, S32 count, bool map_range) { - return VertexBufferStrider::get(*this, strider, index); + return VertexBufferStrider::get(*this, strider, index, count, map_range); } -/*bool LLVertexBuffer::getTexCoord2Strider(LLStrider& strider, S32 index) -{ - return VertexBufferStrider::get(*this, strider, index); -} -bool LLVertexBuffer::getTexCoord3Strider(LLStrider& strider, S32 index) -{ - return VertexBufferStrider::get(*this, strider, index); -}*/ -bool LLVertexBuffer::getNormalStrider(LLStrider& strider, S32 index) + +bool LLVertexBuffer::getNormalStrider(LLStrider& strider, S32 index, S32 count, bool map_range) { - return VertexBufferStrider::get(*this, strider, index); + return VertexBufferStrider::get(*this, strider, index, count, map_range); } -bool LLVertexBuffer::getBinormalStrider(LLStrider& strider, S32 index) +bool LLVertexBuffer::getBinormalStrider(LLStrider& strider, S32 index, S32 count, bool map_range) { - return VertexBufferStrider::get(*this, strider, index); + return VertexBufferStrider::get(*this, strider, index, count, map_range); } -bool LLVertexBuffer::getColorStrider(LLStrider& strider, S32 index) +bool LLVertexBuffer::getColorStrider(LLStrider& strider, S32 index, S32 count, bool map_range) { - return VertexBufferStrider::get(*this, strider, index); + return VertexBufferStrider::get(*this, strider, index, count, map_range); } -bool LLVertexBuffer::getWeightStrider(LLStrider& strider, S32 index) +bool LLVertexBuffer::getWeightStrider(LLStrider& strider, S32 index, S32 count, bool map_range) { - return VertexBufferStrider::get(*this, strider, index); + return VertexBufferStrider::get(*this, strider, index, count, map_range); } -bool LLVertexBuffer::getWeight4Strider(LLStrider& strider, S32 index) +bool LLVertexBuffer::getWeight4Strider(LLStrider& strider, S32 index, S32 count, bool map_range) { - return VertexBufferStrider::get(*this, strider, index); + return VertexBufferStrider::get(*this, strider, index, count, map_range); } -bool LLVertexBuffer::getClothWeightStrider(LLStrider& strider, S32 index) +bool LLVertexBuffer::getClothWeightStrider(LLStrider& strider, S32 index, S32 count, bool map_range) { - return VertexBufferStrider::get(*this, strider, index); + return VertexBufferStrider::get(*this, strider, 
index, count, map_range); } //---------------------------------------------------------------------------- @@ -1510,11 +1689,3 @@ void LLVertexBuffer::setupVertexBuffer(U32 data_mask) const llglassertok(); } -void LLVertexBuffer::markDirty(U32 vert_index, U32 vert_count, U32 indices_index, U32 indices_count) -{ - // TODO: use GL_APPLE_flush_buffer_range here - /*if (useVBOs() && !mFilthy) - { - - }*/ -} -- cgit v1.2.3 From d4e09e81ca1c6350b5b73c9209baa9b1d5391746 Mon Sep 17 00:00:00 2001 From: seth_productengine Date: Thu, 2 Jun 2011 17:03:51 +0300 Subject: Linux build fix. Unused local varible removed. --- indra/llrender/llvertexbuffer.cpp | 2 -- 1 file changed, 2 deletions(-) (limited to 'indra/llrender/llvertexbuffer.cpp') diff --git a/indra/llrender/llvertexbuffer.cpp b/indra/llrender/llvertexbuffer.cpp index 27cc88462a..74fd3bf197 100644 --- a/indra/llrender/llvertexbuffer.cpp +++ b/indra/llrender/llvertexbuffer.cpp @@ -1338,8 +1338,6 @@ template struct VertexBufferStrider { if (type == LLVertexBuffer::TYPE_INDEX) { - S32 stride = sizeof(T); - U8* ptr = vbo.mapIndexBuffer(index, count, map_range); if (ptr == NULL) -- cgit v1.2.3 From ffab1eef57813625cae51bffd8dd830371b2e203 Mon Sep 17 00:00:00 2001 From: Leslie Linden Date: Fri, 3 Jun 2011 16:24:07 -0700 Subject: Mac build fixes. Reviewed by davep. --- indra/llrender/llvertexbuffer.cpp | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'indra/llrender/llvertexbuffer.cpp') diff --git a/indra/llrender/llvertexbuffer.cpp b/indra/llrender/llvertexbuffer.cpp index 74fd3bf197..3197866fa9 100644 --- a/indra/llrender/llvertexbuffer.cpp +++ b/indra/llrender/llvertexbuffer.cpp @@ -1017,6 +1017,7 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran else { U8* src = NULL; +#ifdef GL_ARB_map_buffer_range if (gGLManager.mHasMapBufferRange) { if (map_range) @@ -1031,6 +1032,9 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran } } else +#else + llassert_always(!gGLManager.mHasMapBufferRange); +#endif { map_range = false; src = (U8*) glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB); @@ -1156,6 +1160,7 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range) else { U8* src = NULL; +#ifdef GL_ARB_map_buffer_range if (gGLManager.mHasMapBufferRange) { if (map_range) @@ -1170,6 +1175,9 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range) } } else +#else + llassert_always(!gGLManager.mHasMapBufferRange); +#endif { map_range = false; src = (U8*) glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB); @@ -1241,6 +1249,7 @@ void LLVertexBuffer::unmapBuffer(S32 type) } else { +#ifdef GL_ARB_map_buffer_range if (gGLManager.mHasMapBufferRange) { if (!mMappedVertexRegions.empty()) @@ -1258,6 +1267,9 @@ void LLVertexBuffer::unmapBuffer(S32 type) mMappedVertexRegions.clear(); } } +#else + llassert_always(!gGLManager.mHasMapBufferRange); +#endif stop_glerror(); glUnmapBufferARB(GL_ARRAY_BUFFER_ARB); stop_glerror(); @@ -1279,7 +1291,7 @@ void LLVertexBuffer::unmapBuffer(S32 type) } else { - +#ifdef GL_ARB_map_buffer_range if (gGLManager.mHasMapBufferRange) { if (!mMappedIndexRegions.empty()) @@ -1296,6 +1308,9 @@ void LLVertexBuffer::unmapBuffer(S32 type) mMappedIndexRegions.clear(); } } +#else + llassert_always(!gGLManager.mHasMapBufferRange); +#endif stop_glerror(); glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB); stop_glerror(); -- cgit v1.2.3 From dd4e50610147393bb355186e83672a5741b0e299 Mon Sep 17 00:00:00 2001 
From: Dave Parks Date: Fri, 10 Jun 2011 01:28:53 -0500 Subject: Followup on mapbuffer work -- only use map buffer where map_buffer_range is available, and allow non-buffer-mapping implementation to take advantage of recorded mapped regions. --- indra/llrender/llvertexbuffer.cpp | 63 ++++++++++++++++++++++++++++++--------- 1 file changed, 49 insertions(+), 14 deletions(-) (limited to 'indra/llrender/llvertexbuffer.cpp') diff --git a/indra/llrender/llvertexbuffer.cpp b/indra/llrender/llvertexbuffer.cpp index 3197866fa9..4a0b964e61 100644 --- a/indra/llrender/llvertexbuffer.cpp +++ b/indra/llrender/llvertexbuffer.cpp @@ -968,11 +968,11 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran if (useVBOs()) { - if (!sDisableVBOMapping && gGLManager.mHasMapBufferRange) + if (sDisableVBOMapping || gGLManager.mHasMapBufferRange) { if (count == -1) { - count = mNumVerts; + count = mNumVerts-index; } bool mapped = false; @@ -985,6 +985,7 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran if (expand_region(region, index, count)) { mapped = true; + break; } } } @@ -992,7 +993,7 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran if (!mapped) { //not already mapped, map new region - MappedRegion region(type, map_range ? -1 : index, count); + MappedRegion region(type, !sDisableVBOMapping && map_range ? -1 : index, count); mMappedVertexRegions.push_back(region); } } @@ -1089,7 +1090,7 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran map_range = false; } - if (map_range) + if (map_range && !sDisableVBOMapping) { return mMappedData; } @@ -1113,11 +1114,11 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range) if (useVBOs()) { - if (!sDisableVBOMapping && gGLManager.mHasMapBufferRange) + if (sDisableVBOMapping || gGLManager.mHasMapBufferRange) { if (count == -1) { - count = mNumIndices; + count = mNumIndices-index; } bool mapped = false; @@ -1128,13 +1129,14 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range) if (expand_region(region, index, count)) { mapped = true; + break; } } if (!mapped) { //not already mapped, map new region - MappedRegion region(TYPE_INDEX, map_range ? -1 : index, count); + MappedRegion region(TYPE_INDEX, !sDisableVBOMapping && map_range ? -1 : index, count); mMappedIndexRegions.push_back(region); } } @@ -1217,7 +1219,7 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range) map_range = false; } - if (map_range) + if (map_range && !sDisableVBOMapping) { return mMappedIndexData; } @@ -1243,9 +1245,26 @@ void LLVertexBuffer::unmapBuffer(S32 type) if(sDisableVBOMapping) { - stop_glerror(); - glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, 0, getSize(), mMappedData); - stop_glerror(); + if (!mMappedVertexRegions.empty()) + { + stop_glerror(); + for (U32 i = 0; i < mMappedVertexRegions.size(); ++i) + { + const MappedRegion& region = mMappedVertexRegions[i]; + S32 offset = region.mIndex >= 0 ? 
mOffsets[region.mType]+sTypeSize[region.mType]*region.mIndex : 0; + S32 length = sTypeSize[region.mType]*region.mCount; + glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, offset, length, mMappedData+offset); + stop_glerror(); + } + + mMappedVertexRegions.clear(); + } + else + { + stop_glerror(); + glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, 0, getSize(), mMappedData); + stop_glerror(); + } } else { @@ -1285,9 +1304,25 @@ void LLVertexBuffer::unmapBuffer(S32 type) { if(sDisableVBOMapping) { - stop_glerror(); - glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, getIndicesSize(), mMappedIndexData); - stop_glerror(); + if (!mMappedIndexRegions.empty()) + { + for (U32 i = 0; i < mMappedIndexRegions.size(); ++i) + { + const MappedRegion& region = mMappedIndexRegions[i]; + S32 offset = region.mIndex >= 0 ? sizeof(U16)*region.mIndex : 0; + S32 length = sizeof(U16)*region.mCount; + glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length, mMappedIndexData+offset); + stop_glerror(); + } + + mMappedIndexRegions.clear(); + } + else + { + stop_glerror(); + glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, getIndicesSize(), mMappedIndexData); + stop_glerror(); + } } else { -- cgit v1.2.3
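
A note on the first commit in this series (SH-1682, texture indexes to improve batch size): the reason setupVertexBuffer() starts issuing glVertexPointer() with 4 components when MAP_TEXTURE_INDEX is in the data mask is that each position now carries a fourth float, which the shaders for these batches can read as an index selecting one of the textures bound for the draw call (the follow-up commit's message caps that at 6 textures per batch). A minimal sketch of such a layout, assuming an interleaved struct for brevity; the viewer actually keeps each attribute in its own region with stride sTypeSize[type], and the consuming shader is not part of this diff.

#include <GL/glew.h>

// Interleaved layout used only for this sketch; the viewer stores each
// attribute separately, but the idea of an index riding in position.w is
// the same.
struct TexturedVertex
{
    float position[4];   // x, y, z, and the batch texture index in w
    float texcoord[2];
};

// Client-array flavor of the pointer setup; with a bound VBO the last
// argument would instead be a byte offset into the buffer, as in the patch.
void setup_pointers(const TexturedVertex* verts, bool use_texture_index)
{
    const GLsizei stride = sizeof(TexturedVertex);

    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);

    if (use_texture_index)
    {
        // 4 components: the shader can read the w component as an index
        // into the small set of textures bound for this batch.
        glVertexPointer(4, GL_FLOAT, stride, verts[0].position);
    }
    else
    {
        glVertexPointer(3, GL_FLOAT, stride, verts[0].position);
    }
    glTexCoordPointer(2, GL_FLOAT, stride, verts[0].texcoord);
}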
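
The expand_region() helper added in the GL_ARB_map_buffer_range commit merges each newly requested [index, index+count) span into any already recorded MappedRegion of the same type, so unmapBuffer() later flushes one span per merged region instead of one per write. A standalone sketch of that merging, using a simplified stand-in for MappedRegion (the real class is declared in llvertexbuffer.h, outside this diff) and std::min/std::max in place of llmin/llmax:

#include <algorithm>
#include <cstdio>
#include <vector>

struct MappedRegion
{
    int mType;   // vertex data type (e.g. TYPE_VERTEX) or TYPE_INDEX
    int mIndex;  // first element covered by the mapping
    int mCount;  // number of elements covered
    MappedRegion(int type, int index, int count)
        : mType(type), mIndex(index), mCount(count) {}
};

// Returns true if [index, index+count) touches or overlaps the region and
// grows the region to cover the union; returns false if there is a gap.
bool expand_region(MappedRegion& region, int index, int count)
{
    int end = index + count;
    int region_end = region.mIndex + region.mCount;

    if (end < region.mIndex || index > region_end)
    {   // gap exists, do not merge
        return false;
    }

    int new_end = std::max(end, region_end);
    int new_index = std::min(index, region.mIndex);
    region.mIndex = new_index;
    region.mCount = new_end - new_index;
    return true;
}

int main()
{
    std::vector<MappedRegion> regions;
    // Simulate three map requests against the same vertex type.
    int requests[3][2] = { {0, 16}, {16, 8}, {100, 4} };
    for (auto& r : requests)
    {
        bool merged = false;
        for (auto& region : regions)
        {
            if (expand_region(region, r[0], r[1])) { merged = true; break; }
        }
        if (!merged)
        {
            regions.push_back(MappedRegion(0, r[0], r[1]));
        }
    }
    for (auto& region : regions)
    {
        std::printf("region: index=%d count=%d\n", region.mIndex, region.mCount);
    }
    return 0;
}

Compiled and run as-is, this prints two regions, [0,24) and [100,104): the first two requests touch end to end and collapse into one span, while the third leaves a gap and stays separate, which is exactly the behavior the gap test in the patch encodes.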
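
The same commit swaps plain glMapBufferARB() for glMapBufferRange() with GL_MAP_FLUSH_EXPLICIT_BIT, so only the bytes actually written need to be synchronized at unmap time via glFlushMappedBufferRange(). A sketch of that pattern in isolation, assuming a current OpenGL context and a loader such as GLEW providing the entry points; the buffer handle, element type, and sizes are illustrative:

#include <GL/glew.h>
#include <cstring>

// Write 'count' floats starting at element 'first' into 'vbo' without
// forcing the driver to synchronize the rest of the buffer.
void update_subrange(GLuint vbo, GLsizeiptr buffer_size,
                     const float* src, GLintptr first, GLsizeiptr count)
{
    glBindBuffer(GL_ARRAY_BUFFER, vbo);

    // Map the whole buffer for writing, but promise to flush modified
    // ranges explicitly, so the driver only has to handle the bytes we
    // actually touched.
    void* dst = glMapBufferRange(GL_ARRAY_BUFFER, 0, buffer_size,
                                 GL_MAP_WRITE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT);
    if (!dst)
    {
        return; // mapping failed; a real caller would fall back to glBufferSubData
    }

    GLintptr offset = first * (GLintptr)sizeof(float);
    GLsizeiptr length = count * (GLsizeiptr)sizeof(float);
    std::memcpy((char*)dst + offset, src, (size_t)length);

    // With GL_MAP_FLUSH_EXPLICIT_BIT, the flush offset is relative to the
    // start of the mapped range (here the whole buffer, so they coincide).
    glFlushMappedBufferRange(GL_ARRAY_BUFFER, offset, length);
    glUnmapBuffer(GL_ARRAY_BUFFER);
}

One detail worth keeping in mind while reading the patch: the offset passed to glFlushMappedBufferRange() is relative to the start of the mapped range. When only a sub-range is mapped (the map_range case), the patch therefore records the region with mIndex = -1 and flushes from offset 0; when the whole buffer is mapped, the per-region offsets computed from mIndex, mOffsets, and sTypeSize are correct as written.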
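
On the caller side, the strider getters now take index, count, and map_range, and the returned strider points at the start of the mapped range rather than at the start of the buffer. A hypothetical usage sketch (the function, its arguments, and LLVector3 as the position type are assumptions; only the getVertexStrider() signature comes from this diff):

// Hypothetical caller of the widened strider interface.
void write_vertex_range(LLVertexBuffer* buff, S32 first, S32 count,
                        const LLVector3* new_positions)
{
    LLStrider<LLVector3> verts;
    // Ask to map only [first, first+count) instead of the whole VBO.
    if (!buff->getVertexStrider(verts, first, count, true))
    {
        return; // mapping failed; nothing was touched
    }
    for (S32 i = 0; i < count; ++i)
    {
        // The strider is already positioned at 'first', so index from 0.
        verts[i] = new_positions[i];
    }
}

Because the map_range path maps with GL_MAP_INVALIDATE_RANGE_BIT, the mapped span must be fully rewritten rather than read back and modified; the recorded region is then flushed when unmapBuffer() walks mMappedVertexRegions.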
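
The final commit lets the sDisableVBOMapping path benefit from the same bookkeeping: instead of one glBufferSubDataARB() call covering the whole client copy, it uploads only the recorded dirty spans and falls back to the full upload when no regions were recorded. A reduced sketch of that idea with simplified types, assuming a current GL context and core-profile entry point names:

#include <GL/glew.h>
#include <vector>

struct DirtyRegion { GLintptr offset; GLsizeiptr length; };

// Push only the spans of 'client_copy' that were marked dirty into 'vbo'.
void flush_dirty_regions(GLuint vbo, const unsigned char* client_copy,
                         std::vector<DirtyRegion>& regions,
                         GLsizeiptr whole_size)
{
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    if (regions.empty())
    {
        // Nothing recorded: behave like the old code and push everything.
        glBufferSubData(GL_ARRAY_BUFFER, 0, whole_size, client_copy);
        return;
    }
    for (const DirtyRegion& r : regions)
    {
        glBufferSubData(GL_ARRAY_BUFFER, r.offset, r.length, client_copy + r.offset);
    }
    regions.clear();
}

Clearing the region list after the upload mirrors the patch, which empties mMappedVertexRegions and mMappedIndexRegions once their spans have been handed to the driver.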