author     Debi King (Dessie) <dessie@lindenlab.com>  2011-08-18 14:46:32 -0400
committer  Debi King (Dessie) <dessie@lindenlab.com>  2011-08-18 14:46:32 -0400
commit     ba1589d118db0c25f607c74f37834fe8feed9760 (patch)
tree       908e3940d8c9624b1b0591567a35b2314ed6277c /indra/llrender/llvertexbuffer.cpp
parent     6c61a537918b5cc73336885f8bdd34943fda2482 (diff)
parent     d36a9a7907a1528a8d163577a0770b50e6248802 (diff)

merged .hgtags
Diffstat (limited to 'indra/llrender/llvertexbuffer.cpp')
-rw-r--r--  indra/llrender/llvertexbuffer.cpp | 219
1 file changed, 179 insertions(+), 40 deletions(-)
diff --git a/indra/llrender/llvertexbuffer.cpp b/indra/llrender/llvertexbuffer.cpp
index 4a0b964e61..1180afa631 100644
--- a/indra/llrender/llvertexbuffer.cpp
+++ b/indra/llrender/llvertexbuffer.cpp
@@ -35,6 +35,8 @@
#include "llmemtype.h"
#include "llrender.h"
#include "llvector4a.h"
+#include "llglslshader.h"
+
//============================================================================
@@ -65,6 +67,60 @@ S32 LLVertexBuffer::sWeight4Loc = -1;
std::vector<U32> LLVertexBuffer::sDeleteList;
+const U32 FENCE_WAIT_TIME_NANOSECONDS = 10000; //0.01 ms (10 microseconds)
+
+class LLGLSyncFence : public LLGLFence
+{
+public:
+#ifdef GL_ARB_sync
+ GLsync mSync;
+#endif
+
+ LLGLSyncFence()
+ {
+#ifdef GL_ARB_sync
+ mSync = 0;
+#endif
+ }
+
+ ~LLGLSyncFence()
+ {
+#ifdef GL_ARB_sync
+ if (mSync)
+ {
+ glDeleteSync(mSync);
+ }
+#endif
+ }
+
+ void placeFence()
+ {
+#ifdef GL_ARB_sync
+ if (mSync)
+ {
+ glDeleteSync(mSync);
+ }
+ mSync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
+#endif
+ }
+
+ void wait()
+ {
+#ifdef GL_ARB_sync
+ if (mSync)
+ {
+ while (glClientWaitSync(mSync, 0, FENCE_WAIT_TIME_NANOSECONDS) == GL_TIMEOUT_EXPIRED)
+ { //track the number of times we've waited here
+ static S32 waits = 0;
+ waits++;
+ }
+ }
+#endif
+ }
+
+
+};
+
S32 LLVertexBuffer::sTypeSize[LLVertexBuffer::TYPE_MAX] =
{
sizeof(LLVector4), // TYPE_VERTEX,
@@ -309,6 +365,7 @@ void LLVertexBuffer::drawRange(U32 mode, U32 start, U32 end, U32 count, U32 indi
glDrawRangeElements(sGLMode[mode], start, end, count, GL_UNSIGNED_SHORT,
idx);
stop_glerror();
+ placeFence();
}
void LLVertexBuffer::draw(U32 mode, U32 count, U32 indices_offset) const
@@ -340,6 +397,7 @@ void LLVertexBuffer::draw(U32 mode, U32 count, U32 indices_offset) const
glDrawElements(sGLMode[mode], count, GL_UNSIGNED_SHORT,
((U16*) getIndicesPointer()) + indices_offset);
stop_glerror();
+ placeFence();
}
void LLVertexBuffer::drawArrays(U32 mode, U32 first, U32 count) const
@@ -365,6 +423,7 @@ void LLVertexBuffer::drawArrays(U32 mode, U32 first, U32 count) const
stop_glerror();
glDrawArrays(sGLMode[mode], first, count);
stop_glerror();
+ placeFence();
}
//static
@@ -444,9 +503,11 @@ LLVertexBuffer::LLVertexBuffer(U32 typemask, S32 usage) :
mFilthy(FALSE),
mEmpty(TRUE),
mResized(FALSE),
- mDynamicSize(FALSE)
+ mDynamicSize(FALSE),
+ mFence(NULL)
{
LLMemType mt2(LLMemType::MTYPE_VERTEX_CONSTRUCTOR);
+ mFence = NULL;
if (!sEnableVBOs)
{
mUsage = 0 ;
@@ -527,9 +588,40 @@ LLVertexBuffer::~LLVertexBuffer()
destroyGLIndices();
sCount--;
+ if (mFence)
+ {
+ delete mFence;
+ }
+
+ mFence = NULL;
+
llassert_always(!mMappedData && !mMappedIndexData) ;
};
+void LLVertexBuffer::placeFence() const
+{
+ /*if (!mFence && useVBOs())
+ {
+ if (gGLManager.mHasSync)
+ {
+ mFence = new LLGLSyncFence();
+ }
+ }
+
+ if (mFence)
+ {
+ mFence->placeFence();
+ }*/
+}
+
+void LLVertexBuffer::waitFence() const
+{
+ /*if (mFence)
+ {
+ mFence->wait();
+ }*/
+}
+
//----------------------------------------------------------------------------
void LLVertexBuffer::genBuffer()
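The placeFence()/waitFence() hooks above are currently stubbed out, but the pattern they are built for is the standard GL_ARB_sync handshake: drop a fence behind the draw calls that read a buffer, and wait on it (in short client-side waits) before the CPU rewrites that buffer. A minimal standalone sketch, assuming a current GL context with GL_ARB_sync as in this file; the names after_draw_calls/before_cpu_write and the static fence are illustrative only, not part of LLVertexBuffer:

#ifdef GL_ARB_sync
static GLsync gpu_fence = 0;

//call after submitting draw commands that source the buffer
void after_draw_calls()
{
	if (gpu_fence)
	{ //discard any stale fence before inserting a new one
		glDeleteSync(gpu_fence);
	}
	gpu_fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
}

//call before the CPU maps or rewrites the buffer
void before_cpu_write()
{
	if (gpu_fence)
	{
		//spin in short client waits until the GPU has passed the fence;
		//GL_SYNC_FLUSH_COMMANDS_BIT ensures the fence command is submitted
		while (glClientWaitSync(gpu_fence, GL_SYNC_FLUSH_COMMANDS_BIT, 10000) == GL_TIMEOUT_EXPIRED)
		{
		}
		glDeleteSync(gpu_fence);
		gpu_fence = 0;
	}
}
#endif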
@@ -892,17 +984,11 @@ BOOL LLVertexBuffer::useVBOs() const
{
//it's generally ineffective to use VBO for things that are streaming on apple
-#if LL_DARWIN
- if (!mUsage || mUsage == GL_STREAM_DRAW_ARB)
- {
- return FALSE;
- }
-#else
if (!mUsage)
{
return FALSE;
}
-#endif
+
return TRUE;
}
@@ -967,8 +1053,7 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran
if (useVBOs())
{
-
- if (sDisableVBOMapping || gGLManager.mHasMapBufferRange)
+ if (sDisableVBOMapping || gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
{
if (count == -1)
{
@@ -1008,6 +1093,7 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran
LLMemType mt_v(LLMemType::MTYPE_VERTEX_MAP_BUFFER_VERTICES);
setBuffer(0, type);
mVertexLocked = TRUE;
+ sMappedCount++;
stop_glerror();
if(sDisableVBOMapping)
@@ -1018,29 +1104,50 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran
else
{
U8* src = NULL;
-#ifdef GL_ARB_map_buffer_range
+ waitFence();
if (gGLManager.mHasMapBufferRange)
{
if (map_range)
{
+#ifdef GL_ARB_map_buffer_range
S32 offset = mOffsets[type] + sTypeSize[type]*index;
S32 length = (sTypeSize[type]*count+0xF) & ~0xF;
- src = (U8*) glMapBufferRange(GL_ARRAY_BUFFER_ARB, offset, length, GL_MAP_WRITE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT | GL_MAP_INVALIDATE_RANGE_BIT);
+ src = (U8*) glMapBufferRange(GL_ARRAY_BUFFER_ARB, offset, length,
+ GL_MAP_WRITE_BIT |
+ GL_MAP_FLUSH_EXPLICIT_BIT |
+ GL_MAP_INVALIDATE_RANGE_BIT);
+#endif
}
else
{
- src = (U8*) glMapBufferRange(GL_ARRAY_BUFFER_ARB, 0, mSize, GL_MAP_WRITE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT);
+#ifdef GL_ARB_map_buffer_range
+ src = (U8*) glMapBufferRange(GL_ARRAY_BUFFER_ARB, 0, mSize,
+ GL_MAP_WRITE_BIT |
+ GL_MAP_FLUSH_EXPLICIT_BIT);
+#endif
+ }
+ }
+ else if (gGLManager.mHasFlushBufferRange)
+ {
+ if (map_range)
+ {
+ glBufferParameteriAPPLE(GL_ARRAY_BUFFER_ARB, GL_BUFFER_SERIALIZED_MODIFY_APPLE, GL_FALSE);
+ glBufferParameteriAPPLE(GL_ARRAY_BUFFER_ARB, GL_BUFFER_FLUSHING_UNMAP_APPLE, GL_FALSE);
+ src = (U8*) glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
+ }
+ else
+ {
+ src = (U8*) glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
}
}
else
-#else
- llassert_always(!gGLManager.mHasMapBufferRange);
-#endif
{
map_range = false;
src = (U8*) glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
}
+ llassert(src != NULL);
+
mMappedData = LL_NEXT_ALIGNED_ADDRESS<U8>(src);
mAlignedOffset = mMappedData - src;
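For reference, the ARB path introduced above boils down to mapping with deferred flushing and then flushing only the bytes that were written. A minimal sketch, assuming GL_ARB_map_buffer_range, a buffer of at least `size` bytes already bound to GL_ARRAY_BUFFER_ARB, and <cstring> for memcpy; write_subrange is an illustrative helper, not part of LLVertexBuffer:

#ifdef GL_ARB_map_buffer_range
void write_subrange(S32 size, S32 offset, S32 length, const void* data)
{
	//map the whole buffer for writing, deferring flushes to explicit calls
	U8* dst = (U8*) glMapBufferRange(GL_ARRAY_BUFFER_ARB, 0, size,
	                                 GL_MAP_WRITE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT);
	if (dst)
	{
		memcpy(dst + offset, data, length);
		//offsets passed to glFlushMappedBufferRange are relative to the start
		//of the mapped range (the whole buffer here, so they coincide)
		glFlushMappedBufferRange(GL_ARRAY_BUFFER_ARB, offset, length);
		glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
	}
}
#endif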
@@ -1082,7 +1189,6 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran
llerrs << "memory allocation for vertex data failed." << llendl ;
}
}
- sMappedCount++;
}
}
else
@@ -1090,7 +1196,7 @@ U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_ran
map_range = false;
}
- if (map_range && !sDisableVBOMapping)
+ if (map_range && gGLManager.mHasMapBufferRange && !sDisableVBOMapping)
{
return mMappedData;
}
@@ -1114,7 +1220,7 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
if (useVBOs())
{
- if (sDisableVBOMapping || gGLManager.mHasMapBufferRange)
+ if (sDisableVBOMapping || gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
{
if (count == -1)
{
@@ -1152,6 +1258,7 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
setBuffer(0, TYPE_INDEX);
mIndexLocked = TRUE;
+ sMappedCount++;
stop_glerror();
if(sDisableVBOMapping)
@@ -1162,29 +1269,51 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
else
{
U8* src = NULL;
-#ifdef GL_ARB_map_buffer_range
+ waitFence();
if (gGLManager.mHasMapBufferRange)
{
if (map_range)
{
+#ifdef GL_ARB_map_buffer_range
S32 offset = sizeof(U16)*index;
S32 length = sizeof(U16)*count;
- src = (U8*) glMapBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length, GL_MAP_WRITE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT | GL_MAP_INVALIDATE_RANGE_BIT);
+ src = (U8*) glMapBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length,
+ GL_MAP_WRITE_BIT |
+ GL_MAP_FLUSH_EXPLICIT_BIT |
+ GL_MAP_INVALIDATE_RANGE_BIT);
+#endif
+ }
+ else
+ {
+#ifdef GL_ARB_map_buffer_range
+ src = (U8*) glMapBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, sizeof(U16)*mNumIndices,
+ GL_MAP_WRITE_BIT |
+ GL_MAP_FLUSH_EXPLICIT_BIT);
+#endif
+ }
+ }
+ else if (gGLManager.mHasFlushBufferRange)
+ {
+ if (map_range)
+ {
+ glBufferParameteriAPPLE(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_BUFFER_SERIALIZED_MODIFY_APPLE, GL_FALSE);
+ glBufferParameteriAPPLE(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_BUFFER_FLUSHING_UNMAP_APPLE, GL_FALSE);
+ src = (U8*) glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
}
else
{
- src = (U8*) glMapBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, sizeof(U16)*mNumIndices, GL_MAP_WRITE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT);
+ src = (U8*) glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
}
}
else
-#else
- llassert_always(!gGLManager.mHasMapBufferRange);
-#endif
{
map_range = false;
src = (U8*) glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
}
+ llassert(src != NULL);
+
+
mMappedIndexData = src; //LL_NEXT_ALIGNED_ADDRESS<U8>(src);
mAlignedIndexOffset = mMappedIndexData - src;
stop_glerror();
@@ -1211,15 +1340,13 @@ U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
llerrs << "memory allocation for Index data failed. " << llendl ;
}
}
-
- sMappedCount++;
}
else
{
map_range = false;
}
- if (map_range && !sDisableVBOMapping)
+ if (map_range && gGLManager.mHasMapBufferRange && !sDisableVBOMapping)
{
return mMappedIndexData;
}
@@ -1268,8 +1395,7 @@ void LLVertexBuffer::unmapBuffer(S32 type)
}
else
{
-#ifdef GL_ARB_map_buffer_range
- if (gGLManager.mHasMapBufferRange)
+ if (gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
{
if (!mMappedVertexRegions.empty())
{
@@ -1279,16 +1405,22 @@ void LLVertexBuffer::unmapBuffer(S32 type)
const MappedRegion& region = mMappedVertexRegions[i];
S32 offset = region.mIndex >= 0 ? mOffsets[region.mType]+sTypeSize[region.mType]*region.mIndex : 0;
S32 length = sTypeSize[region.mType]*region.mCount;
- glFlushMappedBufferRange(GL_ARRAY_BUFFER_ARB, offset, length);
+ if (gGLManager.mHasMapBufferRange)
+ {
+#ifdef GL_ARB_map_buffer_range
+ glFlushMappedBufferRange(GL_ARRAY_BUFFER_ARB, offset, length);
+#endif
+ }
+ else if (gGLManager.mHasFlushBufferRange)
+ {
+ glFlushMappedBufferRangeAPPLE(GL_ARRAY_BUFFER_ARB, offset, length);
+ }
stop_glerror();
}
mMappedVertexRegions.clear();
}
}
-#else
- llassert_always(!gGLManager.mHasMapBufferRange);
-#endif
stop_glerror();
glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
stop_glerror();
@@ -1326,8 +1458,7 @@ void LLVertexBuffer::unmapBuffer(S32 type)
}
else
{
-#ifdef GL_ARB_map_buffer_range
- if (gGLManager.mHasMapBufferRange)
+ if (gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
{
if (!mMappedIndexRegions.empty())
{
@@ -1336,16 +1467,24 @@ void LLVertexBuffer::unmapBuffer(S32 type)
const MappedRegion& region = mMappedIndexRegions[i];
S32 offset = region.mIndex >= 0 ? sizeof(U16)*region.mIndex : 0;
S32 length = sizeof(U16)*region.mCount;
- glFlushMappedBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length);
+ if (gGLManager.mHasMapBufferRange)
+ {
+#ifdef GL_ARB_map_buffer_range
+ glFlushMappedBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length);
+#endif
+ }
+ else if (gGLManager.mHasFlushBufferRange)
+ {
+#ifdef GL_APPLE_flush_buffer_range
+ glFlushMappedBufferRangeAPPLE(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length);
+#endif
+ }
stop_glerror();
}
mMappedIndexRegions.clear();
}
}
-#else
- llassert_always(!gGLManager.mHasMapBufferRange);
-#endif
stop_glerror();
glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);
stop_glerror();
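
The GL_APPLE_flush_buffer_range branches added above follow the same idea on drivers without ARB range mapping: opt out of serialized modification and implicit unmap flushes, then flush modified ranges by hand. A minimal sketch under those assumptions (buffer bound to GL_ARRAY_BUFFER_ARB, GL_APPLE_flush_buffer_range available); write_subrange_apple is an illustrative helper, not part of LLVertexBuffer:

#ifdef GL_APPLE_flush_buffer_range
void write_subrange_apple(S32 offset, S32 length, const void* data)
{
	//we will flush modified ranges ourselves, and unmapping should not
	//implicitly flush (or serialize against) the whole buffer
	glBufferParameteriAPPLE(GL_ARRAY_BUFFER_ARB, GL_BUFFER_SERIALIZED_MODIFY_APPLE, GL_FALSE);
	glBufferParameteriAPPLE(GL_ARRAY_BUFFER_ARB, GL_BUFFER_FLUSHING_UNMAP_APPLE, GL_FALSE);

	U8* dst = (U8*) glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
	if (dst)
	{
		memcpy(dst + offset, data, length);
		//flush only the bytes we touched before unmapping
		glFlushMappedBufferRangeAPPLE(GL_ARRAY_BUFFER_ARB, offset, length);
		glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
	}
}
#endif

With serialized modification disabled, synchronizing against in-flight GPU reads becomes the application's responsibility, which is what the fence hooks earlier in this change are intended to cover.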