author     Nyx (Neal Orman) <nyx@lindenlab.com>    2011-03-21 17:56:11 -0400
committer  Nyx (Neal Orman) <nyx@lindenlab.com>    2011-03-21 17:56:11 -0400
commit     955b727550e8595fd67c55775d8f0fe249826277 (patch)
tree       f4ac70eb36bcb14d44fad4199811d365eb4b788f /indra/llrender
parent     77fff26431671ffef8cf482044b1fcd98262d3e0 (diff)
parent     5b9ee2a9d5687eae6fd48ee6038cd2f7ed1d9a53 (diff)
merge
Diffstat (limited to 'indra/llrender')
-rw-r--r--  indra/llrender/llgl.cpp             18
-rw-r--r--  indra/llrender/llgl.h                2
-rw-r--r--  indra/llrender/llglheaders.h        18
-rw-r--r--  indra/llrender/llvertexbuffer.cpp  300
-rw-r--r--  indra/llrender/llvertexbuffer.h     30
5 files changed, 280 insertions, 88 deletions
diff --git a/indra/llrender/llgl.cpp b/indra/llrender/llgl.cpp
index 9354373dba..d5f0b81830 100644
--- a/indra/llrender/llgl.cpp
+++ b/indra/llrender/llgl.cpp
@@ -313,6 +313,8 @@ LLGLManager::LLGLManager() :
mIsDisabled(FALSE),
mHasMultitexture(FALSE),
+ mHasATIMemInfo(FALSE),
+ mHasNVXMemInfo(FALSE),
mNumTextureUnits(1),
mHasMipMapGeneration(FALSE),
mHasCompressedTextures(FALSE),
@@ -497,6 +499,20 @@ bool LLGLManager::initGL()
// This is called here because it depends on the setting of mIsGF2or4MX, and sets up mHasMultitexture.
initExtensions();
+ if (mHasATIMemInfo)
+ { //ask the gl how much vram is free at startup and attempt to use no more than half of that
+ S32 meminfo[4];
+ glGetIntegerv(GL_TEXTURE_FREE_MEMORY_ATI, meminfo);
+
+ mVRAM = meminfo[0]/1024;
+ }
+ else if (mHasNVXMemInfo)
+ {
+ S32 dedicated_memory;
+ glGetIntegerv(GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX, &dedicated_memory);
+ mVRAM = dedicated_memory/1024;
+ }
+
if (mHasMultitexture)
{
GLint num_tex_units;
@@ -659,6 +675,8 @@ void LLGLManager::initExtensions()
mHasTextureRectangle = FALSE;
#else // LL_MESA_HEADLESS
mHasMultitexture = glh_init_extensions("GL_ARB_multitexture");
+ mHasATIMemInfo = ExtensionExists("GL_ATI_meminfo", gGLHExts.mSysExts);
+ mHasNVXMemInfo = ExtensionExists("GL_NVX_gpu_memory_info", gGLHExts.mSysExts);
mHasMipMapGeneration = glh_init_extensions("GL_SGIS_generate_mipmap");
mHasSeparateSpecularColor = glh_init_extensions("GL_EXT_separate_specular_color");
mHasAnisotropic = glh_init_extensions("GL_EXT_texture_filter_anisotropic");
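For reference, both vendor extensions report sizes in kilobytes through ordinary glGetIntegerv queries, which is why the hunk above divides by 1024 to get megabytes. A minimal standalone sketch of the same probe, assuming a current GL context and the meminfo constants added to llglheaders.h further down (detect_vram_mb is an illustrative helper, not viewer code):

// Sketch of the VRAM probe above (illustrative, not viewer code).
static GLint detect_vram_mb(bool has_ati_meminfo, bool has_nvx_meminfo)
{
    if (has_ati_meminfo)
    {
        // GL_ATI_meminfo returns four values; [0] is total free memory in KB
        GLint meminfo[4] = { 0, 0, 0, 0 };
        glGetIntegerv(GL_TEXTURE_FREE_MEMORY_ATI, meminfo);
        return meminfo[0] / 1024;   // KB -> MB
    }
    if (has_nvx_meminfo)
    {
        // GL_NVX_gpu_memory_info reports dedicated video memory in KB
        GLint dedicated_kb = 0;
        glGetIntegerv(GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX, &dedicated_kb);
        return dedicated_kb / 1024; // KB -> MB
    }
    return 0;  // neither extension present; caller keeps its existing fallback
}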
diff --git a/indra/llrender/llgl.h b/indra/llrender/llgl.h
index df110613e3..0d7ba15b12 100644
--- a/indra/llrender/llgl.h
+++ b/indra/llrender/llgl.h
@@ -76,6 +76,8 @@ public:
// Extensions used by everyone
BOOL mHasMultitexture;
+ BOOL mHasATIMemInfo;
+ BOOL mHasNVXMemInfo;
S32 mNumTextureUnits;
BOOL mHasMipMapGeneration;
BOOL mHasCompressedTextures;
diff --git a/indra/llrender/llglheaders.h b/indra/llrender/llglheaders.h
index 46bc282436..c48e2bb5fa 100644
--- a/indra/llrender/llglheaders.h
+++ b/indra/llrender/llglheaders.h
@@ -837,4 +837,22 @@ extern void glGetBufferPointervARB (GLenum, GLenum, GLvoid* *);
#define GL_DEPTH_CLAMP 0x864F
#endif
+//GL_NVX_gpu_memory_info constants
+#ifndef GL_NVX_gpu_memory_info
+#define GL_NVX_gpu_memory_info
+#define GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX 0x9047
+#define GL_GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX 0x9048
+#define GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX 0x9049
+#define GL_GPU_MEMORY_INFO_EVICTION_COUNT_NVX 0x904A
+#define GL_GPU_MEMORY_INFO_EVICTED_MEMORY_NVX 0x904B
+#endif
+
+//GL_ATI_meminfo constants
+#ifndef GL_ATI_meminfo
+#define GL_ATI_meminfo
+#define GL_VBO_FREE_MEMORY_ATI 0x87FB
+#define GL_TEXTURE_FREE_MEMORY_ATI 0x87FC
+#define GL_RENDERBUFFER_FREE_MEMORY_ATI 0x87FD
+#endif
+
#endif // LL_LLGLHEADERS_H
diff --git a/indra/llrender/llvertexbuffer.cpp b/indra/llrender/llvertexbuffer.cpp
index 51870515f2..0d3f5b81bc 100644
--- a/indra/llrender/llvertexbuffer.cpp
+++ b/indra/llrender/llvertexbuffer.cpp
@@ -49,6 +49,7 @@ U32 LLVertexBuffer::sSetCount = 0;
S32 LLVertexBuffer::sCount = 0;
S32 LLVertexBuffer::sGLCount = 0;
S32 LLVertexBuffer::sMappedCount = 0;
+BOOL LLVertexBuffer::sDisableVBOMapping = FALSE ;
BOOL LLVertexBuffer::sEnableVBOs = TRUE;
U32 LLVertexBuffer::sGLRenderBuffer = 0;
U32 LLVertexBuffer::sGLRenderIndices = 0;
@@ -58,6 +59,7 @@ BOOL LLVertexBuffer::sIBOActive = FALSE;
U32 LLVertexBuffer::sAllocatedBytes = 0;
BOOL LLVertexBuffer::sMapped = FALSE;
BOOL LLVertexBuffer::sUseStreamDraw = TRUE;
+BOOL LLVertexBuffer::sPreferStreamDraw = FALSE;
S32 LLVertexBuffer::sWeight4Loc = -1;
std::vector<U32> LLVertexBuffer::sDeleteList;
@@ -147,7 +149,7 @@ void LLVertexBuffer::setupClientArrays(U32 data_mask)
{ //needs to be enabled
glEnableClientState(array[i]);
}
- else if (gDebugGL && glIsEnabled(array[i]))
+ else if (gDebugGL && i > 0 && glIsEnabled(array[i]))
{ //needs to be disabled, make sure it was (DEBUG TEMPORARY)
if (gDebugSession)
{
@@ -350,9 +352,21 @@ void LLVertexBuffer::drawArrays(U32 mode, U32 first, U32 count) const
}
//static
-void LLVertexBuffer::initClass(bool use_vbo)
+void LLVertexBuffer::initClass(bool use_vbo, bool no_vbo_mapping)
{
- sEnableVBOs = use_vbo;
+ sEnableVBOs = use_vbo && gGLManager.mHasVertexBufferObject ;
+ if(sEnableVBOs)
+ {
+ //llassert_always(glBindBufferARB) ; //double check the extension for VBO is loaded.
+
+ llinfos << "VBO is enabled." << llendl ;
+ }
+ else
+ {
+ llinfos << "VBO is disabled." << llendl ;
+ }
+
+ sDisableVBOMapping = sEnableVBOs && no_vbo_mapping ;
LLGLNamePool::registerPool(&sDynamicVBOPool);
LLGLNamePool::registerPool(&sDynamicIBOPool);
LLGLNamePool::registerPool(&sStreamVBOPool);
@@ -385,6 +399,8 @@ void LLVertexBuffer::cleanupClass()
LLMemType mt2(LLMemType::MTYPE_VERTEX_CLEANUP_CLASS);
unbind();
clientCopy(); // deletes GL buffers
+
+ //llassert_always(!sCount) ;
}
void LLVertexBuffer::clientCopy(F64 max_time)
@@ -409,7 +425,9 @@ LLVertexBuffer::LLVertexBuffer(U32 typemask, S32 usage) :
mGLBuffer(0),
mGLIndices(0),
mMappedData(NULL),
- mMappedIndexData(NULL), mLocked(FALSE),
+ mMappedIndexData(NULL),
+ mVertexLocked(FALSE),
+ mIndexLocked(FALSE),
mFinal(FALSE),
mFilthy(FALSE),
mEmpty(TRUE),
@@ -427,9 +445,9 @@ LLVertexBuffer::LLVertexBuffer(U32 typemask, S32 usage) :
mUsage = 0;
}
- if (mUsage == GL_STREAM_DRAW_ARB && !sUseStreamDraw)
+ if (mUsage == GL_DYNAMIC_DRAW_ARB && sPreferStreamDraw)
{
- mUsage = 0;
+ mUsage = GL_STREAM_DRAW_ARB;
}
//zero out offsets
@@ -496,6 +514,8 @@ LLVertexBuffer::~LLVertexBuffer()
destroyGLBuffer();
destroyGLIndices();
sCount--;
+
+ llassert_always(!mMappedData && !mMappedIndexData) ;
};
//----------------------------------------------------------------------------
@@ -644,6 +664,8 @@ void LLVertexBuffer::destroyGLBuffer()
{
if (useVBOs())
{
+ freeClientBuffer() ;
+
if (mMappedData || mMappedIndexData)
{
llerrs << "Vertex buffer destroyed while mapped!" << llendl;
@@ -671,6 +693,8 @@ void LLVertexBuffer::destroyGLIndices()
{
if (useVBOs())
{
+ freeClientBuffer() ;
+
if (mMappedData || mMappedIndexData)
{
llerrs << "Vertex buffer destroyed while mapped." << llendl;
@@ -847,6 +871,7 @@ void LLVertexBuffer::resizeBuffer(S32 newnverts, S32 newnindices)
if (mResized && useVBOs())
{
+ freeClientBuffer() ;
setBuffer(0);
}
}
@@ -870,43 +895,69 @@ BOOL LLVertexBuffer::useVBOs() const
}
//----------------------------------------------------------------------------
+void LLVertexBuffer::freeClientBuffer()
+{
+ if(useVBOs() && sDisableVBOMapping && (mMappedData || mMappedIndexData))
+ {
+ free(mMappedData) ;
+ free(mMappedIndexData) ;
+ mMappedData = NULL ;
+ mMappedIndexData = NULL ;
+ }
+}
+
+void LLVertexBuffer::allocateClientVertexBuffer()
+{
+ if(!mMappedData)
+ {
+ mMappedData = (U8*)malloc(getSize());
+ }
+}
+
+void LLVertexBuffer::allocateClientIndexBuffer()
+{
+ if(!mMappedIndexData)
+ {
+ mMappedIndexData = (U8*)malloc(getIndicesSize());
+ }
+}
// Map for data access
-U8* LLVertexBuffer::mapBuffer(S32 access)
+U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 access)
{
LLMemType mt2(LLMemType::MTYPE_VERTEX_MAP_BUFFER);
if (mFinal)
{
- llerrs << "LLVertexBuffer::mapBuffer() called on a finalized buffer." << llendl;
+ llerrs << "LLVertexBuffer::mapVertexBuffer() called on a finalized buffer." << llendl;
}
if (!useVBOs() && !mMappedData && !mMappedIndexData)
{
- llerrs << "LLVertexBuffer::mapBuffer() called on unallocated buffer." << llendl;
+ llerrs << "LLVertexBuffer::mapVertexBuffer() called on unallocated buffer." << llendl;
}
- if (!mLocked && useVBOs())
+ if (!mVertexLocked && useVBOs())
{
{
LLMemType mt_v(LLMemType::MTYPE_VERTEX_MAP_BUFFER_VERTICES);
- setBuffer(0);
- mLocked = TRUE;
+ setBuffer(0, type);
+ mVertexLocked = TRUE;
stop_glerror();
- U8* src = (U8*) glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
- mMappedData = LL_NEXT_ALIGNED_ADDRESS<U8>(src);
- mAlignedOffset = mMappedData - src;
+ if(sDisableVBOMapping)
+ {
+ allocateClientVertexBuffer() ;
+ }
+ else
+ {
+ U8* src = (U8*) glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
+ mMappedData = LL_NEXT_ALIGNED_ADDRESS<U8>(src);
+ mAlignedOffset = mMappedData - src;
- stop_glerror();
- }
- {
- LLMemType mt_v(LLMemType::MTYPE_VERTEX_MAP_BUFFER_INDICES);
- U8* src = (U8*) glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
- mMappedIndexData = LL_NEXT_ALIGNED_ADDRESS<U8>(src);
- mAlignedIndexOffset = mMappedIndexData - src;
-
- stop_glerror();
+ stop_glerror();
+ }
}
-
+
+
if (!mMappedData)
{
log_glerror();
@@ -917,80 +968,167 @@ U8* LLVertexBuffer::mapBuffer(S32 access)
llinfos << "Available physical memory(KB): " << avail_phy_mem << llendl ;
llinfos << "Available virtual memory(KB): " << avail_vir_mem << llendl;
- //--------------------
- //print out more debug info before crash
- llinfos << "vertex buffer size: (num verts : num indices) = " << getNumVerts() << " : " << getNumIndices() << llendl ;
- GLint size ;
- glGetBufferParameterivARB(GL_ARRAY_BUFFER_ARB, GL_BUFFER_SIZE_ARB, &size) ;
- llinfos << "GL_ARRAY_BUFFER_ARB size is " << size << llendl ;
- //--------------------
+ if(!sDisableVBOMapping)
+ {
+ //--------------------
+ //print out more debug info before crash
+ llinfos << "vertex buffer size: (num verts : num indices) = " << getNumVerts() << " : " << getNumIndices() << llendl ;
+ GLint size ;
+ glGetBufferParameterivARB(GL_ARRAY_BUFFER_ARB, GL_BUFFER_SIZE_ARB, &size) ;
+ llinfos << "GL_ARRAY_BUFFER_ARB size is " << size << llendl ;
+ //--------------------
- GLint buff;
- glGetIntegerv(GL_ARRAY_BUFFER_BINDING_ARB, &buff);
- if ((GLuint)buff != mGLBuffer)
+ GLint buff;
+ glGetIntegerv(GL_ARRAY_BUFFER_BINDING_ARB, &buff);
+ if ((GLuint)buff != mGLBuffer)
+ {
+ llerrs << "Invalid GL vertex buffer bound: " << buff << llendl;
+ }
+
+
+ llerrs << "glMapBuffer returned NULL (no vertex data)" << llendl;
+ }
+ else
{
- llerrs << "Invalid GL vertex buffer bound: " << buff << llendl;
+ llerrs << "memory allocation for vertex data failed." << llendl ;
}
+ }
+ sMappedCount++;
+ }
+
+ return mMappedData;
+}
-
- llerrs << "glMapBuffer returned NULL (no vertex data)" << llendl;
+U8* LLVertexBuffer::mapIndexBuffer(S32 access)
+{
+ LLMemType mt2(LLMemType::MTYPE_VERTEX_MAP_BUFFER);
+ if (mFinal)
+ {
+ llerrs << "LLVertexBuffer::mapIndexBuffer() called on a finalized buffer." << llendl;
+ }
+ if (!useVBOs() && !mMappedData && !mMappedIndexData)
+ {
+ llerrs << "LLVertexBuffer::mapIndexBuffer() called on unallocated buffer." << llendl;
+ }
+
+ if (!mIndexLocked && useVBOs())
+ {
+ {
+ LLMemType mt_v(LLMemType::MTYPE_VERTEX_MAP_BUFFER_INDICES);
+
+ setBuffer(0, TYPE_INDEX);
+ mIndexLocked = TRUE;
+ stop_glerror();
+
+ if(sDisableVBOMapping)
+ {
+ allocateClientIndexBuffer() ;
+ }
+ else
+ {
+ U8* src = (U8*) glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
+ mMappedIndexData = LL_NEXT_ALIGNED_ADDRESS<U8>(src);
+ mAlignedIndexOffset = mMappedIndexData - src;
+ stop_glerror();
+ }
}
if (!mMappedIndexData)
{
log_glerror();
- GLint buff;
- glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB, &buff);
- if ((GLuint)buff != mGLIndices)
+ if(!sDisableVBOMapping)
{
- llerrs << "Invalid GL index buffer bound: " << buff << llendl;
- }
+ GLint buff;
+ glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB, &buff);
+ if ((GLuint)buff != mGLIndices)
+ {
+ llerrs << "Invalid GL index buffer bound: " << buff << llendl;
+ }
- llerrs << "glMapBuffer returned NULL (no index data)" << llendl;
+ llerrs << "glMapBuffer returned NULL (no index data)" << llendl;
+ }
+ else
+ {
+ llerrs << "memory allocation for Index data failed. " << llendl ;
+ }
}
sMappedCount++;
}
-
- return mMappedData;
+
+ return mMappedIndexData ;
}
-void LLVertexBuffer::unmapBuffer()
+void LLVertexBuffer::unmapBuffer(S32 type)
{
LLMemType mt2(LLMemType::MTYPE_VERTEX_UNMAP_BUFFER);
- if (mMappedData || mMappedIndexData)
+ if (!useVBOs())
{
- if (useVBOs() && mLocked)
+ return ; //nothing to unmap
+ }
+
+ bool updated_all = false ;
+ if (mMappedData && mVertexLocked && type != TYPE_INDEX)
+ {
+ updated_all = (mIndexLocked && type < 0) ; //both vertex and index buffers done updating
+
+ if(sDisableVBOMapping)
+ {
+ stop_glerror();
+ glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, 0, getSize(), mMappedData);
+ stop_glerror();
+ }
+ else
{
stop_glerror();
glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
stop_glerror();
+
+ mMappedData = NULL;
+ }
+
+ mVertexLocked = FALSE ;
+ sMappedCount--;
+ }
+
+ if(mMappedIndexData && mIndexLocked && (type < 0 || type == TYPE_INDEX))
+ {
+ if(sDisableVBOMapping)
+ {
+ stop_glerror();
+ glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, getIndicesSize(), mMappedIndexData);
+ stop_glerror();
+ }
+ else
+ {
+ stop_glerror();
glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);
stop_glerror();
- /*if (!sMapped)
- {
- llerrs << "Redundantly unmapped VBO!" << llendl;
- }
- sMapped = FALSE;*/
- sMappedCount--;
-
- if (mUsage == GL_STATIC_DRAW_ARB)
- { //static draw buffers can only be mapped a single time
- //throw out client data (we won't be using it again)
- mEmpty = TRUE;
- mFinal = TRUE;
- }
- else
+ mMappedIndexData = NULL ;
+ }
+
+ mIndexLocked = FALSE ;
+ sMappedCount--;
+ }
+
+ if(updated_all)
+ {
+ if(mUsage == GL_STATIC_DRAW_ARB)
+ {
+ //static draw buffers can only be mapped a single time
+ //throw out client data (we won't be using it again)
+ mEmpty = TRUE;
+ mFinal = TRUE;
+ if(sDisableVBOMapping)
{
- mEmpty = FALSE;
+ freeClientBuffer() ;
}
-
- mMappedIndexData = NULL;
- mMappedData = NULL;
-
- mLocked = FALSE;
+ }
+ else
+ {
+ mEmpty = FALSE;
}
}
}
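For reference, the sDisableVBOMapping path above replaces glMapBufferARB/glUnmapBufferARB with a heap-allocated client copy that is pushed to the GPU when the buffer is unmapped. A minimal sketch of that pattern, assuming the ARB_vertex_buffer_object entry points are loaded (client_map/client_unmap are illustrative names, not viewer functions):

// Sketch of the no-mapping fallback introduced above (illustrative, not viewer code).
static unsigned char* client_map(unsigned char*& client_copy, size_t size)
{
    if (!client_copy)
    {
        // analogous to allocateClientVertexBuffer(): the "map" is a heap buffer
        client_copy = (unsigned char*)malloc(size);
    }
    return client_copy;  // caller writes vertex data here
}

static void client_unmap(GLuint vbo, const unsigned char* client_copy, size_t size)
{
    glBindBufferARB(GL_ARRAY_BUFFER_ARB, vbo);
    // same upload unmapBuffer() now performs when sDisableVBOMapping is TRUE:
    // push the whole client copy instead of calling glUnmapBufferARB()
    glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, 0, (GLsizeiptrARB)size, client_copy);
}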
@@ -1004,15 +1142,16 @@ template <class T,S32 type> struct VertexBufferStrider
strider_t& strider,
S32 index)
{
- if (vbo.mapBuffer() == NULL)
- {
- llwarns << "mapBuffer failed!" << llendl;
- return FALSE;
- }
-
if (type == LLVertexBuffer::TYPE_INDEX)
{
S32 stride = sizeof(T);
+
+ if (vbo.mapIndexBuffer() == NULL)
+ {
+ llwarns << "mapIndexBuffer failed!" << llendl;
+ return FALSE;
+ }
+
strider = (T*)(vbo.getMappedIndices() + index*stride);
strider.setStride(0);
return TRUE;
@@ -1020,6 +1159,13 @@ template <class T,S32 type> struct VertexBufferStrider
else if (vbo.hasDataType(type))
{
S32 stride = LLVertexBuffer::sTypeSize[type];
+
+ if (vbo.mapVertexBuffer(type) == NULL)
+ {
+ llwarns << "mapVertexBuffer failed!" << llendl;
+ return FALSE;
+ }
+
strider = (T*)(vbo.getMappedData() + vbo.getOffset(type)+index*stride);
strider.setStride(stride);
return TRUE;
@@ -1086,7 +1232,7 @@ bool LLVertexBuffer::getClothWeightStrider(LLStrider<LLVector4>& strider, S32 in
//----------------------------------------------------------------------------
// Set for rendering
-void LLVertexBuffer::setBuffer(U32 data_mask)
+void LLVertexBuffer::setBuffer(U32 data_mask, S32 type)
{
LLMemType mt2(LLMemType::MTYPE_VERTEX_SET_BUFFER);
//set up pointers if the data mask is different ...
@@ -1227,7 +1373,7 @@ void LLVertexBuffer::setBuffer(U32 data_mask)
{
ll_fail("LLVertexBuffer::mapBuffer failed");
}
- unmapBuffer();
+ unmapBuffer(type);
}
else
{
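For reference, callers reach the split mapping API above through the striders: each getXXXStrider() now maps only the buffer it touches via mapVertexBuffer()/mapIndexBuffer(), and setBuffer() performs the unmap/flush before binding for render. A hedged usage sketch (fill_buffer and the written values are illustrative, not viewer code):

// Hypothetical caller of the reworked mapping API (illustration only).
void fill_buffer(LLVertexBuffer* vb)
{
    LLStrider<LLVector3> verts;
    LLStrider<U16>       indices;

    vb->getVertexStrider(verts);    // locks the vertex side (client copy if mapping is disabled)
    vb->getIndexStrider(indices);   // locks the index side

    verts[0]   = LLVector3(0.f, 0.f, 0.f);   // write through the striders
    indices[0] = 0;

    vb->setBuffer(0);               // unmapBuffer() runs here, unmapping or uploading as needed
}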
diff --git a/indra/llrender/llvertexbuffer.h b/indra/llrender/llvertexbuffer.h
index f40f013306..2bbc17fb12 100644
--- a/indra/llrender/llvertexbuffer.h
+++ b/indra/llrender/llvertexbuffer.h
@@ -92,8 +92,9 @@ public:
static S32 sWeight4Loc;
static BOOL sUseStreamDraw;
+ static BOOL sPreferStreamDraw;
- static void initClass(bool use_vbo);
+ static void initClass(bool use_vbo, bool no_vbo_mapping);
static void cleanupClass();
static void setupClientArrays(U32 data_mask);
static void clientCopy(F64 max_time = 0.005); //copy data from client to GL
@@ -157,19 +158,24 @@ protected:
void updateNumVerts(S32 nverts);
void updateNumIndices(S32 nindices);
virtual BOOL useVBOs() const;
- void unmapBuffer();
-
+ void unmapBuffer(S32 type);
+ void freeClientBuffer() ;
+ void allocateClientVertexBuffer() ;
+ void allocateClientIndexBuffer() ;
+
public:
LLVertexBuffer(U32 typemask, S32 usage);
// map for data access
- U8* mapBuffer(S32 access = -1);
+ U8* mapVertexBuffer(S32 type = -1, S32 access = -1);
+ U8* mapIndexBuffer(S32 access = -1);
+
// set for rendering
- virtual void setBuffer(U32 data_mask); // calls setupVertexBuffer() if data_mask is not 0
+ virtual void setBuffer(U32 data_mask, S32 type = -1); // calls setupVertexBuffer() if data_mask is not 0
// allocate buffer
void allocateBuffer(S32 nverts, S32 nindices, bool create);
virtual void resizeBuffer(S32 newnverts, S32 newnindices);
-
+
// Only call each getVertexPointer, etc, once before calling unmapBuffer()
// call unmapBuffer() after calls to getXXXStrider() before any calls to setBuffer()
// example:
@@ -189,7 +195,7 @@ public:
bool getClothWeightStrider(LLStrider<LLVector4>& strider, S32 index=0);
BOOL isEmpty() const { return mEmpty; }
- BOOL isLocked() const { return mLocked; }
+ BOOL isLocked() const { return mVertexLocked || mIndexLocked; }
S32 getNumVerts() const { return mNumVerts; }
S32 getNumIndices() const { return mNumIndices; }
S32 getRequestedVerts() const { return mRequestedNumVerts; }
@@ -232,14 +238,15 @@ protected:
U32 mGLIndices; // GL IBO handle
U8* mMappedData; // pointer to currently mapped data (NULL if unmapped)
U8* mMappedIndexData; // pointer to currently mapped indices (NULL if unmapped)
- BOOL mLocked; // if TRUE, buffer is being or has been written to in client memory
+ BOOL mVertexLocked; // if TRUE, vertex buffer is being or has been written to in client memory
+ BOOL mIndexLocked; // if TRUE, index buffer is being or has been written to in client memory
BOOL mFinal; // if TRUE, buffer can not be mapped again
BOOL mFilthy; // if TRUE, entire buffer must be copied (used to prevent redundant dirty flags)
- BOOL mEmpty; // if TRUE, client buffer is empty (or NULL). Old values have been discarded.
- S32 mOffsets[TYPE_MAX];
+ BOOL mEmpty; // if TRUE, client buffer is empty (or NULL). Old values have been discarded.
BOOL mResized; // if TRUE, client buffer has been resized and GL buffer has not
BOOL mDynamicSize; // if TRUE, buffer has been resized at least once (and should be padded)
-
+ S32 mOffsets[TYPE_MAX];
+
class DirtyRegion
{
public:
@@ -263,6 +270,7 @@ public:
static std::vector<U32> sDeleteList;
typedef std::list<LLVertexBuffer*> buffer_list_t;
+ static BOOL sDisableVBOMapping; //disable glMapBufferARB
static BOOL sEnableVBOs;
static S32 sTypeSize[TYPE_MAX];
static U32 sGLMode[LLRender::NUM_MODES];
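For reference, the widened initClass() signature lets startup code opt out of VBO mapping independently of enabling VBOs. A hypothetical wiring sketch; the debug-settings keys below are assumptions, not taken from this change set:

// Hypothetical startup wiring for the two-argument initClass() (settings keys assumed).
bool use_vbo        = gSavedSettings.getBOOL("RenderVBOEnable");
bool no_vbo_mapping = gSavedSettings.getBOOL("RenderVBOMappingDisable");

// initClass() only honors use_vbo when the driver exposes VBOs, and records
// no_vbo_mapping in sDisableVBOMapping for the map/unmap fallback shown above.
LLVertexBuffer::initClass(use_vbo, no_vbo_mapping);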