From 4dabd9c0472deb49573fdafef2fa413e59703f19 Mon Sep 17 00:00:00 2001 From: Steven Bennetts Date: Fri, 2 Mar 2007 21:25:50 +0000 Subject: merge release@58699 beta-1-14-0@58707 -> release --- indra/llrender/llfontgl.cpp | 1 + indra/llrender/llimagegl.cpp | 32 +- indra/llrender/llimagegl.h | 3 +- indra/llrender/llvertexbuffer.cpp | 918 ++++++++++++++++++++++++++++++++++++++ indra/llrender/llvertexbuffer.h | 179 ++++++++ 5 files changed, 1122 insertions(+), 11 deletions(-) create mode 100644 indra/llrender/llvertexbuffer.cpp create mode 100644 indra/llrender/llvertexbuffer.h (limited to 'indra/llrender') diff --git a/indra/llrender/llfontgl.cpp b/indra/llrender/llfontgl.cpp index f42122b0ee..1fc040c7d6 100644 --- a/indra/llrender/llfontgl.cpp +++ b/indra/llrender/llfontgl.cpp @@ -14,6 +14,7 @@ #include "llfontgl.h" #include "llgl.h" #include "v4color.h" +#include "llstl.h" const S32 BOLD_OFFSET = 1; diff --git a/indra/llrender/llimagegl.cpp b/indra/llrender/llimagegl.cpp index b4edd3d365..9c1178b9f7 100644 --- a/indra/llrender/llimagegl.cpp +++ b/indra/llrender/llimagegl.cpp @@ -115,6 +115,15 @@ void LLImageGL::unbindTexture(S32 stage, LLGLenum bind_target) sCurrentBoundTextures[stage] = 0; } +// static (duplicated for speed and to avoid GL_TEXTURE_2D default argument which requires GL header dependency) +void LLImageGL::unbindTexture(S32 stage) +{ + glActiveTextureARB(GL_TEXTURE0_ARB + stage); + glClientActiveTextureARB(GL_TEXTURE0_ARB + stage); + glBindTexture(GL_TEXTURE_2D, 0); + sCurrentBoundTextures[stage] = 0; +} + // static void LLImageGL::updateStats(F32 current_time) { @@ -371,13 +380,8 @@ BOOL LLImageGL::bindTextureInternal(const S32 stage) const llwarns << "Trying to bind a texture while GL is disabled!" << llendl; } - stop_glerror(); - glActiveTextureARB(GL_TEXTURE0_ARB + stage); - //glClientActiveTextureARB(GL_TEXTURE0_ARB + stage); - - stop_glerror(); - + if (sCurrentBoundTextures[stage] && sCurrentBoundTextures[stage] == mTexName) { // already set! 
@@ -392,7 +396,6 @@ BOOL LLImageGL::bindTextureInternal(const S32 stage) const glBindTexture(mBindTarget, mTexName); sCurrentBoundTextures[stage] = mTexName; - stop_glerror(); if (mLastBindTime != sLastFrameTime) { @@ -631,6 +634,7 @@ void LLImageGL::setImage(const U8* data_in, BOOL data_hasmips) } mHasMipMaps = FALSE; } + glFlush(); stop_glerror(); } @@ -645,6 +649,11 @@ BOOL LLImageGL::setSubImage(const U8* datap, S32 data_width, S32 data_height, S3 llwarns << "Setting subimage on image without GL texture" << llendl; return FALSE; } + if (datap == NULL) + { + llwarns << "Setting subimage on image with NULL datap" << llendl; + return FALSE; + } if (x_pos == 0 && y_pos == 0 && width == getWidth() && height == getHeight()) { @@ -657,7 +666,9 @@ BOOL LLImageGL::setSubImage(const U8* datap, S32 data_width, S32 data_height, S3 dump(); llerrs << "setSubImage called with mipmapped image (not supported)" << llendl; } - llassert(mCurrentDiscardLevel == 0); + llassert_always(mCurrentDiscardLevel == 0); + llassert_always(x_pos >= 0 && y_pos >= 0); + if (((x_pos + width) > getWidth()) || (y_pos + height) > getHeight()) { @@ -698,7 +709,8 @@ BOOL LLImageGL::setSubImage(const U8* datap, S32 data_width, S32 data_height, S3 datap += (y_pos * data_width + x_pos) * getComponents(); // Update the GL texture - llverify(bindTextureInternal(0)); + BOOL res = bindTextureInternal(0); + if (!res) llerrs << "LLImageGL::setSubImage(): bindTexture failed" << llendl; stop_glerror(); glTexSubImage2D(mTarget, 0, x_pos, y_pos, @@ -714,7 +726,7 @@ BOOL LLImageGL::setSubImage(const U8* datap, S32 data_width, S32 data_height, S3 glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); stop_glerror(); } - + glFlush(); return TRUE; } diff --git a/indra/llrender/llimagegl.h b/indra/llrender/llimagegl.h index 4f6a11c2d9..1586a837b4 100644 --- a/indra/llrender/llimagegl.h +++ b/indra/llrender/llimagegl.h @@ -17,7 +17,7 @@ //============================================================================ -class LLImageGL : public LLThreadSafeRefCount +class LLImageGL : public LLRefCount { public: // Size calculation @@ -29,6 +29,7 @@ public: // Usually you want stage = 0 and bind_target = GL_TEXTURE_2D static void bindExternalTexture( LLGLuint gl_name, S32 stage, LLGLenum bind_target); static void unbindTexture(S32 stage, LLGLenum target); + static void unbindTexture(S32 stage); // Uses GL_TEXTURE_2D (not a default arg to avoid gl.h dependency) // needs to be called every frame static void updateStats(F32 current_time); diff --git a/indra/llrender/llvertexbuffer.cpp b/indra/llrender/llvertexbuffer.cpp new file mode 100644 index 0000000000..d6477d69ec --- /dev/null +++ b/indra/llrender/llvertexbuffer.cpp @@ -0,0 +1,918 @@ +#include "linden_common.h" + +#include "llvertexbuffer.h" +// #include "llrender.h" +#include "llglheaders.h" +#include "llmemory.h" +#include "llmemtype.h" + +//============================================================================ + +//static +S32 LLVertexBuffer::sCount = 0; +S32 LLVertexBuffer::sGLCount = 0; +BOOL LLVertexBuffer::sEnableVBOs = TRUE; +S32 LLVertexBuffer::sGLRenderBuffer = 0; +S32 LLVertexBuffer::sGLRenderIndices = 0; +U32 LLVertexBuffer::sLastMask = 0; +BOOL LLVertexBuffer::sVBOActive = FALSE; +BOOL LLVertexBuffer::sIBOActive = FALSE; +U32 LLVertexBuffer::sAllocatedBytes = 0; +BOOL LLVertexBuffer::sRenderActive = FALSE; + +std::vector LLVertexBuffer::sDeleteList; +LLVertexBuffer::buffer_list_t LLVertexBuffer::sLockedList; + +S32 LLVertexBuffer::sTypeOffsets[LLVertexBuffer::TYPE_MAX] = +{ + 
sizeof(LLVector3), // TYPE_VERTEX, + sizeof(LLVector3), // TYPE_NORMAL, + sizeof(LLVector2), // TYPE_TEXCOORD, + sizeof(LLVector2), // TYPE_TEXCOORD2, + sizeof(LLColor4U), // TYPE_COLOR, + sizeof(LLVector3), // TYPE_BINORMAL, + sizeof(F32), // TYPE_WEIGHT, + sizeof(LLVector4), // TYPE_CLOTHWEIGHT, +}; + +//static +void LLVertexBuffer::initClass(bool use_vbo) +{ + sEnableVBOs = use_vbo; +} + +//static +void LLVertexBuffer::unbind() +{ + if (sVBOActive) + { + glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0); + sVBOActive = FALSE; + } + if (sIBOActive) + { + glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0); + sIBOActive = FALSE; + } + + sGLRenderBuffer = 0; + sGLRenderIndices = 0; +} + +//static +void LLVertexBuffer::cleanupClass() +{ + LLMemType mt(LLMemType::MTYPE_VERTEX_DATA); + sLockedList.clear(); + startRender(); + stopRender(); + clientCopy(); // deletes GL buffers +} + +//static, call before rendering VBOs +void LLVertexBuffer::startRender() +{ + LLMemType mt(LLMemType::MTYPE_VERTEX_DATA); + if (sEnableVBOs) + { + glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0); + glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0); + sVBOActive = FALSE; + sIBOActive = FALSE; + } + + sRenderActive = TRUE; + sGLRenderBuffer = 0; + sGLRenderIndices = 0; + sLastMask = 0; +} + +void LLVertexBuffer::stopRender() +{ + sRenderActive = FALSE; +} + +void LLVertexBuffer::clientCopy() +{ + if (!sDeleteList.empty()) + { + size_t num = sDeleteList.size(); + glDeleteBuffersARB(sDeleteList.size(), (GLuint*) &(sDeleteList[0])); + sDeleteList.clear(); + sGLCount -= num; + } + + if (sEnableVBOs) + { + LLTimer timer; + BOOL reset = TRUE; + buffer_list_t::iterator iter = sLockedList.begin(); + while(iter != sLockedList.end()) + { + LLVertexBuffer* buffer = *iter; + if (buffer->isLocked() && buffer->useVBOs()) + { + buffer->setBuffer(0); + } + ++iter; + if (reset) + { + reset = FALSE; + timer.reset(); //skip first copy (don't count pipeline stall) + } + else + { + if (timer.getElapsedTimeF64() > 0.005) + { + break; + } + } + + } + + sLockedList.erase(sLockedList.begin(), iter); + } +} + +//---------------------------------------------------------------------------- + +// For debugging +struct VTNC /// Simple +{ + F32 v1,v2,v3; + F32 n1,n2,n3; + F32 t1,t2; + U32 c; +}; +static VTNC dbg_vtnc; + +struct VTUNCB // Simple + Bump +{ + F32 v1,v2,v3; + F32 n1,n2,n3; + F32 t1,t2; + F32 u1,u2; + F32 b1,b2,b3; + U32 c; +}; +static VTUNCB dbg_vtuncb; + +struct VTUNC // Surfacepatch +{ + F32 v1,v2,v3; + F32 n1,n2,n3; + F32 t1,t2; + F32 u1,u2; + U32 c; +}; +static VTUNC dbg_vtunc; + +struct VTNW /// Avatar +{ + F32 v1,v2,v3; + F32 n1,n2,n3; + F32 t1,t2; + F32 w; +}; +static VTNW dbg_vtnw; + +struct VTNPAD /// Avatar Output +{ + F32 v1,v2,v3,p1; + F32 n1,n2,n3,p2; + F32 t1,t2,p3,p4; +}; +static VTNPAD dbg_vtnpad; + +//---------------------------------------------------------------------------- + +LLVertexBuffer::LLVertexBuffer(U32 typemask, S32 usage) : + LLRefCount(), + mNumVerts(0), mNumIndices(0), mUsage(usage), mGLBuffer(0), mGLIndices(0), + mMappedData(NULL), + mMappedIndexData(NULL), mLocked(FALSE), + mResized(FALSE), mEmpty(TRUE), mFinal(FALSE), mFilthy(FALSE) +{ + LLMemType mt(LLMemType::MTYPE_VERTEX_DATA); + if (!sEnableVBOs) + { + mUsage = GL_STREAM_DRAW_ARB; + } + + S32 stride = 0; + for (S32 i=0; i mNumVerts || + nverts < mNumVerts/2) + { + if (mUsage != GL_STATIC_DRAW_ARB) + { + nverts += nverts/4; + } + + mNumVerts = nverts; + } +} + +void LLVertexBuffer::updateNumIndices(S32 nindices) +{ + LLMemType mt(LLMemType::MTYPE_VERTEX_DATA); + if 
(!mDynamicSize) + { + mNumIndices = nindices; + } + else if (mUsage == GL_STATIC_DRAW_ARB || + nindices > mNumIndices || + nindices < mNumIndices/2) + { + if (mUsage != GL_STATIC_DRAW_ARB) + { + nindices += nindices/4; + } + + mNumIndices = nindices; + } +} + +void LLVertexBuffer::makeStatic() +{ + if (!sEnableVBOs) + { + return; + } + + if (sRenderActive) + { + llerrs << "Make static called during render." << llendl; + } + + if (mUsage != GL_STATIC_DRAW_ARB) + { + if (useVBOs()) + { + if (mGLBuffer) + { + sDeleteList.push_back(mGLBuffer); + } + if (mGLIndices) + { + sDeleteList.push_back(mGLIndices); + } + } + + if (mGLBuffer) + { + sGLCount++; + glGenBuffersARB(1, (GLuint*) &mGLBuffer); + } + if (mGLIndices) + { + sGLCount++; + glGenBuffersARB(1, (GLuint*) &mGLIndices); + } + + mUsage = GL_STATIC_DRAW_ARB; + mResized = TRUE; + + if (!mLocked) + { + mLocked = TRUE; + sLockedList.push_back(this); + } + } +} + +void LLVertexBuffer::allocateBuffer(S32 nverts, S32 nindices, bool create) +{ + LLMemType mt(LLMemType::MTYPE_VERTEX_DATA); + + updateNumVerts(nverts); + updateNumIndices(nindices); + + if (mMappedData) + { + llerrs << "LLVertexBuffer::allocateBuffer() called redundantly." << llendl; + } + if (create && (nverts || nindices)) + { + createGLBuffer(); + createGLIndices(); + } + + sAllocatedBytes += getSize() + getIndicesSize(); +} + +void LLVertexBuffer::resizeBuffer(S32 newnverts, S32 newnindices) +{ + LLMemType mt(LLMemType::MTYPE_VERTEX_DATA); + mDynamicSize = TRUE; + if (mUsage == GL_STATIC_DRAW_ARB) + { //always delete/allocate static buffers on resize + destroyGLBuffer(); + destroyGLIndices(); + allocateBuffer(newnverts, newnindices, TRUE); + mFinal = FALSE; + } + else if (newnverts > mNumVerts || newnindices > mNumIndices || + newnverts < mNumVerts/2 || newnindices < mNumIndices/2) + { + sAllocatedBytes -= getSize() + getIndicesSize(); + + S32 oldsize = getSize(); + S32 old_index_size = getIndicesSize(); + + updateNumVerts(newnverts); + updateNumIndices(newnindices); + + S32 newsize = getSize(); + S32 new_index_size = getIndicesSize(); + + sAllocatedBytes += newsize + new_index_size; + + if (newsize) + { + if (!mGLBuffer) + { //no buffer exists, create a new one + createGLBuffer(); + } + else + { + //delete old buffer, keep GL buffer for now + U8* old = mMappedData; + mMappedData = new U8[newsize]; + if (old) + { + memcpy(mMappedData, old, llmin(newsize, oldsize)); + if (newsize > oldsize) + { + memset(mMappedData+oldsize, 0, newsize-oldsize); + } + + delete [] old; + } + else + { + memset(mMappedData, 0, newsize); + mEmpty = TRUE; + } + mResized = TRUE; + } + } + else if (mGLBuffer) + { + destroyGLBuffer(); + } + + if (new_index_size) + { + if (!mGLIndices) + { + createGLIndices(); + } + else + { + //delete old buffer, keep GL buffer for now + U8* old = mMappedIndexData; + mMappedIndexData = new U8[new_index_size]; + if (old) + { + memcpy(mMappedIndexData, old, llmin(new_index_size, old_index_size)); + if (new_index_size > old_index_size) + { + memset(mMappedIndexData+old_index_size, 0, new_index_size - old_index_size); + } + delete [] old; + } + else + { + memset(mMappedIndexData, 0, new_index_size); + mEmpty = TRUE; + } + mResized = TRUE; + } + } + else if (mGLIndices) + { + destroyGLIndices(); + } + } +} + +BOOL LLVertexBuffer::useVBOs() const +{ + //it's generally ineffective to use VBO for things that are streaming + //when we already have a client buffer around + if (mUsage == GL_STREAM_DRAW_ARB) + { + return FALSE; + } + + return sEnableVBOs && (!sRenderActive || 
!mLocked); +} + +//---------------------------------------------------------------------------- + +// Map for data access +U8* LLVertexBuffer::mapBuffer(S32 access) +{ + LLMemType mt(LLMemType::MTYPE_VERTEX_DATA); + if (sRenderActive) + { + llwarns << "Buffer mapped during render frame!" << llendl; + } + if (!mGLBuffer && !mGLIndices) + { + llerrs << "LLVertexBuffer::mapBuffer() called before createGLBuffer" << llendl; + } + if (mFinal) + { + llerrs << "LLVertexBuffer::mapBuffer() called on a finalized buffer." << llendl; + } + if (!mMappedData && !mMappedIndexData) + { + llerrs << "LLVertexBuffer::mapBuffer() called on unallocated buffer." << llendl; + } + + if (!mLocked && useVBOs()) + { + mLocked = TRUE; + sLockedList.push_back(this); + } + + return mMappedData; +} + +void LLVertexBuffer::unmapBuffer() +{ + LLMemType mt(LLMemType::MTYPE_VERTEX_DATA); + if (mMappedData || mMappedIndexData) + { + if (useVBOs() && mLocked) + { + if (mGLBuffer) + { + if (mResized) + { + glBufferDataARB(GL_ARRAY_BUFFER_ARB, getSize(), mMappedData, mUsage); + } + else + { + if (mEmpty || mDirtyRegions.empty()) + { + glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, 0, getSize(), mMappedData); + } + else + { + for (std::vector::iterator i = mDirtyRegions.begin(); i != mDirtyRegions.end(); ++i) + { + DirtyRegion& region = *i; + glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, region.mIndex*mStride, region.mCount*mStride, mMappedData + region.mIndex*mStride); + glFlush(); + } + } + } + } + + if (mGLIndices) + { + if (mResized) + { + glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, getIndicesSize(), mMappedIndexData, mUsage); + } + else + { + if (mEmpty || mDirtyRegions.empty()) + { + glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, getIndicesSize(), mMappedIndexData); + } + else + { + for (std::vector::iterator i = mDirtyRegions.begin(); i != mDirtyRegions.end(); ++i) + { + DirtyRegion& region = *i; + glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, region.mIndicesIndex*sizeof(U32), + region.mIndicesCount*sizeof(U32), mMappedIndexData + region.mIndicesIndex*sizeof(U32)); + glFlush(); + } + } + } + } + + mDirtyRegions.clear(); + mFilthy = FALSE; + mResized = FALSE; + + if (mUsage == GL_STATIC_DRAW_ARB) + { //static draw buffers can only be mapped a single time + //throw out client data (we won't be using it again) + delete [] mMappedData; + delete [] mMappedIndexData; + mMappedIndexData = NULL; + mMappedData = NULL; + mEmpty = TRUE; + mFinal = TRUE; + } + else + { + mEmpty = FALSE; + } + + mLocked = FALSE; + + glFlush(); + } + } +} + +//---------------------------------------------------------------------------- + +template struct VertexBufferStrider +{ + typedef LLStrider strider_t; + static bool get(LLVertexBuffer& vbo, + strider_t& strider, + S32 index) + { + vbo.mapBuffer(); + if (type == LLVertexBuffer::TYPE_INDEX) + { + S32 stride = sizeof(T); + strider = (T*)(vbo.getMappedIndices() + index*stride); + strider.setStride(0); + return TRUE; + } + else if (vbo.hasDataType(type)) + { + S32 stride = vbo.getStride(); + strider = (T*)(vbo.getMappedData() + vbo.getOffset(type) + index*stride); + strider.setStride(stride); + return TRUE; + } + else + { + llerrs << "VertexBufferStrider could not find valid vertex data." 
<< llendl;
+		}
+		return FALSE;
+	}
+};
+
+
+bool LLVertexBuffer::getVertexStrider(LLStrider<LLVector3>& strider, S32 index)
+{
+	return VertexBufferStrider<LLVector3,TYPE_VERTEX>::get(*this, strider, index);
+}
+bool LLVertexBuffer::getIndexStrider(LLStrider<U32>& strider, S32 index)
+{
+	return VertexBufferStrider<U32,TYPE_INDEX>::get(*this, strider, index);
+}
+bool LLVertexBuffer::getTexCoordStrider(LLStrider<LLVector2>& strider, S32 index)
+{
+	return VertexBufferStrider<LLVector2,TYPE_TEXCOORD>::get(*this, strider, index);
+}
+bool LLVertexBuffer::getTexCoord2Strider(LLStrider<LLVector2>& strider, S32 index)
+{
+	return VertexBufferStrider<LLVector2,TYPE_TEXCOORD2>::get(*this, strider, index);
+}
+bool LLVertexBuffer::getNormalStrider(LLStrider<LLVector3>& strider, S32 index)
+{
+	return VertexBufferStrider<LLVector3,TYPE_NORMAL>::get(*this, strider, index);
+}
+bool LLVertexBuffer::getBinormalStrider(LLStrider<LLVector3>& strider, S32 index)
+{
+	return VertexBufferStrider<LLVector3,TYPE_BINORMAL>::get(*this, strider, index);
+}
+bool LLVertexBuffer::getColorStrider(LLStrider<LLColor4U>& strider, S32 index)
+{
+	return VertexBufferStrider<LLColor4U,TYPE_COLOR>::get(*this, strider, index);
+}
+bool LLVertexBuffer::getWeightStrider(LLStrider<F32>& strider, S32 index)
+{
+	return VertexBufferStrider<F32,TYPE_WEIGHT>::get(*this, strider, index);
+}
+bool LLVertexBuffer::getClothWeightStrider(LLStrider<LLVector4>& strider, S32 index)
+{
+	return VertexBufferStrider<LLVector4,TYPE_CLOTHWEIGHT>::get(*this, strider, index);
+}
+
+void LLVertexBuffer::setStride(S32 type, S32 new_stride)
+{
+	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
+	if (mNumVerts)
+	{
+		llerrs << "LLVertexBuffer::setStride called with mNumVerts = " << mNumVerts << llendl;
+	}
+	// This code assumes that setStride() will only be called once per VBO per type.
+	S32 delta = new_stride - sTypeOffsets[type];
+	for (S32 i=type+1; i<TYPE_MAX; i++)
+#include
+#include
+
+//============================================================================
+// NOTES
+// Threading:
+// All constructors should take a 'create' parameter which should only be
+// 'true' when called from the main thread. Otherwise createGLBuffer() will
+// be called as soon as getVertexPointer(), etc. is called (which MUST ONLY be
+// called from the main (i.e. OpenGL) thread)
+
+//============================================================================
+// base class
+
+class LLVertexBuffer : public LLRefCount
+{
+public:
+	static void initClass(bool use_vbo);
+	static void cleanupClass();
+	static void startRender(); //between start and stop render, no client copies will occur
+	static void stopRender(); //any buffer not copied to GL will be rendered from client memory
+	static void clientCopy(); //copy data from client to GL
+	static void unbind(); //unbind any bound vertex buffer
+
+	enum {
+		TYPE_VERTEX,
+		TYPE_NORMAL,
+		TYPE_TEXCOORD,
+		TYPE_TEXCOORD2,
+		TYPE_COLOR,
+		// These use VertexAttribPointer and should possibly be made generic
+		TYPE_BINORMAL,
+		TYPE_WEIGHT,
+		TYPE_CLOTHWEIGHT,
+		TYPE_MAX,
+		TYPE_INDEX,
+	};
+	enum {
+		MAP_VERTEX = (1<<TYPE_VERTEX),
+		MAP_NORMAL = (1<<TYPE_NORMAL),
+		MAP_TEXCOORD = (1<<TYPE_TEXCOORD),
+		MAP_TEXCOORD2 = (1<<TYPE_TEXCOORD2),
+		MAP_COLOR = (1<<TYPE_COLOR),
+		MAP_BINORMAL = (1<<TYPE_BINORMAL),
+		MAP_WEIGHT = (1<<TYPE_WEIGHT),
+		MAP_CLOTHWEIGHT = (1<<TYPE_CLOTHWEIGHT),
+	};
+
+	// vb->getVertexBuffer(verts);
+	// vb->getNormalStrider(norms);
+	// setVertsNorms(verts, norms);
+	// vb->unmapBuffer();
+	bool getVertexStrider(LLStrider<LLVector3>& strider, S32 index=0);
+	bool getIndexStrider(LLStrider<U32>& strider, S32 index=0);
+	bool getTexCoordStrider(LLStrider<LLVector2>& strider, S32 index=0);
+	bool getTexCoord2Strider(LLStrider<LLVector2>& strider, S32 index=0);
+	bool getNormalStrider(LLStrider<LLVector3>& strider, S32 index=0);
+	bool getBinormalStrider(LLStrider<LLVector3>& strider, S32 index=0);
+	bool getColorStrider(LLStrider<LLColor4U>& strider, S32 index=0);
+	bool getWeightStrider(LLStrider<F32>& strider, S32 index=0);
+	bool getClothWeightStrider(LLStrider<LLVector4>& strider, S32 index=0);
+
+	BOOL isEmpty() const { return mEmpty; }
+	BOOL isLocked() const { return mLocked; }
+	S32 getNumVerts() const { return mNumVerts; }
+	S32 getNumIndices() const { return mNumIndices; }
+	U8* getIndicesPointer() const { return useVBOs() ? NULL : mMappedIndexData; }
+	U8* getVerticesPointer() const { return useVBOs() ? NULL : mMappedData; }
+	S32 getStride() const { return mStride; }
+	S32 getTypeMask() const { return mTypeMask; }
+	BOOL hasDataType(S32 type) const { return ((1 << type) & getTypeMask()) ? TRUE : FALSE; }
+	S32 getSize() const { return mNumVerts*mStride; }
+	S32 getIndicesSize() const { return mNumIndices * sizeof(U32); }
+	U8* getMappedData() const { return mMappedData; }
+	U8* getMappedIndices() const { return mMappedIndexData; }
+	S32 getOffset(S32 type) const { return mOffsets[type]; }
+	S32 getUsage() const { return mUsage; }
+
+	void setStride(S32 type, S32 new_stride);
+
+	void markDirty(U32 vert_index, U32 vert_count, U32 indices_index, U32 indices_count);
+	void markClean();
+
+protected:
+	S32		mNumVerts;			// Number of vertices
+	S32		mNumIndices;		// Number of indices
+	S32		mStride;
+	U32		mTypeMask;
+	S32		mUsage;				// GL usage
+	U32		mGLBuffer;			// GL VBO handle
+	U32		mGLIndices;			// GL IBO handle
+	U8*		mMappedData;		// pointer to currently mapped data (NULL if unmapped)
+	U8*		mMappedIndexData;	// pointer to currently mapped indices (NULL if unmapped)
+	BOOL	mLocked;			// if TRUE, buffer is being or has been written to in client memory
+	BOOL	mFinal;				// if TRUE, buffer can not be mapped again
+	BOOL	mFilthy;			// if TRUE, entire buffer must be copied (used to prevent redundant dirty flags)
+	BOOL	mEmpty;				// if TRUE, client buffer is empty (or NULL). Old values have been discarded.
+	S32		mOffsets[TYPE_MAX];
+	BOOL	mResized;		// if TRUE, client buffer has been resized and GL buffer has not
+	BOOL	mDynamicSize;	// if TRUE, buffer has been resized at least once (and should be padded)
+
+	class DirtyRegion
+	{
+	public:
+		U32 mIndex;
+		U32 mCount;
+		U32 mIndicesIndex;
+		U32 mIndicesCount;
+
+		DirtyRegion(U32 vi, U32 vc, U32 ii, U32 ic)
+			: mIndex(vi), mCount(vc), mIndicesIndex(ii), mIndicesCount(ic)
+		{ }
+	};
+
+	std::vector<DirtyRegion> mDirtyRegions; //vector of dirty regions to rebuild
+
+public:
+	static BOOL sRenderActive;
+	static S32 sCount;
+	static S32 sGLCount;
+	static std::vector<U32> sDeleteList;
+	typedef std::list<LLVertexBuffer*> buffer_list_t;
+	static buffer_list_t sLockedList;
+
+	static BOOL sEnableVBOs;
+	static S32 sTypeOffsets[TYPE_MAX];
+	static S32 sGLRenderBuffer;
+	static S32 sGLRenderIndices;
+	static BOOL sVBOActive;
+	static BOOL sIBOActive;
+	static U32 sLastMask;
+	static U32 sAllocatedBytes;
+};
+
+
+#endif // LL_LLVERTEXBUFFER_H
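The constructor body that computes the interleaved layout is partially garbled in this dump, but the sTypeOffsets table together with getStride(), getOffset() and the pointer math in VertexBufferStrider::get() imply a prefix sum over the enabled attribute types. A minimal sketch of that computation, assuming the usual one-bit-per-type mask; this is an approximation, not the shipped code:

```cpp
// Sketch only: approximates the layout computation implied by sTypeOffsets,
// getStride()/getOffset() and VertexBufferStrider::get(); the real constructor
// body is not recoverable from this diff.
static S32 computeLayout(U32 typemask, S32 offsets[LLVertexBuffer::TYPE_MAX])
{
	S32 stride = 0;
	for (S32 i = 0; i < LLVertexBuffer::TYPE_MAX; i++)
	{
		if (typemask & (1 << i))
		{
			offsets[i] = stride;                        // byte offset of this attribute within one vertex
			stride += LLVertexBuffer::sTypeOffsets[i];  // advance by the attribute's size in bytes
		}
	}
	return stride; // bytes per vertex
}
```

This is consistent with VertexBufferStrider::get(), which addresses attribute data as getMappedData() + getOffset(type) + index*getStride().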
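For orientation, here is a sketch of how a caller might fill one of these buffers through the strider interface, following the partial "vb->..." example comments in llvertexbuffer.h. The MAP_* bits, the GL_STREAM_DRAW_ARB usage hint and the buildTriangle() wrapper are illustrative assumptions; rendering the buffer afterwards goes through viewer code outside this patch.

```cpp
// Illustrative sketch (not part of the patch): fill a small vertex/index buffer.
void buildTriangle()
{
	LLPointer<LLVertexBuffer> vb =
		new LLVertexBuffer(LLVertexBuffer::MAP_VERTEX | LLVertexBuffer::MAP_NORMAL,
						   GL_STREAM_DRAW_ARB);   // usage hint is an assumption
	vb->allocateBuffer(3, 3, TRUE);               // 3 verts, 3 indices, create GL objects now

	LLStrider<LLVector3> verts;
	LLStrider<LLVector3> norms;
	LLStrider<U32> indices;
	vb->getVertexStrider(verts);   // striders map the buffer on first use
	vb->getNormalStrider(norms);
	vb->getIndexStrider(indices);

	for (U32 i = 0; i < 3; i++)
	{
		*verts++ = LLVector3((F32)i, 0.f, 0.f);
		*norms++ = LLVector3(0.f, 0.f, 1.f);
		*indices++ = i;
	}

	vb->unmapBuffer(); // pushes the written data back to the GL buffer when VBOs are in use
}
```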