/** 
 * @file llvertexbuffer.cpp
 * @brief LLVertexBuffer implementation
 *
 * Copyright (c) 2003-$CurrentYear$, Linden Research, Inc.
 * $License$
 */

#include "linden_common.h"

#include "llvertexbuffer.h"
// #include "llrender.h"
#include "llglheaders.h"
#include "llmemory.h"
#include "llmemtype.h"

//============================================================================

//static
S32 LLVertexBuffer::sCount = 0;
S32 LLVertexBuffer::sGLCount = 0;
BOOL LLVertexBuffer::sEnableVBOs = TRUE;
U32 LLVertexBuffer::sGLRenderBuffer = 0;
U32 LLVertexBuffer::sGLRenderIndices = 0;
U32 LLVertexBuffer::sLastMask = 0;
BOOL LLVertexBuffer::sVBOActive = FALSE;
BOOL LLVertexBuffer::sIBOActive = FALSE;
U32 LLVertexBuffer::sAllocatedBytes = 0;
BOOL LLVertexBuffer::sRenderActive = FALSE;
std::vector<U32> LLVertexBuffer::sDeleteList;
LLVertexBuffer::buffer_list_t LLVertexBuffer::sLockedList;

// Bytes consumed by each data type in the interleaved vertex format.
S32 LLVertexBuffer::sTypeOffsets[LLVertexBuffer::TYPE_MAX] =
{
	sizeof(LLVector3), // TYPE_VERTEX,
	sizeof(LLVector3), // TYPE_NORMAL,
	sizeof(LLVector2), // TYPE_TEXCOORD,
	sizeof(LLVector2), // TYPE_TEXCOORD2,
	sizeof(LLColor4U), // TYPE_COLOR,
	sizeof(LLVector3), // TYPE_BINORMAL,
	sizeof(F32),       // TYPE_WEIGHT,
	sizeof(LLVector4), // TYPE_CLOTHWEIGHT,
};

//static
void LLVertexBuffer::initClass(bool use_vbo)
{
	sEnableVBOs = use_vbo;
}

//static
void LLVertexBuffer::unbind()
{
	if (sVBOActive)
	{
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
		sVBOActive = FALSE;
	}
	if (sIBOActive)
	{
		glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
		sIBOActive = FALSE;
	}

	sGLRenderBuffer = 0;
	sGLRenderIndices = 0;
}

//static
void LLVertexBuffer::cleanupClass()
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	sLockedList.clear();
	startRender();
	stopRender();
	clientCopy(); // deletes GL buffers
}

//static, call before rendering VBOs
void LLVertexBuffer::startRender()
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	if (sEnableVBOs)
	{
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
		glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
		sVBOActive = FALSE;
		sIBOActive = FALSE;
	}

	sRenderActive = TRUE;
	sGLRenderBuffer = 0;
	sGLRenderIndices = 0;
	sLastMask = 0;
}

void LLVertexBuffer::stopRender()
{
	sRenderActive = FALSE;
}

// Flush the pending GL buffer delete list, then copy client data of locked
// buffers into their VBOs, spending at most max_time seconds after the first copy.
void LLVertexBuffer::clientCopy(F64 max_time)
{
	if (!sDeleteList.empty())
	{
		size_t num = sDeleteList.size();
		glDeleteBuffersARB(sDeleteList.size(), (GLuint*) &(sDeleteList[0]));
		sDeleteList.clear();
		sGLCount -= num;
	}

	if (sEnableVBOs)
	{
		LLTimer timer;
		BOOL reset = TRUE;
		buffer_list_t::iterator iter = sLockedList.begin();
		while (iter != sLockedList.end())
		{
			LLVertexBuffer* buffer = *iter;
			if (buffer->isLocked() && buffer->useVBOs())
			{
				buffer->setBuffer(0);
			}
			++iter;
			if (reset)
			{
				reset = FALSE;
				timer.reset(); //skip first copy (don't count pipeline stall)
			}
			else
			{
				if (timer.getElapsedTimeF64() > max_time)
				{
					break;
				}
			}
		}

		sLockedList.erase(sLockedList.begin(), iter);
	}
}

//----------------------------------------------------------------------------

LLVertexBuffer::LLVertexBuffer(U32 typemask, S32 usage) :
	LLRefCount(),
	mNumVerts(0), mNumIndices(0), mUsage(usage), mGLBuffer(0), mGLIndices(0),
	mMappedData(NULL),
	mMappedIndexData(NULL), mLocked(FALSE),
	mFinal(FALSE),
	mFilthy(FALSE),
	mEmpty(TRUE),
	mResized(FALSE),
	mDynamicSize(FALSE) // must start FALSE; read by updateNumVerts/updateNumIndices
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	if (!sEnableVBOs)
	{
		mUsage = GL_STREAM_DRAW_ARB;
	}

	// Compute the interleaved offset of each enabled data type.
	S32 stride = 0;
	for (S32 i=0; i<TYPE_MAX; i++)
	{
		U32 mask = 1<<i;
		if (typemask & mask)
		{
			mOffsets[i] = stride;
			stride += sTypeOffsets[i];
		}
	}

	mTypeMask = typemask;
	mStride = stride;
	sCount++;
}

//----------------------------------------------------------------------------

void LLVertexBuffer::updateNumVerts(S32 nverts)
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	if (!mDynamicSize)
	{
		mNumVerts = nverts;
	}
	else if (mUsage == GL_STATIC_DRAW_ARB ||
		nverts > mNumVerts ||
		nverts < mNumVerts/2)
	{
		if (mUsage != GL_STATIC_DRAW_ARB)
		{
			nverts += nverts/4;
		}
		mNumVerts = nverts;
	}
}

void LLVertexBuffer::updateNumIndices(S32 nindices)
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	if (!mDynamicSize)
	{
		mNumIndices = nindices;
	}
	else if (mUsage == GL_STATIC_DRAW_ARB ||
		nindices > mNumIndices ||
		nindices < mNumIndices/2)
	{
		if (mUsage != GL_STATIC_DRAW_ARB)
		{
			nindices += nindices/4;
		}
		mNumIndices = nindices;
	}
}

void LLVertexBuffer::makeStatic()
{
	if (!sEnableVBOs)
	{
		return;
	}

	if (sRenderActive)
	{
		llerrs << "Make static called during render." << llendl;
	}

	if (mUsage != GL_STATIC_DRAW_ARB)
	{
		if (useVBOs())
		{
			if (mGLBuffer)
			{
				sDeleteList.push_back(mGLBuffer);
			}
			if (mGLIndices)
			{
				sDeleteList.push_back(mGLIndices);
			}
		}

		if (mGLBuffer)
		{
			sGLCount++;
			glGenBuffersARB(1, (GLuint*) &mGLBuffer);
		}
		if (mGLIndices)
		{
			sGLCount++;
			glGenBuffersARB(1, (GLuint*) &mGLIndices);
		}

		mUsage = GL_STATIC_DRAW_ARB;
		mResized = TRUE;

		if (!mLocked)
		{
			mLocked = TRUE;
			sLockedList.push_back(this);
		}
	}
}

void LLVertexBuffer::allocateBuffer(S32 nverts, S32 nindices, bool create)
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);

	updateNumVerts(nverts);
	updateNumIndices(nindices);

	if (mMappedData)
	{
		llerrs << "LLVertexBuffer::allocateBuffer() called redundantly." << llendl;
	}
	if (create && (nverts || nindices))
	{
		createGLBuffer();
		createGLIndices();
	}

	sAllocatedBytes += getSize() + getIndicesSize();
}

void LLVertexBuffer::resizeBuffer(S32 newnverts, S32 newnindices)
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	mDynamicSize = TRUE;
	if (mUsage == GL_STATIC_DRAW_ARB)
	{ //always delete/allocate static buffers on resize
		destroyGLBuffer();
		destroyGLIndices();
		allocateBuffer(newnverts, newnindices, TRUE);
		mFinal = FALSE;
	}
	else if (newnverts > mNumVerts || newnindices > mNumIndices ||
			 newnverts < mNumVerts/2 || newnindices < mNumIndices/2)
	{
		sAllocatedBytes -= getSize() + getIndicesSize();

		S32 oldsize = getSize();
		S32 old_index_size = getIndicesSize();

		updateNumVerts(newnverts);
		updateNumIndices(newnindices);

		S32 newsize = getSize();
		S32 new_index_size = getIndicesSize();

		sAllocatedBytes += newsize + new_index_size;

		if (newsize)
		{
			if (!mGLBuffer)
			{ //no buffer exists, create a new one
				createGLBuffer();
			}
			else
			{ //delete old buffer, keep GL buffer for now
				U8* old = mMappedData;
				mMappedData = new U8[newsize];
				if (old)
				{
					memcpy(mMappedData, old, llmin(newsize, oldsize));
					if (newsize > oldsize)
					{
						memset(mMappedData+oldsize, 0, newsize-oldsize);
					}

					delete [] old;
				}
				else
				{
					memset(mMappedData, 0, newsize);
					mEmpty = TRUE;
				}
				mResized = TRUE;
			}
		}
		else if (mGLBuffer)
		{
			destroyGLBuffer();
		}

		if (new_index_size)
		{
			if (!mGLIndices)
			{
				createGLIndices();
			}
			else
			{ //delete old buffer, keep GL buffer for now
				U8* old = mMappedIndexData;
				mMappedIndexData = new U8[new_index_size];
				if (old)
				{
					memcpy(mMappedIndexData, old, llmin(new_index_size, old_index_size));
					if (new_index_size > old_index_size)
					{
						memset(mMappedIndexData+old_index_size, 0, new_index_size - old_index_size);
					}

					delete [] old;
				}
				else
				{
					memset(mMappedIndexData, 0, new_index_size);
					mEmpty = TRUE;
				}
				mResized = TRUE;
			}
		}
		else if (mGLIndices)
		{
			destroyGLIndices();
		}
	}
}

BOOL LLVertexBuffer::useVBOs() const
{
	//it's generally ineffective to use VBO for things that are streaming
	//when we already have a client buffer around
	if (mUsage == GL_STREAM_DRAW_ARB)
	{
		return FALSE;
	}

	return sEnableVBOs && (!sRenderActive || !mLocked);
}

//----------------------------------------------------------------------------

// Map for data access
U8* LLVertexBuffer::mapBuffer(S32 access)
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	if (sRenderActive)
	{
		llwarns << "Buffer mapped during render frame!" << llendl;
	}
	if (!mGLBuffer && !mGLIndices)
	{
		llerrs << "LLVertexBuffer::mapBuffer() called before createGLBuffer" << llendl;
	}
	if (mFinal)
	{
		llerrs << "LLVertexBuffer::mapBuffer() called on a finalized buffer." << llendl;
	}
	if (!mMappedData && !mMappedIndexData)
	{
		llerrs << "LLVertexBuffer::mapBuffer() called on unallocated buffer." << llendl;
	}

	if (!mLocked && useVBOs())
	{
		mLocked = TRUE;
		sLockedList.push_back(this);
	}

	return mMappedData;
}

void LLVertexBuffer::unmapBuffer()
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	if (mMappedData || mMappedIndexData)
	{
		if (useVBOs() && mLocked)
		{
			if (mGLBuffer)
			{
				if (mResized)
				{
					glBufferDataARB(GL_ARRAY_BUFFER_ARB, getSize(), mMappedData, mUsage);
				}
				else
				{
					if (mEmpty || mDirtyRegions.empty())
					{
						glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, 0, getSize(), mMappedData);
					}
					else
					{
						for (std::vector<DirtyRegion>::iterator i = mDirtyRegions.begin(); i != mDirtyRegions.end(); ++i)
						{
							DirtyRegion& region = *i;
							glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, region.mIndex*mStride, region.mCount*mStride,
								mMappedData + region.mIndex*mStride);
						}
					}
				}
			}

			if (mGLIndices)
			{
				if (mResized)
				{
					glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, getIndicesSize(), mMappedIndexData, mUsage);
				}
				else
				{
					if (mEmpty || mDirtyRegions.empty())
					{
						glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, getIndicesSize(), mMappedIndexData);
					}
					else
					{
						for (std::vector<DirtyRegion>::iterator i = mDirtyRegions.begin(); i != mDirtyRegions.end(); ++i)
						{
							DirtyRegion& region = *i;
							glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, region.mIndicesIndex*sizeof(U32), region.mIndicesCount*sizeof(U32),
								mMappedIndexData + region.mIndicesIndex*sizeof(U32));
						}
					}
				}
			}

			mDirtyRegions.clear();
			mFilthy = FALSE;
			mResized = FALSE;

			if (mUsage == GL_STATIC_DRAW_ARB)
			{ //static draw buffers can only be mapped a single time
				//throw out client data (we won't be using it again)
				delete [] mMappedData;
				delete [] mMappedIndexData;
				mMappedIndexData = NULL;
				mMappedData = NULL;
				mEmpty = TRUE;
				mFinal = TRUE;
			}
			else
			{
				mEmpty = FALSE;
			}

			mLocked = FALSE;

			glFlush();
		}
	}
}

//----------------------------------------------------------------------------

template <class T, S32 type> struct VertexBufferStrider
{
	typedef LLStrider<T> strider_t;
	static bool get(LLVertexBuffer& vbo,
					strider_t& strider,
					S32 index)
	{
		vbo.mapBuffer();
		if (type == LLVertexBuffer::TYPE_INDEX)
		{
			S32 stride = sizeof(T);
			strider = (T*)(vbo.getMappedIndices() + index*stride);
			strider.setStride(0);
			return TRUE;
		}
		else if (vbo.hasDataType(type))
		{
			S32 stride = vbo.getStride();
			strider = (T*)(vbo.getMappedData() + vbo.getOffset(type) + index*stride);
			strider.setStride(stride);
			return TRUE;
		}
		else
		{
			llerrs << "VertexBufferStrider could not find valid vertex data." << llendl;
		}
		return FALSE;
	}
};

bool LLVertexBuffer::getVertexStrider(LLStrider<LLVector3>& strider, S32 index)
{
	return VertexBufferStrider<LLVector3,TYPE_VERTEX>::get(*this, strider, index);
}
bool LLVertexBuffer::getIndexStrider(LLStrider<U32>& strider, S32 index)
{
	return VertexBufferStrider<U32,TYPE_INDEX>::get(*this, strider, index);
}
bool LLVertexBuffer::getTexCoordStrider(LLStrider<LLVector2>& strider, S32 index)
{
	return VertexBufferStrider<LLVector2,TYPE_TEXCOORD>::get(*this, strider, index);
}
bool LLVertexBuffer::getTexCoord2Strider(LLStrider<LLVector2>& strider, S32 index)
{
	return VertexBufferStrider<LLVector2,TYPE_TEXCOORD2>::get(*this, strider, index);
}
bool LLVertexBuffer::getNormalStrider(LLStrider<LLVector3>& strider, S32 index)
{
	return VertexBufferStrider<LLVector3,TYPE_NORMAL>::get(*this, strider, index);
}
bool LLVertexBuffer::getBinormalStrider(LLStrider<LLVector3>& strider, S32 index)
{
	return VertexBufferStrider<LLVector3,TYPE_BINORMAL>::get(*this, strider, index);
}
bool LLVertexBuffer::getColorStrider(LLStrider<LLColor4U>& strider, S32 index)
{
	return VertexBufferStrider<LLColor4U,TYPE_COLOR>::get(*this, strider, index);
}
bool LLVertexBuffer::getWeightStrider(LLStrider<F32>& strider, S32 index)
{
	return VertexBufferStrider<F32,TYPE_WEIGHT>::get(*this, strider, index);
}
bool LLVertexBuffer::getClothWeightStrider(LLStrider<LLVector4>& strider, S32 index)
{
	return VertexBufferStrider<LLVector4,TYPE_CLOTHWEIGHT>::get(*this, strider, index);
}

void LLVertexBuffer::setStride(S32 type, S32 new_stride)
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	if (mNumVerts)
	{
		llerrs << "LLVertexBuffer::setOffset called with mNumVerts = " << mNumVerts << llendl;
	}
	// This code assumes that setStride() will only be called once per VBO per type.
	S32 delta = new_stride - sTypeOffsets[type];
	for (S32 i=type+1; i<TYPE_MAX; i++)
	{
		if (mTypeMask & (1<<i))
		{
			mOffsets[i] += delta;
		}
	}
	mStride += delta;
}

//----------------------------------------------------------------------------

// Record a region of the client buffer that must be copied into the VBO on the
// next unmap; adjacent updates are coalesced, overlapping updates mark the
// whole buffer filthy so it gets copied in full.
void LLVertexBuffer::markDirty(U32 vert_index, U32 vert_count, U32 indices_index, U32 indices_count)
{
	if (useVBOs() && !mFilthy)
	{
		if (!mDirtyRegions.empty())
		{
			DirtyRegion& region = *(mDirtyRegions.rbegin());

			if (region.mIndex + region.mCount > vert_index)
			{
				//this buffer has received multiple updates since the last copy, mark it filthy
				mFilthy = TRUE;
				mDirtyRegions.clear();
				return;
			}

			if (region.mIndex + region.mCount == vert_index &&
				region.mIndicesIndex + region.mIndicesCount == indices_index)
			{
				region.mCount += vert_count;
				region.mIndicesCount += indices_count;
				return;
			}
		}

		mDirtyRegions.push_back(DirtyRegion(vert_index,vert_count,indices_index,indices_count));
	}
}

void LLVertexBuffer::markClean()
{
	if (!mResized && !mEmpty && !mFilthy)
	{
		buffer_list_t::reverse_iterator iter = sLockedList.rbegin();
		// guard against an empty lock list before dereferencing
		if (iter != sLockedList.rend() && *iter == this)
		{
			mLocked = FALSE;
			sLockedList.pop_back();
		}
	}
}
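
//----------------------------------------------------------------------------
// A minimal usage sketch (illustrative only, not part of the original code;
// num_verts/num_indices are placeholder counts, and the typemask is built from
// the TYPE_* bits the same way the constructor interprets them):
//
//   LLVertexBuffer* vb = new LLVertexBuffer(
//       (1 << LLVertexBuffer::TYPE_VERTEX) | (1 << LLVertexBuffer::TYPE_TEXCOORD),
//       GL_STREAM_DRAW_ARB);
//   vb->allocateBuffer(num_verts, num_indices, TRUE); // client copy (+ GL buffers)
//
//   LLStrider<LLVector3> verts;
//   LLStrider<U32> indices;
//   vb->getVertexStrider(verts, 0);  // maps the buffer and adds it to the locked list
//   vb->getIndexStrider(indices, 0);
//   // ...write positions and indices through the striders...
//
//   // Writes land in the client copy; they reach the VBOs when the buffer is
//   // unmapped (see unmapBuffer()), which clientCopy() drives for buffers on
//   // the locked list by calling setBuffer(0) on them.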