Diffstat (limited to 'indra/llrender/llvertexbuffer.h')
-rw-r--r--  indra/llrender/llvertexbuffer.h  265
1 file changed, 174 insertions(+), 91 deletions(-)
diff --git a/indra/llrender/llvertexbuffer.h b/indra/llrender/llvertexbuffer.h
index 94fa790957..d859199663 100644
--- a/indra/llrender/llvertexbuffer.h
+++ b/indra/llrender/llvertexbuffer.h
@@ -38,6 +38,8 @@
#include <vector>
#include <list>
+#define LL_MAX_VERTEX_ATTRIB_LOCATION 64
+
//============================================================================
// NOTES
// Threading:
@@ -49,63 +51,123 @@
//============================================================================
// gl name pools for dynamic and streaming buffers
-
-class LLVBOPool : public LLGLNamePool
+class LLVBOPool
{
-protected:
- virtual GLuint allocateName()
- {
- GLuint name;
- glGenBuffersARB(1, &name);
- return name;
- }
+public:
+ static U32 sBytesPooled;
+
+ LLVBOPool(U32 vboUsage, U32 vboType)
+ : mUsage(vboUsage)
+ , mType(vboType)
+ {}
+
+ const U32 mUsage;
+ const U32 mType;
- virtual void releaseName(GLuint name)
+ //size MUST be a power of 2
+ volatile U8* allocate(U32& name, U32 size);
+
+ //size MUST be the size provided to allocate that returned the given name
+ void release(U32 name, volatile U8* buffer, U32 size);
+
+ //destroy all records in mFreeList
+ void cleanup();
+
+ class Record
{
- glDeleteBuffersARB(1, &name);
- }
+ public:
+ U32 mGLName;
+ volatile U8* mClientData;
+ };
+
+ typedef std::list<Record> record_list_t;
+ std::vector<record_list_t> mFreeList;
};
+class LLGLFence
+{
+public:
+ virtual void placeFence() = 0;
+ virtual void wait() = 0;
+};
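A minimal usage sketch of the new pool interface (a hypothetical caller, not code from this change; assumes "llvertexbuffer.h" is included). Sizes passed to allocate() must be powers of two, and release() must be handed the same size that produced the name/buffer pair:

    // Sketch: grab a pooled VBO name plus its client-side shadow buffer.
    U32 vbo_name = 0;
    const U32 size = 4096;  // must be a power of 2
    volatile U8* client_data = LLVertexBuffer::sStreamVBOPool.allocate(vbo_name, size);
    // ... fill client_data and/or upload via vbo_name ...
    LLVertexBuffer::sStreamVBOPool.release(vbo_name, client_data, size);  // same size as allocate()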
//============================================================================
-// base class
-
+// base class
+class LLPrivateMemoryPool;
class LLVertexBuffer : public LLRefCount
{
public:
+ class MappedRegion
+ {
+ public:
+ S32 mType;
+ S32 mIndex;
+ S32 mCount;
+
+ MappedRegion(S32 type, S32 index, S32 count);
+ };
+
+ LLVertexBuffer(const LLVertexBuffer& rhs)
+ : mUsage(rhs.mUsage)
+ {
+ *this = rhs;
+ }
+
+ const LLVertexBuffer& operator=(const LLVertexBuffer& rhs)
+ {
+ llerrs << "Illegal operation!" << llendl;
+ return *this;
+ }
+
static LLVBOPool sStreamVBOPool;
static LLVBOPool sDynamicVBOPool;
static LLVBOPool sStreamIBOPool;
static LLVBOPool sDynamicIBOPool;
- static BOOL sUseStreamDraw;
+ static bool sUseStreamDraw;
+ static bool sUseVAO;
+ static bool sPreferStreamDraw;
- static void initClass(bool use_vbo);
+ static void initClass(bool use_vbo, bool no_vbo_mapping);
static void cleanupClass();
static void setupClientArrays(U32 data_mask);
- static void clientCopy(F64 max_time = 0.005); //copy data from client to GL
- static void unbind(); //unbind any bound vertex buffer
+ static void drawArrays(U32 mode, const std::vector<LLVector3>& pos, const std::vector<LLVector3>& norm);
+ static void drawElements(U32 mode, const LLVector4a* pos, const LLVector2* tc, S32 num_indices, const U16* indicesp);
+
+ static void unbind(); //unbind any bound vertex buffer
//get the size of a vertex with the given typemask
- //if offsets is not NULL, its contents will be filled
- //with the offset of each vertex component in the buffer,
+ static S32 calcVertexSize(const U32& typemask);
+
+ //get the size of a buffer with the given typemask and vertex count
+ //fill offsets with the offset of each vertex component array into the buffer
// indexed by the following enum
- static S32 calcStride(const U32& typemask, S32* offsets = NULL);
+ static S32 calcOffsets(const U32& typemask, S32* offsets, S32 num_vertices);
+
+ //WARNING -- when updating these enums you MUST
+ // 1 - update LLVertexBuffer::sTypeSize
+ // 2 - add a strider accessor
+ // 3 - modify LLVertexBuffer::setupVertexBuffer
+ // 4 - modify LLVertexBuffer::setupClientArray
+ // 5 - modify LLViewerShaderMgr::mReservedAttribs
+ // 6 - update LLVertexBuffer::setupVertexArray
enum {
- TYPE_VERTEX,
+ TYPE_VERTEX = 0,
TYPE_NORMAL,
TYPE_TEXCOORD0,
TYPE_TEXCOORD1,
TYPE_TEXCOORD2,
TYPE_TEXCOORD3,
TYPE_COLOR,
- // These use VertexAttribPointer and should possibly be made generic
+ TYPE_EMISSIVE,
TYPE_BINORMAL,
TYPE_WEIGHT,
+ TYPE_WEIGHT4,
TYPE_CLOTHWEIGHT,
+ TYPE_TEXTURE_INDEX,
TYPE_MAX,
- TYPE_INDEX,
+ TYPE_INDEX,
};
enum {
MAP_VERTEX = (1<<TYPE_VERTEX),
@@ -115,10 +177,13 @@ public:
MAP_TEXCOORD2 = (1<<TYPE_TEXCOORD2),
MAP_TEXCOORD3 = (1<<TYPE_TEXCOORD3),
MAP_COLOR = (1<<TYPE_COLOR),
+ MAP_EMISSIVE = (1<<TYPE_EMISSIVE),
// These use VertexAttribPointer and should possibly be made generic
MAP_BINORMAL = (1<<TYPE_BINORMAL),
MAP_WEIGHT = (1<<TYPE_WEIGHT),
+ MAP_WEIGHT4 = (1<<TYPE_WEIGHT4),
MAP_CLOTHWEIGHT = (1<<TYPE_CLOTHWEIGHT),
+ MAP_TEXTURE_INDEX = (1<<TYPE_TEXTURE_INDEX),
};
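As a hedged illustration of how these attribute flags compose (the particular mask and vertex count below are made up), the size/offset helpers declared earlier in this hunk might be exercised like this:

    // Sketch: build a typemask and query the resulting vertex layout.
    U32 mask = LLVertexBuffer::MAP_VERTEX | LLVertexBuffer::MAP_NORMAL | LLVertexBuffer::MAP_TEXCOORD0;
    S32 vertex_size = LLVertexBuffer::calcVertexSize(mask);            // bytes per vertex
    S32 offsets[LLVertexBuffer::TYPE_MAX];
    S32 buffer_size = LLVertexBuffer::calcOffsets(mask, offsets, 128); // buffer bytes for 128 verts; fills offsets[]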
protected:
@@ -126,32 +191,39 @@ protected:
virtual ~LLVertexBuffer(); // use unref()
- virtual void setupVertexBuffer(U32 data_mask) const; // pure virtual, called from mapBuffer()
+ virtual void setupVertexBuffer(U32 data_mask); // pure virtual, called from mapBuffer()
+ void setupVertexArray();
- void genBuffer();
- void genIndices();
+ void genBuffer(U32 size);
+ void genIndices(U32 size);
+ bool bindGLBuffer(bool force_bind = false);
+ bool bindGLIndices(bool force_bind = false);
+ bool bindGLArray();
void releaseBuffer();
void releaseIndices();
- void createGLBuffer();
- void createGLIndices();
+ void createGLBuffer(U32 size);
+ void createGLIndices(U32 size);
void destroyGLBuffer();
void destroyGLIndices();
void updateNumVerts(S32 nverts);
void updateNumIndices(S32 nindices);
- virtual BOOL useVBOs() const;
+ bool useVBOs() const;
void unmapBuffer();
public:
LLVertexBuffer(U32 typemask, S32 usage);
// map for data access
- U8* mapBuffer(S32 access = -1);
+ volatile U8* mapVertexBuffer(S32 type, S32 index, S32 count, bool map_range);
+ volatile U8* mapIndexBuffer(S32 index, S32 count, bool map_range);
+
// set for rendering
virtual void setBuffer(U32 data_mask); // calls setupVertexBuffer() if data_mask is not 0
+ void flush(); //flush pending data to GL memory
// allocate buffer
void allocateBuffer(S32 nverts, S32 nindices, bool create);
virtual void resizeBuffer(S32 newnverts, S32 newnindices);
-
+
// Only call each getVertexPointer, etc, once before calling unmapBuffer()
// call unmapBuffer() after calls to getXXXStrider() before any calls to setBuffer()
// example:
@@ -159,94 +231,105 @@ public:
// vb->getNormalStrider(norms);
// setVertsNorms(verts, norms);
// vb->unmapBuffer();
- bool getVertexStrider(LLStrider<LLVector3>& strider, S32 index=0);
- bool getIndexStrider(LLStrider<U16>& strider, S32 index=0);
- bool getTexCoord0Strider(LLStrider<LLVector2>& strider, S32 index=0);
- bool getTexCoord1Strider(LLStrider<LLVector2>& strider, S32 index=0);
- bool getNormalStrider(LLStrider<LLVector3>& strider, S32 index=0);
- bool getBinormalStrider(LLStrider<LLVector3>& strider, S32 index=0);
- bool getColorStrider(LLStrider<LLColor4U>& strider, S32 index=0);
- bool getWeightStrider(LLStrider<F32>& strider, S32 index=0);
- bool getClothWeightStrider(LLStrider<LLVector4>& strider, S32 index=0);
+ bool getVertexStrider(LLStrider<LLVector3>& strider, S32 index=0, S32 count = -1, bool map_range = false);
+ bool getVertexStrider(LLStrider<LLVector4a>& strider, S32 index=0, S32 count = -1, bool map_range = false);
+ bool getIndexStrider(LLStrider<U16>& strider, S32 index=0, S32 count = -1, bool map_range = false);
+ bool getTexCoord0Strider(LLStrider<LLVector2>& strider, S32 index=0, S32 count = -1, bool map_range = false);
+ bool getTexCoord1Strider(LLStrider<LLVector2>& strider, S32 index=0, S32 count = -1, bool map_range = false);
+ bool getNormalStrider(LLStrider<LLVector3>& strider, S32 index=0, S32 count = -1, bool map_range = false);
+ bool getBinormalStrider(LLStrider<LLVector3>& strider, S32 index=0, S32 count = -1, bool map_range = false);
+ bool getColorStrider(LLStrider<LLColor4U>& strider, S32 index=0, S32 count = -1, bool map_range = false);
+ bool getEmissiveStrider(LLStrider<LLColor4U>& strider, S32 index=0, S32 count = -1, bool map_range = false);
+ bool getWeightStrider(LLStrider<F32>& strider, S32 index=0, S32 count = -1, bool map_range = false);
+ bool getWeight4Strider(LLStrider<LLVector4>& strider, S32 index=0, S32 count = -1, bool map_range = false);
+ bool getClothWeightStrider(LLStrider<LLVector4>& strider, S32 index=0, S32 count = -1, bool map_range = false);
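The comment above still shows the whole-buffer pattern; with the new count/map_range arguments a caller can map just a sub-range. A sketch under the assumption that vb points to a writeable LLVertexBuffer allocated with MAP_VERTEX | MAP_NORMAL:

    // Sketch: map a 32-vertex window starting at vertex 16, write it, then flush.
    LLStrider<LLVector3> verts;
    LLStrider<LLVector3> norms;
    if (vb->getVertexStrider(verts, 16, 32, true) && vb->getNormalStrider(norms, 16, 32, true))
    {
        // ... write 32 positions and normals through the striders ...
    }
    vb->flush();  // push the mapped region(s) to GL before rendering
    vb->setBuffer(LLVertexBuffer::MAP_VERTEX | LLVertexBuffer::MAP_NORMAL);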
- BOOL isEmpty() const { return mEmpty; }
- BOOL isLocked() const { return mLocked; }
+
+ bool isEmpty() const { return mEmpty; }
+ bool isLocked() const { return mVertexLocked || mIndexLocked; }
S32 getNumVerts() const { return mNumVerts; }
S32 getNumIndices() const { return mNumIndices; }
- S32 getRequestedVerts() const { return mRequestedNumVerts; }
- S32 getRequestedIndices() const { return mRequestedNumIndices; }
-
- U8* getIndicesPointer() const { return useVBOs() ? NULL : mMappedIndexData; }
- U8* getVerticesPointer() const { return useVBOs() ? NULL : mMappedData; }
- S32 getStride() const { return mStride; }
- S32 getTypeMask() const { return mTypeMask; }
- BOOL hasDataType(S32 type) const { return ((1 << type) & getTypeMask()) ? TRUE : FALSE; }
- S32 getSize() const { return mNumVerts*mStride; }
- S32 getIndicesSize() const { return mNumIndices * sizeof(U16); }
- U8* getMappedData() const { return mMappedData; }
- U8* getMappedIndices() const { return mMappedIndexData; }
+
+ volatile U8* getIndicesPointer() const { return useVBOs() ? (U8*) mAlignedIndexOffset : mMappedIndexData; }
+ volatile U8* getVerticesPointer() const { return useVBOs() ? (U8*) mAlignedOffset : mMappedData; }
+ U32 getTypeMask() const { return mTypeMask; }
+ bool hasDataType(S32 type) const { return ((1 << type) & getTypeMask()); }
+ S32 getSize() const;
+ S32 getIndicesSize() const { return mIndicesSize; }
+ volatile U8* getMappedData() const { return mMappedData; }
+ volatile U8* getMappedIndices() const { return mMappedIndexData; }
S32 getOffset(S32 type) const { return mOffsets[type]; }
S32 getUsage() const { return mUsage; }
-
- void setStride(S32 type, S32 new_stride);
-
- void markDirty(U32 vert_index, U32 vert_count, U32 indices_index, U32 indices_count);
+ bool isWriteable() const { return (mMappable || mUsage == GL_STREAM_DRAW_ARB) ? true : false; }
void draw(U32 mode, U32 count, U32 indices_offset) const;
void drawArrays(U32 mode, U32 offset, U32 count) const;
void drawRange(U32 mode, U32 start, U32 end, U32 count, U32 indices_offset) const;
+ //for debugging, validate data in given range is valid
+ void validateRange(U32 start, U32 end, U32 count, U32 offset) const;
+
+
+
protected:
S32 mNumVerts; // Number of vertices allocated
S32 mNumIndices; // Number of indices allocated
- S32 mRequestedNumVerts; // Number of vertices requested
- S32 mRequestedNumIndices; // Number of indices requested
-
- S32 mStride;
+
+ ptrdiff_t mAlignedOffset;
+ ptrdiff_t mAlignedIndexOffset;
+ S32 mSize;
+ S32 mIndicesSize;
U32 mTypeMask;
- S32 mUsage; // GL usage
+
+ const S32 mUsage; // GL usage
+
U32 mGLBuffer; // GL VBO handle
U32 mGLIndices; // GL IBO handle
- U8* mMappedData; // pointer to currently mapped data (NULL if unmapped)
- U8* mMappedIndexData; // pointer to currently mapped indices (NULL if unmapped)
- BOOL mLocked; // if TRUE, buffer is being or has been written to in client memory
- BOOL mFinal; // if TRUE, buffer can not be mapped again
- BOOL mFilthy; // if TRUE, entire buffer must be copied (used to prevent redundant dirty flags)
- BOOL mEmpty; // if TRUE, client buffer is empty (or NULL). Old values have been discarded.
+ U32 mGLArray; // GL VAO handle
+
+ volatile U8* mMappedData; // pointer to currently mapped data (NULL if unmapped)
+ volatile U8* mMappedIndexData; // pointer to currently mapped indices (NULL if unmapped)
+
+ U32 mMappedDataUsingVBOs : 1;
+ U32 mMappedIndexDataUsingVBOs : 1;
+ U32 mVertexLocked : 1; // if true, vertex buffer is being or has been written to in client memory
+ U32 mIndexLocked : 1; // if true, index buffer is being or has been written to in client memory
+ U32 mFinal : 1; // if true, buffer can not be mapped again
+ U32 mEmpty : 1; // if true, client buffer is empty (or NULL). Old values have been discarded.
+
+ mutable bool mMappable; // if true, use memory mapping to upload data (otherwise doublebuffer and use glBufferSubData)
+
S32 mOffsets[TYPE_MAX];
- BOOL mResized; // if TRUE, client buffer has been resized and GL buffer has not
- BOOL mDynamicSize; // if TRUE, buffer has been resized at least once (and should be padded)
- class DirtyRegion
- {
- public:
- U32 mIndex;
- U32 mCount;
- U32 mIndicesIndex;
- U32 mIndicesCount;
-
- DirtyRegion(U32 vi, U32 vc, U32 ii, U32 ic)
- : mIndex(vi), mCount(vc), mIndicesIndex(ii), mIndicesCount(ic)
- { }
- };
+ std::vector<MappedRegion> mMappedVertexRegions;
+ std::vector<MappedRegion> mMappedIndexRegions;
+
+ mutable LLGLFence* mFence;
+
+ void placeFence() const;
+ void waitFence() const;
+
+ static S32 determineUsage(S32 usage);
- std::vector<DirtyRegion> mDirtyRegions; //vector of dirty regions to rebuild
+private:
+ static LLPrivateMemoryPool* sPrivatePoolp;
public:
static S32 sCount;
static S32 sGLCount;
static S32 sMappedCount;
- static BOOL sMapped;
- static std::vector<U32> sDeleteList;
+ static bool sMapped;
typedef std::list<LLVertexBuffer*> buffer_list_t;
- static BOOL sEnableVBOs;
- static S32 sTypeOffsets[TYPE_MAX];
+ static bool sDisableVBOMapping; //disable glMapBufferARB
+ static bool sEnableVBOs;
+ static S32 sTypeSize[TYPE_MAX];
static U32 sGLMode[LLRender::NUM_MODES];
static U32 sGLRenderBuffer;
+ static U32 sGLRenderArray;
static U32 sGLRenderIndices;
- static BOOL sVBOActive;
- static BOOL sIBOActive;
+ static bool sVBOActive;
+ static bool sIBOActive;
static U32 sLastMask;
static U32 sAllocatedBytes;
static U32 sBindCount;