path: root/indra/llmath
author     Graham Linden <graham@lindenlab.com>   2019-08-05 12:04:29 -0700
committer  Graham Linden <graham@lindenlab.com>   2019-08-05 12:04:29 -0700
commit     76128c4357bc36acd54575153516c6d337fe4263 (patch)
tree       7910d8415bf394fd15f5dca35dd3779b39acc15a /indra/llmath
parent     9bb6da1e76efa951da7e740f80b1e4e72e67b878 (diff)
SL-10566 Use vector for some high-traffic, low-item-count containers instead of list.
Provide a method of storing joint indices separately from weight data for faster runtime processing.
Diffstat (limited to 'indra/llmath')
-rw-r--r--   indra/llmath/llvolume.cpp   54
-rw-r--r--   indra/llmath/llvolume.h      2
2 files changed, 49 insertions, 7 deletions
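
The llmath portion of the change is the second point of the message: per-vertex joint indices get their own tightly packed array next to the existing weight data. A minimal sketch of that layout, using standard C++ types and hypothetical names (SkinVertexArraysSketch, clampJointIndexSketch) purely for illustration, not the viewer's actual code:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Sketch only: standalone illustration of the storage split this commit
    // introduces, using standard types in place of the viewer's LLVector4a/U8.
    struct SkinVertexArraysSketch
    {
        // Legacy packed form: four floats per vertex, each encoding
        // <joint_index>.<weight> (see the comment in llvolume.h below).
        std::vector<float>   packedWeights;   // 4 * num_verts entries
        // New parallel form added by allocateJointIndices(): four joint indices
        // per vertex kept as raw bytes, so the skinning path can look up joint
        // matrices without decoding the packed floats each time.
        std::vector<uint8_t> jointIndices;    // 4 * num_verts entries
    };

    // Hypothetical helper, not part of the diff: clamp a raw joint id into the
    // same 0..110 range that unpackVolumeFaces() applies when filling the array.
    inline uint8_t clampJointIndexSketch(int raw)
    {
        return static_cast<uint8_t>(std::clamp(raw, 0, 110));
    }
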
diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp
index e32625796c..9d0cf1e119 100644
--- a/indra/llmath/llvolume.cpp
+++ b/indra/llmath/llvolume.cpp
@@ -2526,6 +2526,7 @@ bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size)
if (mdl[i].has("Weights"))
{
face.allocateWeights(num_verts);
+ face.allocateJointIndices(num_verts);
LLSD::Binary weights = mdl[i]["Weights"];
@@ -2566,6 +2567,13 @@ bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size)
{
wght = LLVector4(0.999f,0.f,0.f,0.f);
}
+ if (face.mJointIndices)
+ {
+ for (U32 k=0; k<4; k++)
+ {
+ face.mJointIndices[cur_vertex * 4 + k] = llclamp((U8)joints[k], (U8)0, (U8)110);
+ }
+ }
for (U32 k=0; k<4; k++)
{
F32 f_combined = (F32) joints[k] + wght[k];
@@ -4656,6 +4664,7 @@ LLVolumeFace::LLVolumeFace() :
mTexCoords(NULL),
mIndices(NULL),
mWeights(NULL),
+ mJointIndices(NULL),
mWeightsScrubbed(FALSE),
mOctree(NULL),
mOptimized(FALSE)
@@ -4682,6 +4691,7 @@ LLVolumeFace::LLVolumeFace(const LLVolumeFace& src)
mTexCoords(NULL),
mIndices(NULL),
mWeights(NULL),
+ mJointIndices(NULL),
mWeightsScrubbed(FALSE),
mOctree(NULL)
{
@@ -4746,15 +4756,29 @@ LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src)
if (src.mWeights)
{
+ llassert(!mWeights); // don't orphan an old alloc here accidentally
allocateWeights(src.mNumVertices);
- LLVector4a::memcpyNonAliased16((F32*) mWeights, (F32*) src.mWeights, vert_size);
+ LLVector4a::memcpyNonAliased16((F32*) mWeights, (F32*) src.mWeights, vert_size);
+ mWeightsScrubbed = src.mWeightsScrubbed;
}
else
{
- ll_aligned_free_16(mWeights);
- mWeights = NULL;
- }
- mWeightsScrubbed = src.mWeightsScrubbed;
+ ll_aligned_free_16(mWeights);
+ mWeights = NULL;
+ mWeightsScrubbed = FALSE;
+ }
+
+ if (src.mJointIndices)
+ {
+ llassert(!mJointIndices); // don't orphan an old alloc here accidentally
+ allocateJointIndices(src.mNumVertices);
+ LLVector4a::memcpyNonAliased16((F32*) mJointIndices, (F32*) src.mJointIndices, src.mNumVertices * sizeof(U8) * 4);
+ }
+ else
+ {
+ ll_aligned_free_16(mJointIndices);
+ mJointIndices = NULL;
+ }
}
if (mNumIndices)
@@ -4763,7 +4787,12 @@ LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src)
LLVector4a::memcpyNonAliased16((F32*) mIndices, (F32*) src.mIndices, idx_size);
}
-
+ else
+ {
+ ll_aligned_free_16(mIndices);
+ mIndices = NULL;
+ }
+
mOptimized = src.mOptimized;
//delete
@@ -4794,6 +4823,8 @@ void LLVolumeFace::freeData()
mTangents = NULL;
ll_aligned_free_16(mWeights);
mWeights = NULL;
+ ll_aligned_free_16(mJointIndices);
+ mJointIndices = NULL;
delete mOctree;
mOctree = NULL;
@@ -5448,11 +5479,13 @@ bool LLVolumeFace::cacheOptimize()
// DO NOT free mNormals and mTexCoords as they are part of mPositions buffer
ll_aligned_free_16(mWeights);
ll_aligned_free_16(mTangents);
+ ll_aligned_free_16(mJointIndices);
mPositions = pos;
mNormals = norm;
mTexCoords = tc;
mWeights = wght;
+ mJointIndices = NULL; // filled in later as necessary by skinning code for acceleration
mTangents = binorm;
//std::string result = llformat("ACMR pre/post: %.3f/%.3f -- %d triangles %d breaks", pre_acmr, post_acmr, mNumIndices/3, breaks);
@@ -6362,7 +6395,14 @@ void LLVolumeFace::allocateTangents(S32 num_verts)
void LLVolumeFace::allocateWeights(S32 num_verts)
{
ll_aligned_free_16(mWeights);
- mWeights = (LLVector4a*) ll_aligned_malloc_16(sizeof(LLVector4a)*num_verts);
+ mWeights = (LLVector4a*)ll_aligned_malloc_16(sizeof(LLVector4a)*num_verts);
+
+}
+
+void LLVolumeFace::allocateJointIndices(S32 num_verts)
+{
+ ll_aligned_free_16(mJointIndices);
+ mJointIndices = (U8*)ll_aligned_malloc_16(sizeof(U8) * 4 * num_verts);
}
void LLVolumeFace::resizeIndices(S32 num_indices)
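
allocateJointIndices() only reserves the 4 * num_verts byte block; per the comment added in cacheOptimize(), the array may stay NULL until the skinning code fills it later. A hedged sketch of what that fill could look like, assuming the <joint_index>.<weight> encoding documented in llvolume.h; fillJointIndicesSketch is an illustrative name, not a function in this commit:

    #include <cmath>
    #include <cstdint>

    // Sketch only: derive the separate U8 joint-index array from the packed
    // per-vertex weight floats. Assumes the integer part of each float is the
    // joint index and the fractional part is the weight, as in unpackVolumeFaces.
    void fillJointIndicesSketch(const float* packedWeights, // 4 floats per vertex
                                uint8_t* jointIndices,      // 4 bytes per vertex
                                int numVerts)
    {
        for (int v = 0; v < numVerts; ++v)
        {
            for (int k = 0; k < 4; ++k)
            {
                // std::floor recovers the joint index; the weight itself stays
                // in the float array and is not duplicated into the byte array.
                jointIndices[v * 4 + k] =
                    static_cast<uint8_t>(std::floor(packedWeights[v * 4 + k]));
            }
        }
    }
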
diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h
index 1d6d35c432..ed2cd9cde0 100644
--- a/indra/llmath/llvolume.h
+++ b/indra/llmath/llvolume.h
@@ -875,6 +875,7 @@ public:
void resizeVertices(S32 num_verts);
void allocateTangents(S32 num_verts);
void allocateWeights(S32 num_verts);
+ void allocateJointIndices(S32 num_verts);
void resizeIndices(S32 num_indices);
void fillFromLegacyData(std::vector<LLVolumeFace::VertexData>& v, std::vector<U16>& idx);
@@ -955,6 +956,7 @@ public:
// format is mWeights[vertex_index].mV[influence] = <joint_index>.<weight>
// mWeights.size() should be empty or match mVertices.size()
LLVector4a* mWeights;
+ U8* mJointIndices;
mutable BOOL mWeightsScrubbed;
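
For reference, a minimal sketch of reading that packed format back out (joint index in the integer part, weight in the fractional part, mirroring the f_combined = joint + weight encoding in unpackVolumeFaces); decodeInfluenceSketch is illustrative only, not part of the viewer API:

    #include <cmath>
    #include <cstdint>
    #include <utility>

    // Sketch only: split one packed influence float into its joint index and
    // weight. Weights are kept strictly below 1.0 when packed, so the sum of
    // joint index and weight is reversible with a floor.
    inline std::pair<uint8_t, float> decodeInfluenceSketch(float packed)
    {
        float joint  = std::floor(packed);
        float weight = packed - joint;   // fractional part, 0.0 <= weight < 1.0
        return { static_cast<uint8_t>(joint), weight };
    }
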