diff options
| author | Jonathan "Geenz" Goodman <geenz@geenzo.com> | 2023-04-14 03:09:02 -0700 | 
|---|---|---|
| committer | Jonathan "Geenz" Goodman <geenz@geenzo.com> | 2023-04-14 03:09:02 -0700 | 
| commit | ce48750b7ce55c8a68b345a6fc72bce522b2c2d4 (patch) | |
| tree | b7ccc614321962adffe88d2e3e405914561665b9 | |
| parent | f4274ba64e40b487dea2e7c0bfaee47232b55736 (diff) | |
| parent | 5f5bac8087973be7da1d9b78a080463b816a1efc (diff) | |
Merge branch 'DRTVWR-559' into DRTVWR-583
| -rw-r--r-- | indra/llmath/llvolume.cpp | 51 | 
1 file changed, 29 insertions, 22 deletions
| diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 7a694ab10c..2a906c8d41 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -5574,37 +5574,44 @@ bool LLVolumeFace::cacheOptimize(bool gen_tangents)          U32 vert_count = meshopt_generateVertexRemapMulti(&remap[0], nullptr, data.p.size(), data.p.size(), mos, stream_count); -        std::vector<U32> indices; -        indices.resize(mNumIndices); +        if (vert_count < 65535) +        { +            std::vector<U32> indices; +            indices.resize(mNumIndices); -        //copy results back into volume -        resizeVertices(vert_count); +            //copy results back into volume +            resizeVertices(vert_count); -        if (!data.w.empty()) -        { -            allocateWeights(vert_count); -        } +            if (!data.w.empty()) +            { +                allocateWeights(vert_count); +            } -        allocateTangents(mNumVertices); +            allocateTangents(mNumVertices); -        for (int i = 0; i < mNumIndices; ++i) -        { -            U32 src_idx = i; -            U32 dst_idx = remap[i]; -            mIndices[i] = dst_idx; +            for (int i = 0; i < mNumIndices; ++i) +            { +                U32 src_idx = i; +                U32 dst_idx = remap[i]; +                mIndices[i] = dst_idx; -            mPositions[dst_idx].load3(data.p[src_idx].mV); -            mNormals[dst_idx].load3(data.n[src_idx].mV); -            mTexCoords[dst_idx] = data.tc[src_idx]; +                mPositions[dst_idx].load3(data.p[src_idx].mV); +                mNormals[dst_idx].load3(data.n[src_idx].mV); +                mTexCoords[dst_idx] = data.tc[src_idx]; -            mTangents[dst_idx].loadua(data.t[src_idx].mV); +                mTangents[dst_idx].loadua(data.t[src_idx].mV); -            if (mWeights) -            { -                mWeights[dst_idx].loadua(data.w[src_idx].mV); +                if (mWeights) +                
{ +                    mWeights[dst_idx].loadua(data.w[src_idx].mV); +                }              }          } - +        else +        { +            // blew past the max vertex size limit, use legacy tangent generation which never adds verts +            createTangents(); +        }          // put back in normalized coordinate frame          LLVector4a inv_scale(1.f/mNormalizedScale.mV[0], 1.f / mNormalizedScale.mV[1], 1.f / mNormalizedScale.mV[2]); | 
