diff options
| author | Andrey Kleshchev <andreykproductengine@lindenlab.com> | 2022-06-14 16:39:46 +0300 | 
|---|---|---|
| committer | Andrey Kleshchev <andreykproductengine@lindenlab.com> | 2022-06-14 16:40:56 +0300 | 
| commit | 45bcefd981e268b158d11d59f2ba9063293986a6 (patch) | |
| tree | eefeda2338e1f7d15755214f075cbb3f18c0c0b8 /indra | |
| parent | b08340f1831005ae227577899a64408cc939a12d (diff) | |
SL-17475 fix remap causing an assert
Diffstat (limited to 'indra')
| -rw-r--r-- | indra/llmath/llvolume.cpp | 4 | ||||
| -rw-r--r-- | indra/llmeshoptimizer/llmeshoptimizer.cpp | 59 | ||||
| -rw-r--r-- | indra/llmeshoptimizer/llmeshoptimizer.h | 13 | ||||
| -rw-r--r-- | indra/newview/llmodelpreview.cpp | 2 | 
4 files changed, 72 insertions, 6 deletions
| diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 23f372f6e3..9efdcd4e8c 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -4957,8 +4957,8 @@ void LLVolumeFace::remap()  {      // generate a remap buffer      std::vector<unsigned int> remap(mNumIndices); -    S32 remap_vertices_count = LLMeshOptimizer::generateRemapMulti(&remap[0], -        NULL, +    S32 remap_vertices_count = LLMeshOptimizer::generateRemapMultiU16(&remap[0], +        mIndices,          mNumIndices,          mPositions,          mNormals, diff --git a/indra/llmeshoptimizer/llmeshoptimizer.cpp b/indra/llmeshoptimizer/llmeshoptimizer.cpp index 8570887ddd..cb9716a907 100644 --- a/indra/llmeshoptimizer/llmeshoptimizer.cpp +++ b/indra/llmeshoptimizer/llmeshoptimizer.cpp @@ -152,7 +152,7 @@ void LLMeshOptimizer::optimizeVertexCacheU16(U16 * destination, const U16 * indi      meshopt_optimizeVertexCache<unsigned short>(destination, indices, index_count, vertex_count);  } -size_t LLMeshOptimizer::generateRemapMulti( +size_t LLMeshOptimizer::generateRemapMultiU32(      unsigned int* remap,      const U32 * indices,      U64 index_count, @@ -167,7 +167,62 @@ size_t LLMeshOptimizer::generateRemapMulti(         {(const float*)text_coords, sizeof(F32) * 2, sizeof(F32) * 2},      }; -    return meshopt_generateVertexRemapMulti(&remap[0], indices, index_count, vertex_count, streams, sizeof(streams) / sizeof(streams[0])); +    // Remap can function without indices, +    // but providing indices helps with removing unused vertices +    U64 indeces_cmp = indices ? 
index_count : vertex_count; + +    // meshopt_generateVertexRemapMulti will throw an assert if (indices[i] >= vertex_count) +    return meshopt_generateVertexRemapMulti(&remap[0], indices, indeces_cmp, vertex_count, streams, sizeof(streams) / sizeof(streams[0])); +} + +size_t LLMeshOptimizer::generateRemapMultiU16( +    unsigned int* remap, +    const U16 * indices, +    U64 index_count, +    const LLVector4a * vertex_positions, +    const LLVector4a * normals, +    const LLVector2 * text_coords, +    U64 vertex_count) +{ +    meshopt_Stream streams[] = { +       {(const float*)vertex_positions, sizeof(F32) * 3, sizeof(F32) * 4}, +       {(const float*)normals, sizeof(F32) * 3, sizeof(F32) * 4}, +       {(const float*)text_coords, sizeof(F32) * 2, sizeof(F32) * 2}, +    }; + +    S32 out_of_range_count = 0; +    U32* indices_u32 = NULL; +    if (indices) +    { +        indices_u32 = (U32*)ll_aligned_malloc_32(index_count * sizeof(U32)); +        for (U64 i = 0; i < index_count; i++) +        { +            if (indices[i] < vertex_count) +            { +                indices_u32[i] = indices[i]; +            } +            else +            { +                out_of_range_count++; +                indices_u32[i] = 0; +            } +        } +    } + +    if (out_of_range_count) +    { +        LL_WARNS() << out_of_range_count << " indexes are out of range." << LL_ENDL; +    } + +    // Remap can function without indices, +    // but providing indices helps with removing unused vertices +    U64 indeces_cmp = indices_u32 ? 
index_count : vertex_count; + +    size_t unique =  meshopt_generateVertexRemapMulti(&remap[0], indices_u32, indeces_cmp, vertex_count, streams, sizeof(streams) / sizeof(streams[0])); + +    ll_aligned_free_32(indices_u32); + +    return unique;  }  void LLMeshOptimizer::remapIndexBufferU32(U32 * destination_indices, diff --git a/indra/llmeshoptimizer/llmeshoptimizer.h b/indra/llmeshoptimizer/llmeshoptimizer.h index c76f8a5a89..ea965d6b47 100644 --- a/indra/llmeshoptimizer/llmeshoptimizer.h +++ b/indra/llmeshoptimizer/llmeshoptimizer.h @@ -68,8 +68,10 @@ public:          U64 vertex_count);      // Remap functions +    // Welds identical vertices together. +    // Removes unused vertices if indices were provided. -    static size_t generateRemapMulti( +    static size_t generateRemapMultiU32(          unsigned int* remap,          const U32 * indices,          U64 index_count, @@ -78,6 +80,15 @@ public:          const LLVector4a * vertex_positions,          const LLVector2 * text_coords,          U64 vertex_count); +    static size_t generateRemapMultiU16( +        unsigned int* remap, +        const U16 * indices, +        U64 index_count, +        const LLVector4a * vertex_positions, +        const LLVector4a * normals, +        const LLVector2 * text_coords, +        U64 vertex_count); +      static void remapIndexBufferU32(U32 * destination_indices,          const U32 * indices,          U64 index_count, diff --git a/indra/newview/llmodelpreview.cpp b/indra/newview/llmodelpreview.cpp index 707a8b970f..ef791fd80d 100644 --- a/indra/newview/llmodelpreview.cpp +++ b/indra/newview/llmodelpreview.cpp @@ -1284,7 +1284,7 @@ F32 LLModelPreview::genMeshOptimizerPerModel(LLModel *base_model, LLModel *targe      // II. Remap.      std::vector<unsigned int> remap(size_indices); -    S32 size_remap_vertices = LLMeshOptimizer::generateRemapMulti(&remap[0], +    S32 size_remap_vertices = LLMeshOptimizer::generateRemapMultiU32(&remap[0],          combined_indices,          size_indices,          combined_positions, | 
