summaryrefslogtreecommitdiff
path: root/indra/llmeshoptimizer/llmeshoptimizer.cpp
diff options
context:
space:
mode:
authorAndrey Kleshchev <andreykproductengine@lindenlab.com>2022-06-10 19:43:14 +0300
committerAndrey Kleshchev <andreykproductengine@lindenlab.com>2022-06-11 10:23:46 +0300
commitb08340f1831005ae227577899a64408cc939a12d (patch)
treeba5bc8cd3c3edf5cdc2670a8a3c01d84f12c506f /indra/llmeshoptimizer/llmeshoptimizer.cpp
parent58a500bee8dc06acb35acb5c2e5c65d2df99a0d8 (diff)
SL-17475 Remap models before simplification
Diffstat (limited to 'indra/llmeshoptimizer/llmeshoptimizer.cpp')
-rw-r--r--indra/llmeshoptimizer/llmeshoptimizer.cpp170
1 file changed, 161 insertions, 9 deletions
diff --git a/indra/llmeshoptimizer/llmeshoptimizer.cpp b/indra/llmeshoptimizer/llmeshoptimizer.cpp
index a879389c5a..8570887ddd 100644
--- a/indra/llmeshoptimizer/llmeshoptimizer.cpp
+++ b/indra/llmeshoptimizer/llmeshoptimizer.cpp
@@ -28,6 +28,9 @@
#include "meshoptimizer.h"
+#include "llmath.h"
+#include "v2math.h"
+
LLMeshOptimizer::LLMeshOptimizer()
{
// Todo: Looks like for memory management, we can add allocator and deallocator callbacks
@@ -40,25 +43,174 @@ LLMeshOptimizer::~LLMeshOptimizer()
}
//static
-void LLMeshOptimizer::generateShadowIndexBuffer(U16 *destination,
- const U16 *indices,
+void LLMeshOptimizer::generateShadowIndexBufferU32(U32 *destination,
+ const U32 *indices,
U64 index_count,
- const LLVector4a *vertex_positions,
- U64 vertex_count,
- U64 vertex_positions_stride
+ const LLVector4a * vertex_positions,
+ const LLVector4a * normals,
+ const LLVector2 * text_coords,
+ U64 vertex_count
)
{
- meshopt_generateShadowIndexBuffer<unsigned short>(destination,
+ meshopt_Stream streams[3];
+
+ S32 index = 0;
+ if (vertex_positions)
+ {
+ streams[index].data = (const float*)vertex_positions;
+ // Despite being LLVector4a, only x, y and z are in use
+ streams[index].size = sizeof(F32) * 3;
+ streams[index].stride = sizeof(F32) * 4;
+ index++;
+ }
+ if (normals)
+ {
+ streams[index].data = (const float*)normals;
+ streams[index].size = sizeof(F32) * 3;
+ streams[index].stride = sizeof(F32) * 4;
+ index++;
+ }
+ if (text_coords)
+ {
+ streams[index].data = (const float*)text_coords;
+ streams[index].size = sizeof(F32) * 2;
+ streams[index].stride = sizeof(F32) * 2;
+ index++;
+ }
+
+ if (index == 0)
+ {
+ // invalid
+ return;
+ }
+
+ meshopt_generateShadowIndexBufferMulti<unsigned int>(destination,
indices,
index_count,
- (const float*)vertex_positions, // verify that it is correct to convert to float
vertex_count,
- sizeof(LLVector4a),
- vertex_positions_stride
+ streams,
+ index
);
}
//static
+void LLMeshOptimizer::generateShadowIndexBufferU16(U16 *destination,
+ const U16 *indices,
+ U64 index_count,
+ const LLVector4a * vertex_positions,
+ const LLVector4a * normals,
+ const LLVector2 * text_coords,
+ U64 vertex_count
+)
+{
+ meshopt_Stream streams[3];
+
+ S32 index = 0;
+ if (vertex_positions)
+ {
+ streams[index].data = (const float*)vertex_positions;
+ streams[index].size = sizeof(F32) * 3;
+ streams[index].stride = sizeof(F32) * 4;
+ index++;
+ }
+ if (normals)
+ {
+ streams[index].data = (const float*)normals;
+ streams[index].size = sizeof(F32) * 3;
+ streams[index].stride = sizeof(F32) * 4;
+ index++;
+ }
+ if (text_coords)
+ {
+ streams[index].data = (const float*)text_coords;
+ streams[index].size = sizeof(F32) * 2;
+ streams[index].stride = sizeof(F32) * 2;
+ index++;
+ }
+
+ if (index == 0)
+ {
+ // invalid
+ return;
+ }
+
+ meshopt_generateShadowIndexBufferMulti<unsigned short>(destination,
+ indices,
+ index_count,
+ vertex_count,
+ streams,
+ index);
+}
+
+void LLMeshOptimizer::optimizeVertexCacheU32(U32 * destination, const U32 * indices, U64 index_count, U64 vertex_count)
+{
+ meshopt_optimizeVertexCache<unsigned int>(destination, indices, index_count, vertex_count);
+}
+
+void LLMeshOptimizer::optimizeVertexCacheU16(U16 * destination, const U16 * indices, U64 index_count, U64 vertex_count)
+{
+ meshopt_optimizeVertexCache<unsigned short>(destination, indices, index_count, vertex_count);
+}
+
+size_t LLMeshOptimizer::generateRemapMulti(
+ unsigned int* remap,
+ const U32 * indices,
+ U64 index_count,
+ const LLVector4a * vertex_positions,
+ const LLVector4a * normals,
+ const LLVector2 * text_coords,
+ U64 vertex_count)
+{
+ meshopt_Stream streams[] = {
+ {(const float*)vertex_positions, sizeof(F32) * 3, sizeof(F32) * 4},
+ {(const float*)normals, sizeof(F32) * 3, sizeof(F32) * 4},
+ {(const float*)text_coords, sizeof(F32) * 2, sizeof(F32) * 2},
+ };
+
+ return meshopt_generateVertexRemapMulti(&remap[0], indices, index_count, vertex_count, streams, sizeof(streams) / sizeof(streams[0]));
+}
+
+void LLMeshOptimizer::remapIndexBufferU32(U32 * destination_indices,
+ const U32 * indices,
+ U64 index_count,
+ const unsigned int* remap)
+{
+ meshopt_remapIndexBuffer<unsigned int>(destination_indices, indices, index_count, remap);
+}
+
+void LLMeshOptimizer::remapIndexBufferU16(U16 * destination_indices,
+ const U16 * indices,
+ U64 index_count,
+ const unsigned int* remap)
+{
+ meshopt_remapIndexBuffer<unsigned short>(destination_indices, indices, index_count, remap);
+}
+
+void LLMeshOptimizer::remapPositionsBuffer(LLVector4a * destination_vertices,
+ const LLVector4a * vertex_positions,
+ U64 vertex_count,
+ const unsigned int* remap)
+{
+ meshopt_remapVertexBuffer((float*)destination_vertices, (const float*)vertex_positions, vertex_count, sizeof(LLVector4a), remap);
+}
+
+void LLMeshOptimizer::remapNormalsBuffer(LLVector4a * destination_normalss,
+ const LLVector4a * normals,
+ U64 mormals_count,
+ const unsigned int* remap)
+{
+ meshopt_remapVertexBuffer((float*)destination_normalss, (const float*)normals, mormals_count, sizeof(LLVector4a), remap);
+}
+
+void LLMeshOptimizer::remapUVBuffer(LLVector2 * destination_uvs,
+ const LLVector2 * uv_positions,
+ U64 uv_count,
+ const unsigned int* remap)
+{
+ meshopt_remapVertexBuffer((float*)destination_uvs, (const float*)uv_positions, uv_count, sizeof(LLVector2), remap);
+}
+
+//static
U64 LLMeshOptimizer::simplifyU32(U32 *destination,
const U32 *indices,
U64 index_count,