author    Brad Kittenbrink <brad@lindenlab.com>  2022-11-28 14:33:27 -0800
committer Brad Kittenbrink <brad@lindenlab.com>  2022-11-28 14:33:27 -0800
commit    0b188ac04ecdb1d620a427eacbf5fc089e0accc8 (patch)
tree      76f6afe9424d79bab8d30060d49aec3166d96ec8 /indra/llmeshoptimizer
parent    e9cd2b320ea2ecaab88e4904f7c81746ea617fea (diff)
parent    c8f08f232a28f4e08b4acf525f0d316a101a0e27 (diff)
Merge remote-tracking branch 'origin/main' into DRTVWR-528
Diffstat (limited to 'indra/llmeshoptimizer')
-rw-r--r--  indra/llmeshoptimizer/llmeshoptimizer.cpp  215
-rw-r--r--  indra/llmeshoptimizer/llmeshoptimizer.h      81
2 files changed, 283 insertions, 13 deletions
diff --git a/indra/llmeshoptimizer/llmeshoptimizer.cpp b/indra/llmeshoptimizer/llmeshoptimizer.cpp
index a879389c5a..c178348968 100644
--- a/indra/llmeshoptimizer/llmeshoptimizer.cpp
+++ b/indra/llmeshoptimizer/llmeshoptimizer.cpp
@@ -28,6 +28,9 @@
#include "meshoptimizer.h"
+#include "llmath.h"
+#include "v2math.h"
+
LLMeshOptimizer::LLMeshOptimizer()
{
// Todo: Looks like for memory management, we can add allocator and deallocator callbacks
@@ -40,25 +43,219 @@ LLMeshOptimizer::~LLMeshOptimizer()
}
//static
-void LLMeshOptimizer::generateShadowIndexBuffer(U16 *destination,
- const U16 *indices,
+void LLMeshOptimizer::generateShadowIndexBufferU32(U32 *destination,
+ const U32 *indices,
U64 index_count,
- const LLVector4a *vertex_positions,
- U64 vertex_count,
- U64 vertex_positions_stride
+ const LLVector4a * vertex_positions,
+ const LLVector4a * normals,
+ const LLVector2 * text_coords,
+ U64 vertex_count
)
{
- meshopt_generateShadowIndexBuffer<unsigned short>(destination,
+ meshopt_Stream streams[3];
+
+ S32 index = 0;
+ if (vertex_positions)
+ {
+ streams[index].data = (const float*)vertex_positions;
+ // Despite being LLVector4a, only x, y and z are in use
+ streams[index].size = sizeof(F32) * 3;
+ streams[index].stride = sizeof(F32) * 4;
+ index++;
+ }
+ if (normals)
+ {
+ streams[index].data = (const float*)normals;
+ streams[index].size = sizeof(F32) * 3;
+ streams[index].stride = sizeof(F32) * 4;
+ index++;
+ }
+ if (text_coords)
+ {
+ streams[index].data = (const float*)text_coords;
+ streams[index].size = sizeof(F32) * 2;
+ streams[index].stride = sizeof(F32) * 2;
+ index++;
+ }
+
+ if (index == 0)
+ {
+ // invalid
+ return;
+ }
+
+ meshopt_generateShadowIndexBufferMulti<unsigned int>(destination,
indices,
index_count,
- (const float*)vertex_positions, // verify that it is correct to convert to float
vertex_count,
- sizeof(LLVector4a),
- vertex_positions_stride
+ streams,
+ index
);
}
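
For illustration, a caller might drive this new multi-stream entry point as in the sketch below. This is only a hedged usage sketch, not code from this change: the function and buffer names are hypothetical, and it relies on the behaviour visible above that NULL normal or texture-coordinate pointers are simply skipped when the meshopt_Stream list is built.

    // Hypothetical caller: weld vertices for a shadow/depth index buffer,
    // comparing position, normal and UV where those streams are supplied.
    void buildShadowIndicesU32(U32* shadow_indices,            // index_count entries
                               const U32* source_indices,
                               U64 index_count,
                               const LLVector4a* positions,    // vertex_count entries
                               const LLVector4a* normals,      // may be NULL
                               const LLVector2* tex_coords,    // may be NULL
                               U64 vertex_count)
    {
        LLMeshOptimizer::generateShadowIndexBufferU32(
            shadow_indices, source_indices, index_count,
            positions, normals, tex_coords, vertex_count);
    }
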
//static
+void LLMeshOptimizer::generateShadowIndexBufferU16(U16 *destination,
+ const U16 *indices,
+ U64 index_count,
+ const LLVector4a * vertex_positions,
+ const LLVector4a * normals,
+ const LLVector2 * text_coords,
+ U64 vertex_count
+)
+{
+ meshopt_Stream streams[3];
+
+ S32 index = 0;
+ if (vertex_positions)
+ {
+ streams[index].data = (const float*)vertex_positions;
+ streams[index].size = sizeof(F32) * 3;
+ streams[index].stride = sizeof(F32) * 4;
+ index++;
+ }
+ if (normals)
+ {
+ streams[index].data = (const float*)normals;
+ streams[index].size = sizeof(F32) * 3;
+ streams[index].stride = sizeof(F32) * 4;
+ index++;
+ }
+ if (text_coords)
+ {
+ streams[index].data = (const float*)text_coords;
+ streams[index].size = sizeof(F32) * 2;
+ streams[index].stride = sizeof(F32) * 2;
+ index++;
+ }
+
+ if (index == 0)
+ {
+ // invalid
+ return;
+ }
+
+ meshopt_generateShadowIndexBufferMulti<unsigned short>(destination,
+ indices,
+ index_count,
+ vertex_count,
+ streams,
+ index);
+}
+
+void LLMeshOptimizer::optimizeVertexCacheU32(U32 * destination, const U32 * indices, U64 index_count, U64 vertex_count)
+{
+ meshopt_optimizeVertexCache<unsigned int>(destination, indices, index_count, vertex_count);
+}
+
+void LLMeshOptimizer::optimizeVertexCacheU16(U16 * destination, const U16 * indices, U64 index_count, U64 vertex_count)
+{
+ meshopt_optimizeVertexCache<unsigned short>(destination, indices, index_count, vertex_count);
+}
+
+size_t LLMeshOptimizer::generateRemapMultiU32(
+ unsigned int* remap,
+ const U32 * indices,
+ U64 index_count,
+ const LLVector4a * vertex_positions,
+ const LLVector4a * normals,
+ const LLVector2 * text_coords,
+ U64 vertex_count)
+{
+ meshopt_Stream streams[] = {
+ {(const float*)vertex_positions, sizeof(F32) * 3, sizeof(F32) * 4},
+ {(const float*)normals, sizeof(F32) * 3, sizeof(F32) * 4},
+ {(const float*)text_coords, sizeof(F32) * 2, sizeof(F32) * 2},
+ };
+
+ // Remap can function without indices,
+ // but providing indices helps with removing unused vertices
+    U64 indices_cmp = indices ? index_count : vertex_count;
+
+ // meshopt_generateVertexRemapMulti will throw an assert if (indices[i] >= vertex_count)
+    return meshopt_generateVertexRemapMulti(&remap[0], indices, indices_cmp, vertex_count, streams, sizeof(streams) / sizeof(streams[0]));
+}
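
As the comments above note, the remap can be generated without an index buffer. A hedged sketch of that case follows (hypothetical, preallocated buffers, not part of this change); note that unlike the shadow-buffer helpers, this entry point builds all three streams unconditionally, so positions, normals and tex_coords all need to be non-NULL here.

    // Hypothetical caller: weld an unindexed vertex stream. With NULL indices
    // the index_count argument is ignored and vertex_count entries are compared.
    // 'remap' must have room for vertex_count unsigned ints.
    size_t unique_count = LLMeshOptimizer::generateRemapMultiU32(
        remap, NULL, 0, positions, normals, tex_coords, vertex_count);
    // unique_count is the number of vertices that remain after welding.
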
+
+size_t LLMeshOptimizer::generateRemapMultiU16(
+ unsigned int* remap,
+ const U16 * indices,
+ U64 index_count,
+ const LLVector4a * vertex_positions,
+ const LLVector4a * normals,
+ const LLVector2 * text_coords,
+ U64 vertex_count)
+{
+ S32 out_of_range_count = 0;
+ U32* indices_u32 = NULL;
+ if (indices)
+ {
+ indices_u32 = (U32*)ll_aligned_malloc_32(index_count * sizeof(U32));
+ for (U64 i = 0; i < index_count; i++)
+ {
+ if (indices[i] < vertex_count)
+ {
+ indices_u32[i] = (U32)indices[i];
+ }
+ else
+ {
+ out_of_range_count++;
+ indices_u32[i] = 0;
+ }
+ }
+ }
+
+ if (out_of_range_count)
+ {
+ LL_WARNS() << out_of_range_count << " indices are out of range." << LL_ENDL;
+ }
+
+ size_t unique = generateRemapMultiU32(remap, indices_u32, index_count, vertex_positions, normals, text_coords, vertex_count);
+
+ ll_aligned_free_32(indices_u32);
+
+ return unique;
+}
+
+void LLMeshOptimizer::remapIndexBufferU32(U32 * destination_indices,
+ const U32 * indices,
+ U64 index_count,
+ const unsigned int* remap)
+{
+ meshopt_remapIndexBuffer<unsigned int>(destination_indices, indices, index_count, remap);
+}
+
+void LLMeshOptimizer::remapIndexBufferU16(U16 * destination_indices,
+ const U16 * indices,
+ U64 index_count,
+ const unsigned int* remap)
+{
+ meshopt_remapIndexBuffer<unsigned short>(destination_indices, indices, index_count, remap);
+}
+
+void LLMeshOptimizer::remapPositionsBuffer(LLVector4a * destination_vertices,
+ const LLVector4a * vertex_positions,
+ U64 vertex_count,
+ const unsigned int* remap)
+{
+ meshopt_remapVertexBuffer((float*)destination_vertices, (const float*)vertex_positions, vertex_count, sizeof(LLVector4a), remap);
+}
+
+void LLMeshOptimizer::remapNormalsBuffer(LLVector4a * destination_normals,
+    const LLVector4a * normals,
+    U64 normals_count,
+    const unsigned int* remap)
+{
+    meshopt_remapVertexBuffer((float*)destination_normals, (const float*)normals, normals_count, sizeof(LLVector4a), remap);
+}
+
+void LLMeshOptimizer::remapUVBuffer(LLVector2 * destination_uvs,
+ const LLVector2 * uv_positions,
+ U64 uv_count,
+ const unsigned int* remap)
+{
+ meshopt_remapVertexBuffer((float*)destination_uvs, (const float*)uv_positions, uv_count, sizeof(LLVector2), remap);
+}
+
+//static
U64 LLMeshOptimizer::simplifyU32(U32 *destination,
const U32 *indices,
U64 index_count,
diff --git a/indra/llmeshoptimizer/llmeshoptimizer.h b/indra/llmeshoptimizer/llmeshoptimizer.h
index e8dd16dae9..ea965d6b47 100644
--- a/indra/llmeshoptimizer/llmeshoptimizer.h
+++ b/indra/llmeshoptimizer/llmeshoptimizer.h
@@ -28,7 +28,8 @@
#include "linden_common.h"
-#include "llmath.h"
+class LLVector4a;
+class LLVector2;
class LLMeshOptimizer
{
@@ -36,13 +37,85 @@ public:
LLMeshOptimizer();
~LLMeshOptimizer();
- static void generateShadowIndexBuffer(
+ static void generateShadowIndexBufferU32(
+ U32 *destination,
+ const U32 *indices,
+ U64 index_count,
+ const LLVector4a * vertex_positions,
+ const LLVector4a * normals,
+ const LLVector2 * text_coords,
+ U64 vertex_count);
+
+ static void generateShadowIndexBufferU16(
U16 *destination,
const U16 *indices,
U64 index_count,
- const LLVector4a *vertex_positions,
+ const LLVector4a * vertex_positions,
+ const LLVector4a * normals,
+ const LLVector2 * text_coords,
+ U64 vertex_count);
+
+ static void optimizeVertexCacheU32(
+ U32 *destination,
+ const U32 *indices,
+ U64 index_count,
+ U64 vertex_count);
+
+ static void optimizeVertexCacheU16(
+ U16 *destination,
+ const U16 *indices,
+ U64 index_count,
+ U64 vertex_count);
+
+    // Remap functions
+    // Welds identical vertices together.
+    // Removes unused vertices if indices are provided.
+
+ static size_t generateRemapMultiU32(
+ unsigned int* remap,
+ const U32 * indices,
+ U64 index_count,
+ const LLVector4a * vertex_positions,
+ const LLVector4a * normals,
+ const LLVector2 * text_coords,
+ U64 vertex_count);
+
+ static size_t generateRemapMultiU16(
+ unsigned int* remap,
+ const U16 * indices,
+ U64 index_count,
+ const LLVector4a * vertex_positions,
+ const LLVector4a * normals,
+ const LLVector2 * text_coords,
+ U64 vertex_count);
+
+ static void remapIndexBufferU32(U32 * destination_indices,
+ const U32 * indices,
+ U64 index_count,
+ const unsigned int* remap);
+
+ static void remapIndexBufferU16(U16 * destination_indices,
+ const U16 * indices,
+ U64 index_count,
+ const unsigned int* remap);
+
+
+ static void remapPositionsBuffer(LLVector4a * destination_vertices,
+ const LLVector4a * vertex_positions,
U64 vertex_count,
- U64 vertex_positions_stride);
+ const unsigned int* remap);
+
+    static void remapNormalsBuffer(LLVector4a * destination_normals,
+        const LLVector4a * normals,
+        U64 normals_count,
+        const unsigned int* remap);
+
+ static void remapUVBuffer(LLVector2 * destination_uvs,
+ const LLVector2 * uv_positions,
+ U64 uv_count,
+ const unsigned int* remap);
+
+ // Simplification
// returns the number of indices in destination
// sloppy engages a variant of the mechanism that does not respect topology as much
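
Taken together, the remap declarations above are intended to be chained: generate a remap table, then rewrite the index buffer and each vertex stream against it. A hedged end-to-end sketch follows (hypothetical buffer names; the destination vertex buffers are assumed to hold at least the welded vertex count returned by the remap step, and all three input streams are assumed non-NULL).

    // Hypothetical pipeline built only from the declarations above.
    std::vector<unsigned int> remap(vertex_count);
    size_t unique = LLMeshOptimizer::generateRemapMultiU16(
        remap.data(), indices, index_count,
        positions, normals, tex_coords, vertex_count);

    // Rewrite the index buffer against the welded vertex order...
    LLMeshOptimizer::remapIndexBufferU16(new_indices, indices, index_count, remap.data());

    // ...then compact each vertex stream; each destination needs room for 'unique' entries.
    LLMeshOptimizer::remapPositionsBuffer(new_positions, positions, vertex_count, remap.data());
    LLMeshOptimizer::remapNormalsBuffer(new_normals, normals, vertex_count, remap.data());
    LLMeshOptimizer::remapUVBuffer(new_uvs, tex_coords, vertex_count, remap.data());

    // Optionally reorder the remapped indices for post-transform cache reuse.
    LLMeshOptimizer::optimizeVertexCacheU16(final_indices, new_indices, index_count, unique);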