author    Dave Parks <davep@lindenlab.com>    2022-08-05 16:58:38 -0500
committer Dave Parks <davep@lindenlab.com>    2022-08-05 16:58:38 -0500
commit    8007b43876831a0d88833bdc8fd7eb6d543dd38b (patch)
tree      c5c34cbb5c9a9c7bd915b331abb17ab914e3f53a /indra/llmeshoptimizer/llmeshoptimizer.h
parent    d048795fce3ab83989cb909fde02014f1442cc84 (diff)
parent    aca495c8812d49a5fde80ecefbf3f609b4042bf9 (diff)
Merge branch 'DRTVWR-559' of ssh://bitbucket.org/lindenlab/viewer into DRTVWR-559
Diffstat (limited to 'indra/llmeshoptimizer/llmeshoptimizer.h')
-rw-r--r--  indra/llmeshoptimizer/llmeshoptimizer.h | 81
1 file changed, 77 insertions(+), 4 deletions(-)
diff --git a/indra/llmeshoptimizer/llmeshoptimizer.h b/indra/llmeshoptimizer/llmeshoptimizer.h
index e8dd16dae9..ea965d6b47 100644
--- a/indra/llmeshoptimizer/llmeshoptimizer.h
+++ b/indra/llmeshoptimizer/llmeshoptimizer.h
@@ -28,7 +28,8 @@
#include "linden_common.h"
-#include "llmath.h"
+class LLVector4a;
+class LLVector2;
class LLMeshOptimizer
{
@@ -36,13 +37,85 @@ public:
LLMeshOptimizer();
~LLMeshOptimizer();
- static void generateShadowIndexBuffer(
+ static void generateShadowIndexBufferU32(
+ U32 *destination,
+ const U32 *indices,
+ U64 index_count,
+ const LLVector4a * vertex_positions,
+ const LLVector4a * normals,
+ const LLVector2 * text_coords,
+ U64 vertex_count);
+
+ static void generateShadowIndexBufferU16(
U16 *destination,
const U16 *indices,
U64 index_count,
- const LLVector4a *vertex_positions,
+ const LLVector4a * vertex_positions,
+ const LLVector4a * normals,
+ const LLVector2 * text_coords,
+ U64 vertex_count);
+
+ static void optimizeVertexCacheU32(
+ U32 *destination,
+ const U32 *indices,
+ U64 index_count,
+ U64 vertex_count);
+
+ static void optimizeVertexCacheU16(
+ U16 *destination,
+ const U16 *indices,
+ U64 index_count,
+ U64 vertex_count);
+
+ // Remap functions
+ // Welds identical vertices together.
+ // Removes unused vertices if indices were provided.
+
+ static size_t generateRemapMultiU32(
+ unsigned int* remap,
+ const U32 * indices,
+ U64 index_count,
+ const LLVector4a * vertex_positions,
+ const LLVector4a * normals,
+ const LLVector2 * text_coords,
+ U64 vertex_count);
+
+ static size_t generateRemapMultiU16(
+ unsigned int* remap,
+ const U16 * indices,
+ U64 index_count,
+ const LLVector4a * vertex_positions,
+ const LLVector4a * normals,
+ const LLVector2 * text_coords,
+ U64 vertex_count);
+
+ static void remapIndexBufferU32(U32 * destination_indices,
+ const U32 * indices,
+ U64 index_count,
+ const unsigned int* remap);
+
+ static void remapIndexBufferU16(U16 * destination_indices,
+ const U16 * indices,
+ U64 index_count,
+ const unsigned int* remap);
+
+
+ static void remapPositionsBuffer(LLVector4a * destination_vertices,
+ const LLVector4a * vertex_positions,
U64 vertex_count,
- U64 vertex_positions_stride);
+ const unsigned int* remap);
+
+ static void remapNormalsBuffer(LLVector4a * destination_normals,
+ const LLVector4a * normals,
+ U64 normals_count,
+ const unsigned int* remap);
+
+ static void remapUVBuffer(LLVector2 * destination_uvs,
+ const LLVector2 * uv_positions,
+ U64 uv_count,
+ const unsigned int* remap);
+
+ // Simplification
// returns the number of indices in destination
// sloppy engages a variant of the mechanism that does not respect topology as much
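
For orientation, here is a minimal usage sketch (not part of the commit) showing how the remap API declared above might be chained together on a U16-indexed face: build a remap table with generateRemapMultiU16, rewrite the index buffer, compact each vertex stream, then optionally reorder for vertex cache locality. The function name weld_face, the buffer names, the llmath.h include, and the use of std::vector are assumptions for illustration; the viewer allocates its vertex data with its own aligned buffers.

// Hypothetical usage sketch of the new remap/optimize entry points.
#include <vector>

#include "linden_common.h"
#include "llmath.h"          // assumed to provide LLVector4a and LLVector2
#include "llmeshoptimizer.h"

static void weld_face(const U16* indices, U64 index_count,
                      const LLVector4a* positions,
                      const LLVector4a* normals,
                      const LLVector2* uvs,
                      U64 vertex_count)
{
    // 1) Build a remap table over positions/normals/UVs; identical vertices
    //    collapse to one slot, and unused ones are dropped since indices are given.
    std::vector<unsigned int> remap(vertex_count);
    size_t unique_count = LLMeshOptimizer::generateRemapMultiU16(
        remap.data(), indices, index_count,
        positions, normals, uvs, vertex_count);

    // 2) Rewrite the index buffer against the welded vertex set.
    std::vector<U16> welded_indices(index_count);
    LLMeshOptimizer::remapIndexBufferU16(welded_indices.data(), indices,
                                         index_count, remap.data());

    // 3) Compact each vertex stream with the same remap table.
    std::vector<LLVector4a> welded_positions(unique_count);
    std::vector<LLVector4a> welded_normals(unique_count);
    std::vector<LLVector2>  welded_uvs(unique_count);
    LLMeshOptimizer::remapPositionsBuffer(welded_positions.data(), positions,
                                          vertex_count, remap.data());
    LLMeshOptimizer::remapNormalsBuffer(welded_normals.data(), normals,
                                        vertex_count, remap.data());
    LLMeshOptimizer::remapUVBuffer(welded_uvs.data(), uvs,
                                   vertex_count, remap.data());

    // 4) Optionally reorder the welded indices for better vertex cache locality.
    std::vector<U16> optimized_indices(index_count);
    LLMeshOptimizer::optimizeVertexCacheU16(optimized_indices.data(),
                                            welded_indices.data(),
                                            index_count, unique_count);

    // The welded/optimized buffers would then replace the face's original
    // data (omitted here).
}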