From 851767b808c3cb05d718538389ccc1ed3c95d1a1 Mon Sep 17 00:00:00 2001
From: Dave Parks
Date: Thu, 14 Oct 2021 17:41:38 +0000
Subject: SL-16131 Fix for alignment warnings on Win32 builds.

---
 indra/llcommon/llmemory.h | 13 +++++++++++++
 1 file changed, 13 insertions(+)

(limited to 'indra/llcommon/llmemory.h')

diff --git a/indra/llcommon/llmemory.h b/indra/llcommon/llmemory.h
index 24f86cc11e..2704a495e0 100644
--- a/indra/llcommon/llmemory.h
+++ b/indra/llcommon/llmemory.h
@@ -101,6 +101,19 @@ template <typename T> T* LL_NEXT_ALIGNED_ADDRESS_64(T* address)
 
 #define LL_ALIGN_16(var) LL_ALIGN_PREFIX(16) var LL_ALIGN_POSTFIX(16)
 
+#define LL_ALIGN_NEW \
+public: \
+    void* operator new(size_t size) \
+    { \
+        return ll_aligned_malloc_16(size); \
+    } \
+    \
+    void operator delete(void* ptr) \
+    { \
+        ll_aligned_free_16(ptr); \
+    }
+
+
 //------------------------------------------------------------------------------------------------
 //------------------------------------------------------------------------------------------------
 // for enable buffer overrun detection predefine LL_DEBUG_BUFFER_OVERRUN in current library
--
cgit v1.2.3


From 8d20480c5f77fe1fab8149d3cda79bdd61e77656 Mon Sep 17 00:00:00 2001
From: Dave Parks
Date: Thu, 28 Oct 2021 18:06:21 +0000
Subject: SL-16148 SL-16244 SL-16270 SL-16253 Remove most BlockTimers, remove
 LLMemTracked, introduce alignas, hook most/all remaining allocs, disable
 synchronous occlusion, and convert frequently accessed LLSingletons to
 LLSimpleton

---
 indra/llcommon/llmemory.h | 77 +++++++++++++++++++++++++++++++++--------------
 1 file changed, 54 insertions(+), 23 deletions(-)

(limited to 'indra/llcommon/llmemory.h')

diff --git a/indra/llcommon/llmemory.h b/indra/llcommon/llmemory.h
index 2704a495e0..41023b4ba4 100644
--- a/indra/llcommon/llmemory.h
+++ b/indra/llcommon/llmemory.h
@@ -109,6 +109,16 @@ public: \
     } \
     \
     void operator delete(void* ptr) \
+    { \
+        ll_aligned_free_16(ptr); \
+    } \
+    \
+    void* operator new[](size_t size) \
+    { \
+        return ll_aligned_malloc_16(size); \
+    } \
+    \
+    void operator delete[](void* ptr) \
     { \
         ll_aligned_free_16(ptr); \
     }
@@ -126,8 +136,9 @@ public: \
 #else
     inline void* ll_aligned_malloc_fallback( size_t size, int align )
     {
+        LL_PROFILE_ZONE_SCOPED;
 #if defined(LL_WINDOWS)
-        return _aligned_malloc(size, align);
+        void* ret = _aligned_malloc(size, align);
 #else
         char* aligned = NULL;
         void* mem = malloc( size + (align - 1) + sizeof(void*) );
@@ -138,12 +149,16 @@
             ((void**)aligned)[-1] = mem;
         }
 
-        return aligned;
+        void* ret = aligned;
 #endif
+        LL_PROFILE_ALLOC(ret, size);
+        return ret;
     }
 
     inline void ll_aligned_free_fallback( void* ptr )
     {
+        LL_PROFILE_ZONE_SCOPED;
+        LL_PROFILE_FREE(ptr);
 #if defined(LL_WINDOWS)
         _aligned_free(ptr);
 #else
@@ -159,21 +174,24 @@
 
 inline void* ll_aligned_malloc_16(size_t size) // returned hunk MUST be freed with ll_aligned_free_16().
 {
+    LL_PROFILE_ZONE_SCOPED;
 #if defined(LL_WINDOWS)
-    return _aligned_malloc(size, 16);
+    void* ret = _aligned_malloc(size, 16);
 #elif defined(LL_DARWIN)
-    return malloc(size); // default osx malloc is 16 byte aligned.
+    void* ret = malloc(size); // default osx malloc is 16 byte aligned.
 #else
-    void *rtn;
-    if (LL_LIKELY(0 == posix_memalign(&rtn, 16, size)))
-        return rtn;
-    else // bad alignment requested, or out of memory
-        return NULL;
+    void *ret;
+    if (0 != posix_memalign(&ret, 16, size))
+        return nullptr;
 #endif
+    LL_PROFILE_ALLOC(ret, size);
+    return ret;
 }
 
 inline void ll_aligned_free_16(void *p)
 {
+    LL_PROFILE_ZONE_SCOPED;
+    LL_PROFILE_FREE(p);
 #if defined(LL_WINDOWS)
     _aligned_free(p);
 #elif defined(LL_DARWIN)
@@ -185,10 +203,12 @@ inline void ll_aligned_free_16(void *p)
 
 inline void* ll_aligned_realloc_16(void* ptr, size_t size, size_t old_size) // returned hunk MUST be freed with ll_aligned_free_16().
 {
+    LL_PROFILE_ZONE_SCOPED;
+    LL_PROFILE_FREE(ptr);
 #if defined(LL_WINDOWS)
-    return _aligned_realloc(ptr, size, 16);
+    void* ret = _aligned_realloc(ptr, size, 16);
 #elif defined(LL_DARWIN)
-    return realloc(ptr,size); // default osx malloc is 16 byte aligned.
+    void* ret = realloc(ptr,size); // default osx malloc is 16 byte aligned.
 #else
     //FIXME: memcpy is SLOW
     void* ret = ll_aligned_malloc_16(size);
@@ -201,27 +221,31 @@ inline void* ll_aligned_realloc_16(void* ptr, size_t size, size_t old_size) // r
         }
         ll_aligned_free_16(ptr);
     }
-    return ret;
 #endif
+    LL_PROFILE_ALLOC(ptr, size);
+    return ret;
 }
 
 inline void* ll_aligned_malloc_32(size_t size) // returned hunk MUST be freed with ll_aligned_free_32().
 {
+    LL_PROFILE_ZONE_SCOPED;
 #if defined(LL_WINDOWS)
-    return _aligned_malloc(size, 32);
+    void* ret = _aligned_malloc(size, 32);
 #elif defined(LL_DARWIN)
-    return ll_aligned_malloc_fallback( size, 32 );
+    void* ret = ll_aligned_malloc_fallback( size, 32 );
 #else
-    void *rtn;
-    if (LL_LIKELY(0 == posix_memalign(&rtn, 32, size)))
-        return rtn;
-    else // bad alignment requested, or out of memory
-        return NULL;
+    void *ret;
+    if (0 != posix_memalign(&ret, 32, size))
+        return nullptr;
 #endif
+    LL_PROFILE_ALLOC(ret, size);
+    return ret;
 }
 
 inline void ll_aligned_free_32(void *p)
 {
+    LL_PROFILE_ZONE_SCOPED;
+    LL_PROFILE_FREE(p);
 #if defined(LL_WINDOWS)
     _aligned_free(p);
 #elif defined(LL_DARWIN)
@@ -235,29 +259,35 @@ inline void ll_aligned_free_32(void *p)
 template <size_t ALIGNMENT>
 LL_FORCE_INLINE void* ll_aligned_malloc(size_t size)
 {
+    LL_PROFILE_ZONE_SCOPED;
+    void* ret;
     if (LL_DEFAULT_HEAP_ALIGN % ALIGNMENT == 0)
     {
-        return malloc(size);
+        ret = malloc(size);
+        LL_PROFILE_ALLOC(ret, size);
     }
     else if (ALIGNMENT == 16)
     {
-        return ll_aligned_malloc_16(size);
+        ret = ll_aligned_malloc_16(size);
     }
     else if (ALIGNMENT == 32)
    {
-        return ll_aligned_malloc_32(size);
+        ret = ll_aligned_malloc_32(size);
    }
    else
    {
-        return ll_aligned_malloc_fallback(size, ALIGNMENT);
+        ret = ll_aligned_malloc_fallback(size, ALIGNMENT);
    }
+    return ret;
 }
 
 template <size_t ALIGNMENT>
 LL_FORCE_INLINE void ll_aligned_free(void* ptr)
 {
+    LL_PROFILE_ZONE_SCOPED;
     if (ALIGNMENT == LL_DEFAULT_HEAP_ALIGN)
     {
+        LL_PROFILE_FREE(ptr);
         free(ptr);
     }
     else if (ALIGNMENT == 16)
@@ -279,6 +309,7 @@ LL_FORCE_INLINE void ll_aligned_free(void* ptr)
 //
 inline void ll_memcpy_nonaliased_aligned_16(char* __restrict dst, const char* __restrict src, size_t bytes)
 {
+    LL_PROFILE_ZONE_SCOPED;
     assert(src != NULL);
     assert(dst != NULL);
     assert(bytes > 0);
--
cgit v1.2.3


From 9f2be2a0547f5e827a91cbcd9162cbe8f9cdfb53 Mon Sep 17 00:00:00 2001
From: Ptolemy
Date: Thu, 13 Jan 2022 12:25:21 -0800
Subject: SL-16606: Add profiler category MEMORY

---
 indra/llcommon/llmemory.h | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

(limited to 'indra/llcommon/llmemory.h')

diff --git a/indra/llcommon/llmemory.h b/indra/llcommon/llmemory.h
index 41023b4ba4..ac6c969d70 100644
--- a/indra/llcommon/llmemory.h
+++ b/indra/llcommon/llmemory.h
@@ -136,7 +136,7 @@ public: \
 #else
     inline void* ll_aligned_malloc_fallback( size_t size, int align )
     {
-        LL_PROFILE_ZONE_SCOPED;
+        LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
 #if defined(LL_WINDOWS)
         void* ret = _aligned_malloc(size, align);
 #else
@@ -157,7 +157,7 @@ public: \
 
     inline void ll_aligned_free_fallback( void* ptr )
     {
-        LL_PROFILE_ZONE_SCOPED;
+        LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
         LL_PROFILE_FREE(ptr);
 #if defined(LL_WINDOWS)
         _aligned_free(ptr);
@@ -174,7 +174,7 @@ public: \
 
 inline void* ll_aligned_malloc_16(size_t size) // returned hunk MUST be freed with ll_aligned_free_16().
 {
-    LL_PROFILE_ZONE_SCOPED;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
 #if defined(LL_WINDOWS)
     void* ret = _aligned_malloc(size, 16);
 #elif defined(LL_DARWIN)
@@ -190,7 +190,7 @@ inline void* ll_aligned_malloc_16(size_t size) // returned hunk MUST be freed wi
 
 inline void ll_aligned_free_16(void *p)
 {
-    LL_PROFILE_ZONE_SCOPED;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
     LL_PROFILE_FREE(p);
 #if defined(LL_WINDOWS)
     _aligned_free(p);
@@ -203,7 +203,7 @@ inline void ll_aligned_free_16(void *p)
 
 inline void* ll_aligned_realloc_16(void* ptr, size_t size, size_t old_size) // returned hunk MUST be freed with ll_aligned_free_16().
 {
-    LL_PROFILE_ZONE_SCOPED;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
     LL_PROFILE_FREE(ptr);
 #if defined(LL_WINDOWS)
     void* ret = _aligned_realloc(ptr, size, 16);
@@ -228,7 +228,7 @@ inline void* ll_aligned_realloc_16(void* ptr, size_t size, size_t old_size) // r
 
 inline void* ll_aligned_malloc_32(size_t size) // returned hunk MUST be freed with ll_aligned_free_32().
 {
-    LL_PROFILE_ZONE_SCOPED;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
 #if defined(LL_WINDOWS)
     void* ret = _aligned_malloc(size, 32);
 #elif defined(LL_DARWIN)
@@ -244,7 +244,7 @@ inline void* ll_aligned_malloc_32(size_t size) // returned hunk MUST be freed wi
 
 inline void ll_aligned_free_32(void *p)
 {
-    LL_PROFILE_ZONE_SCOPED;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
     LL_PROFILE_FREE(p);
 #if defined(LL_WINDOWS)
     _aligned_free(p);
@@ -259,7 +259,7 @@ template <size_t ALIGNMENT>
 LL_FORCE_INLINE void* ll_aligned_malloc(size_t size)
 {
-    LL_PROFILE_ZONE_SCOPED;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
     void* ret;
     if (LL_DEFAULT_HEAP_ALIGN % ALIGNMENT == 0)
     {
@@ -284,7 +284,7 @@ LL_FORCE_INLINE void* ll_aligned_malloc(size_t size)
 template <size_t ALIGNMENT>
 LL_FORCE_INLINE void ll_aligned_free(void* ptr)
 {
-    LL_PROFILE_ZONE_SCOPED;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
     if (ALIGNMENT == LL_DEFAULT_HEAP_ALIGN)
     {
         LL_PROFILE_FREE(ptr);
@@ -309,7 +309,7 @@ LL_FORCE_INLINE void ll_aligned_free(void* ptr)
 //
 inline void ll_memcpy_nonaliased_aligned_16(char* __restrict dst, const char* __restrict src, size_t bytes)
 {
-    LL_PROFILE_ZONE_SCOPED;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
     assert(src != NULL);
     assert(dst != NULL);
     assert(bytes > 0);
--
cgit v1.2.3
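
A note for readers tracing these patches: LL_ALIGN_NEW is meant to be pasted into the body of a class whose heap instances must land on 16-byte boundaries, typically because the class holds members declared with LL_ALIGN_16. A minimal usage sketch follows; the class name, member, and wrapper function are hypothetical and not taken from the viewer source.

// Hypothetical usage of LL_ALIGN_NEW (a sketch, not viewer code).
// The macro replaces operator new/delete (and, after the second patch,
// new[]/delete[]) with the 16-byte-aligned allocators above.
class LLAlignedExample
{
    LL_ALIGN_NEW                    // note: the macro leaves the class in
                                    // 'public:' scope
public:
    LL_ALIGN_16(float mData[4]);    // member that requires 16-byte alignment
};

void example()
{
    LLAlignedExample* obj = new LLAlignedExample();     // ll_aligned_malloc_16
    LLAlignedExample* arr = new LLAlignedExample[4];    // new[] overload from the second patch
    delete obj;                                         // ll_aligned_free_16
    delete[] arr;                                       // ll_aligned_free_16
}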
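The non-Windows branch of ll_aligned_malloc_fallback gets its alignment by over-allocating and stashing the pointer malloc actually returned just below the address handed to the caller. A worked trace with illustrative addresses (64-bit pointers, align = 32; the numbers are made up for the example):

// mem     = malloc(size + (align - 1) + sizeof(void*));  // say mem == 0x1004
// aligned = (char*)mem + sizeof(void*);                  // 0x100C, room for the back-pointer
// aligned += align - ((uintptr_t)aligned & (align - 1)); // 0x100C & 31 == 12, so +20 -> 0x1020
// ((void**)aligned)[-1] = mem;                           // stash 0x1004 at 0x1018
// return aligned;                                        // caller sees 0x1020
//
// ll_aligned_free_fallback(p) then frees ((void**)p)[-1], i.e. the original
// 0x1004, which is why pointers from the fallback allocator must never be
// handed to plain free().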
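The templated ll_aligned_malloc/ll_aligned_free pair added in the second patch dispatches on ALIGNMENT at compile time, so the if/else chain folds down to a single call after inlining. A hypothetical call site; the buffer size and function name are illustrative:

void example_dispatch()
{
    void* buf = ll_aligned_malloc<32>(4096);   // routes to ll_aligned_malloc_32
    // ... 32-byte-aligned work, e.g. AVX loads/stores ...
    ll_aligned_free<32>(buf);                  // must name the same alignment so the
                                               // matching deallocator is selected
}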