author    Richard Linden <none@none>    2013-10-14 10:18:41 -0700
committer Richard Linden <none@none>    2013-10-14 10:18:41 -0700
commit    1acceb3633c0f0c4fdf29b17d77d67c8a9b71986 (patch)
tree      7fef10e1031417a866243a90df43654ad4659aca /indra/llcommon
parent    a6a40bd69f2011337b138d833d412b2b3568f8ea (diff)

changed ll_aligned_(malloc|free) to take alignment size as a template argument
Diffstat (limited to 'indra/llcommon')
-rw-r--r--  indra/llcommon/llalignedarray.h      | 10
-rwxr-xr-x  indra/llcommon/llmemory.h            | 20
-rw-r--r--  indra/llcommon/lltrace.h             | 12
-rw-r--r--  indra/llcommon/lltraceaccumulators.h |  4
4 files changed, 24 insertions(+), 22 deletions(-)
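
The change itself is mechanical: the alignment moves from a runtime function argument to a compile-time template argument. A minimal before/after sketch of a call site (the sizes are illustrative; the function names are the ones changed by this commit):

    // Before this commit: alignment was a runtime argument.
    //     void* p = ll_aligned_malloc(16, 1024);
    //     ll_aligned_free(16, p);
    //
    // After this commit: alignment is a template argument, known at
    // compile time, so the dispatch in llmemory.h can fold to one call.
    void* p = ll_aligned_malloc<16>(1024);
    ll_aligned_free<16>(p);
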
diff --git a/indra/llcommon/llalignedarray.h b/indra/llcommon/llalignedarray.h
index 6f18bfe25c..b68e9e0f82 100644
--- a/indra/llcommon/llalignedarray.h
+++ b/indra/llcommon/llalignedarray.h
@@ -60,7 +60,7 @@ LLAlignedArray<T, alignment>::LLAlignedArray()
template <class T, U32 alignment>
LLAlignedArray<T, alignment>::~LLAlignedArray()
{
- ll_aligned_free(alignment, mArray);
+ ll_aligned_free<alignment>(mArray);
mArray = NULL;
mElementCount = 0;
mCapacity = 0;
@@ -74,7 +74,7 @@ void LLAlignedArray<T, alignment>::push_back(const T& elem)
{
mCapacity++;
mCapacity *= 2;
- T* new_buf = (T*) ll_aligned_malloc(alignment, mCapacity*sizeof(T));
+ T* new_buf = (T*) ll_aligned_malloc<alignment>(mCapacity*sizeof(T));
if (mArray)
{
ll_memcpy_nonaliased_aligned_16((char*)new_buf, (char*)mArray, sizeof(T)*mElementCount);
@@ -86,7 +86,7 @@ void LLAlignedArray<T, alignment>::push_back(const T& elem)
mArray[mElementCount++] = elem;
//delete old array here to prevent error on a.push_back(a[0])
- ll_aligned_free(alignment, old_buf);
+ ll_aligned_free<alignment>(old_buf);
}
template <class T, U32 alignment>
@@ -95,11 +95,11 @@ void LLAlignedArray<T, alignment>::resize(U32 size)
if (mCapacity < size)
{
mCapacity = size+mCapacity*2;
- T* new_buf = mCapacity > 0 ? (T*) ll_aligned_malloc(alignment, mCapacity*sizeof(T)) : NULL;
+ T* new_buf = mCapacity > 0 ? (T*) ll_aligned_malloc<alignment>(mCapacity*sizeof(T)) : NULL;
if (mArray)
{
ll_memcpy_nonaliased_aligned_16((char*) new_buf, (char*) mArray, sizeof(T)*mElementCount);
- ll_aligned_free(alignment, mArray);
+ ll_aligned_free<alignment>(mArray);
}
/*for (U32 i = mElementCount; i < mCapacity; ++i)
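
The "delete old array here to prevent error on a.push_back(a[0])" comment in push_back above marks a real aliasing hazard: when elem refers into the array being grown, the old buffer must stay alive until after the element has been copied into the new one. A sketch of the call that would break if the free happened earlier (illustrative element type; operator[] is assumed to return a reference into mArray):

    LLAlignedArray<F32, 16> a;
    a.resize(1);
    // a[0] refers into a's current buffer. push_back may reallocate;
    // if it freed the old buffer before copying elem, this reference
    // would dangle mid-call. Freeing the old buffer last keeps it valid.
    a.push_back(a[0]);
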
diff --git a/indra/llcommon/llmemory.h b/indra/llcommon/llmemory.h
index 5ed4ea7d9e..3de59350db 100755
--- a/indra/llcommon/llmemory.h
+++ b/indra/llcommon/llmemory.h
@@ -204,37 +204,39 @@ inline void ll_aligned_free_32(void *p)
}
// general purpose dispatch functions that are forced inline so they can compile down to a single call
-LL_FORCE_INLINE void* ll_aligned_malloc(size_t alignment, size_t size)
+template<size_t ALIGNMENT>
+LL_FORCE_INLINE void* ll_aligned_malloc(size_t size)
{
- if (LL_DEFAULT_HEAP_ALIGN % alignment == 0)
+ if (LL_DEFAULT_HEAP_ALIGN % ALIGNMENT == 0)
{
return malloc(size);
}
- else if (alignment == 16)
+ else if (ALIGNMENT == 16)
{
return ll_aligned_malloc_16(size);
}
- else if (alignment == 32)
+ else if (ALIGNMENT == 32)
{
return ll_aligned_malloc_32(size);
}
else
{
- return ll_aligned_malloc_fallback(size, alignment);
+ return ll_aligned_malloc_fallback(size, ALIGNMENT);
}
}
-LL_FORCE_INLINE void ll_aligned_free(size_t alignment, void* ptr)
+template<size_t ALIGNMENT>
+LL_FORCE_INLINE void ll_aligned_free(void* ptr)
{
- if (alignment == LL_DEFAULT_HEAP_ALIGN)
+ if (ALIGNMENT == LL_DEFAULT_HEAP_ALIGN)
{
free(ptr);
}
- else if (alignment == 16)
+ else if (ALIGNMENT == 16)
{
ll_aligned_free_16(ptr);
}
- else if (alignment == 32)
+ else if (ALIGNMENT == 32)
{
return ll_aligned_free_32(ptr);
}
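
Because ALIGNMENT is now a compile-time constant, every branch of the force-inlined dispatch above is decidable at compile time, and the optimizer can prune all but the matching call; that is what the "compile down to a single call" comment relies on. A standalone sketch of the same pattern under stated assumptions (kDefaultHeapAlign is a hypothetical stand-in for LL_DEFAULT_HEAP_ALIGN, and C++17 std::aligned_alloc stands in for the ll_aligned_* helpers):

    #include <cstdlib>

    constexpr std::size_t kDefaultHeapAlign = 8; // assumed stand-in for LL_DEFAULT_HEAP_ALIGN

    template <std::size_t ALIGNMENT>
    inline void* aligned_malloc_sketch(std::size_t size)
    {
        if (kDefaultHeapAlign % ALIGNMENT == 0)
        {
            // Alignment already guaranteed by the default heap.
            return std::malloc(size);
        }
        else
        {
            // std::aligned_alloc requires size to be a multiple of the alignment.
            std::size_t rounded = (size + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT;
            return std::aligned_alloc(ALIGNMENT, rounded);
        }
    }

    // aligned_malloc_sketch<8>(n)  folds to a plain std::malloc(n);
    // aligned_malloc_sketch<32>(n) folds to the aligned path only.
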
diff --git a/indra/llcommon/lltrace.h b/indra/llcommon/lltrace.h
index 325112b9b1..2f4390e4d1 100644
--- a/indra/llcommon/lltrace.h
+++ b/indra/llcommon/lltrace.h
@@ -371,40 +371,40 @@ public:
void* operator new(size_t size)
{
claim_alloc(sMemStat, size);
- return ll_aligned_malloc(ALIGNMENT, size);
+ return ll_aligned_malloc<ALIGNMENT>(size);
}
template<int CUSTOM_ALIGNMENT>
static void* aligned_new(size_t size)
{
claim_alloc(sMemStat, size);
- return ll_aligned_malloc(CUSTOM_ALIGNMENT, size);
+ return ll_aligned_malloc<CUSTOM_ALIGNMENT>(size);
}
void operator delete(void* ptr, size_t size)
{
disclaim_alloc(sMemStat, size);
- ll_aligned_free(ALIGNMENT, ptr);
+ ll_aligned_free<ALIGNMENT>(ptr);
}
template<int CUSTOM_ALIGNMENT>
static void aligned_delete(void* ptr, size_t size)
{
disclaim_alloc(sMemStat, size);
- ll_aligned_free(CUSTOM_ALIGNMENT, ptr);
+ ll_aligned_free<CUSTOM_ALIGNMENT>(ptr);
}
void* operator new [](size_t size)
{
claim_alloc(sMemStat, size);
- return ll_aligned_malloc(ALIGNMENT, size);
+ return ll_aligned_malloc<ALIGNMENT>(size);
}
void operator delete[](void* ptr, size_t size)
{
disclaim_alloc(sMemStat, size);
- ll_aligned_free(ALIGNMENT, ptr);
+ ll_aligned_free<ALIGNMENT>(ptr);
}
// claim memory associated with other objects/data as our own, adding to our calculated footprint
diff --git a/indra/llcommon/lltraceaccumulators.h b/indra/llcommon/lltraceaccumulators.h
index 85873d469a..77370629d3 100644
--- a/indra/llcommon/lltraceaccumulators.h
+++ b/indra/llcommon/lltraceaccumulators.h
@@ -441,12 +441,12 @@ namespace LLTrace
// arrays are allocated with 32 byte alignment
void *operator new [](size_t size)
{
- return ll_aligned_malloc(32, size);
+ return ll_aligned_malloc<32>(size);
}
void operator delete[](void* ptr, size_t size)
{
- ll_aligned_free(32, ptr);
+ ll_aligned_free<32>(ptr);
}
TimeBlockAccumulator();
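
For context, the class-scoped operator new []/operator delete [] overloads above mean a plain new TimeBlockAccumulator[n] transparently allocates 32-byte-aligned storage. A minimal standalone version of the idiom (hypothetical type name; std::aligned_alloc and std::free stand in for ll_aligned_malloc<32> and ll_aligned_free<32>):

    #include <cstdlib>

    struct Accum32 // hypothetical stand-in for TimeBlockAccumulator
    {
        void* operator new[](std::size_t size)
        {
            // Round up: std::aligned_alloc requires a multiple of the alignment.
            return std::aligned_alloc(32, (size + 31) / 32 * 32);
        }
        void operator delete[](void* ptr) noexcept
        {
            std::free(ptr);
        }
        double mSamples[4];
    };

    int main()
    {
        // Storage comes from the aligned overload; for a trivially
        // destructible element type there is no array cookie on common
        // ABIs, so the first element sits at the aligned address.
        Accum32* a = new Accum32[8];
        delete[] a;
        return 0;
    }
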