diff options
| author | Richard Linden <none@none> | 2013-10-14 10:18:41 -0700 | 
|---|---|---|
| committer | Richard Linden <none@none> | 2013-10-14 10:18:41 -0700 | 
| commit | 1acceb3633c0f0c4fdf29b17d77d67c8a9b71986 (patch) | |
| tree | 7fef10e1031417a866243a90df43654ad4659aca | |
| parent | a6a40bd69f2011337b138d833d412b2b3568f8ea (diff) | |
changed ll_aligned_(malloc|free) to take alignment size as a template argument
| -rw-r--r-- | indra/llcommon/llalignedarray.h | 10 | ||||
| -rwxr-xr-x | indra/llcommon/llmemory.h | 20 | ||||
| -rw-r--r-- | indra/llcommon/lltrace.h | 12 | ||||
| -rw-r--r-- | indra/llcommon/lltraceaccumulators.h | 4 | ||||
| -rwxr-xr-x | indra/llmath/llvolume.cpp | 16 | ||||
| -rwxr-xr-x | indra/llrender/llvertexbuffer.cpp | 4 | ||||
| -rwxr-xr-x | indra/llvfs/llvfile.cpp | 4 | ||||
| -rwxr-xr-x | indra/newview/llvovolume.cpp | 14 | 
8 files changed, 43 insertions(+), 41 deletions(-)
| diff --git a/indra/llcommon/llalignedarray.h b/indra/llcommon/llalignedarray.h index 6f18bfe25c..b68e9e0f82 100644 --- a/indra/llcommon/llalignedarray.h +++ b/indra/llcommon/llalignedarray.h @@ -60,7 +60,7 @@ LLAlignedArray<T, alignment>::LLAlignedArray()  template <class T, U32 alignment>  LLAlignedArray<T, alignment>::~LLAlignedArray()  { -	ll_aligned_free(alignment, mArray); +	ll_aligned_free<alignment>(mArray);  	mArray = NULL;  	mElementCount = 0;  	mCapacity = 0; @@ -74,7 +74,7 @@ void LLAlignedArray<T, alignment>::push_back(const T& elem)  	{  		mCapacity++;  		mCapacity *= 2; -		T* new_buf = (T*) ll_aligned_malloc(alignment, mCapacity*sizeof(T)); +		T* new_buf = (T*) ll_aligned_malloc<alignment>(mCapacity*sizeof(T));  		if (mArray)  		{  			ll_memcpy_nonaliased_aligned_16((char*)new_buf, (char*)mArray, sizeof(T)*mElementCount); @@ -86,7 +86,7 @@ void LLAlignedArray<T, alignment>::push_back(const T& elem)  	mArray[mElementCount++] = elem;  	//delete old array here to prevent error on a.push_back(a[0]) -	ll_aligned_free(alignment, old_buf); +	ll_aligned_free<alignment>(old_buf);  }  template <class T, U32 alignment> @@ -95,11 +95,11 @@ void LLAlignedArray<T, alignment>::resize(U32 size)  	if (mCapacity < size)  	{  		mCapacity = size+mCapacity*2; -		T* new_buf = mCapacity > 0 ? (T*) ll_aligned_malloc(alignment, mCapacity*sizeof(T)) : NULL; +		T* new_buf = mCapacity > 0 ? 
(T*) ll_aligned_malloc<alignment>(mCapacity*sizeof(T)) : NULL;  		if (mArray)  		{  			ll_memcpy_nonaliased_aligned_16((char*) new_buf, (char*) mArray, sizeof(T)*mElementCount); -			ll_aligned_free(alignment, mArray); +			ll_aligned_free<alignment>(mArray);  		}  		/*for (U32 i = mElementCount; i < mCapacity; ++i) diff --git a/indra/llcommon/llmemory.h b/indra/llcommon/llmemory.h index 5ed4ea7d9e..3de59350db 100755 --- a/indra/llcommon/llmemory.h +++ b/indra/llcommon/llmemory.h @@ -204,37 +204,39 @@ inline void ll_aligned_free_32(void *p)  }  // general purpose dispatch functions that are forced inline so they can compile down to a single call -LL_FORCE_INLINE void* ll_aligned_malloc(size_t alignment, size_t size) +template<size_t ALIGNMENT> +LL_FORCE_INLINE void* ll_aligned_malloc(size_t size)  { -	if (LL_DEFAULT_HEAP_ALIGN % alignment == 0) +	if (LL_DEFAULT_HEAP_ALIGN % ALIGNMENT == 0)  	{  		return malloc(size);  	} -	else if (alignment == 16) +	else if (ALIGNMENT == 16)  	{  		return ll_aligned_malloc_16(size);  	} -	else if (alignment == 32) +	else if (ALIGNMENT == 32)  	{  		return ll_aligned_malloc_32(size);  	}  	else  	{ -		return ll_aligned_malloc_fallback(size, alignment); +		return ll_aligned_malloc_fallback(size, ALIGNMENT);  	}  } -LL_FORCE_INLINE void ll_aligned_free(size_t alignment, void* ptr) +template<size_t ALIGNMENT> +LL_FORCE_INLINE void ll_aligned_free(void* ptr)  { -	if (alignment == LL_DEFAULT_HEAP_ALIGN) +	if (ALIGNMENT == LL_DEFAULT_HEAP_ALIGN)  	{  		free(ptr);  	} -	else if (alignment == 16) +	else if (ALIGNMENT == 16)  	{  		ll_aligned_free_16(ptr);  	} -	else if (alignment == 32) +	else if (ALIGNMENT == 32)  	{  		return ll_aligned_free_32(ptr);  	} diff --git a/indra/llcommon/lltrace.h b/indra/llcommon/lltrace.h index 325112b9b1..2f4390e4d1 100644 --- a/indra/llcommon/lltrace.h +++ b/indra/llcommon/lltrace.h @@ -371,40 +371,40 @@ public:  	void* operator new(size_t size)   	{  		claim_alloc(sMemStat, size); -		return 
ll_aligned_malloc(ALIGNMENT, size); +		return ll_aligned_malloc<ALIGNMENT>(size);  	}  	template<int CUSTOM_ALIGNMENT>  	static void* aligned_new(size_t size)  	{  		claim_alloc(sMemStat, size); -		return ll_aligned_malloc(CUSTOM_ALIGNMENT, size); +		return ll_aligned_malloc<CUSTOM_ALIGNMENT>(size);  	}  	void operator delete(void* ptr, size_t size)  	{  		disclaim_alloc(sMemStat, size); -		ll_aligned_free(ALIGNMENT, ptr); +		ll_aligned_free<ALIGNMENT>(ptr);  	}  	template<int CUSTOM_ALIGNMENT>  	static void aligned_delete(void* ptr, size_t size)  	{  		disclaim_alloc(sMemStat, size); -		ll_aligned_free(CUSTOM_ALIGNMENT, ptr); +		ll_aligned_free<CUSTOM_ALIGNMENT>(ptr);  	}  	void* operator new [](size_t size)  	{  		claim_alloc(sMemStat, size); -		return ll_aligned_malloc(ALIGNMENT, size); +		return ll_aligned_malloc<ALIGNMENT>(size);  	}  	void operator delete[](void* ptr, size_t size)  	{  		disclaim_alloc(sMemStat, size); -		ll_aligned_free(ALIGNMENT, ptr); +		ll_aligned_free<ALIGNMENT>(ptr);  	}  	// claim memory associated with other objects/data as our own, adding to our calculated footprint diff --git a/indra/llcommon/lltraceaccumulators.h b/indra/llcommon/lltraceaccumulators.h index 85873d469a..77370629d3 100644 --- a/indra/llcommon/lltraceaccumulators.h +++ b/indra/llcommon/lltraceaccumulators.h @@ -441,12 +441,12 @@ namespace LLTrace  		// arrays are allocated with 32 byte alignment  		void *operator new [](size_t size)  		{ -			return ll_aligned_malloc(32, size); +			return ll_aligned_malloc<32>(size);  		}  		void operator delete[](void* ptr, size_t size)  		{ -			ll_aligned_free(32, ptr); +			ll_aligned_free<32>(ptr);  		}  		TimeBlockAccumulator(); diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 699eeb0b89..26d2ae2963 100755 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -4667,7 +4667,7 @@ LLVolumeFace::~LLVolumeFace()  void LLVolumeFace::freeData()  { -	ll_aligned_free(64, mPositions); +	
ll_aligned_free<64>(mPositions);  	mPositions = NULL;  	//normals and texture coordinates are part of the same buffer as mPositions, do not free them separately @@ -5245,7 +5245,7 @@ void LLVolumeFace::cacheOptimize()  	//allocate space for new buffer  	S32 num_verts = mNumVertices;  	S32 size = ((num_verts*sizeof(LLVector2)) + 0xF) & ~0xF; -	LLVector4a* pos = (LLVector4a*) ll_aligned_malloc(64, sizeof(LLVector4a)*2*num_verts+size); +	LLVector4a* pos = (LLVector4a*) ll_aligned_malloc<64>(sizeof(LLVector4a)*2*num_verts+size);  	LLVector4a* norm = pos + num_verts;  	LLVector2* tc = (LLVector2*) (norm + num_verts); @@ -5295,7 +5295,7 @@ void LLVolumeFace::cacheOptimize()  		mIndices[i] = new_idx[mIndices[i]];  	} -	ll_aligned_free(64, mPositions); +	ll_aligned_free<64>(mPositions);  	// DO NOT free mNormals and mTexCoords as they are part of mPositions buffer  	ll_aligned_free_16(mWeights);  	ll_aligned_free_16(mTangents); @@ -6023,7 +6023,7 @@ void LLVolumeFace::createTangents()  void LLVolumeFace::resizeVertices(S32 num_verts)  { -	ll_aligned_free(64, mPositions); +	ll_aligned_free<64>(mPositions);  	//DO NOT free mNormals and mTexCoords as they are part of mPositions buffer  	ll_aligned_free_16(mTangents); @@ -6034,7 +6034,7 @@ void LLVolumeFace::resizeVertices(S32 num_verts)  		//pad texture coordinate block end to allow for QWORD reads  		S32 size = ((num_verts*sizeof(LLVector2)) + 0xF) & ~0xF; -		mPositions = (LLVector4a*) ll_aligned_malloc(64, sizeof(LLVector4a)*2*num_verts+size); +		mPositions = (LLVector4a*) ll_aligned_malloc<64>(sizeof(LLVector4a)*2*num_verts+size);  		mNormals = mPositions+num_verts;  		mTexCoords = (LLVector2*) (mNormals+num_verts); @@ -6074,7 +6074,7 @@ void LLVolumeFace::pushVertex(const LLVector4a& pos, const LLVector4a& norm, con  		LLVector4a* old_buf = mPositions; -		mPositions = (LLVector4a*) ll_aligned_malloc(64, new_size); +		mPositions = (LLVector4a*) ll_aligned_malloc<64>(new_size);  		mNormals = mPositions+new_verts;  		
mTexCoords = (LLVector2*) (mNormals+new_verts); @@ -6090,7 +6090,7 @@ void LLVolumeFace::pushVertex(const LLVector4a& pos, const LLVector4a& norm, con  	//just clear tangents  	ll_aligned_free_16(mTangents);  	mTangents = NULL; -		ll_aligned_free(64, old_buf); +		ll_aligned_free<64>(old_buf);  		mNumAllocatedVertices = new_verts; @@ -6191,7 +6191,7 @@ void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat_in, LLMat  	//allocate new buffer space  	LLVector4a* old_buf = mPositions; -	mPositions = (LLVector4a*) ll_aligned_malloc(64, new_size); +	mPositions = (LLVector4a*) ll_aligned_malloc<64>(new_size);  	mNormals = mPositions + new_count;  	mTexCoords = (LLVector2*) (mNormals+new_count); diff --git a/indra/llrender/llvertexbuffer.cpp b/indra/llrender/llvertexbuffer.cpp index 2a36a3b96f..3bbbccbad1 100755 --- a/indra/llrender/llvertexbuffer.cpp +++ b/indra/llrender/llvertexbuffer.cpp @@ -202,7 +202,7 @@ volatile U8* LLVBOPool::allocate(U32& name, U32 size, bool for_seed)  			glBufferDataARB(mType, size, 0, mUsage);  			if (mUsage != GL_DYNAMIC_COPY_ARB)  			{ //data will be provided by application -				ret = (U8*) ll_aligned_malloc(64, size); +				ret = (U8*) ll_aligned_malloc<64>(size);  			}  		}  		else @@ -310,7 +310,7 @@ void LLVBOPool::cleanup()  			if (r.mClientData)  			{ -				ll_aligned_free(64, (void*) r.mClientData); +				ll_aligned_free<64>((void*) r.mClientData);  			}  			l.pop_front(); diff --git a/indra/llvfs/llvfile.cpp b/indra/llvfs/llvfile.cpp index 9b57246ea1..add88fe0a3 100755 --- a/indra/llvfs/llvfile.cpp +++ b/indra/llvfs/llvfile.cpp @@ -137,12 +137,12 @@ U8* LLVFile::readFile(LLVFS *vfs, const LLUUID &uuid, LLAssetType::EType type, S  	}  	else  	{		 -		data = (U8*) ll_aligned_malloc(16, file_size); +		data = (U8*) ll_aligned_malloc<16>(file_size);  		file.read(data, file_size);	/* Flawfinder: ignore */   		if (file.getLastBytesRead() != (S32)file_size)  		{ -			ll_aligned_free(16, data); +			ll_aligned_free<16>(data);  			data 
= NULL;  			file_size = 0;  		} diff --git a/indra/newview/llvovolume.cpp b/indra/newview/llvovolume.cpp index 6b5d06376f..818232a5c6 100755 --- a/indra/newview/llvovolume.cpp +++ b/indra/newview/llvovolume.cpp @@ -4433,13 +4433,13 @@ void LLVolumeGeometryManager::rebuildGeom(LLSpatialGroup* group)  	const U32 MAX_FACE_COUNT = 4096; -	static LLFace** fullbright_faces = (LLFace**) ll_aligned_malloc(64, MAX_FACE_COUNT*sizeof(LLFace*)); -	static LLFace** bump_faces = (LLFace**) ll_aligned_malloc(64, MAX_FACE_COUNT*sizeof(LLFace*)); -	static LLFace** simple_faces = (LLFace**) ll_aligned_malloc(64, MAX_FACE_COUNT*sizeof(LLFace*)); -	static LLFace** norm_faces = (LLFace**) ll_aligned_malloc(64, MAX_FACE_COUNT*sizeof(LLFace*)); -	static LLFace** spec_faces = (LLFace**) ll_aligned_malloc(64, MAX_FACE_COUNT*sizeof(LLFace*)); -	static LLFace** normspec_faces = (LLFace**) ll_aligned_malloc(64, MAX_FACE_COUNT*sizeof(LLFace*)); -	static LLFace** alpha_faces = (LLFace**) ll_aligned_malloc(64, MAX_FACE_COUNT*sizeof(LLFace*)); +	static LLFace** fullbright_faces = (LLFace**) ll_aligned_malloc<64>(MAX_FACE_COUNT*sizeof(LLFace*)); +	static LLFace** bump_faces = (LLFace**) ll_aligned_malloc<64>(MAX_FACE_COUNT*sizeof(LLFace*)); +	static LLFace** simple_faces = (LLFace**) ll_aligned_malloc<64>(MAX_FACE_COUNT*sizeof(LLFace*)); +	static LLFace** norm_faces = (LLFace**) ll_aligned_malloc<64>(MAX_FACE_COUNT*sizeof(LLFace*)); +	static LLFace** spec_faces = (LLFace**) ll_aligned_malloc<64>(MAX_FACE_COUNT*sizeof(LLFace*)); +	static LLFace** normspec_faces = (LLFace**) ll_aligned_malloc<64>(MAX_FACE_COUNT*sizeof(LLFace*)); +	static LLFace** alpha_faces = (LLFace**) ll_aligned_malloc<64>(MAX_FACE_COUNT*sizeof(LLFace*));  	U32 fullbright_count = 0;  	U32 bump_count = 0; | 
