diff options
25 files changed, 2273 insertions, 88 deletions
| diff --git a/indra/llcommon/llmemory.cpp b/indra/llcommon/llmemory.cpp index 51fcd5b717..6ac0ef847d 100644 --- a/indra/llcommon/llmemory.cpp +++ b/indra/llcommon/llmemory.cpp @@ -26,14 +26,13 @@  #include "linden_common.h" -#include "llmemory.h" -#if MEM_TRACK_MEM +//#if MEM_TRACK_MEM  #include "llthread.h" -#endif +//#endif  #if defined(LL_WINDOWS) -# include <windows.h> +//# include <windows.h>  # include <psapi.h>  #elif defined(LL_DARWIN)  # include <sys/types.h> @@ -43,10 +42,20 @@  # include <unistd.h>  #endif +#include "llmemory.h" + +#include "llsys.h" +#include "llframetimer.h"  //----------------------------------------------------------------------------  //static  char* LLMemory::reserveMem = 0; +U32 LLMemory::sAvailPhysicalMemInKB = U32_MAX ; +U32 LLMemory::sMaxPhysicalMemInKB = 0; +U32 LLMemory::sAllocatedMemInKB = 0; +U32 LLMemory::sAllocatedPageSizeInKB = 0 ; +U32 LLMemory::sMaxHeapSizeInKB = U32_MAX ; +BOOL LLMemory::sEnableMemoryFailurePrevention = FALSE;  //static  void LLMemory::initClass() @@ -71,6 +80,131 @@ void LLMemory::freeReserve()  	reserveMem = NULL;  } +//static  +void LLMemory::initMaxHeapSizeGB(F32 max_heap_size_gb, BOOL prevent_heap_failure) +{ +	sMaxHeapSizeInKB = (U32)(max_heap_size_gb * 1024 * 1024) ; +	sEnableMemoryFailurePrevention = prevent_heap_failure ; +} + +//static  +void LLMemory::updateMemoryInfo()  +{ +#if LL_WINDOWS	 +	HANDLE self = GetCurrentProcess(); +	PROCESS_MEMORY_COUNTERS counters; +	 +	if (!GetProcessMemoryInfo(self, &counters, sizeof(counters))) +	{ +		llwarns << "GetProcessMemoryInfo failed" << llendl; +		return ; +	} + +	sAllocatedMemInKB = (U32)(counters.WorkingSetSize / 1024) ; +	sAllocatedPageSizeInKB = (U32)(counters.PagefileUsage / 1024) ; +	sMaxPhysicalMemInKB = llmin(LLMemoryInfo::getAvailableMemoryKB() + sAllocatedMemInKB, sMaxHeapSizeInKB); + +	if(sMaxPhysicalMemInKB > sAllocatedMemInKB) +	{ +		sAvailPhysicalMemInKB = sMaxPhysicalMemInKB - sAllocatedMemInKB ; +	} +	else +	{ +		
sAvailPhysicalMemInKB = 0 ; +	} +#else +	//not valid for other systems for now. +	sAllocatedMemInKB = (U32)(LLMemory::getCurrentRSS() / 1024) ; +	sMaxPhysicalMemInKB = U32_MAX ; +	sAvailPhysicalMemInKB = U32_MAX ; +#endif + +	return ; +} + +// +//this function is to test if there is enough space with the size in the virtual address space. +//it does not do any real allocation +//if success, it returns the address where the memory chunk can fit in; +//otherwise it returns NULL. +// +//static  +void* LLMemory::tryToAlloc(void* address, U32 size) +{ +#if LL_WINDOWS +	address = VirtualAlloc(address, size, MEM_RESERVE | MEM_TOP_DOWN, PAGE_NOACCESS) ; +	if(address) +	{ +		if(!VirtualFree(address, 0, MEM_RELEASE)) +		{ +			llerrs << "error happens when free some memory reservation." << llendl ; +		} +	} +#else +#endif + +	return address ; +} + +//static  +void LLMemory::logMemoryInfo(BOOL update) +{ +	if(update) +	{ +		updateMemoryInfo() ; +	} + +	llinfos << "Current allocated physical memory(KB): " << sAllocatedMemInKB << llendl ; +	llinfos << "Current allocated page size (KB): " << sAllocatedPageSizeInKB << llendl ; +	llinfos << "Current availabe physical memory(KB): " << sAvailPhysicalMemInKB << llendl ; +	llinfos << "Current max usable memory(KB): " << sMaxPhysicalMemInKB << llendl ; +} + +//return 0: everything is normal; +//return 1: the memory pool is low, but not in danger; +//return -1: the memory pool is in danger, is about to crash. +//static  +S32 LLMemory::isMemoryPoolLow() +{ +	static const U32 LOW_MEMEOY_POOL_THRESHOLD_KB = 64 * 1024 ; //64 MB for emergency use + +	if(!sEnableMemoryFailurePrevention) +	{ +		return 0 ; //no memory failure prevention. +	} + +	if(sAvailPhysicalMemInKB < (LOW_MEMEOY_POOL_THRESHOLD_KB >> 2)) //out of physical memory +	{ +		return -1 ; +	} + +	if(sAllocatedPageSizeInKB + (LOW_MEMEOY_POOL_THRESHOLD_KB >> 2) > sMaxHeapSizeInKB) //out of virtual address space. 
+	{ +		return -1 ; +	} + +	return (S32)(sAvailPhysicalMemInKB < LOW_MEMEOY_POOL_THRESHOLD_KB ||  +		sAllocatedPageSizeInKB + LOW_MEMEOY_POOL_THRESHOLD_KB > sMaxHeapSizeInKB) ; +} + +//static  +U32 LLMemory::getAvailableMemKB()  +{ +	return sAvailPhysicalMemInKB ; +} + +//static  +U32 LLMemory::getMaxMemKB()  +{ +	return sMaxPhysicalMemInKB ; +} + +//static  +U32 LLMemory::getAllocatedMemKB()  +{ +	return sAllocatedMemInKB ; +} +  void* ll_allocate (size_t size)  {  	if (size == 0) @@ -256,7 +390,7 @@ U64 LLMemory::getCurrentRSS()  U32 LLMemory::getWorkingSetSize()  { -	return 0 ; +	return 0;  }  #endif @@ -395,3 +529,1508 @@ const char* LLMemTracker::getNextLine()  #endif //MEM_TRACK_MEM  //-------------------------------------------------------------------------------------------------- + +//-------------------------------------------------------------------------------------------------- +//-------------------------------------------------------------------------------------------------- +//minimum block sizes (page size) for small allocation, medium allocation, large allocation  +const U32 MIN_BLOCK_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION] = {2 << 10, 4 << 10, 16 << 10} ; // + +//maximum block sizes for small allocation, medium allocation, large allocation  +const U32 MAX_BLOCK_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION] = {64 << 10, 1 << 20, 4 << 20} ; + +//minimum slot sizes for small allocation, medium allocation, large allocation  +const U32 MIN_SLOT_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION]  = {8, 2 << 10, 512 << 10}; + +//maximum slot sizes for small allocation, medium allocation, large allocation  +const U32 MAX_SLOT_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION]  = {(2 << 10) - 8, (512 - 2) << 10, 4 << 20}; + +//size of a block with multiple slots can not exceed CUT_OFF_SIZE +const U32 CUT_OFF_SIZE = (64 << 10) ; //64 KB + +//------------------------------------------------------------- +//class LLPrivateMemoryPool::LLMemoryBlock 
+//------------------------------------------------------------- +// +//each memory block could fit for two page sizes: 0.75 * mSlotSize, which starts from the beginning of the memory chunk and grow towards the end of the +//the block; another is mSlotSize, which starts from the end of the block and grows towards the beginning of the block. +// +LLPrivateMemoryPool::LLMemoryBlock::LLMemoryBlock() +{ +	//empty +} +		 +LLPrivateMemoryPool::LLMemoryBlock::~LLMemoryBlock()  +{ +	//empty +} + +//create and initialize a memory block +void LLPrivateMemoryPool::LLMemoryBlock::init(char* buffer, U32 buffer_size, U32 slot_size) +{ +	mBuffer = buffer ; +	mBufferSize = buffer_size ; +	mSlotSize = slot_size ; +	mTotalSlots = buffer_size / mSlotSize ;	 +	 +	llassert_always(mTotalSlots < 256) ; //max number is 256 +	 +	mAllocatedSlots = 0 ; + +	//init the bit map. +	//mark free bits +	S32 usage_bit_len = (mTotalSlots + 31) / 32 ; +	mDummySize = usage_bit_len - 1 ; //if the mTotalSlots more than 32, needs extra space for bit map +	if(mDummySize > 0) //reserve extra space from mBuffer to store bitmap if needed. +	{ +		mTotalSlots -= (mDummySize * sizeof(mUsageBits) + mSlotSize - 1) / mSlotSize ; +		usage_bit_len = (mTotalSlots + 31) / 32 ; +		mDummySize = usage_bit_len - 1 ;//number of 32bits reserved from mBuffer for bitmap + +		if(mDummySize > 0) +		{ +			mUsageBits = 0 ; +			for(S32 i = 0 ; i < mDummySize ; i++) +			{ +				*((U32*)mBuffer + i) = 0 ; +			} +			if(mTotalSlots & 31) +			{ +				*((U32*)mBuffer + mDummySize - 1) = (0xffffffff << (mTotalSlots & 31)) ; +			} +		} +	} +	 +	if(mDummySize < 1)//no extra bitmap space reserved +	{ +		mUsageBits = 0 ; +		if(mTotalSlots & 31) +		{ +			mUsageBits = (0xffffffff << (mTotalSlots & 31)) ; +		} +	} + +	mSelf = this ; +	mNext = NULL ; +	mPrev = NULL ; + +	llassert_always(mTotalSlots > 0) ; +} + +//mark this block to be free with the memory [mBuffer, mBuffer + mBufferSize). 
+void LLPrivateMemoryPool::LLMemoryBlock::setBuffer(char* buffer, U32 buffer_size) +{ +	mBuffer = buffer ; +	mBufferSize = buffer_size ; +	mSelf = NULL ; +	mTotalSlots = 0 ; //set the block is free. +} + +//reserve a slot +char* LLPrivateMemoryPool::LLMemoryBlock::allocate()  +{ +	llassert_always(mAllocatedSlots < mTotalSlots) ; +	 +	//find a free slot +	U32* bits = NULL ; +	U32  k = 0 ; +	if(mUsageBits != 0xffffffff) +	{ +		bits = &mUsageBits ; +	} +	else if(mDummySize > 0)//go to extra space +	{		 +		for(S32 i = 0 ; i < mDummySize; i++) +		{ +			if(*((U32*)mBuffer + i) != 0xffffffff) +			{ +				bits = (U32*)mBuffer + i ; +				k = i + 1 ; +				break ; +			} +		} +	}	 +	S32 idx = 0 ; +	U32 tmp = *bits ; +	for(; tmp & 1 ; tmp >>= 1, idx++) ; + +	//set the slot reserved +	if(!idx) +	{ +		*bits |= 1 ; +	} +	else +	{ +		*bits |= (1 << idx) ; +	} + +	mAllocatedSlots++ ; +	 +	return mBuffer + mDummySize * sizeof(U32) + (k * 32 + idx) * mSlotSize ; +} + +//free a slot +void  LLPrivateMemoryPool::LLMemoryBlock::free(void* addr)  +{ +	//bit index +	U32 idx = ((U32)addr - (U32)mBuffer - mDummySize * sizeof(U32)) / mSlotSize ; + +	U32* bits = &mUsageBits ; +	if(idx >= 32) +	{ +		bits = (U32*)mBuffer + (idx - 32) / 32 ; +	} + +	//reset the bit +	if(idx & 31) +	{ +		*bits &= ~(1 << (idx & 31)) ; +	} +	else +	{ +		*bits &= ~1 ; +	} + +	mAllocatedSlots-- ; +} + +//for debug use: reset the entire bitmap. 
+void  LLPrivateMemoryPool::LLMemoryBlock::resetBitMap() +{ +	for(S32 i = 0 ; i < mDummySize ; i++) +	{ +		*((U32*)mBuffer + i) = 0 ; +	} +	mUsageBits = 0 ; +} +//------------------------------------------------------------------- +//class LLMemoryChunk +//-------------------------------------------------------------------- +LLPrivateMemoryPool::LLMemoryChunk::LLMemoryChunk() +{ +	//empty +} + +LLPrivateMemoryPool::LLMemoryChunk::~LLMemoryChunk() +{ +	//empty +} + +//create and init a memory chunk +void LLPrivateMemoryPool::LLMemoryChunk::init(char* buffer, U32 buffer_size, U32 min_slot_size, U32 max_slot_size, U32 min_block_size, U32 max_block_size)  +{ +	mBuffer = buffer ; +	mBufferSize = buffer_size ; +	mAlloatedSize = 0 ; + +	mMetaBuffer = mBuffer + sizeof(LLMemoryChunk) ; + +	mMinBlockSize = min_block_size; //page size +	mMinSlotSize = min_slot_size; +	mMaxSlotSize = max_slot_size ; +	mBlockLevels = mMaxSlotSize / mMinSlotSize ; +	mPartitionLevels = max_block_size / mMinBlockSize + 1 ; + +	S32 max_num_blocks = (buffer_size - sizeof(LLMemoryChunk) - mBlockLevels * sizeof(LLMemoryBlock*) - mPartitionLevels * sizeof(LLMemoryBlock*)) /  +		                 (mMinBlockSize + sizeof(LLMemoryBlock)) ; +	//meta data space +	mBlocks = (LLMemoryBlock*)mMetaBuffer ; //space reserved for all memory blocks. 
+	mAvailBlockList = (LLMemoryBlock**)((char*)mBlocks + sizeof(LLMemoryBlock) * max_num_blocks) ;  +	mFreeSpaceList = (LLMemoryBlock**)((char*)mAvailBlockList + sizeof(LLMemoryBlock*) * mBlockLevels) ;  +	 +	//data buffer, which can be used for allocation +	mDataBuffer = (char*)mFreeSpaceList + sizeof(LLMemoryBlock*) * mPartitionLevels ; + +	//init +	for(U32 i = 0 ; i < mBlockLevels; i++) +	{ +		mAvailBlockList[i] = NULL ; +	} +	for(U32 i = 0 ; i < mPartitionLevels ; i++) +	{ +		mFreeSpaceList[i] = NULL ; +	} + +	//assign the entire chunk to the first block +	mBlocks[0].mPrev = NULL ; +	mBlocks[0].mNext = NULL ; +	mBlocks[0].setBuffer(mDataBuffer, buffer_size - (mDataBuffer - mBuffer)) ; +	addToFreeSpace(&mBlocks[0]) ; + +	mHashNext = NULL ; +	mNext = NULL ; +	mPrev = NULL ; +} + +//static  +U32 LLPrivateMemoryPool::LLMemoryChunk::getMaxOverhead(U32 data_buffer_size, U32 min_slot_size,  +													   U32 max_slot_size, U32 min_block_size, U32 max_block_size) +{ +	//for large allocations, reserve some extra memory for meta data to avoid wasting much  +	if(data_buffer_size / min_slot_size < 64) //large allocations +	{ +		U32 overhead = sizeof(LLMemoryChunk) + (data_buffer_size / min_block_size) * sizeof(LLMemoryBlock) + +			sizeof(LLMemoryBlock*) * (max_slot_size / min_slot_size) + sizeof(LLMemoryBlock*) * (max_block_size / min_block_size + 1) ; + +		//round to integer times of min_block_size +		overhead = ((overhead + min_block_size - 1) / min_block_size) * min_block_size ; +		return overhead ; +	} +	else +	{ +		return 0 ; //do not reserve extra overhead if for small allocations +	} +} + +char* LLPrivateMemoryPool::LLMemoryChunk::allocate(U32 size) +{ +	if(mMinSlotSize > size) +	{ +		size = mMinSlotSize ; +	} +	if(mAlloatedSize + size  > mBufferSize - (mDataBuffer - mBuffer)) +	{ +		return NULL ; //no enough space in this chunk. 
+	} + +	char* p = NULL ; +	U32 blk_idx = getBlockLevel(size); + +	LLMemoryBlock* blk = NULL ; + +	//check if there is free block available +	if(mAvailBlockList[blk_idx]) +	{ +		blk = mAvailBlockList[blk_idx] ; +		p = blk->allocate() ; +		 +		if(blk->isFull()) +		{ +			popAvailBlockList(blk_idx) ; +		} +	} + +	//ask for a new block +	if(!p) +	{ +		blk = addBlock(blk_idx) ; +		if(blk) +		{ +			p = blk->allocate() ; + +			if(blk->isFull()) +			{ +				popAvailBlockList(blk_idx) ; +			} +		} +	} + +	//ask for space from larger blocks +	if(!p) +	{ +		for(S32 i = blk_idx + 1 ; i < mBlockLevels; i++) +		{ +			if(mAvailBlockList[i]) +			{ +				blk = mAvailBlockList[i] ; +				p = blk->allocate() ; + +				if(blk->isFull()) +				{ +					popAvailBlockList(i) ; +				} +				break ; +			} +		} +	} + +	if(p && blk) +	{		 +		mAlloatedSize += blk->getSlotSize() ; +	} +	return p ; +} + +void LLPrivateMemoryPool::LLMemoryChunk::free(void* addr) +{	 +	U32 blk_idx = getPageIndex((U32)addr) ; +	LLMemoryBlock* blk = (LLMemoryBlock*)(mMetaBuffer + blk_idx * sizeof(LLMemoryBlock)) ; +	blk = blk->mSelf ; + +	bool was_full = blk->isFull() ; +	blk->free(addr) ; +	mAlloatedSize -= blk->getSlotSize() ; + +	if(blk->empty()) +	{ +		removeBlock(blk) ; +	} +	else if(was_full) +	{ +		addToAvailBlockList(blk) ; +	}	 +} + +bool LLPrivateMemoryPool::LLMemoryChunk::empty() +{ +	return !mAlloatedSize ; +} + +bool LLPrivateMemoryPool::LLMemoryChunk::containsAddress(const char* addr) const +{ +	return (U32)mBuffer <= (U32)addr && (U32)mBuffer + mBufferSize > (U32)addr ; +} + +//debug use +void LLPrivateMemoryPool::LLMemoryChunk::dump() +{ +#if 0 +	//sanity check +	//for(S32 i = 0 ; i < mBlockLevels ; i++) +	//{ +	//	LLMemoryBlock* blk = mAvailBlockList[i] ; +	//	while(blk) +	//	{ +	//		blk_list.push_back(blk) ; +	//		blk = blk->mNext ; +	//	} +	//} +	for(S32 i = 0 ; i < mPartitionLevels ; i++) +	{ +		LLMemoryBlock* blk = mFreeSpaceList[i] ; +		while(blk) +		{ +			blk_list.push_back(blk) ; +			blk = 
blk->mNext ; +		} +	} + +	std::sort(blk_list.begin(), blk_list.end(), LLMemoryBlock::CompareAddress()); + +	U32 total_size = blk_list[0]->getBufferSize() ; +	for(U32 i = 1 ; i < blk_list.size(); i++) +	{ +		total_size += blk_list[i]->getBufferSize() ; +		if((U32)blk_list[i]->getBuffer() < (U32)blk_list[i-1]->getBuffer() + blk_list[i-1]->getBufferSize()) +		{ +			llerrs << "buffer corrupted." << llendl ; +		} +	} + +	llassert_always(total_size + mMinBlockSize >= mBufferSize - ((U32)mDataBuffer - (U32)mBuffer)) ; + +	U32 blk_num = (mBufferSize - (mDataBuffer - mBuffer)) / mMinBlockSize ; +	for(U32 i = 0 ; i < blk_num ; ) +	{ +		LLMemoryBlock* blk = &mBlocks[i] ; +		if(blk->mSelf) +		{ +			U32 end = blk->getBufferSize() / mMinBlockSize ; +			for(U32 j = 0 ; j < end ; j++) +			{ +				llassert_always(blk->mSelf == blk || !blk->mSelf) ; +			} +			i += end ; +		} +		else +		{ +			llerrs << "gap happens" << llendl ; +		} +	} +#endif +#if 0 +	llinfos << "---------------------------" << llendl ; +	llinfos << "Chunk buffer: " << (U32)getBuffer() << " size: " << getBufferSize() << llendl ; + +	llinfos << "available blocks ... " << llendl ; +	for(S32 i = 0 ; i < mBlockLevels ; i++) +	{ +		LLMemoryBlock* blk = mAvailBlockList[i] ; +		while(blk) +		{ +			llinfos << "blk buffer " << (U32)blk->getBuffer() << " size: " << blk->getBufferSize() << llendl ; +			blk = blk->mNext ; +		} +	} + +	llinfos << "free blocks ... " << llendl ; +	for(S32 i = 0 ; i < mPartitionLevels ; i++) +	{ +		LLMemoryBlock* blk = mFreeSpaceList[i] ; +		while(blk) +		{ +			llinfos << "blk buffer " << (U32)blk->getBuffer() << " size: " << blk->getBufferSize() << llendl ; +			blk = blk->mNext ; +		} +	} +#endif +} + +//compute the size for a block, the size is round to integer times of mMinBlockSize. 
+U32 LLPrivateMemoryPool::LLMemoryChunk::calcBlockSize(U32 slot_size) +{ +	// +	//Note: we try to make a block to have 32 slots if the size is not over 32 pages +	//32 is the number of bits of an integer in a 32-bit system +	// + +	U32 block_size; +	U32 cut_off_size = llmin(CUT_OFF_SIZE, (U32)(mMinBlockSize << 5)) ; + +	if((slot_size << 5) <= mMinBlockSize)//for small allocations, return one page  +	{ +		block_size = mMinBlockSize ; +	} +	else if(slot_size >= cut_off_size)//for large allocations, return one-slot block +	{ +		block_size = (slot_size / mMinBlockSize) * mMinBlockSize ; +		if(block_size < slot_size) +		{ +			block_size += mMinBlockSize ; +		} +	} +	else //medium allocations +	{ +		if((slot_size << 5) >= cut_off_size) +		{ +			block_size = cut_off_size ; +		} +		else +		{ +			block_size = ((slot_size << 5) / mMinBlockSize) * mMinBlockSize ; +		} +	} + +	llassert_always(block_size >= slot_size) ; + +	return block_size ; +} + +//create a new block in the chunk +LLPrivateMemoryPool::LLMemoryBlock* LLPrivateMemoryPool::LLMemoryChunk::addBlock(U32 blk_idx) +{	 +	U32 slot_size = mMinSlotSize * (blk_idx + 1) ; +	U32 preferred_block_size = calcBlockSize(slot_size) ;	 +	U16 idx = getPageLevel(preferred_block_size);  +	LLMemoryBlock* blk = NULL ; +	 +	if(mFreeSpaceList[idx])//if there is free slot for blk_idx +	{ +		blk = createNewBlock(mFreeSpaceList[idx], preferred_block_size, slot_size, blk_idx) ; +	} +	else if(mFreeSpaceList[mPartitionLevels - 1]) //search free pool +	{		 +		blk = createNewBlock(mFreeSpaceList[mPartitionLevels - 1], preferred_block_size, slot_size, blk_idx) ; +	} +	else //search for other non-preferred but enough space slot. 
+	{ +		S32 min_idx = 0 ; +		if(slot_size > mMinBlockSize) +		{ +			min_idx = getPageLevel(slot_size) ; +		} +		for(S32 i = (S32)idx - 1 ; i >= min_idx ; i--) //search the small slots first +		{ +			if(mFreeSpaceList[i]) +			{ +				U32 new_preferred_block_size = mFreeSpaceList[i]->getBufferSize(); +				new_preferred_block_size = (new_preferred_block_size / mMinBlockSize) * mMinBlockSize ; //round to integer times of mMinBlockSize. + +				//create a NEW BLOCK THERE. +				if(new_preferred_block_size >= slot_size) //at least there is space for one slot. +				{ +					 +					blk = createNewBlock(mFreeSpaceList[i], new_preferred_block_size, slot_size, blk_idx) ; +				} +				break ; +			}  +		} + +		if(!blk) +		{ +			for(U16 i = idx + 1 ; i < mPartitionLevels - 1; i++) //search the large slots  +			{ +				if(mFreeSpaceList[i]) +				{ +					//create a NEW BLOCK THERE. +					blk = createNewBlock(mFreeSpaceList[i], preferred_block_size, slot_size, blk_idx) ; +					break ; +				}  +			} +		} +	} + +	return blk ; +} + +//create a new block at the designed location +LLPrivateMemoryPool::LLMemoryBlock* LLPrivateMemoryPool::LLMemoryChunk::createNewBlock(LLMemoryBlock* blk, U32 buffer_size, U32 slot_size, U32 blk_idx) +{ +	//unlink from the free space +	removeFromFreeSpace(blk) ; + +	//check the rest space +	U32 new_free_blk_size = blk->getBufferSize() - buffer_size ;	 +	if(new_free_blk_size < mMinBlockSize) //can not partition the memory into size smaller than mMinBlockSize +	{ +		new_free_blk_size = 0 ; //discard the last small extra space. +	}			 + +	//add the rest space back to the free list +	if(new_free_blk_size > 0) //blk still has free space +	{ +		LLMemoryBlock* next_blk = blk + (buffer_size / mMinBlockSize) ; +		next_blk->setBuffer(blk->getBuffer() + buffer_size, new_free_blk_size) ; +		addToFreeSpace(next_blk) ; +	} + +	blk->init(blk->getBuffer(), buffer_size, slot_size) ; +	//insert to the available block list... 
+	mAvailBlockList[blk_idx] = blk ; + +	//mark the address map: all blocks covered by this block space pointing back to this block. +	U32 end = (buffer_size / mMinBlockSize) ; +	for(U32 i = 1 ; i < end ; i++) +	{ +		(blk + i)->mSelf = blk ; +	} + +	return blk ; +} + +//delete a block, release the block to the free pool. +void LLPrivateMemoryPool::LLMemoryChunk::removeBlock(LLMemoryBlock* blk) +{ +	//remove from the available block list +	if(blk->mPrev) +	{ +		blk->mPrev->mNext = blk->mNext ; +	} +	if(blk->mNext) +	{ +		blk->mNext->mPrev = blk->mPrev ; +	} +	U32 blk_idx = getBlockLevel(blk->getSlotSize()); +	if(mAvailBlockList[blk_idx] == blk) +	{ +		mAvailBlockList[blk_idx] = blk->mNext ; +	} + +	blk->mNext = NULL ; +	blk->mPrev = NULL ; +	 +	//mark it free +	blk->setBuffer(blk->getBuffer(), blk->getBufferSize()) ; + +#if 1 +	//merge blk with neighbors if possible +	if(blk->getBuffer() > mDataBuffer) //has the left neighbor +	{ +		if((blk - 1)->mSelf->isFree()) +		{ +			LLMemoryBlock* left_blk = (blk - 1)->mSelf ; +			removeFromFreeSpace((blk - 1)->mSelf); +			left_blk->setBuffer(left_blk->getBuffer(), left_blk->getBufferSize() + blk->getBufferSize()) ; +			blk = left_blk ; +		} +	} +	if(blk->getBuffer() + blk->getBufferSize() <= mBuffer + mBufferSize - mMinBlockSize) //has the right neighbor +	{ +		U32 d = blk->getBufferSize() / mMinBlockSize ; +		if((blk + d)->isFree()) +		{ +			LLMemoryBlock* right_blk = blk + d ; +			removeFromFreeSpace(blk + d) ; +			blk->setBuffer(blk->getBuffer(), blk->getBufferSize() + right_blk->getBufferSize()) ; +		} +	} +#endif +	 +	addToFreeSpace(blk) ; + +	return ; +} + +//the top block in the list is full, pop it out of the list +void LLPrivateMemoryPool::LLMemoryChunk::popAvailBlockList(U32 blk_idx)  +{ +	if(mAvailBlockList[blk_idx]) +	{ +		LLMemoryBlock* next = mAvailBlockList[blk_idx]->mNext ; +		if(next) +		{ +			next->mPrev = NULL ; +		} +		mAvailBlockList[blk_idx]->mPrev = NULL ; +		mAvailBlockList[blk_idx]->mNext = NULL ; +		
mAvailBlockList[blk_idx] = next ; +	} +} + +//add the block back to the free pool +void LLPrivateMemoryPool::LLMemoryChunk::addToFreeSpace(LLMemoryBlock* blk)  +{ +	llassert_always(!blk->mPrev) ; +	llassert_always(!blk->mNext) ; + +	U16 free_idx = blk->getBufferSize() / mMinBlockSize - 1; + +	(blk + free_idx)->mSelf = blk ; //mark the end pointing back to the head. +	free_idx = llmin(free_idx, (U16)(mPartitionLevels - 1)) ; + +	blk->mNext = mFreeSpaceList[free_idx] ; +	if(mFreeSpaceList[free_idx]) +	{ +		mFreeSpaceList[free_idx]->mPrev = blk ; +	} +	mFreeSpaceList[free_idx] = blk ; +	blk->mPrev = NULL ; +	blk->mSelf = blk ; +	 +	return ; +} + +//remove the space from the free pool +void LLPrivateMemoryPool::LLMemoryChunk::removeFromFreeSpace(LLMemoryBlock* blk)  +{ +	U16 free_idx = blk->getBufferSize() / mMinBlockSize - 1; +	free_idx = llmin(free_idx, (U16)(mPartitionLevels - 1)) ; + +	if(mFreeSpaceList[free_idx] == blk) +	{ +		mFreeSpaceList[free_idx] = blk->mNext ; +	} +	if(blk->mPrev) +	{ +		blk->mPrev->mNext = blk->mNext ; +	} +	if(blk->mNext) +	{ +		blk->mNext->mPrev = blk->mPrev ; +	} +	blk->mNext = NULL ; +	blk->mPrev = NULL ; +	blk->mSelf = NULL ; + +	return ; +} + +void LLPrivateMemoryPool::LLMemoryChunk::addToAvailBlockList(LLMemoryBlock* blk)  +{ +	llassert_always(!blk->mPrev) ; +	llassert_always(!blk->mNext) ; + +	U32 blk_idx = getBlockLevel(blk->getSlotSize()); + +	blk->mNext = mAvailBlockList[blk_idx] ; +	if(blk->mNext) +	{ +		blk->mNext->mPrev = blk ; +	} +	blk->mPrev = NULL ; +	mAvailBlockList[blk_idx] = blk ; + +	return ; +} + +U32 LLPrivateMemoryPool::LLMemoryChunk::getPageIndex(U32 addr) +{ +	return (addr - (U32)mDataBuffer) / mMinBlockSize ; +} + +//for mAvailBlockList +U32 LLPrivateMemoryPool::LLMemoryChunk::getBlockLevel(U32 size) +{ +	llassert(size >= mMinSlotSize && size <= mMaxSlotSize) ; + +	//start from 0 +	return (size + mMinSlotSize - 1) / mMinSlotSize - 1 ; +} + +//for mFreeSpaceList +U16 
LLPrivateMemoryPool::LLMemoryChunk::getPageLevel(U32 size) +{ +	//start from 0 +	U16 level = size / mMinBlockSize - 1 ; +	if(level >= mPartitionLevels) +	{ +		level = mPartitionLevels - 1 ; +	} +	return level ; +} + +//------------------------------------------------------------------- +//class LLPrivateMemoryPool +//-------------------------------------------------------------------- +const U32 CHUNK_SIZE = 4 << 20 ; //4 MB +const U32 LARGE_CHUNK_SIZE = 4 * CHUNK_SIZE ; //16 MB +LLPrivateMemoryPool::LLPrivateMemoryPool(U32 max_size, bool threaded) : +	mMutexp(NULL), +	mMaxPoolSize(max_size), +	mReservedPoolSize(0), +	mHashFactor(1) +{ +	if(threaded) +	{ +		mMutexp = new LLMutex(NULL) ; +	} + +	for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++) +	{ +		mChunkList[i] = NULL ; +	}	 +	 +	mNumOfChunks = 0 ; +} + +LLPrivateMemoryPool::~LLPrivateMemoryPool() +{ +	destroyPool(); +	delete mMutexp ; +} + +char* LLPrivateMemoryPool::allocate(U32 size) +{	 +	if(!size) +	{ +		return NULL ; +	} + +	//if the asked size larger than MAX_BLOCK_SIZE, fetch from heap directly, the pool does not manage it +	if(size >= CHUNK_SIZE) +	{ +		return new char[size] ; +	} + +	char* p = NULL ; + +	//find the appropriate chunk +	S32 chunk_idx = getChunkIndex(size) ; +	 +	lock() ; + +	LLMemoryChunk* chunk = mChunkList[chunk_idx]; +	while(chunk) +	{ +		if(p = chunk->allocate(size)) +		{ +			break ; +		} +		chunk = chunk->mNext ; +	} +	 +	//fetch new memory chunk +	if(!p) +	{ +		if(mReservedPoolSize + CHUNK_SIZE > mMaxPoolSize) +		{ +			chunk = mChunkList[chunk_idx]; +			while(chunk) +			{ +				if(p = chunk->allocate(size)) +				{ +					break ; +				} +				chunk = chunk->mNext ; +			} +		} + +		chunk = addChunk(chunk_idx) ; +		if(chunk) +		{ +			p = chunk->allocate(size) ; +		} +	} + +	unlock() ; + +	return p ; +} + +void LLPrivateMemoryPool::free(void* addr) +{ +	if(!addr) +	{ +		return ; +	} + +	lock() ; +	 +	LLMemoryChunk* chunk = findChunk((char*)addr) ; +	 +	if(!chunk) +	{ +		delete[] addr ; 
//release from heap +	} +	else +	{ +		chunk->free(addr) ; + +		if(chunk->empty()) +		{ +			removeChunk(chunk) ; +		} +	} +	 +	unlock() ; +} + +void LLPrivateMemoryPool::dump() +{ +} + +U32 LLPrivateMemoryPool::getTotalAllocatedSize() +{ +	U32 total_allocated = 0 ; + +	LLMemoryChunk* chunk ; +	for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++) +	{ +		chunk = mChunkList[i]; +		while(chunk) +		{ +			total_allocated += chunk->getAllocatedSize() ; +			chunk = chunk->mNext ; +		} +	} + +	return total_allocated ; +} + +void LLPrivateMemoryPool::lock() +{ +	if(mMutexp) +	{ +		mMutexp->lock() ; +	} +} + +void LLPrivateMemoryPool::unlock() +{ +	if(mMutexp) +	{ +		mMutexp->unlock() ; +	} +} + +S32  LLPrivateMemoryPool::getChunkIndex(U32 size)  +{ +	S32 i ; +	for(i = 0 ; size > MAX_SLOT_SIZES[i]; i++); +	 +	llassert_always(i < SUPER_ALLOCATION); + +	return i ; +} + +//destroy the entire pool +void  LLPrivateMemoryPool::destroyPool() +{ +	lock() ; +	if(mNumOfChunks > 0) +	{ +		for(U32 i = 0 ; i < mHashFactor; i++) +		{ +			while(mChunkHashList[i]) +			{ +				removeChunk(mChunkHashList[i]) ; +			} +		} +	} +	mChunkHashList.clear() ; +	mHashFactor = 1 ; +	llassert_always(mNumOfChunks == 0) ; +	llassert_always(mReservedPoolSize == 0) ; + +	for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++) +	{ +		mChunkList[i] = NULL ; +	} + +	unlock() ; +} + +void  LLPrivateMemoryPool::checkSize(U32 asked_size) +{ +	if(mReservedPoolSize + asked_size > mMaxPoolSize) +	{ +		llinfos << "Max pool size: " << mMaxPoolSize << llendl ; +		llinfos << "Total reserved size: " << mReservedPoolSize + asked_size << llendl ; +		llinfos << "Total_allocated Size: " << getTotalAllocatedSize() << llendl ; + +		llerrs << "The pool is overflowing..." 
<< llendl ; +	} +} + +LLPrivateMemoryPool::LLMemoryChunk* LLPrivateMemoryPool::addChunk(S32 chunk_index) +{ +	U32 preferred_size ; +	U32 overhead ; +	if(chunk_index < LARGE_ALLOCATION) +	{ +		preferred_size = CHUNK_SIZE ; //4MB +		overhead = LLMemoryChunk::getMaxOverhead(preferred_size, MIN_SLOT_SIZES[chunk_index], +			MAX_SLOT_SIZES[chunk_index], MIN_BLOCK_SIZES[chunk_index], MAX_BLOCK_SIZES[chunk_index]) ; +	} +	else +	{ +		preferred_size = LARGE_CHUNK_SIZE ; //16MB +		overhead = LLMemoryChunk::getMaxOverhead(preferred_size, MIN_SLOT_SIZES[chunk_index],  +			MAX_SLOT_SIZES[chunk_index], MIN_BLOCK_SIZES[chunk_index], MAX_BLOCK_SIZES[chunk_index]) ; +	} + +	checkSize(preferred_size + overhead) ; +	mReservedPoolSize += preferred_size + overhead ; + +	char* buffer = new(std::nothrow) char[preferred_size + overhead] ; +	if(!buffer) +	{ +		return NULL ; +	} +	 +	LLMemoryChunk* chunk = new (buffer) LLMemoryChunk() ; +	chunk->init(buffer, preferred_size + overhead, MIN_SLOT_SIZES[chunk_index], +		MAX_SLOT_SIZES[chunk_index], MIN_BLOCK_SIZES[chunk_index], MAX_BLOCK_SIZES[chunk_index]) ; + +	//add to the tail of the linked list +	{ +		if(!mChunkList[chunk_index]) +		{ +			mChunkList[chunk_index] = chunk ; +		} +		else +		{ +			LLMemoryChunk* cur = mChunkList[chunk_index] ; +			while(cur->mNext) +			{ +				cur = cur->mNext ; +			} +			cur->mNext = chunk ; +			chunk->mPrev = cur ; +		} +	} + +	//insert into the hash table +	addToHashTable(chunk) ; +	 +	mNumOfChunks++; + +	return chunk ; +} + +void LLPrivateMemoryPool::removeChunk(LLMemoryChunk* chunk)  +{ +	if(!chunk) +	{ +		return ; +	} + +	//remove from the linked list +	for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++) +	{ +		if(mChunkList[i] == chunk) +		{ +			mChunkList[i] = chunk->mNext ; +		} +	} + +	if(chunk->mPrev) +	{ +		chunk->mPrev->mNext = chunk->mNext ; +	} +	if(chunk->mNext) +	{ +		chunk->mNext->mPrev = chunk->mPrev ; +	} + +	//remove from the hash table +	removeFromHashTable(chunk) ; +	 +	mNumOfChunks--; +	
mReservedPoolSize -= chunk->getBufferSize() ; +	 +	//release memory +	delete[] chunk->getBuffer() ; +} + +U16 LLPrivateMemoryPool::findHashKey(const char* addr) +{ +	return (((U32)addr) / CHUNK_SIZE) % mHashFactor ; +} + +LLPrivateMemoryPool::LLMemoryChunk* LLPrivateMemoryPool::findChunk(const char* addr) +{ +	U16 key = findHashKey(addr) ;	 +	if(mChunkHashList.size() <= key) +	{ +		return NULL ; +	} + +	//check the hash value "key" +	LLMemoryChunk* chunk = mChunkHashList[key] ; +	while(chunk && !chunk->containsAddress(addr)) +	{ +		chunk = chunk->mHashNext ; +	} + +	return chunk ; +} + +void LLPrivateMemoryPool::addToHashTable(LLMemoryChunk* chunk)  +{ +	static const U16 HASH_FACTORS[] = {41, 83, 193, 317, 419, 523, 0xFFFF};  +	 +	U16 i ; +	if(mChunkHashList.empty()) +	{ +		mHashFactor = HASH_FACTORS[0] ; +		rehash() ;		 +	} + +	U16 start_key = findHashKey(chunk->getBuffer()) ; +	U16 end_key = findHashKey(chunk->getBuffer() + chunk->getBufferSize() - 1) ; +	bool need_rehash = false ; +	 +	if(mChunkHashList[start_key]) +	{ +		if(mChunkHashList[start_key] == chunk)  +		{ +			return; //already inserted. 
+		} +		 +		llassert_always(!chunk->mHashNext) ; + +		chunk->mHashNext = mChunkHashList[start_key] ; +		mChunkHashList[start_key] = chunk ; +	} +	else +	{ +		mChunkHashList[start_key] = chunk ; +	} + +	if(!need_rehash) +	{ +		if(mChunkHashList[end_key]) +		{ +			llassert_always(mChunkHashList[end_key] != chunk) +			 +			need_rehash =  mChunkHashList[end_key]->mHashNext != NULL || mChunkHashList[end_key] == chunk->mHashNext; +			if(!need_rehash) +			{ +				mChunkHashList[end_key]->mHashNext = chunk ; +			}			 +		} +		else +		{ +			mChunkHashList[end_key] = chunk ; +		} +	} + +	if(!need_rehash) +	{ +		if(end_key < start_key) +		{ +			need_rehash = fillHashTable(start_key + 1, mHashFactor, chunk) ; +			if(!need_rehash) +			{ +				need_rehash = fillHashTable(0, end_key, chunk) ; +			} +		} +		else +		{ +			need_rehash = fillHashTable(start_key + 1, end_key, chunk) ; +		} +	} +	 +	if(need_rehash) +	{ +		i = 0 ; +		while(HASH_FACTORS[i] <= mHashFactor) i++; + +		mHashFactor = HASH_FACTORS[i] ; +		llassert_always(mHashFactor != 0xFFFF) ;//stop point to prevent endlessly recursive calls + +		rehash() ; +	} +} + +void LLPrivateMemoryPool::removeFromHashTable(LLMemoryChunk* chunk)  +{ +	U16 start_key = findHashKey(chunk->getBuffer()) ; +	U16 end_key = findHashKey(chunk->getBuffer() + chunk->getBufferSize() - 1) ; +	 +	mChunkHashList[start_key] = chunk->mHashNext ; +	chunk->mHashNext = NULL ; + +	if(mChunkHashList[end_key] != chunk) +	{ +		mChunkHashList[end_key]->mHashNext = NULL ; +	} +	else +	{ +		mChunkHashList[end_key] = NULL ; +	} + +	if(end_key < start_key) +	{ +		for(U16 i = start_key + 1 ; i < mHashFactor; i++) +		{ +			mChunkHashList[i] = NULL ; +		} +		for(U16 i = 0 ; i < end_key; i++) +		{ +			mChunkHashList[i] = NULL ; +		} +	} +	else +	{ +		for(U16 i = start_key + 1 ; i < end_key; i++) +		{ +			mChunkHashList[i] = NULL ; +		} +	} +} + +void LLPrivateMemoryPool::rehash() +{ +	llinfos << "new hash factor: " << mHashFactor << llendl ; + +	mChunkHashList.clear() ; +	
mChunkHashList.resize(mHashFactor, NULL) ; + +	LLMemoryChunk* chunk ; +	for(U16 i = 0 ; i < SUPER_ALLOCATION ; i++) +	{ +		chunk = mChunkList[i] ;  +		while(chunk) +		{ +			chunk->mHashNext = NULL ; +			addToHashTable(chunk) ; +			chunk = chunk->mNext ; +		} +	} +} + +bool LLPrivateMemoryPool::fillHashTable(U16 start, U16 end, LLMemoryChunk* chunk) +{ +	for(U16 i = start; i < end; i++) +	{ +		if(mChunkHashList[i]) //the slot is occupied. +		{ +			llassert_always(mChunkHashList[i] != chunk) ; +			return true ; +		} +		else +		{ +			mChunkHashList[i] = chunk ; +		} +	} + +	return false ; +} + +//-------------------------------------------------------------------- +//class LLPrivateMemoryPoolManager +//-------------------------------------------------------------------- +LLPrivateMemoryPoolManager* LLPrivateMemoryPoolManager::sInstance = NULL ; + +LLPrivateMemoryPoolManager::LLPrivateMemoryPoolManager()  +{ +} + +LLPrivateMemoryPoolManager::~LLPrivateMemoryPoolManager()  +{ +	//all private pools should be released by their owners before reaching here. 
+	llassert_always(mPoolList.empty()) ; + +#if 0 +	if(!mPoolList.empty()) +	{ +		for(std::set<LLPrivateMemoryPool*>::iterator iter = mPoolList.begin(); iter != mPoolList.end(); ++iter) +		{ +			delete *iter; +		} +		mPoolList.clear() ; +	} +#endif +} + +//static  +LLPrivateMemoryPoolManager* LLPrivateMemoryPoolManager::getInstance()  +{ +	if(!sInstance) +	{ +		sInstance = new LLPrivateMemoryPoolManager() ; +	} +	return sInstance ; +} +	 +//static  +void LLPrivateMemoryPoolManager::destroyClass()  +{ +	if(sInstance) +	{ +		delete sInstance ; +		sInstance = NULL ; +	} +} + +LLPrivateMemoryPool* LLPrivateMemoryPoolManager::newPool(U32 max_size, bool threaded)  +{ +	LLPrivateMemoryPool* pool = new LLPrivateMemoryPool(max_size, threaded) ; +	mPoolList.insert(pool) ; + +	return pool ; +} + +void LLPrivateMemoryPoolManager::deletePool(LLPrivateMemoryPool* pool)  +{ +	mPoolList.erase(pool) ; +	delete pool; +} + +//debug +void LLPrivateMemoryPoolManager::updateStatistics() +{ +	mTotalReservedSize = 0 ; +	mTotalAllocatedSize = 0 ; + +	for(std::set<LLPrivateMemoryPool*>::iterator iter = mPoolList.begin(); iter != mPoolList.end(); ++iter) +	{ +		mTotalReservedSize += (*iter)->getTotalReservedSize() ; +		mTotalAllocatedSize += (*iter)->getTotalAllocatedSize() ; +	} +} + +//-------------------------------------------------------------------- +//class LLPrivateMemoryPoolTester +//-------------------------------------------------------------------- +LLPrivateMemoryPoolTester* LLPrivateMemoryPoolTester::sInstance = NULL ; +LLPrivateMemoryPool* LLPrivateMemoryPoolTester::sPool = NULL ; +LLPrivateMemoryPoolTester::LLPrivateMemoryPoolTester() +{	 +} +	 +LLPrivateMemoryPoolTester::~LLPrivateMemoryPoolTester()  +{	 +} + +//static  +LLPrivateMemoryPoolTester* LLPrivateMemoryPoolTester::getInstance()  +{ +	if(!sInstance) +	{ +		sInstance = ::new LLPrivateMemoryPoolTester() ; +	} +	return sInstance ; +} + +//static  +void LLPrivateMemoryPoolTester::destroy() +{ +	if(sInstance) +	{ +		
::delete sInstance ; +		sInstance = NULL ; +	} + +	if(sPool) +	{ +		LLPrivateMemoryPoolManager::getInstance()->deletePool(sPool) ; +		sPool = NULL ; +	} +} + +void LLPrivateMemoryPoolTester::run(bool threaded)  +{ +	const U32 max_pool_size = 1024 << 20 ; +	 +	if(sPool) +	{ +		LLPrivateMemoryPoolManager::getInstance()->deletePool(sPool) ; +	} +	sPool = LLPrivateMemoryPoolManager::getInstance()->newPool(max_pool_size, threaded) ; + +	//run the test +	correctnessTest() ; +	performanceTest() ; +	//fragmentationtest() ; + +	//release pool. +	LLPrivateMemoryPoolManager::getInstance()->deletePool(sPool) ; +	sPool = NULL ; +} + +void LLPrivateMemoryPoolTester::test(U32 min_size, U32 max_size, U32 stride, U32 times,  +									 bool random_deletion, bool output_statistics) +{ +	U32 levels = (max_size - min_size) / stride + 1 ; +	char*** p ; +	U32 i, j ; +	U32 total_allocated_size = 0 ; + +	//allocate space for p ; +	if(!(p = ::new char**[times]) || !(*p = ::new char*[times * levels])) +	{ +		llerrs << "memory initialization for p failed" << llendl ; +	} + +	//init +	for(i = 0 ; i < times; i++) +	{ +		p[i] = *p + i * levels ; +		for(j = 0 ; j < levels; j++) +		{ +			p[i][j] = NULL ; +		} +	} + +	//allocation +	U32 size ; +	for(i = 0 ; i < times ; i++) +	{ +		for(j = 0 ; j < levels; j++)  +		{ +			size = min_size + j * stride ; +			p[i][j] = sPool->allocate(size) ; + +			total_allocated_size+= size ; + +			*(U32*)p[i][j] = i ; +			*((U32*)p[i][j] + 1) = j ; +			//p[i][j][size - 1] = '\0' ; //access the last element to verify the success of the allocation. 
+ +			//randomly release memory +			if(random_deletion) +			{ +				S32 k = rand() % levels ; + +				if(p[i][k]) +				{ +					llassert_always(*(U32*)p[i][k] == i && *((U32*)p[i][k] + 1) == k) ; +					sPool->free(p[i][k]) ; +					total_allocated_size -= min_size + k * stride ; +					p[i][k] = NULL ; +				} +			} +		} +	} + +	//output pool allocation statistics +	if(output_statistics) +	{ +	} + +	//release all memory allocations +	for(i = 0 ; i < times; i++) +	{ +		for(j = 0 ; j < levels; j++) +		{ +			if(p[i][j]) +			{ +				llassert_always(*(U32*)p[i][j] == i && *((U32*)p[i][j] + 1) == j) ; +				sPool->free(p[i][j]) ; +				total_allocated_size -= min_size + j * stride ; +				p[i][j] = NULL ; +			} +		} +	} + +	::delete[] *p ; +	::delete[] p ; +} + +void LLPrivateMemoryPoolTester::testAndTime(U32 size, U32 times) +{ +	LLTimer timer ; + +	llinfos << " -**********************- " << llendl ; +	llinfos << "test size: " << size << " test times: " << times << llendl ; + +	timer.reset() ; +	char** p = new char*[times] ; +		 +	//using the customized memory pool +	//allocation +	for(U32 i = 0 ; i < times; i++) +	{ +		p[i] = sPool->allocate(size) ; +		if(!p[i]) +		{ +			llerrs << "allocation failed" << llendl ; +		} +	} +	//de-allocation +	for(U32 i = 0 ; i < times; i++) +	{ +		sPool->free(p[i]) ; +		p[i] = NULL ; +	} +	llinfos << "time spent using customized memory pool: " << timer.getElapsedTimeF32() << llendl ; + +	timer.reset() ; + +	//using the standard allocator/de-allocator: +	//allocation +	for(U32 i = 0 ; i < times; i++) +	{ +		p[i] = ::new char[size] ; +		if(!p[i]) +		{ +			llerrs << "allocation failed" << llendl ; +		} +	} +	//de-allocation +	for(U32 i = 0 ; i < times; i++) +	{ +		::delete[] p[i] ; +		p[i] = NULL ; +	} +	llinfos << "time spent using standard allocator/de-allocator: " << timer.getElapsedTimeF32() << llendl ; + +	delete[] p; +} + +void LLPrivateMemoryPoolTester::correctnessTest()  +{ +	//try many different sized allocation, and all kinds of edge cases, 
access the allocated memory 
+	//to see if allocation is right.
+	
+	//edge case
+	char* p = sPool->allocate(0) ;
+	sPool->free(p) ;
+
+	//small sized
+	// [8 bytes, 2KB), each asks for 256 allocations and deallocations
+	test(8, 2040, 8, 256, true, true) ;
+	
+	//medium sized
+	//[2KB, 512KB), each asks for 16 allocations and deallocations
+	test(2048, 512 * 1024 - 2048, 2048, 16, true, true) ;
+
+	//large sized
+	//[512KB, 4MB], each asks for 8 allocations and deallocations
+	test(512 * 1024, 4 * 1024 * 1024, 64 * 1024, 6, true, true) ;
+}
+
+void LLPrivateMemoryPoolTester::performanceTest() 
+{
+	U32 test_size[3] = {768, 3* 1024, 3* 1024 * 1024};
+	
+	//small sized
+	testAndTime(test_size[0], 8) ;
+	
+	//medium sized
+	testAndTime(test_size[1], 8) ;
+
+	//large sized
+	testAndTime(test_size[2], 8) ;
+}
+
+void LLPrivateMemoryPoolTester::fragmentationtest() 
+{
+	//for internal fragmentation statistics:
+	//every time when asking for a new chunk during correctness test, and performance test,
+	//print out the chunk usage statistics.
+}
+
+//--------------------------------------------------------------------
diff --git a/indra/llcommon/llmemory.h b/indra/llcommon/llmemory.h
index 11406f59b0..4474df6f86 100644
--- a/indra/llcommon/llmemory.h
+++ b/indra/llcommon/llmemory.h
@@ -27,7 +27,6 @@
 #define LLMEMORY_H
 
 #include "llmemtype.h"
-
 extern S32 gTotalDAlloc;
 extern S32 gTotalDAUse;
 extern S32 gDACount;
@@ -45,8 +44,24 @@ public:
 	// Return value is zero if not known.
 	
static U64 getCurrentRSS();
 	static U32 getWorkingSetSize();
+	static void* tryToAlloc(void* address, U32 size);
+	static void initMaxHeapSizeGB(F32 max_heap_size_gb, BOOL prevent_heap_failure);
+	static void updateMemoryInfo() ;
+	static void logMemoryInfo(BOOL update = FALSE);
+	static S32  isMemoryPoolLow();
+
+	static U32 getAvailableMemKB() ;
+	static U32 getMaxMemKB() ;
+	static U32 getAllocatedMemKB() ;
 private:
 	static char* reserveMem;
+	static U32 sAvailPhysicalMemInKB ;
+	static U32 sMaxPhysicalMemInKB ;
+	static U32 sAllocatedMemInKB;
+	static U32 sAllocatedPageSizeInKB ;
+
+	static U32 sMaxHeapSizeInKB;
+	static BOOL sEnableMemoryFailurePrevention;
 };
 
 //----------------------------------------------------------------------------
@@ -93,6 +108,272 @@
 //----------------------------------------------------------------------------
+
+
+//
+//class LLPrivateMemoryPool defines a private memory pool for an application to use, so the application does not
+//need to access the heap directly for each memory allocation. Through this, the allocation speed is faster, 
+//and reduces virtual address space fragmentation problem.
+//Note: this class is thread-safe by passing true to the constructor function. However, you do not need to do this unless
+//you are sure the memory allocation and de-allocation will happen in different threads. To make the pool thread safe
+//increases allocation and deallocation cost. 
+//
+class LL_COMMON_API LLPrivateMemoryPool
+{
+	friend class LLPrivateMemoryPoolManager ;
+
+public:
+	class LL_COMMON_API LLMemoryBlock //each block is divided into slots uniformly
+	{
+	public: 
+		LLMemoryBlock() ;
+		~LLMemoryBlock() ;
+
+		void init(char* buffer, U32 buffer_size, U32 slot_size) ;
+		void setBuffer(char* buffer, U32 buffer_size) ;
+
+		char* allocate() ;
+		void  free(void* addr) ;
+
+		bool empty() {return !mAllocatedSlots;}
+		bool isFull() {return mAllocatedSlots == mTotalSlots;}
+		bool isFree() {return !mTotalSlots;}
+
+		U32  getSlotSize()const {return mSlotSize;}
+		U32  getTotalSlots()const {return mTotalSlots;}
+		U32  getBufferSize()const {return mBufferSize;}
+		char* getBuffer() const {return mBuffer;}
+
+		//debug use
+		void resetBitMap() ;
+	private:
+		char* mBuffer;
+		U32   mSlotSize ; //when the block is not initialized, it is the buffer size.
+		U32   mBufferSize ;
+		U32   mUsageBits ;
+		U8    mTotalSlots ;
+		U8    mAllocatedSlots ;
+		U8    mDummySize ; //size of extra U32 reserved for mUsageBits.
+
+	public:
+		LLMemoryBlock* mPrev ;
+		LLMemoryBlock* mNext ;
+		LLMemoryBlock* mSelf ;
+
+		struct CompareAddress
+		{
+			bool operator()(const LLMemoryBlock* const& lhs, const LLMemoryBlock* const& rhs)
+			{
+				return (U32)lhs->getBuffer() < (U32)rhs->getBuffer();
+			}
+		};
+	};
+
+	class LL_COMMON_API LLMemoryChunk //is divided into memory blocks. 
+	{ +	public: +		LLMemoryChunk() ; +		~LLMemoryChunk() ; + +		void init(char* buffer, U32 buffer_size, U32 min_slot_size, U32 max_slot_size, U32 min_block_size, U32 max_block_size) ; +		void setBuffer(char* buffer, U32 buffer_size) ; + +		bool empty() ; +		 +		char* allocate(U32 size) ; +		void  free(void* addr) ; + +		const char* getBuffer() const {return mBuffer;} +		U32 getBufferSize() const {return mBufferSize;} +		U32 getAllocatedSize() const {return mAlloatedSize;} + +		bool containsAddress(const char* addr) const; + +		static U32 getMaxOverhead(U32 data_buffer_size, U32 min_slot_size,  +													   U32 max_slot_size, U32 min_block_size, U32 max_block_size) ; +	 +		void dump() ; + +	private: +		U32 getPageIndex(U32 addr) ; +		U32 getBlockLevel(U32 size) ; +		U16 getPageLevel(U32 size) ; +		LLMemoryBlock* addBlock(U32 blk_idx) ; +		void popAvailBlockList(U32 blk_idx) ; +		void addToFreeSpace(LLMemoryBlock* blk) ; +		void removeFromFreeSpace(LLMemoryBlock* blk) ; +		void removeBlock(LLMemoryBlock* blk) ; +		void addToAvailBlockList(LLMemoryBlock* blk) ; +		U32  calcBlockSize(U32 slot_size); +		LLMemoryBlock* createNewBlock(LLMemoryBlock* blk, U32 buffer_size, U32 slot_size, U32 blk_idx) ; + +	private: +		LLMemoryBlock** mAvailBlockList ;//256 by mMinSlotSize +		LLMemoryBlock** mFreeSpaceList; +		LLMemoryBlock*  mBlocks ; //index of blocks by address. 
+		 +		char* mBuffer ; +		U32   mBufferSize ; +		char* mDataBuffer ; +		char* mMetaBuffer ; +		U32   mMinBlockSize ; +		U32   mMinSlotSize ; +		U32   mMaxSlotSize ; +		U32   mAlloatedSize ; +		U16   mBlockLevels; +		U16   mPartitionLevels; + +	public: +		//form a linked list +		LLMemoryChunk* mNext ; +		LLMemoryChunk* mPrev ; + +		LLMemoryChunk* mHashNext ; +	} ; + +private: +	LLPrivateMemoryPool(U32 max_size, bool threaded) ; +	~LLPrivateMemoryPool() ; + +public: +	char *allocate(U32 size) ; +	void  free(void* addr) ; +	 +	void  dump() ; +	U32   getTotalAllocatedSize() ; +	U32   getTotalReservedSize() {return mReservedPoolSize;} +	 +private: +	void lock() ; +	void unlock() ;	 +	S32 getChunkIndex(U32 size) ; +	LLMemoryChunk*  addChunk(S32 chunk_index) ; +	void checkSize(U32 asked_size) ; +	void removeChunk(LLMemoryChunk* chunk) ; +	U16  findHashKey(const char* addr); +	void addToHashTable(LLMemoryChunk* chunk) ; +	void removeFromHashTable(LLMemoryChunk* chunk) ; +	void rehash() ; +	bool fillHashTable(U16 start, U16 end, LLMemoryChunk* chunk) ; +	LLMemoryChunk* findChunk(const char* addr) ; + +	void destroyPool() ; + +public: +	enum +	{ +		SMALL_ALLOCATION = 0, //from 8 bytes to 2KB(exclusive), page size 2KB, max chunk size is 4MB. +		MEDIUM_ALLOCATION,    //from 2KB to 512KB(exclusive), page size 32KB, max chunk size 4MB +		LARGE_ALLOCATION,     //from 512KB to 4MB(inclusive), page size 64KB, max chunk size 16MB +		SUPER_ALLOCATION      //allocation larger than 4MB. 
+	}; + +private: +	LLMutex* mMutexp ; +	U32  mMaxPoolSize; +	U32  mReservedPoolSize ;	 + +	LLMemoryChunk* mChunkList[SUPER_ALLOCATION] ; //all memory chunks reserved by this pool, sorted by address +	std::vector<LLMemoryChunk*> mChunkHashList ; +	U16 mNumOfChunks ; +	U16 mHashFactor ; +}; + +class LL_COMMON_API LLPrivateMemoryPoolManager +{ +private: +	LLPrivateMemoryPoolManager() ; +	~LLPrivateMemoryPoolManager() ; + +public: +	static LLPrivateMemoryPoolManager* getInstance() ; +	static void destroyClass() ; + +	LLPrivateMemoryPool* newPool(U32 max_size, bool threaded) ; +	void deletePool(LLPrivateMemoryPool* pool) ; + +private: +	static LLPrivateMemoryPoolManager* sInstance ; +	std::set<LLPrivateMemoryPool*> mPoolList ; + +public: +	//debug and statistics info. +	void updateStatistics() ; + +	U32 mTotalReservedSize ; +	U32 mTotalAllocatedSize ; +}; + +// +//the below singleton is used to test the private memory pool. +// +class LL_COMMON_API LLPrivateMemoryPoolTester +{ +private: +	LLPrivateMemoryPoolTester() ; +	~LLPrivateMemoryPoolTester() ; + +public: +	static LLPrivateMemoryPoolTester* getInstance() ; +	static void destroy() ; + +	void run(bool threaded) ;	 + +private: +	void correctnessTest() ; +	void performanceTest() ; +	void fragmentationtest() ; + +	void test(U32 min_size, U32 max_size, U32 stride, U32 times, bool random_deletion, bool output_statistics) ; +	void testAndTime(U32 size, U32 times) ; + +public: +	void* operator new(size_t size) +	{ +		return (void*)sPool->allocate(size) ; +	} +    void  operator delete(void* addr) +	{ +		sPool->free(addr) ; +	} +	void* operator new[](size_t size) +	{ +		return (void*)sPool->allocate(size) ; +	} +    void  operator delete[](void* addr) +	{ +		sPool->free(addr) ; +	} + +private: +	static LLPrivateMemoryPoolTester* sInstance; +	static LLPrivateMemoryPool* sPool ; +	static LLPrivateMemoryPool* sThreadedPool ; +}; +#if 0 +//static +void* LLPrivateMemoryPoolTester::operator new(size_t size) +{ +	return 
(void*)sPool->allocate(size) ; +} + +//static +void  LLPrivateMemoryPoolTester::operator delete(void* addr) +{ +	sPool->free(addr) ; +} + +//static +void* LLPrivateMemoryPoolTester::operator new[](size_t size) +{ +	return (void*)sPool->allocate(size) ; +} + +//static +void  LLPrivateMemoryPoolTester::operator delete[](void* addr) +{ +	sPool->free(addr) ; +} +#endif  // LLRefCount moved to llrefcount.h  // LLPointer moved to llpointer.h diff --git a/indra/llcommon/llsys.cpp b/indra/llcommon/llsys.cpp index 10cdc7087b..7968e53c13 100644 --- a/indra/llcommon/llsys.cpp +++ b/indra/llcommon/llsys.cpp @@ -636,22 +636,20 @@ U32 LLMemoryInfo::getPhysicalMemoryClamped() const  }  //static -void LLMemoryInfo::getAvailableMemoryKB(U32& avail_physical_mem_kb, U32& avail_virtual_mem_kb) +U32 LLMemoryInfo::getAvailableMemoryKB()  {  #if LL_WINDOWS  	MEMORYSTATUSEX state;  	state.dwLength = sizeof(state);  	GlobalMemoryStatusEx(&state); -	avail_physical_mem_kb = (U32)(state.ullAvailPhys/1024) ; -	avail_virtual_mem_kb = (U32)(state.ullAvailVirtual/1024) ; +	return (U32)(state.ullAvailPhys/1024) ;  #else  	//do not know how to collect available memory info for other systems.  	//leave it blank here for now. -	avail_physical_mem_kb = -1 ; -	avail_virtual_mem_kb = -1 ; +	return -1;  #endif  } diff --git a/indra/llcommon/llsys.h b/indra/llcommon/llsys.h index 41a4f25000..580eee4e8d 100644 --- a/indra/llcommon/llsys.h +++ b/indra/llcommon/llsys.h @@ -116,7 +116,7 @@ public:  	U32 getPhysicalMemoryClamped() const; ///< Memory size in clamped bytes  	//get the available memory infomation in KiloBytes. 
-	static void getAvailableMemoryKB(U32& avail_physical_mem_kb, U32& avail_virtual_mem_kb); +	static U32 getAvailableMemoryKB();  }; diff --git a/indra/llimage/llimage.cpp b/indra/llimage/llimage.cpp index 39211bf7fa..aa01df2be8 100644 --- a/indra/llimage/llimage.cpp +++ b/indra/llimage/llimage.cpp @@ -39,6 +39,7 @@  #include "llimagepng.h"  #include "llimagedxt.h"  #include "llimageworker.h" +#include "llmemory.h"  //---------------------------------------------------------------------------  // LLImage @@ -47,11 +48,14 @@  //static  std::string LLImage::sLastErrorMessage;  LLMutex* LLImage::sMutex = NULL; +LLPrivateMemoryPool* LLImageBase::sPrivatePoolp = NULL ;  //static  void LLImage::initClass()  {  	sMutex = new LLMutex(NULL); + +	LLImageBase::createPrivatePool() ;  }  //static @@ -59,6 +63,8 @@ void LLImage::cleanupClass()  {  	delete sMutex;  	sMutex = NULL; + +	LLImageBase::destroyPrivatePool() ;  }  //static @@ -97,6 +103,53 @@ LLImageBase::~LLImageBase()  	deleteData(); // virtual  } +//static  +void LLImageBase::createPrivatePool()  +{ +	const U32 MAX_POOL_SIZE = 512 * 1024 * 1024 ; //512 MB + +	if(!sPrivatePoolp) +	{ +		sPrivatePoolp = LLPrivateMemoryPoolManager::getInstance()->newPool(MAX_POOL_SIZE, true) ; +	} +} +	 +//static  +void LLImageBase::destroyPrivatePool()  +{ +	if(sPrivatePoolp) +	{ +		LLPrivateMemoryPoolManager::getInstance()->deletePool(sPrivatePoolp) ; +		sPrivatePoolp = NULL ; +	} +} + +//static +char* LLImageBase::allocateMemory(S32 size)  +{ +	if(sPrivatePoolp) +	{ +		return sPrivatePoolp->allocate(size) ; +	} +	else +	{ +		return new char[size]; +	} +} + +//static +void  LLImageBase::deleteMemory(void* p)  +{ +	if(sPrivatePoolp) +	{ +		sPrivatePoolp->free(p) ; +	} +	else +	{ +		delete[] p ; +	} +} +  // virtual  void LLImageBase::dump()  { @@ -130,7 +183,7 @@ void LLImageBase::sanityCheck()  // virtual  void LLImageBase::deleteData()  { -	delete[] mData; +	deleteMemory(mData) ;  	mData = NULL;  	mDataSize = 0;  } @@ -167,7 +220,7 @@ 
U8* LLImageBase::allocateData(S32 size)  	{  		deleteData(); // virtual  		mBadBufferAllocation = false ; -		mData = new U8[size]; +		mData = (U8*)allocateMemory(size);  		if (!mData)  		{  			llwarns << "allocate image data: " << size << llendl; @@ -185,7 +238,7 @@ U8* LLImageBase::allocateData(S32 size)  U8* LLImageBase::reallocateData(S32 size)  {  	LLMemType mt1(mMemType); -	U8 *new_datap = new U8[size]; +	U8 *new_datap = (U8*)allocateMemory(size);  	if (!new_datap)  	{  		llwarns << "Out of memory in LLImageBase::reallocateData" << llendl; @@ -195,7 +248,7 @@ U8* LLImageBase::reallocateData(S32 size)  	{  		S32 bytes = llmin(mDataSize, size);  		memcpy(new_datap, mData, bytes);	/* Flawfinder: ignore */ -		delete[] mData; +		deleteMemory(mData) ;  	}  	mData = new_datap;  	mDataSize = size; @@ -341,6 +394,7 @@ BOOL LLImageRaw::resize(U16 width, U16 height, S8 components)  	return TRUE;  } +#if 0  U8 * LLImageRaw::getSubImage(U32 x_pos, U32 y_pos, U32 width, U32 height) const  {  	LLMemType mt1(mMemType); @@ -361,6 +415,7 @@ U8 * LLImageRaw::getSubImage(U32 x_pos, U32 y_pos, U32 width, U32 height) const  	}  	return data;  } +#endif  BOOL LLImageRaw::setSubImage(U32 x_pos, U32 y_pos, U32 width, U32 height,  							 const U8 *data, U32 stride, BOOL reverse_y) @@ -830,6 +885,7 @@ void LLImageRaw::copyScaled( LLImageRaw* src )  	}  } +#if 0  //scale down image by not blending a pixel with its neighbors.  
BOOL LLImageRaw::scaleDownWithoutBlending( S32 new_width, S32 new_height)  { @@ -853,7 +909,7 @@ BOOL LLImageRaw::scaleDownWithoutBlending( S32 new_width, S32 new_height)  	ratio_x -= 1.0f ;  	ratio_y -= 1.0f ; -	U8* new_data = new U8[new_data_size] ; +	U8* new_data = allocateMemory(new_data_size) ;  	llassert_always(new_data != NULL) ;  	U8* old_data = getData() ; @@ -875,6 +931,7 @@ BOOL LLImageRaw::scaleDownWithoutBlending( S32 new_width, S32 new_height)  	return TRUE ;  } +#endif  BOOL LLImageRaw::scale( S32 new_width, S32 new_height, BOOL scale_image_data )  { @@ -1527,6 +1584,7 @@ void LLImageFormatted::setData(U8 *data, S32 size)  	{  		deleteData();  		setDataAndSize(data, size); // Access private LLImageBase members +  		sGlobalFormattedMemory += getDataSize();  	}  } @@ -1545,7 +1603,7 @@ void LLImageFormatted::appendData(U8 *data, S32 size)  			S32 newsize = cursize + size;  			reallocateData(newsize);  			memcpy(getData() + cursize, data, size); -			delete[] data; +			deleteMemory(data);  		}  	}  } diff --git a/indra/llimage/llimage.h b/indra/llimage/llimage.h index 825b9aab1a..ab20ccda9e 100644 --- a/indra/llimage/llimage.h +++ b/indra/llimage/llimage.h @@ -29,7 +29,6 @@  #include "lluuid.h"  #include "llstring.h" -//#include "llmemory.h"  #include "llthread.h"  #include "llmemtype.h" @@ -56,6 +55,7 @@ const S32 MAX_IMG_PACKET_SIZE = 1000;  class LLImageFormatted;  class LLImageRaw;  class LLColor4U; +class LLPrivateMemoryPool;  typedef enum e_image_codec  { @@ -127,7 +127,7 @@ public:  protected:  	// special accessor to allow direct setting of mData and mDataSize by LLImageFormatted -	void setDataAndSize(U8 *data, S32 size) { mData = data; mDataSize = size; } +	void setDataAndSize(U8 *data, S32 size) { mData = data; mDataSize = size; }	  public:  	static void generateMip(const U8 *indata, U8* mipdata, int width, int height, S32 nchannels); @@ -138,6 +138,11 @@ public:  	static EImageCodec getCodecFromExtension(const std::string& exten); +	static 
void createPrivatePool() ; +	static void destroyPrivatePool() ; +	static char* allocateMemory(S32 size) ; +	static void  deleteMemory(void* p) ;	 +  private:  	U8 *mData;  	S32 mDataSize; @@ -149,6 +154,8 @@ private:  	bool mBadBufferAllocation ;  	bool mAllowOverSize ; + +	static LLPrivateMemoryPool* sPrivatePoolp ;  public:  	LLMemType::DeclareMemType& mMemType; // debug  }; @@ -172,7 +179,7 @@ public:  	BOOL resize(U16 width, U16 height, S8 components); -	U8 * getSubImage(U32 x_pos, U32 y_pos, U32 width, U32 height) const; +	//U8 * getSubImage(U32 x_pos, U32 y_pos, U32 width, U32 height) const;  	BOOL setSubImage(U32 x_pos, U32 y_pos, U32 width, U32 height,  					 const U8 *data, U32 stride = 0, BOOL reverse_y = FALSE); @@ -184,7 +191,7 @@ public:  	void contractToPowerOfTwo(S32 max_dim = MAX_IMAGE_SIZE, BOOL scale_image = TRUE);  	void biasedScaleToPowerOfTwo(S32 max_dim = MAX_IMAGE_SIZE);  	BOOL scale( S32 new_width, S32 new_height, BOOL scale_image = TRUE ); -	BOOL scaleDownWithoutBlending( S32 new_width, S32 new_height) ; +	//BOOL scaleDownWithoutBlending( S32 new_width, S32 new_height) ;  	// Fill the buffer with a constant color  	void fill( const LLColor4U& color ); diff --git a/indra/llimage/llimagedxt.cpp b/indra/llimage/llimagedxt.cpp index 4bd3efddaa..81be09a412 100644 --- a/indra/llimage/llimagedxt.cpp +++ b/indra/llimage/llimagedxt.cpp @@ -429,7 +429,7 @@ bool LLImageDXT::convertToDXR()  	S32 nmips = calcNumMips(width,height);  	S32 total_bytes = getDataSize();  	U8* olddata = getData(); -	U8* newdata = new U8[total_bytes]; +	U8* newdata = (U8*)allocateMemory(total_bytes);  	if (!newdata)  	{  		llerrs << "Out of memory in LLImageDXT::convertToDXR()" << llendl; diff --git a/indra/llimage/llimagej2c.cpp b/indra/llimage/llimagej2c.cpp index cb2a85fa91..b144f8fc66 100644 --- a/indra/llimage/llimagej2c.cpp +++ b/indra/llimage/llimagej2c.cpp @@ -373,14 +373,14 @@ BOOL LLImageJ2C::loadAndValidate(const std::string &filename)  	}  	else  	{ -		U8 *data = 
new U8[file_size]; +		U8 *data = (U8*)allocateMemory(file_size);  		apr_size_t bytes_read = file_size;  		apr_status_t s = apr_file_read(apr_file, data, &bytes_read); // modifies bytes_read	  		infile.close() ;  		if (s != APR_SUCCESS || (S32)bytes_read != file_size)  		{ -			delete[] data; +			deleteMemory(data);  			setLastError("Unable to read entire file");  			res = FALSE;  		} diff --git a/indra/llrender/llvertexbuffer.cpp b/indra/llrender/llvertexbuffer.cpp index 660dc14d02..03befe6818 100644 --- a/indra/llrender/llvertexbuffer.cpp +++ b/indra/llrender/llvertexbuffer.cpp @@ -33,6 +33,7 @@  #include "llglheaders.h"  #include "llmemtype.h"  #include "llrender.h" +#include "llmemory.h"  //============================================================================ @@ -942,11 +943,8 @@ U8* LLVertexBuffer::mapBuffer(S32 access)  			log_glerror();  			//check the availability of memory -			U32 avail_phy_mem, avail_vir_mem; -			LLMemoryInfo::getAvailableMemoryKB(avail_phy_mem, avail_vir_mem) ; -			llinfos << "Available physical mwmory(KB): " << avail_phy_mem << llendl ;  -			llinfos << "Available virtual memory(KB): " << avail_vir_mem << llendl; - +			LLMemory::logMemoryInfo(TRUE) ;  +			  			//--------------------  			//print out more debug info before crash  			llinfos << "vertex buffer size: (num verts : num indices) = " << getNumVerts() << " : " << getNumIndices() << llendl ; @@ -969,6 +967,7 @@ U8* LLVertexBuffer::mapBuffer(S32 access)  		if (!mMappedIndexData)  		{  			log_glerror(); +			LLMemory::logMemoryInfo(TRUE) ;  			GLint buff;  			glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB, &buff); diff --git a/indra/llxml/llcontrol.h b/indra/llxml/llcontrol.h index 93975579cc..70749b8ee9 100644 --- a/indra/llxml/llcontrol.h +++ b/indra/llxml/llcontrol.h @@ -385,7 +385,8 @@ class LLCachedControl  {  public:  	LLCachedControl(LLControlGroup& group, -					const std::string& name,  +					const std::string& name, +  					const T& default_value,   					const 
std::string& comment = "Declared In Code")  	{ diff --git a/indra/newview/app_settings/settings.xml b/indra/newview/app_settings/settings.xml index 58abd4c091..fac4cfbef9 100644 --- a/indra/newview/app_settings/settings.xml +++ b/indra/newview/app_settings/settings.xml @@ -1841,6 +1841,17 @@        <key>Value</key>        <integer>0</integer>      </map> +    <key>DebugShowPrivateMem</key> +    <map> +      <key>Comment</key> +      <string>Show Private Mem Info</string> +      <key>Persist</key> +      <integer>1</integer> +      <key>Type</key> +      <string>Boolean</string> +      <key>Value</key> +      <integer>0</integer> +    </map>      <key>DebugShowRenderInfo</key>      <map>        <key>Comment</key> @@ -5220,6 +5231,17 @@        <key>Value</key>        <real>48.0</real>      </map> +    <key>MaxHeapSize</key> +    <map> +      <key>Comment</key> +      <string>Maximum heap size (GB)</string> +      <key>Persist</key> +      <integer>1</integer> +      <key>Type</key> +      <string>F32</string> +      <key>Value</key> +      <real>1.6</real> +    </map>      <key>MaxSelectDistance</key>      <map>        <key>Comment</key> @@ -5341,6 +5363,17 @@      <key>Value</key>      <integer>1</integer>    </map> +  <key>MemeoyFailurePreventionEnabled</key> +  <map> +    <key>Comment</key> +    <string>If set, the viewer will quit to avoid crash when memory failure happens</string> +    <key>Persist</key> +    <integer>0</integer> +    <key>Type</key> +    <string>Boolean</string> +    <key>Value</key> +    <integer>1</integer> +  </map>    <key>MemoryLogFrequency</key>          <map>          <key>Comment</key> diff --git a/indra/newview/llappviewer.cpp b/indra/newview/llappviewer.cpp index a23f809b71..89251d2d37 100644 --- a/indra/newview/llappviewer.cpp +++ b/indra/newview/llappviewer.cpp @@ -619,7 +619,7 @@ LLAppViewer::~LLAppViewer()  }  bool LLAppViewer::init() -{ +{	  	//  	// Start of the application  	// @@ -647,6 +647,9 @@ bool LLAppViewer::init()  	if 
(!initConfiguration())  		return false; +	//set the max heap size. +	initMaxHeapSize() ; +  	// write Google Breakpad minidump files to our log directory  	std::string logdir = gDirUtilp->getExpandedFilename(LL_PATH_LOGS, "");  	logdir += gDirUtilp->getDirDelimiter(); @@ -996,9 +999,97 @@ bool LLAppViewer::init()  	LLAgentLanguage::init(); +	return true; +} +void LLAppViewer::initMaxHeapSize() +{ +	//set the max heap size. +	//here is some info regarding to the max heap size: +	//------------------------------------------------------------------------------------------ +	// OS       | setting | SL address bits | max manageable memory space | max heap size +	// Win 32   | default | 32-bit          | 2GB                         | < 1.7GB +	// Win 32   | /3G     | 32-bit          | 3GB                         | < 1.7GB or 2.7GB +	//Linux 32  | default | 32-bit          | 3GB                         | < 2.7GB +	//Linux 32  |HUGEMEM  | 32-bit          | 4GB                         | < 3.7GB +	//64-bit OS |default  | 32-bit          | 4GB                         | < 3.7GB +	//64-bit OS |default  | 64-bit          | N/A (> 4GB)                 | N/A (> 4GB) +	//------------------------------------------------------------------------------------------ +	//currently SL is built under 32-bit setting, we set its max heap size no more than 1.6 GB. 
+ +	//F32 max_heap_size_gb = llmin(1.6f, (F32)gSavedSettings.getF32("MaxHeapSize")) ; +	F32 max_heap_size_gb = gSavedSettings.getF32("MaxHeapSize") ; +	BOOL enable_mem_failure_prevention = (BOOL)gSavedSettings.getBOOL("MemeoyFailurePreventionEnabled") ; + +	LLMemory::initMaxHeapSizeGB(max_heap_size_gb, enable_mem_failure_prevention) ; +} -	return true; +void LLAppViewer::checkMemory() +{ +	const static F32 MEMORY_CHECK_INTERVAL = 1.0f ; //second +	const static F32 MAX_QUIT_WAIT_TIME = 30.0f ; //seconds +	const static U32 MAX_SIZE_CHECKED_MEMORY_BLOCK = 64 * 1024 * 1024 ; //64 MB +	//static F32 force_quit_timer = MAX_QUIT_WAIT_TIME + MEMORY_CHECK_INTERVAL ; +	static void* last_reserved_address = NULL ; + +	if(MEMORY_CHECK_INTERVAL > mMemCheckTimer.getElapsedTimeF32()) +	{ +		return ; +	} +	mMemCheckTimer.reset() ; + +	if(gGLManager.mDebugGPU) +	{ +		//update the availability of memory +		LLMemory::updateMemoryInfo() ; +	} + +	//check the virtual address space fragmentation +	if(!last_reserved_address) +	{ +		last_reserved_address = LLMemory::tryToAlloc(last_reserved_address, MAX_SIZE_CHECKED_MEMORY_BLOCK) ; +	} +	else +	{ +		last_reserved_address = LLMemory::tryToAlloc(last_reserved_address, MAX_SIZE_CHECKED_MEMORY_BLOCK) ; +		if(!last_reserved_address) //failed, try once more +		{ +			last_reserved_address = LLMemory::tryToAlloc(last_reserved_address, MAX_SIZE_CHECKED_MEMORY_BLOCK) ; +		} +	} + +	S32 is_low = !last_reserved_address || LLMemory::isMemoryPoolLow() ; + +	//if(is_low < 0) //to force quit +	//{ +	//	if(force_quit_timer > MAX_QUIT_WAIT_TIME) //just hit the limit for the first time +	//	{ +	//		//send out the notification to tell the viewer is about to quit in 30 seconds. 
+	//		LLNotification::Params params("ForceQuitDueToLowMemory"); +	//		LLNotifications::instance().add(params); + +	//		force_quit_timer = MAX_QUIT_WAIT_TIME - MEMORY_CHECK_INTERVAL ; +	//	} +	//	else +	//	{ +	//		force_quit_timer -= MEMORY_CHECK_INTERVAL ; +	//		if(force_quit_timer < 0.f) +	//		{ +	//			forceQuit() ; //quit +	//		} +	//	} +	//} +	//else +	//{ +	//	force_quit_timer = MAX_QUIT_WAIT_TIME + MEMORY_CHECK_INTERVAL ; +	//} + +	LLPipeline::throttleNewMemoryAllocation(!is_low ? FALSE : TRUE) ;		 +	 +	if(is_low) +	{ +		LLMemory::logMemoryInfo() ; +	}  }  static LLFastTimer::DeclareTimer FTM_MESSAGES("System Messages"); @@ -1036,7 +1127,6 @@ bool LLAppViewer::mainLoop()  	LLVoiceClient::getInstance()->init(gServicePump);  	LLTimer frameTimer,idleTimer;  	LLTimer debugTime; -	LLFrameTimer memCheckTimer;  	LLViewerJoystick* joystick(LLViewerJoystick::getInstance());  	joystick->setNeedsReset(true); @@ -1047,7 +1137,9 @@ bool LLAppViewer::mainLoop()      // point of posting.      LLSD newFrame; -	const F32 memory_check_interval = 1.0f ; //second +	//LLPrivateMemoryPoolTester::getInstance()->run(false) ; +	//LLPrivateMemoryPoolTester::getInstance()->run(true) ; +	//LLPrivateMemoryPoolTester::destroy() ;  	// Handle messages  	while (!LLApp::isExiting()) @@ -1058,18 +1150,8 @@ bool LLAppViewer::mainLoop()  		llclearcallstacks;  		//check memory availability information -		{ -			if(memory_check_interval < memCheckTimer.getElapsedTimeF32()) -			{ -				memCheckTimer.reset() ; - -				//update the availability of memory -				LLMemoryInfo::getAvailableMemoryKB(mAvailPhysicalMemInKB, mAvailVirtualMemInKB) ; -			} -			llcallstacks << "Available physical mem(KB): " << mAvailPhysicalMemInKB << llcallstacksendl ; -			llcallstacks << "Available virtual mem(KB): " << mAvailVirtualMemInKB << llcallstacksendl ; -		} - +		checkMemory() ; +		  		try  		{  			pingMainloopTimeout("Main:MiscNativeWindowEvents"); @@ -1233,7 +1315,7 @@ bool LLAppViewer::mainLoop()  				
idleTimer.reset();  				bool is_slow = (frameTimer.getElapsedTimeF64() > FRAME_SLOW_THRESHOLD) ;  				S32 total_work_pending = 0; -				S32 total_io_pending = 0;				 +				S32 total_io_pending = 0;	  				while(!is_slow)//do not unpause threads if the frame rates are very low.  				{  					S32 work_pending = 0; @@ -1300,15 +1382,7 @@ bool LLAppViewer::mainLoop()  		}  		catch(std::bad_alloc)  		{			 -			{ -				llinfos << "Availabe physical memory(KB) at the beginning of the frame: " << mAvailPhysicalMemInKB << llendl ; -				llinfos << "Availabe virtual memory(KB) at the beginning of the frame: " << mAvailVirtualMemInKB << llendl ; - -				LLMemoryInfo::getAvailableMemoryKB(mAvailPhysicalMemInKB, mAvailVirtualMemInKB) ; - -				llinfos << "Current availabe physical memory(KB): " << mAvailPhysicalMemInKB << llendl ; -				llinfos << "Current availabe virtual memory(KB): " << mAvailVirtualMemInKB << llendl ; -			} +			LLMemory::logMemoryInfo(TRUE) ;  			//stop memory leaking simulation  			LLFloaterMemLeak* mem_leak_instance = @@ -1777,6 +1851,9 @@ bool LLAppViewer::cleanup()  	LLMainLoopRepeater::instance().stop(); +	//release all private memory pools. +	LLPrivateMemoryPoolManager::destroyClass() ; +  	ll_close_fail_log();  	MEM_TRACK_RELEASE diff --git a/indra/newview/llappviewer.h b/indra/newview/llappviewer.h index a18e6cbb02..eff4cab2d0 100644 --- a/indra/newview/llappviewer.h +++ b/indra/newview/llappviewer.h @@ -168,7 +168,7 @@ public:  	// mute/unmute the system's master audio  	virtual void setMasterSystemAudioMute(bool mute); -	virtual bool getMasterSystemAudioMute(); +	virtual bool getMasterSystemAudioMute();	  	// Metrics policy helper statics.  	static void metricsUpdateRegion(U64 region_handle); @@ -190,11 +190,12 @@ protected:  private: +	void initMaxHeapSize();  	bool initThreads(); // Initialize viewer threads, return false on failure.  	bool initConfiguration(); // Initialize settings from the command line/config file.  	
void initUpdater(); // Initialize the updater service.  	bool initCache(); // Initialize local client cache. - +	void checkMemory() ;  	// We have switched locations of both Mac and Windows cache, make sure  	// files migrate and old cache is cleared out. @@ -268,8 +269,7 @@ private:  	std::set<struct apr_dso_handle_t*> mPlugins; -	U32 mAvailPhysicalMemInKB ; -	U32 mAvailVirtualMemInKB ; +	LLFrameTimer mMemCheckTimer;  	boost::scoped_ptr<LLUpdaterService> mUpdater; diff --git a/indra/newview/lldynamictexture.cpp b/indra/newview/lldynamictexture.cpp index f781d5f3ff..fb9958ee9d 100644 --- a/indra/newview/lldynamictexture.cpp +++ b/indra/newview/lldynamictexture.cpp @@ -40,6 +40,7 @@  #include "llvertexbuffer.h"  #include "llviewerdisplay.h"  #include "llrender.h" +#include "pipeline.h"  // static  LLViewerDynamicTexture::instance_list_t LLViewerDynamicTexture::sInstances[ LLViewerDynamicTexture::ORDER_COUNT ]; @@ -201,7 +202,7 @@ void LLViewerDynamicTexture::postRender(BOOL success)  BOOL LLViewerDynamicTexture::updateAllInstances()  {  	sNumRenders = 0; -	if (gGLManager.mIsDisabled) +	if (gGLManager.mIsDisabled || LLPipeline::sMemAllocationThrottled)  	{  		return TRUE;  	} diff --git a/indra/newview/llfloatermemleak.cpp b/indra/newview/llfloatermemleak.cpp index 58931d112e..9edfe1e354 100644 --- a/indra/newview/llfloatermemleak.cpp +++ b/indra/newview/llfloatermemleak.cpp @@ -90,6 +90,11 @@ LLFloaterMemLeak::~LLFloaterMemLeak()  void LLFloaterMemLeak::release()  { +	if(mLeakedMem.empty()) +	{ +		return ; +	} +  	for(S32 i = 0 ; i < (S32)mLeakedMem.size() ; i++)  	{  		delete[] mLeakedMem[i] ; diff --git a/indra/newview/lltexturecache.cpp b/indra/newview/lltexturecache.cpp index f54214b95c..14e9ed79d7 100644 --- a/indra/newview/lltexturecache.cpp +++ b/indra/newview/lltexturecache.cpp @@ -113,7 +113,7 @@ public:  	~LLTextureCacheWorker()  	{  		llassert_always(!haveWork()); -		delete[] mReadData; +		LLImageBase::deleteMemory(mReadData);  	}  	// override this 
interface @@ -215,7 +215,7 @@ bool LLTextureCacheLocalFileWorker::doRead()  			mDataSize = 0;  			return true;  		} -		mReadData = new U8[mDataSize]; +		mReadData = (U8*)LLImageBase::allocateMemory(mDataSize);  		mBytesRead = -1;  		mBytesToRead = mDataSize;  		setPriority(LLWorkerThread::PRIORITY_LOW | mPriority); @@ -233,7 +233,7 @@ bool LLTextureCacheLocalFileWorker::doRead()  // 						<< " Bytes: " << mDataSize << " Offset: " << mOffset  // 						<< " / " << mDataSize << llendl;  				mDataSize = 0; // failed -				delete[] mReadData; +				LLImageBase::deleteMemory(mReadData);  				mReadData = NULL;  			}  			return true; @@ -248,7 +248,7 @@ bool LLTextureCacheLocalFileWorker::doRead()  	{  		mDataSize = local_size;  	} -	mReadData = new U8[mDataSize]; +	mReadData = (U8*)LLImageBase::allocateMemory(mDataSize);  	S32 bytes_read = LLAPRFile::readEx(mFileName, mReadData, mOffset, mDataSize, mCache->getLocalAPRFilePool());	 @@ -258,7 +258,7 @@ bool LLTextureCacheLocalFileWorker::doRead()  // 				<< " Bytes: " << mDataSize << " Offset: " << mOffset  // 				<< " / " << mDataSize << llendl;  		mDataSize = 0; -		delete[] mReadData; +		LLImageBase::deleteMemory(mReadData);  		mReadData = NULL;  	}  	else @@ -377,7 +377,7 @@ bool LLTextureCacheRemoteWorker::doRead()  			mDataSize = local_size;  		}  		// Allocate read buffer -		mReadData = new U8[mDataSize]; +		mReadData = (U8*)LLImageBase::allocateMemory(mDataSize);  		S32 bytes_read = LLAPRFile::readEx(local_filename,   											 mReadData, mOffset, mDataSize, mCache->getLocalAPRFilePool());  		if (bytes_read != mDataSize) @@ -386,7 +386,7 @@ bool LLTextureCacheRemoteWorker::doRead()   					<< " Bytes: " << mDataSize << " Offset: " << mOffset   					<< " / " << mDataSize << llendl;  			mDataSize = 0; -			delete[] mReadData; +			LLImageBase::deleteMemory(mReadData);  			mReadData = NULL;  		}  		else @@ -429,7 +429,7 @@ bool LLTextureCacheRemoteWorker::doRead()  		S32 size = TEXTURE_CACHE_ENTRY_SIZE - mOffset;  		size = 
llmin(size, mDataSize);  		// Allocate the read buffer -		mReadData = new U8[size]; +		mReadData = (U8*)LLImageBase::allocateMemory(size);  		S32 bytes_read = LLAPRFile::readEx(mCache->mHeaderDataFileName,   											 mReadData, offset, size, mCache->getLocalAPRFilePool());  		if (bytes_read != size) @@ -437,7 +437,7 @@ bool LLTextureCacheRemoteWorker::doRead()  			llwarns << "LLTextureCacheWorker: "  << mID  					<< " incorrect number of bytes read from header: " << bytes_read  					<< " / " << size << llendl; -			delete[] mReadData; +			LLImageBase::deleteMemory(mReadData);  			mReadData = NULL;  			mDataSize = -1; // failed  			done = true; @@ -467,7 +467,7 @@ bool LLTextureCacheRemoteWorker::doRead()  			S32 data_offset, file_size, file_offset;  			// Reserve the whole data buffer first -			U8* data = new U8[mDataSize]; +			U8* data = (U8*)LLImageBase::allocateMemory(mDataSize);  			// Set the data file pointers taking the read offset into account. 2 cases:  			if (mOffset < TEXTURE_CACHE_ENTRY_SIZE) @@ -480,7 +480,7 @@ bool LLTextureCacheRemoteWorker::doRead()  				// Copy the raw data we've been holding from the header cache into the new sized buffer  				llassert_always(mReadData);  				memcpy(data, mReadData, data_offset); -				delete[] mReadData; +				LLImageBase::deleteMemory(mReadData);  				mReadData = NULL;  			}  			else @@ -506,7 +506,7 @@ bool LLTextureCacheRemoteWorker::doRead()  				llwarns << "LLTextureCacheWorker: "  << mID  						<< " incorrect number of bytes read from body: " << bytes_read  						<< " / " << file_size << llendl; -				delete[] mReadData; +				LLImageBase::deleteMemory(mReadData);  				mReadData = NULL;  				mDataSize = -1; // failed  				done = true; @@ -598,11 +598,11 @@ bool LLTextureCacheRemoteWorker::doWrite()  		{  			// We need to write a full record in the header cache so, if the amount of data is smaller  			// than a record, we need to transfer the data to a buffer padded with 0 and write that -			U8* padBuffer = 
new U8[TEXTURE_CACHE_ENTRY_SIZE]; +			U8* padBuffer = (U8*)LLImageBase::allocateMemory(TEXTURE_CACHE_ENTRY_SIZE);  			memset(padBuffer, 0, TEXTURE_CACHE_ENTRY_SIZE);		// Init with zeros  			memcpy(padBuffer, mWriteData, mDataSize);			// Copy the write buffer  			bytes_written = LLAPRFile::writeEx(mCache->mHeaderDataFileName, padBuffer, offset, size, mCache->getLocalAPRFilePool()); -			delete [] padBuffer; +			LLImageBase::deleteMemory(padBuffer);  		}  		else  		{ @@ -698,7 +698,7 @@ void LLTextureCacheWorker::finishWork(S32 param, bool completed)  			}  			else  			{ -				delete[] mReadData; +				LLImageBase::deleteMemory(mReadData);  				mReadData = NULL;  			}  		} diff --git a/indra/newview/lltexturefetch.cpp b/indra/newview/lltexturefetch.cpp index 18c3a3b87d..484d5ea61a 100644 --- a/indra/newview/lltexturefetch.cpp +++ b/indra/newview/lltexturefetch.cpp @@ -817,7 +817,7 @@ void LLTextureFetchWorker::setImagePriority(F32 priority)  void LLTextureFetchWorker::resetFormattedData()  { -	delete[] mBuffer; +	LLImageBase::deleteMemory(mBuffer);  	mBuffer = NULL;  	mBufferSize = 0;  	if (mFormattedImage.notNull()) @@ -888,7 +888,7 @@ bool LLTextureFetchWorker::doWork(S32 param)  		mSentRequest = UNSENT;  		mDecoded  = FALSE;  		mWritten  = FALSE; -		delete[] mBuffer; +		LLImageBase::deleteMemory(mBuffer);  		mBuffer = NULL;  		mBufferSize = 0;  		mHaveAllData = FALSE; @@ -1284,7 +1284,7 @@ bool LLTextureFetchWorker::doWork(S32 param)  			llassert_always(mBufferSize == cur_size + mRequestedSize);  			if(!mBufferSize)//no data received.  			{ -				delete[] mBuffer;  +				LLImageBase::deleteMemory(mBuffer);   				mBuffer = NULL;  				//abort. @@ -1312,7 +1312,7 @@ bool LLTextureFetchWorker::doWork(S32 param)  				mFileSize = mBufferSize + 1 ; //flag the file is not fully loaded.  			
} -			U8* buffer = new U8[mBufferSize]; +			U8* buffer = (U8*)LLImageBase::allocateMemory(mBufferSize);  			if (cur_size > 0)  			{  				memcpy(buffer, mFormattedImage->getData(), cur_size); @@ -1321,7 +1321,7 @@ bool LLTextureFetchWorker::doWork(S32 param)  			// NOTE: setData releases current data and owns new data (buffer)  			mFormattedImage->setData(buffer, mBufferSize);  			// delete temp data -			delete[] mBuffer; // Note: not 'buffer' (assigned in setData()) +			LLImageBase::deleteMemory(mBuffer); // Note: not 'buffer' (assigned in setData())  			mBuffer = NULL;  			mBufferSize = 0;  			mLoadedDiscard = mRequestedDiscard; @@ -1618,7 +1618,7 @@ bool LLTextureFetchWorker::processSimulatorPackets()  			if (buffer_size > cur_size)  			{  				/// We have new data -				U8* buffer = new U8[buffer_size]; +				U8* buffer = (U8*)LLImageBase::allocateMemory(buffer_size);  				S32 offset = 0;  				if (cur_size > 0 && mFirstPacket > 0)  				{ @@ -1670,7 +1670,7 @@ S32 LLTextureFetchWorker::callbackHttpGet(const LLChannelDescriptors& channels,  		if (data_size > 0)  		{  			// *TODO: set the formatted image data here directly to avoid the copy -			mBuffer = new U8[data_size]; +			mBuffer = (U8*)LLImageBase::allocateMemory(data_size);  			buffer->readAfter(channels.in(), NULL, mBuffer, data_size);  			mBufferSize += data_size;  			if (data_size < mRequestedSize && mRequestedDiscard == 0) diff --git a/indra/newview/llviewerdisplay.cpp b/indra/newview/llviewerdisplay.cpp index 41b7c13826..b9d81a76ff 100644 --- a/indra/newview/llviewerdisplay.cpp +++ b/indra/newview/llviewerdisplay.cpp @@ -203,6 +203,7 @@ void display_stats()  		gMemoryAllocated = LLMemory::getCurrentRSS();  		U32 memory = (U32)(gMemoryAllocated / (1024*1024));  		llinfos << llformat("MEMORY: %d MB", memory) << llendl; +		LLMemory::logMemoryInfo() ;  		gRecentMemoryTime.reset();  	}  } @@ -690,7 +691,11 @@ void display(BOOL rebuild, F32 zoom_factor, int subfield, BOOL for_snapshot)  				glh::matrix4f mod = 
glh_get_current_modelview();  				glViewport(0,0,512,512);  				LLVOAvatar::updateFreezeCounter() ; -				LLVOAvatar::updateImpostors(); + +				if(!LLPipeline::sMemAllocationThrottled) +				{		 +					LLVOAvatar::updateImpostors(); +				}  				glh_set_current_projection(proj);  				glh_set_current_modelview(mod); diff --git a/indra/newview/llviewertexture.cpp b/indra/newview/llviewertexture.cpp index cd16b15e3e..23d6ca1ae2 100644 --- a/indra/newview/llviewertexture.cpp +++ b/indra/newview/llviewertexture.cpp @@ -3080,9 +3080,16 @@ void LLViewerLODTexture::processTextureStats()  	{  		mDesiredDiscardLevel = llmin(mDesiredDiscardLevel, (S8)mDesiredSavedRawDiscardLevel) ;  	} +	else if(LLPipeline::sMemAllocationThrottled)//release memory of large textures by decrease their resolutions. +	{ +		if(scaleDown()) +		{ +			mDesiredDiscardLevel = mCachedRawDiscardLevel ; +		} +	}  } -void LLViewerLODTexture::scaleDown() +bool LLViewerLODTexture::scaleDown()  {  	if(hasGLTexture() && mCachedRawDiscardLevel > getDiscardLevel())  	{		 @@ -3093,7 +3100,10 @@ void LLViewerLODTexture::scaleDown()  		{  			tester->setStablizingTime() ;  		} + +		return true ;  	} +	return false ;  }  //----------------------------------------------------------------------------------------------  //end of LLViewerLODTexture diff --git a/indra/newview/llviewertexture.h b/indra/newview/llviewertexture.h index b5636bbdc7..07b91c2071 100644 --- a/indra/newview/llviewertexture.h +++ b/indra/newview/llviewertexture.h @@ -595,7 +595,7 @@ public:  private:  	void init(bool firstinit) ; -	void scaleDown() ;		 +	bool scaleDown() ;		  private:  	F32 mDiscardVirtualSize;		// Virtual size used to calculate desired discard	 diff --git a/indra/newview/llviewerwindow.cpp b/indra/newview/llviewerwindow.cpp index 0028ced6c8..dfdf429455 100644 --- a/indra/newview/llviewerwindow.cpp +++ b/indra/newview/llviewerwindow.cpp @@ -561,6 +561,17 @@ public:  			addText(xpos, ypos, llformat("%d %d %d %d", color[0], color[1], 
color[2], color[3]));  			ypos += y_inc;  		} + +		if (gSavedSettings.getBOOL("DebugShowPrivateMem")) +		{ +			LLPrivateMemoryPoolManager::getInstance()->updateStatistics() ; +			addText(xpos, ypos, llformat("Total Reserved(KB): %d", LLPrivateMemoryPoolManager::getInstance()->mTotalReservedSize / 1024)); +			ypos += y_inc; + +			addText(xpos, ypos, llformat("Total Allocated(KB): %d", LLPrivateMemoryPoolManager::getInstance()->mTotalAllocatedSize / 1024)); +			ypos += y_inc; +		} +  		// only display these messages if we are actually rendering beacons at this moment  		if (LLPipeline::getRenderBeacons(NULL) && LLFloaterReg::instanceVisible("beacons"))  		{ @@ -3978,6 +3989,19 @@ BOOL LLViewerWindow::rawSnapshot(LLImageRaw *raw, S32 image_width, S32 image_hei  	{  		return FALSE;  	} +	//check if there is enough memory for the snapshot image +	if(LLPipeline::sMemAllocationThrottled) +	{ +		return FALSE ; //snapshot taking is disabled due to memory restriction. +	} +	if(image_width * image_height > (1 << 22)) //if snapshot image is larger than 2K by 2K +	{ +		if(!LLMemory::tryToAlloc(NULL, image_width * image_height * 3)) +		{ +			llwarns << "No enough memory to take the snapshot with size (w : h): " << image_width << " : " << image_height << llendl ; +			return FALSE ; //there is no enough memory for taking this snapshot. 
+		} +	}  	// PRE SNAPSHOT  	gDisplaySwapBuffers = FALSE; diff --git a/indra/newview/pipeline.cpp b/indra/newview/pipeline.cpp index 13e537fae5..53564ec0f8 100644 --- a/indra/newview/pipeline.cpp +++ b/indra/newview/pipeline.cpp @@ -100,6 +100,7 @@  #include "llspatialpartition.h"  #include "llmutelist.h"  #include "lltoolpie.h" +#include "llnotifications.h"  #ifdef _DEBUG @@ -281,6 +282,7 @@ BOOL	LLPipeline::sRenderAttachedLights = TRUE;  BOOL	LLPipeline::sRenderAttachedParticles = TRUE;  BOOL	LLPipeline::sRenderDeferred = FALSE;  BOOL    LLPipeline::sAllowRebuildPriorityGroup = FALSE ; +BOOL    LLPipeline::sMemAllocationThrottled = FALSE;  S32		LLPipeline::sVisibleLightCount = 0;  F32		LLPipeline::sMinRenderSize = 0.f; @@ -513,6 +515,24 @@ void LLPipeline::destroyGL()  static LLFastTimer::DeclareTimer FTM_RESIZE_SCREEN_TEXTURE("Resize Screen Texture"); +//static +void LLPipeline::throttleNewMemoryAllocation(BOOL disable) +{ +	if(sMemAllocationThrottled != disable) +	{ +		sMemAllocationThrottled = disable ; + +		if(sMemAllocationThrottled) +		{ +			//send out notification +			LLNotification::Params params("LowMemory"); +			LLNotifications::instance().add(params); + +			//release some memory. 
+		} +	} +} +  void LLPipeline::resizeScreenTexture()  {  	LLFastTimer ft(FTM_RESIZE_SCREEN_TEXTURE); diff --git a/indra/newview/pipeline.h b/indra/newview/pipeline.h index e99b0d71e3..4a7cc77bde 100644 --- a/indra/newview/pipeline.h +++ b/indra/newview/pipeline.h @@ -334,6 +334,8 @@ public:  	static void updateRenderDeferred(); +	static void throttleNewMemoryAllocation(BOOL disable); +  private:  	void unloadShaders();  	void addToQuickLookup( LLDrawPool* new_poolp ); @@ -479,8 +481,9 @@ public:  	static BOOL				sRenderAttachedParticles;  	static BOOL				sRenderDeferred;  	static BOOL             sAllowRebuildPriorityGroup; +	static BOOL             sMemAllocationThrottled;  	static S32				sVisibleLightCount; -	static F32				sMinRenderSize; +	static F32				sMinRenderSize;	  	//screen texture  	U32 					mScreenWidth; diff --git a/indra/newview/skins/default/xui/en/menu_viewer.xml b/indra/newview/skins/default/xui/en/menu_viewer.xml index 606ff69599..6963b5bb45 100644 --- a/indra/newview/skins/default/xui/en/menu_viewer.xml +++ b/indra/newview/skins/default/xui/en/menu_viewer.xml @@ -2000,6 +2000,16 @@                 function="ToggleControl"                 parameter="DebugShowMemory" />              </menu_item_check> +	     <menu_item_check +               label="Show Private Mem Info" +               name="Show Private Mem Info"> +              <menu_item_check.on_check +               function="CheckControl" +               parameter="DebugShowPrivateMem" /> +              <menu_item_check.on_click +               function="ToggleControl" +               parameter="DebugShowPrivateMem" /> +            </menu_item_check>              <menu_item_separator/> diff --git a/indra/newview/skins/default/xui/en/notifications.xml b/indra/newview/skins/default/xui/en/notifications.xml index f008042a81..fcb4123b2e 100644 --- a/indra/newview/skins/default/xui/en/notifications.xml +++ b/indra/newview/skins/default/xui/en/notifications.xml @@ -6680,6 +6680,20 @@ Mute 
everyone?      Here's your current balance of L$. Click Buy L$ to purchase more Linden Dollars.    </notification> +   <notification +   icon="alertmodal.tga" +   name="LowMemory" +   type="alertmodal"> +    Your memory pool is low. Some functions of SL are disabled to avoid a crash. Please close other applications. Restart SL if this persists. +  </notification> + +  <notification +     icon="alertmodal.tga" +     name="ForceQuitDueToLowMemory" +     type="alertmodal"> +    SL will quit in 30 seconds because it is out of memory. +  </notification> +    <notification    name="PopupAttempt"    icon="Popup_Caution" | 
