authorAndrey Kleshchev <andreykproductengine@lindenlab.com>2018-01-30 14:03:26 +0000
committerAndrey Kleshchev <andreykproductengine@lindenlab.com>2018-01-30 14:03:26 +0000
commit6e445e82e2a1c1559be5193cd2c131c5f1207f55 (patch)
treeba6610b440ae0792539e79f053b086aac0a95ecc
parentb0dedf0b68ab47f6926f9b5d60ea05a1ef68eedf (diff)
MAINT-8091 Removed deprecated and unused private memory pooling
-rw-r--r--  indra/llcommon/llmemory.cpp                          1732
-rw-r--r--  indra/llcommon/llmemory.h                             321
-rw-r--r--  indra/llimage/llimage.cpp                              36
-rw-r--r--  indra/llimage/llimage.h                                 7
-rw-r--r--  indra/llimage/llimagedxt.cpp                            2
-rw-r--r--  indra/llimage/llimagej2c.cpp                            4
-rw-r--r--  indra/llrender/llvertexbuffer.cpp                      20
-rw-r--r--  indra/newview/app_settings/settings.xml                10
-rw-r--r--  indra/newview/llappviewer.cpp                           8
-rw-r--r--  indra/newview/lltexturecache.cpp                       38
-rw-r--r--  indra/newview/lltexturefetch.cpp                        6
-rw-r--r--  indra/newview/llviewerwindow.cpp                       10
-rw-r--r--  indra/newview/skins/default/xui/en/menu_viewer.xml     10
13 files changed, 40 insertions, 2164 deletions
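At the call sites the change is mechanical: every ALLOCATE_MEM(pool, size) / FREE_MEM(pool, addr) pair in the hunks below becomes a direct ll_aligned_malloc_16(size) / ll_aligned_free_16(addr) pair from llmemory.h. A minimal sketch of the resulting allocation pattern (the helper names are illustrative only, not part of the commit):

    #include "llmemory.h"   // ll_aligned_malloc_16 / ll_aligned_free_16

    // Hypothetical helpers showing the post-commit pattern used throughout the diff.
    static U8* alloc_buffer(S32 size)
    {
        // 16-byte-aligned heap allocation; may return NULL, callers must check.
        return (U8*)ll_aligned_malloc_16(size);
    }

    static void free_buffer(U8* data)
    {
        // Matching aligned free (replaces FREE_MEM(poolp, data)).
        ll_aligned_free_16(data);
    }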
diff --git a/indra/llcommon/llmemory.cpp b/indra/llcommon/llmemory.cpp
index 049e962638..b3debf3550 100644
--- a/indra/llcommon/llmemory.cpp
+++ b/indra/llcommon/llmemory.cpp
@@ -57,10 +57,6 @@ U32Kilobytes LLMemory::sAllocatedPageSizeInKB(0);
U32Kilobytes LLMemory::sMaxHeapSizeInKB(U32_MAX);
BOOL LLMemory::sEnableMemoryFailurePrevention = FALSE;
-#if __DEBUG_PRIVATE_MEM__
-LLPrivateMemoryPoolManager::mem_allocation_info_t LLPrivateMemoryPoolManager::sMemAllocationTracker;
-#endif
-
void ll_assert_aligned_func(uintptr_t ptr,U32 alignment)
{
#if defined(LL_WINDOWS) && defined(LL_DEBUG_BUFFER_OVERRUN)
@@ -154,17 +150,12 @@ void LLMemory::logMemoryInfo(BOOL update)
if(update)
{
updateMemoryInfo() ;
- LLPrivateMemoryPoolManager::getInstance()->updateStatistics() ;
}
LL_INFOS() << "Current allocated physical memory(KB): " << sAllocatedMemInKB << LL_ENDL ;
LL_INFOS() << "Current allocated page size (KB): " << sAllocatedPageSizeInKB << LL_ENDL ;
LL_INFOS() << "Current available physical memory(KB): " << sAvailPhysicalMemInKB << LL_ENDL ;
LL_INFOS() << "Current max usable memory(KB): " << sMaxPhysicalMemInKB << LL_ENDL ;
-
- LL_INFOS() << "--- private pool information -- " << LL_ENDL ;
- LL_INFOS() << "Total reserved (KB): " << LLPrivateMemoryPoolManager::getInstance()->mTotalReservedSize / 1024 << LL_ENDL ;
- LL_INFOS() << "Total allocated (KB): " << LLPrivateMemoryPoolManager::getInstance()->mTotalAllocatedSize / 1024 << LL_ENDL ;
}
//return 0: everything is normal;
@@ -356,1729 +347,6 @@ U64 LLMemory::getCurrentRSS()
#endif
-//--------------------------------------------------------------------------------------------------
-//--------------------------------------------------------------------------------------------------
-//minimum slot size and minimal slot size interval
-const U32 ATOMIC_MEM_SLOT = 16 ; //bytes
-
-//minimum block sizes (page size) for small allocation, medium allocation, large allocation
-const U32 MIN_BLOCK_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION] = {2 << 10, 4 << 10, 16 << 10} ; //
-
-//maximum block sizes for small allocation, medium allocation, large allocation
-const U32 MAX_BLOCK_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION] = {64 << 10, 1 << 20, 4 << 20} ;
-
-//minimum slot sizes for small allocation, medium allocation, large allocation
-const U32 MIN_SLOT_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION] = {ATOMIC_MEM_SLOT, 2 << 10, 512 << 10};
-
-//maximum slot sizes for small allocation, medium allocation, large allocation
-const U32 MAX_SLOT_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION] = {(2 << 10) - ATOMIC_MEM_SLOT, (512 - 2) << 10, 4 << 20};
-
-//size of a block with multiple slots can not exceed CUT_OFF_SIZE
-const U32 CUT_OFF_SIZE = (64 << 10) ; //64 KB
-
-//max number of slots in a block
-const U32 MAX_NUM_SLOTS_IN_A_BLOCK = llmin(MIN_BLOCK_SIZES[0] / ATOMIC_MEM_SLOT, ATOMIC_MEM_SLOT * 8) ;
-
-//-------------------------------------------------------------
-//align val up to an integer multiple of ATOMIC_MEM_SLOT
-U32 align(U32 val)
-{
- U32 aligned = (val / ATOMIC_MEM_SLOT) * ATOMIC_MEM_SLOT ;
- if(aligned < val)
- {
- aligned += ATOMIC_MEM_SLOT ;
- }
-
- return aligned ;
-}
-
-//-------------------------------------------------------------
-//class LLPrivateMemoryPool::LLMemoryBlock
-//-------------------------------------------------------------
-//
-//each memory block could fit two page sizes: 0.75 * mSlotSize, which starts from the beginning of the memory chunk and grows towards the end of
-//the block; another is mSlotSize, which starts from the end of the block and grows towards the beginning of the block.
-//
-LLPrivateMemoryPool::LLMemoryBlock::LLMemoryBlock()
-{
- //empty
-}
-
-LLPrivateMemoryPool::LLMemoryBlock::~LLMemoryBlock()
-{
- //empty
-}
-
-//create and initialize a memory block
-void LLPrivateMemoryPool::LLMemoryBlock::init(char* buffer, U32 buffer_size, U32 slot_size)
-{
- mBuffer = buffer ;
- mBufferSize = buffer_size ;
- mSlotSize = slot_size ;
- mTotalSlots = buffer_size / mSlotSize ;
-
- llassert_always(buffer_size / mSlotSize <= MAX_NUM_SLOTS_IN_A_BLOCK) ; //max number is 128
-
- mAllocatedSlots = 0 ;
- mDummySize = 0 ;
-
- //init the bit map.
- //mark free bits
- if(mTotalSlots > 32) //reserve extra space from mBuffer to store bitmap if needed.
- {
- mDummySize = ATOMIC_MEM_SLOT ;
- mTotalSlots -= (mDummySize + mSlotSize - 1) / mSlotSize ;
- mUsageBits = 0 ;
-
- S32 usage_bit_len = (mTotalSlots + 31) / 32 ;
-
- for(S32 i = 0 ; i < usage_bit_len - 1 ; i++)
- {
- *((U32*)mBuffer + i) = 0 ;
- }
- for(S32 i = usage_bit_len - 1 ; i < mDummySize / sizeof(U32) ; i++)
- {
- *((U32*)mBuffer + i) = 0xffffffff ;
- }
-
- if(mTotalSlots & 31)
- {
- *((U32*)mBuffer + usage_bit_len - 2) = (0xffffffff << (mTotalSlots & 31)) ;
- }
- }
- else//no extra bitmap space reserved
- {
- mUsageBits = 0 ;
- if(mTotalSlots & 31)
- {
- mUsageBits = (0xffffffff << (mTotalSlots & 31)) ;
- }
- }
-
- mSelf = this ;
- mNext = NULL ;
- mPrev = NULL ;
-
- llassert_always(mTotalSlots > 0) ;
-}
-
-//mark this block to be free with the memory [mBuffer, mBuffer + mBufferSize).
-void LLPrivateMemoryPool::LLMemoryBlock::setBuffer(char* buffer, U32 buffer_size)
-{
- mBuffer = buffer ;
- mBufferSize = buffer_size ;
- mSelf = NULL ;
- mTotalSlots = 0 ; //set the block is free.
-}
-
-//reserve a slot
-char* LLPrivateMemoryPool::LLMemoryBlock::allocate()
-{
- llassert_always(mAllocatedSlots < mTotalSlots) ;
-
- //find a free slot
- U32* bits = NULL ;
- U32 k = 0 ;
- if(mUsageBits != 0xffffffff)
- {
- bits = &mUsageBits ;
- }
- else if(mDummySize > 0)//go to extra space
- {
- for(S32 i = 0 ; i < mDummySize / sizeof(U32); i++)
- {
- if(*((U32*)mBuffer + i) != 0xffffffff)
- {
- bits = (U32*)mBuffer + i ;
- k = i + 1 ;
- break ;
- }
- }
- }
- S32 idx = 0 ;
- U32 tmp = *bits ;
- for(; tmp & 1 ; tmp >>= 1, idx++) ;
-
- //set the slot reserved
- if(!idx)
- {
- *bits |= 1 ;
- }
- else
- {
- *bits |= (1 << idx) ;
- }
-
- mAllocatedSlots++ ;
-
- return mBuffer + mDummySize + (k * 32 + idx) * mSlotSize ;
-}
-
-//free a slot
-void LLPrivateMemoryPool::LLMemoryBlock::freeMem(void* addr)
-{
- //bit index
- uintptr_t idx = ((uintptr_t)addr - (uintptr_t)mBuffer - mDummySize) / mSlotSize ;
-
- U32* bits = &mUsageBits ;
- if(idx >= 32)
- {
- bits = (U32*)mBuffer + (idx - 32) / 32 ;
- }
-
- //reset the bit
- if(idx & 31)
- {
- *bits &= ~(1 << (idx & 31)) ;
- }
- else
- {
- *bits &= ~1 ;
- }
-
- mAllocatedSlots-- ;
-}
-
-//for debug use: reset the entire bitmap.
-void LLPrivateMemoryPool::LLMemoryBlock::resetBitMap()
-{
- for(S32 i = 0 ; i < mDummySize / sizeof(U32) ; i++)
- {
- *((U32*)mBuffer + i) = 0 ;
- }
- mUsageBits = 0 ;
-}
-//-------------------------------------------------------------------
-//class LLMemoryChunk
-//--------------------------------------------------------------------
-LLPrivateMemoryPool::LLMemoryChunk::LLMemoryChunk()
-{
- //empty
-}
-
-LLPrivateMemoryPool::LLMemoryChunk::~LLMemoryChunk()
-{
- //empty
-}
-
-//create and init a memory chunk
-void LLPrivateMemoryPool::LLMemoryChunk::init(char* buffer, U32 buffer_size, U32 min_slot_size, U32 max_slot_size, U32 min_block_size, U32 max_block_size)
-{
- mBuffer = buffer ;
- mBufferSize = buffer_size ;
- mAlloatedSize = 0 ;
-
- mMetaBuffer = mBuffer + sizeof(LLMemoryChunk) ;
-
- mMinBlockSize = min_block_size; //page size
- mMinSlotSize = min_slot_size;
- mMaxSlotSize = max_slot_size ;
- mBlockLevels = mMaxSlotSize / mMinSlotSize ;
- mPartitionLevels = max_block_size / mMinBlockSize + 1 ;
-
- S32 max_num_blocks = (buffer_size - sizeof(LLMemoryChunk) - mBlockLevels * sizeof(LLMemoryBlock*) - mPartitionLevels * sizeof(LLMemoryBlock*)) /
- (mMinBlockSize + sizeof(LLMemoryBlock)) ;
- //meta data space
- mBlocks = (LLMemoryBlock*)mMetaBuffer ; //space reserved for all memory blocks.
- mAvailBlockList = (LLMemoryBlock**)((char*)mBlocks + sizeof(LLMemoryBlock) * max_num_blocks) ;
- mFreeSpaceList = (LLMemoryBlock**)((char*)mAvailBlockList + sizeof(LLMemoryBlock*) * mBlockLevels) ;
-
- //data buffer, which can be used for allocation
- mDataBuffer = (char*)mFreeSpaceList + sizeof(LLMemoryBlock*) * mPartitionLevels ;
-
- //alignment
- mDataBuffer = mBuffer + align(mDataBuffer - mBuffer) ;
-
- //init
- for(U32 i = 0 ; i < mBlockLevels; i++)
- {
- mAvailBlockList[i] = NULL ;
- }
- for(U32 i = 0 ; i < mPartitionLevels ; i++)
- {
- mFreeSpaceList[i] = NULL ;
- }
-
- //assign the entire chunk to the first block
- mBlocks[0].mPrev = NULL ;
- mBlocks[0].mNext = NULL ;
- mBlocks[0].setBuffer(mDataBuffer, buffer_size - (mDataBuffer - mBuffer)) ;
- addToFreeSpace(&mBlocks[0]) ;
-
- mNext = NULL ;
- mPrev = NULL ;
-}
-
-//static
-U32 LLPrivateMemoryPool::LLMemoryChunk::getMaxOverhead(U32 data_buffer_size, U32 min_slot_size,
- U32 max_slot_size, U32 min_block_size, U32 max_block_size)
-{
- //for large allocations, reserve some extra memory for meta data to avoid wasting much
- if(data_buffer_size / min_slot_size < 64) //large allocations
- {
- U32 overhead = sizeof(LLMemoryChunk) + (data_buffer_size / min_block_size) * sizeof(LLMemoryBlock) +
- sizeof(LLMemoryBlock*) * (max_slot_size / min_slot_size) + sizeof(LLMemoryBlock*) * (max_block_size / min_block_size + 1) ;
-
- //round to integer times of min_block_size
- overhead = ((overhead + min_block_size - 1) / min_block_size) * min_block_size ;
- return overhead ;
- }
- else
- {
- return 0 ; //do not reserve extra overhead for small allocations
- }
-}
-
-char* LLPrivateMemoryPool::LLMemoryChunk::allocate(U32 size)
-{
- if(mMinSlotSize > size)
- {
- size = mMinSlotSize ;
- }
- if(mAlloatedSize + size > mBufferSize - (mDataBuffer - mBuffer))
- {
- return NULL ; //not enough space in this chunk.
- }
-
- char* p = NULL ;
- U32 blk_idx = getBlockLevel(size);
-
- LLMemoryBlock* blk = NULL ;
-
- //check if there is free block available
- if(mAvailBlockList[blk_idx])
- {
- blk = mAvailBlockList[blk_idx] ;
- p = blk->allocate() ;
-
- if(blk->isFull())
- {
- popAvailBlockList(blk_idx) ;
- }
- }
-
- //ask for a new block
- if(!p)
- {
- blk = addBlock(blk_idx) ;
- if(blk)
- {
- p = blk->allocate() ;
-
- if(blk->isFull())
- {
- popAvailBlockList(blk_idx) ;
- }
- }
- }
-
- //ask for space from larger blocks
- if(!p)
- {
- for(S32 i = blk_idx + 1 ; i < mBlockLevels; i++)
- {
- if(mAvailBlockList[i])
- {
- blk = mAvailBlockList[i] ;
- p = blk->allocate() ;
-
- if(blk->isFull())
- {
- popAvailBlockList(i) ;
- }
- break ;
- }
- }
- }
-
- if(p && blk)
- {
- mAlloatedSize += blk->getSlotSize() ;
- }
- return p ;
-}
-
-void LLPrivateMemoryPool::LLMemoryChunk::freeMem(void* addr)
-{
- U32 blk_idx = getPageIndex((uintptr_t)addr) ;
- LLMemoryBlock* blk = (LLMemoryBlock*)(mMetaBuffer + blk_idx * sizeof(LLMemoryBlock)) ;
- blk = blk->mSelf ;
-
- bool was_full = blk->isFull() ;
- blk->freeMem(addr) ;
- mAlloatedSize -= blk->getSlotSize() ;
-
- if(blk->empty())
- {
- removeBlock(blk) ;
- }
- else if(was_full)
- {
- addToAvailBlockList(blk) ;
- }
-}
-
-bool LLPrivateMemoryPool::LLMemoryChunk::empty()
-{
- return !mAlloatedSize ;
-}
-
-bool LLPrivateMemoryPool::LLMemoryChunk::containsAddress(const char* addr) const
-{
- return (uintptr_t)mBuffer <= (uintptr_t)addr && (uintptr_t)mBuffer + mBufferSize > (uintptr_t)addr ;
-}
-
-//debug use
-void LLPrivateMemoryPool::LLMemoryChunk::dump()
-{
-#if 0
- //sanity check
- //for(S32 i = 0 ; i < mBlockLevels ; i++)
- //{
- // LLMemoryBlock* blk = mAvailBlockList[i] ;
- // while(blk)
- // {
- // blk_list.push_back(blk) ;
- // blk = blk->mNext ;
- // }
- //}
- for(S32 i = 0 ; i < mPartitionLevels ; i++)
- {
- LLMemoryBlock* blk = mFreeSpaceList[i] ;
- while(blk)
- {
- blk_list.push_back(blk) ;
- blk = blk->mNext ;
- }
- }
-
- std::sort(blk_list.begin(), blk_list.end(), LLMemoryBlock::CompareAddress());
-
- U32 total_size = blk_list[0]->getBufferSize() ;
- for(U32 i = 1 ; i < blk_list.size(); i++)
- {
- total_size += blk_list[i]->getBufferSize() ;
- if((uintptr_t)blk_list[i]->getBuffer() < (uintptr_t)blk_list[i-1]->getBuffer() + blk_list[i-1]->getBufferSize())
- {
- LL_ERRS() << "buffer corrupted." << LL_ENDL ;
- }
- }
-
- llassert_always(total_size + mMinBlockSize >= mBufferSize - ((uintptr_t)mDataBuffer - (uintptr_t)mBuffer)) ;
-
- U32 blk_num = (mBufferSize - (mDataBuffer - mBuffer)) / mMinBlockSize ;
- for(U32 i = 0 ; i < blk_num ; )
- {
- LLMemoryBlock* blk = &mBlocks[i] ;
- if(blk->mSelf)
- {
- U32 end = blk->getBufferSize() / mMinBlockSize ;
- for(U32 j = 0 ; j < end ; j++)
- {
- llassert_always(blk->mSelf == blk || !blk->mSelf) ;
- }
- i += end ;
- }
- else
- {
- LL_ERRS() << "gap happens" << LL_ENDL ;
- }
- }
-#endif
-#if 0
- LL_INFOS() << "---------------------------" << LL_ENDL ;
- LL_INFOS() << "Chunk buffer: " << (uintptr_t)getBuffer() << " size: " << getBufferSize() << LL_ENDL ;
-
- LL_INFOS() << "available blocks ... " << LL_ENDL ;
- for(S32 i = 0 ; i < mBlockLevels ; i++)
- {
- LLMemoryBlock* blk = mAvailBlockList[i] ;
- while(blk)
- {
- LL_INFOS() << "blk buffer " << (uintptr_t)blk->getBuffer() << " size: " << blk->getBufferSize() << LL_ENDL ;
- blk = blk->mNext ;
- }
- }
-
- LL_INFOS() << "free blocks ... " << LL_ENDL ;
- for(S32 i = 0 ; i < mPartitionLevels ; i++)
- {
- LLMemoryBlock* blk = mFreeSpaceList[i] ;
- while(blk)
- {
- LL_INFOS() << "blk buffer " << (uintptr_t)blk->getBuffer() << " size: " << blk->getBufferSize() << LL_ENDL ;
- blk = blk->mNext ;
- }
- }
-#endif
-}
-
-//compute the size for a block; the size is rounded to an integer multiple of mMinBlockSize.
-U32 LLPrivateMemoryPool::LLMemoryChunk::calcBlockSize(U32 slot_size)
-{
- //
- //Note: we try to make a block have 32 slots if the size is not over 32 pages
- //32 is the number of bits of an integer in a 32-bit system
- //
-
- U32 block_size;
- U32 cut_off_size = llmin(CUT_OFF_SIZE, (U32)(mMinBlockSize << 5)) ;
-
- if((slot_size << 5) <= mMinBlockSize)//for small allocations, return one page
- {
- block_size = mMinBlockSize ;
- }
- else if(slot_size >= cut_off_size)//for large allocations, return one-slot block
- {
- block_size = (slot_size / mMinBlockSize) * mMinBlockSize ;
- if(block_size < slot_size)
- {
- block_size += mMinBlockSize ;
- }
- }
- else //medium allocations
- {
- if((slot_size << 5) >= cut_off_size)
- {
- block_size = cut_off_size ;
- }
- else
- {
- block_size = ((slot_size << 5) / mMinBlockSize) * mMinBlockSize ;
- }
- }
-
- llassert_always(block_size >= slot_size) ;
-
- return block_size ;
-}
-
-//create a new block in the chunk
-LLPrivateMemoryPool::LLMemoryBlock* LLPrivateMemoryPool::LLMemoryChunk::addBlock(U32 blk_idx)
-{
- U32 slot_size = mMinSlotSize * (blk_idx + 1) ;
- U32 preferred_block_size = calcBlockSize(slot_size) ;
- U16 idx = getPageLevel(preferred_block_size);
- LLMemoryBlock* blk = NULL ;
-
- if(mFreeSpaceList[idx])//if there is free slot for blk_idx
- {
- blk = createNewBlock(mFreeSpaceList[idx], preferred_block_size, slot_size, blk_idx) ;
- }
- else if(mFreeSpaceList[mPartitionLevels - 1]) //search free pool
- {
- blk = createNewBlock(mFreeSpaceList[mPartitionLevels - 1], preferred_block_size, slot_size, blk_idx) ;
- }
- else //search other non-preferred slots that still have enough space.
- {
- S32 min_idx = 0 ;
- if(slot_size > mMinBlockSize)
- {
- min_idx = getPageLevel(slot_size) ;
- }
- for(S32 i = (S32)idx - 1 ; i >= min_idx ; i--) //search the small slots first
- {
- if(mFreeSpaceList[i])
- {
- U32 new_preferred_block_size = mFreeSpaceList[i]->getBufferSize();
- new_preferred_block_size = (new_preferred_block_size / mMinBlockSize) * mMinBlockSize ; //round to integer times of mMinBlockSize.
-
- //create a NEW BLOCK THERE.
- if(new_preferred_block_size >= slot_size) //at least there is space for one slot.
- {
-
- blk = createNewBlock(mFreeSpaceList[i], new_preferred_block_size, slot_size, blk_idx) ;
- }
- break ;
- }
- }
-
- if(!blk)
- {
- for(U16 i = idx + 1 ; i < mPartitionLevels - 1; i++) //search the large slots
- {
- if(mFreeSpaceList[i])
- {
- //create a NEW BLOCK THERE.
- blk = createNewBlock(mFreeSpaceList[i], preferred_block_size, slot_size, blk_idx) ;
- break ;
- }
- }
- }
- }
-
- return blk ;
-}
-
-//create a new block at the designated location
-LLPrivateMemoryPool::LLMemoryBlock* LLPrivateMemoryPool::LLMemoryChunk::createNewBlock(LLMemoryBlock* blk, U32 buffer_size, U32 slot_size, U32 blk_idx)
-{
- //unlink from the free space
- removeFromFreeSpace(blk) ;
-
- //check the rest space
- U32 new_free_blk_size = blk->getBufferSize() - buffer_size ;
- if(new_free_blk_size < mMinBlockSize) //can not partition the memory into size smaller than mMinBlockSize
- {
- new_free_blk_size = 0 ; //discard the last small extra space.
- }
-
- //add the rest space back to the free list
- if(new_free_blk_size > 0) //blk still has free space
- {
- LLMemoryBlock* next_blk = blk + (buffer_size / mMinBlockSize) ;
- next_blk->mPrev = NULL ;
- next_blk->mNext = NULL ;
- next_blk->setBuffer(blk->getBuffer() + buffer_size, new_free_blk_size) ;
- addToFreeSpace(next_blk) ;
- }
-
- blk->init(blk->getBuffer(), buffer_size, slot_size) ;
- //insert to the available block list...
- mAvailBlockList[blk_idx] = blk ;
-
- //mark the address map: all blocks covered by this block space pointing back to this block.
- U32 end = (buffer_size / mMinBlockSize) ;
- for(U32 i = 1 ; i < end ; i++)
- {
- (blk + i)->mSelf = blk ;
- }
-
- return blk ;
-}
-
-//delete a block, release the block to the free pool.
-void LLPrivateMemoryPool::LLMemoryChunk::removeBlock(LLMemoryBlock* blk)
-{
- //remove from the available block list
- if(blk->mPrev)
- {
- blk->mPrev->mNext = blk->mNext ;
- }
- if(blk->mNext)
- {
- blk->mNext->mPrev = blk->mPrev ;
- }
- U32 blk_idx = getBlockLevel(blk->getSlotSize());
- if(mAvailBlockList[blk_idx] == blk)
- {
- mAvailBlockList[blk_idx] = blk->mNext ;
- }
-
- blk->mNext = NULL ;
- blk->mPrev = NULL ;
-
- //mark it free
- blk->setBuffer(blk->getBuffer(), blk->getBufferSize()) ;
-
-#if 1
- //merge blk with neighbors if possible
- if(blk->getBuffer() > mDataBuffer) //has the left neighbor
- {
- if((blk - 1)->mSelf->isFree())
- {
- LLMemoryBlock* left_blk = (blk - 1)->mSelf ;
- removeFromFreeSpace((blk - 1)->mSelf);
- left_blk->setBuffer(left_blk->getBuffer(), left_blk->getBufferSize() + blk->getBufferSize()) ;
- blk = left_blk ;
- }
- }
- if(blk->getBuffer() + blk->getBufferSize() <= mBuffer + mBufferSize - mMinBlockSize) //has the right neighbor
- {
- U32 d = blk->getBufferSize() / mMinBlockSize ;
- if((blk + d)->isFree())
- {
- LLMemoryBlock* right_blk = blk + d ;
- removeFromFreeSpace(blk + d) ;
- blk->setBuffer(blk->getBuffer(), blk->getBufferSize() + right_blk->getBufferSize()) ;
- }
- }
-#endif
-
- addToFreeSpace(blk) ;
-
- return ;
-}
-
-//the top block in the list is full, pop it out of the list
-void LLPrivateMemoryPool::LLMemoryChunk::popAvailBlockList(U32 blk_idx)
-{
- if(mAvailBlockList[blk_idx])
- {
- LLMemoryBlock* next = mAvailBlockList[blk_idx]->mNext ;
- if(next)
- {
- next->mPrev = NULL ;
- }
- mAvailBlockList[blk_idx]->mPrev = NULL ;
- mAvailBlockList[blk_idx]->mNext = NULL ;
- mAvailBlockList[blk_idx] = next ;
- }
-}
-
-//add the block back to the free pool
-void LLPrivateMemoryPool::LLMemoryChunk::addToFreeSpace(LLMemoryBlock* blk)
-{
- llassert_always(!blk->mPrev) ;
- llassert_always(!blk->mNext) ;
-
- U16 free_idx = blk->getBufferSize() / mMinBlockSize - 1;
-
- (blk + free_idx)->mSelf = blk ; //mark the end pointing back to the head.
- free_idx = llmin(free_idx, (U16)(mPartitionLevels - 1)) ;
-
- blk->mNext = mFreeSpaceList[free_idx] ;
- if(mFreeSpaceList[free_idx])
- {
- mFreeSpaceList[free_idx]->mPrev = blk ;
- }
- mFreeSpaceList[free_idx] = blk ;
- blk->mPrev = NULL ;
- blk->mSelf = blk ;
-
- return ;
-}
-
-//remove the space from the free pool
-void LLPrivateMemoryPool::LLMemoryChunk::removeFromFreeSpace(LLMemoryBlock* blk)
-{
- U16 free_idx = blk->getBufferSize() / mMinBlockSize - 1;
- free_idx = llmin(free_idx, (U16)(mPartitionLevels - 1)) ;
-
- if(mFreeSpaceList[free_idx] == blk)
- {
- mFreeSpaceList[free_idx] = blk->mNext ;
- }
- if(blk->mPrev)
- {
- blk->mPrev->mNext = blk->mNext ;
- }
- if(blk->mNext)
- {
- blk->mNext->mPrev = blk->mPrev ;
- }
- blk->mNext = NULL ;
- blk->mPrev = NULL ;
- blk->mSelf = NULL ;
-
- return ;
-}
-
-void LLPrivateMemoryPool::LLMemoryChunk::addToAvailBlockList(LLMemoryBlock* blk)
-{
- llassert_always(!blk->mPrev) ;
- llassert_always(!blk->mNext) ;
-
- U32 blk_idx = getBlockLevel(blk->getSlotSize());
-
- blk->mNext = mAvailBlockList[blk_idx] ;
- if(blk->mNext)
- {
- blk->mNext->mPrev = blk ;
- }
- blk->mPrev = NULL ;
- mAvailBlockList[blk_idx] = blk ;
-
- return ;
-}
-
-U32 LLPrivateMemoryPool::LLMemoryChunk::getPageIndex(uintptr_t addr)
-{
- return (addr - (uintptr_t)mDataBuffer) / mMinBlockSize ;
-}
-
-//for mAvailBlockList
-U32 LLPrivateMemoryPool::LLMemoryChunk::getBlockLevel(U32 size)
-{
- llassert(size >= mMinSlotSize && size <= mMaxSlotSize) ;
-
- //start from 0
- return (size + mMinSlotSize - 1) / mMinSlotSize - 1 ;
-}
-
-//for mFreeSpaceList
-U16 LLPrivateMemoryPool::LLMemoryChunk::getPageLevel(U32 size)
-{
- //start from 0
- U16 level = size / mMinBlockSize - 1 ;
- if(level >= mPartitionLevels)
- {
- level = mPartitionLevels - 1 ;
- }
- return level ;
-}
-
-//-------------------------------------------------------------------
-//class LLPrivateMemoryPool
-//--------------------------------------------------------------------
-const U32 CHUNK_SIZE = 4 << 20 ; //4 MB
-const U32 LARGE_CHUNK_SIZE = 4 * CHUNK_SIZE ; //16 MB
-LLPrivateMemoryPool::LLPrivateMemoryPool(S32 type, U32 max_pool_size) :
- mMutexp(NULL),
- mReservedPoolSize(0),
- mHashFactor(1),
- mType(type),
- mMaxPoolSize(max_pool_size)
-{
- if(type == STATIC_THREADED || type == VOLATILE_THREADED)
- {
- mMutexp = new LLMutex(NULL) ;
- }
-
- for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++)
- {
- mChunkList[i] = NULL ;
- }
-
- mNumOfChunks = 0 ;
-}
-
-LLPrivateMemoryPool::~LLPrivateMemoryPool()
-{
- destroyPool();
- delete mMutexp ;
-}
-
-char* LLPrivateMemoryPool::allocate(U32 size)
-{
- if(!size)
- {
- return NULL ;
- }
-
- //if the asked size is larger than MAX_BLOCK_SIZE, fetch from the heap directly; the pool does not manage it
- if(size >= CHUNK_SIZE)
- {
- return (char*)ll_aligned_malloc_16(size) ;
- }
-
- char* p = NULL ;
-
- //find the appropriate chunk
- S32 chunk_idx = getChunkIndex(size) ;
-
- lock() ;
-
- LLMemoryChunk* chunk = mChunkList[chunk_idx];
- while(chunk)
- {
- if((p = chunk->allocate(size)))
- {
- break ;
- }
- chunk = chunk->mNext ;
- }
-
- //fetch new memory chunk
- if(!p)
- {
- if(mReservedPoolSize + CHUNK_SIZE > mMaxPoolSize)
- {
- chunk = mChunkList[chunk_idx];
- while(chunk)
- {
- if((p = chunk->allocate(size)))
- {
- break ;
- }
- chunk = chunk->mNext ;
- }
- }
- else
- {
- chunk = addChunk(chunk_idx) ;
- if(chunk)
- {
- p = chunk->allocate(size) ;
- }
- }
- }
-
- unlock() ;
-
- if(!p) //getting memory from the private pool failed, try the heap directly
- {
- static bool to_log = true ;
-
- if(to_log)
- {
- LL_WARNS() << "The memory pool overflows, now using heap directly!" << LL_ENDL ;
- to_log = false ;
- }
-
- return (char*)ll_aligned_malloc_16(size) ;
- }
-
- return p ;
-}
-
-void LLPrivateMemoryPool::freeMem(void* addr)
-{
- if(!addr)
- {
- return ;
- }
-
- lock() ;
-
- LLMemoryChunk* chunk = findChunk((char*)addr) ;
-
- if(!chunk)
- {
- ll_aligned_free_16(addr) ; //release from heap
- }
- else
- {
- chunk->freeMem(addr) ;
-
- if(chunk->empty())
- {
- removeChunk(chunk) ;
- }
- }
-
- unlock() ;
-}
-
-void LLPrivateMemoryPool::dump()
-{
-}
-
-U32 LLPrivateMemoryPool::getTotalAllocatedSize()
-{
- U32 total_allocated = 0 ;
-
- LLMemoryChunk* chunk ;
- for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++)
- {
- chunk = mChunkList[i];
- while(chunk)
- {
- total_allocated += chunk->getAllocatedSize() ;
- chunk = chunk->mNext ;
- }
- }
-
- return total_allocated ;
-}
-
-void LLPrivateMemoryPool::lock()
-{
- if(mMutexp)
- {
- mMutexp->lock() ;
- }
-}
-
-void LLPrivateMemoryPool::unlock()
-{
- if(mMutexp)
- {
- mMutexp->unlock() ;
- }
-}
-
-S32 LLPrivateMemoryPool::getChunkIndex(U32 size)
-{
- S32 i ;
- for(i = 0 ; size > MAX_SLOT_SIZES[i]; i++);
-
- llassert_always(i < SUPER_ALLOCATION);
-
- return i ;
-}
-
-//destroy the entire pool
-void LLPrivateMemoryPool::destroyPool()
-{
- lock() ;
-
- if(mNumOfChunks > 0)
- {
- LL_WARNS() << "There is some memory not freed when destroy the memory pool!" << LL_ENDL ;
- }
-
- mNumOfChunks = 0 ;
- mChunkHashList.clear() ;
- mHashFactor = 1 ;
- for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++)
- {
- mChunkList[i] = NULL ;
- }
-
- unlock() ;
-}
-
-bool LLPrivateMemoryPool::checkSize(U32 asked_size)
-{
- if(mReservedPoolSize + asked_size > mMaxPoolSize)
- {
- LL_INFOS() << "Max pool size: " << mMaxPoolSize << LL_ENDL ;
- LL_INFOS() << "Total reserved size: " << mReservedPoolSize + asked_size << LL_ENDL ;
- LL_INFOS() << "Total_allocated Size: " << getTotalAllocatedSize() << LL_ENDL ;
-
- //LL_ERRS() << "The pool is overflowing..." << LL_ENDL ;
-
- return false ;
- }
-
- return true ;
-}
-
-LLPrivateMemoryPool::LLMemoryChunk* LLPrivateMemoryPool::addChunk(S32 chunk_index)
-{
- U32 preferred_size ;
- U32 overhead ;
- if(chunk_index < LARGE_ALLOCATION)
- {
- preferred_size = CHUNK_SIZE ; //4MB
- overhead = LLMemoryChunk::getMaxOverhead(preferred_size, MIN_SLOT_SIZES[chunk_index],
- MAX_SLOT_SIZES[chunk_index], MIN_BLOCK_SIZES[chunk_index], MAX_BLOCK_SIZES[chunk_index]) ;
- }
- else
- {
- preferred_size = LARGE_CHUNK_SIZE ; //16MB
- overhead = LLMemoryChunk::getMaxOverhead(preferred_size, MIN_SLOT_SIZES[chunk_index],
- MAX_SLOT_SIZES[chunk_index], MIN_BLOCK_SIZES[chunk_index], MAX_BLOCK_SIZES[chunk_index]) ;
- }
-
- if(!checkSize(preferred_size + overhead))
- {
- return NULL ;
- }
-
- mReservedPoolSize += preferred_size + overhead ;
-
- char* buffer = (char*)ll_aligned_malloc_16(preferred_size + overhead) ;
- if(!buffer)
- {
- return NULL ;
- }
-
- LLMemoryChunk* chunk = new (buffer) LLMemoryChunk() ;
- chunk->init(buffer, preferred_size + overhead, MIN_SLOT_SIZES[chunk_index],
- MAX_SLOT_SIZES[chunk_index], MIN_BLOCK_SIZES[chunk_index], MAX_BLOCK_SIZES[chunk_index]) ;
-
- //add to the tail of the linked list
- {
- if(!mChunkList[chunk_index])
- {
- mChunkList[chunk_index] = chunk ;
- }
- else
- {
- LLMemoryChunk* cur = mChunkList[chunk_index] ;
- while(cur->mNext)
- {
- cur = cur->mNext ;
- }
- cur->mNext = chunk ;
- chunk->mPrev = cur ;
- }
- }
-
- //insert into the hash table
- addToHashTable(chunk) ;
-
- mNumOfChunks++;
-
- return chunk ;
-}
-
-void LLPrivateMemoryPool::removeChunk(LLMemoryChunk* chunk)
-{
- if(!chunk)
- {
- return ;
- }
-
- //remove from the linked list
- for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++)
- {
- if(mChunkList[i] == chunk)
- {
- mChunkList[i] = chunk->mNext ;
- }
- }
-
- if(chunk->mPrev)
- {
- chunk->mPrev->mNext = chunk->mNext ;
- }
- if(chunk->mNext)
- {
- chunk->mNext->mPrev = chunk->mPrev ;
- }
-
- //remove from the hash table
- removeFromHashTable(chunk) ;
-
- mNumOfChunks--;
- mReservedPoolSize -= chunk->getBufferSize() ;
-
- //release memory
- ll_aligned_free_16(chunk->getBuffer()) ;
-}
-
-U16 LLPrivateMemoryPool::findHashKey(const char* addr)
-{
- return (((uintptr_t)addr) / CHUNK_SIZE) % mHashFactor ;
-}
-
-LLPrivateMemoryPool::LLMemoryChunk* LLPrivateMemoryPool::findChunk(const char* addr)
-{
- U16 key = findHashKey(addr) ;
- if(mChunkHashList.size() <= key)
- {
- return NULL ;
- }
-
- return mChunkHashList[key].findChunk(addr) ;
-}
-
-void LLPrivateMemoryPool::addToHashTable(LLMemoryChunk* chunk)
-{
- static const U16 HASH_FACTORS[] = {41, 83, 193, 317, 419, 523, 719, 997, 1523, 0xFFFF};
-
- U16 i ;
- if(mChunkHashList.empty())
- {
- mHashFactor = HASH_FACTORS[0] ;
- rehash() ;
- }
-
- U16 start_key = findHashKey(chunk->getBuffer()) ;
- U16 end_key = findHashKey(chunk->getBuffer() + chunk->getBufferSize() - 1) ;
- bool need_rehash = false ;
-
- if(mChunkHashList[start_key].hasElement(chunk))
- {
- return; //already inserted.
- }
- need_rehash = mChunkHashList[start_key].add(chunk) ;
-
- if(start_key == end_key && !need_rehash)
- {
- return ; //done
- }
-
- if(!need_rehash)
- {
- need_rehash = mChunkHashList[end_key].add(chunk) ;
- }
-
- if(!need_rehash)
- {
- if(end_key < start_key)
- {
- need_rehash = fillHashTable(start_key + 1, mHashFactor, chunk) ;
- if(!need_rehash)
- {
- need_rehash = fillHashTable(0, end_key, chunk) ;
- }
- }
- else
- {
- need_rehash = fillHashTable(start_key + 1, end_key, chunk) ;
- }
- }
-
- if(need_rehash)
- {
- i = 0 ;
- while(HASH_FACTORS[i] <= mHashFactor) i++;
-
- mHashFactor = HASH_FACTORS[i] ;
- llassert_always(mHashFactor != 0xFFFF) ;//stop point to prevent endlessly recursive calls
-
- rehash() ;
- }
-}
-
-void LLPrivateMemoryPool::removeFromHashTable(LLMemoryChunk* chunk)
-{
- U16 start_key = findHashKey(chunk->getBuffer()) ;
- U16 end_key = findHashKey(chunk->getBuffer() + chunk->getBufferSize() - 1) ;
-
- mChunkHashList[start_key].remove(chunk) ;
- if(start_key == end_key)
- {
- return ; //done
- }
-
- mChunkHashList[end_key].remove(chunk) ;
-
- if(end_key < start_key)
- {
- for(U16 i = start_key + 1 ; i < mHashFactor; i++)
- {
- mChunkHashList[i].remove(chunk) ;
- }
- for(U16 i = 0 ; i < end_key; i++)
- {
- mChunkHashList[i].remove(chunk) ;
- }
- }
- else
- {
- for(U16 i = start_key + 1 ; i < end_key; i++)
- {
- mChunkHashList[i].remove(chunk) ;
- }
- }
-}
-
-void LLPrivateMemoryPool::rehash()
-{
- LL_INFOS() << "new hash factor: " << mHashFactor << LL_ENDL ;
-
- mChunkHashList.clear() ;
- mChunkHashList.resize(mHashFactor) ;
-
- LLMemoryChunk* chunk ;
- for(U16 i = 0 ; i < SUPER_ALLOCATION ; i++)
- {
- chunk = mChunkList[i] ;
- while(chunk)
- {
- addToHashTable(chunk) ;
- chunk = chunk->mNext ;
- }
- }
-}
-
-bool LLPrivateMemoryPool::fillHashTable(U16 start, U16 end, LLMemoryChunk* chunk)
-{
- for(U16 i = start; i < end; i++)
- {
- if(mChunkHashList[i].add(chunk))
- {
- return true ;
- }
- }
-
- return false ;
-}
-
-//--------------------------------------------------------------------
-// class LLChunkHashElement
-//--------------------------------------------------------------------
-LLPrivateMemoryPool::LLMemoryChunk* LLPrivateMemoryPool::LLChunkHashElement::findChunk(const char* addr)
-{
- if(mFirst && mFirst->containsAddress(addr))
- {
- return mFirst ;
- }
- else if(mSecond && mSecond->containsAddress(addr))
- {
- return mSecond ;
- }
-
- return NULL ;
-}
-
-//return false if successfully inserted to the hash slot.
-bool LLPrivateMemoryPool::LLChunkHashElement::add(LLPrivateMemoryPool::LLMemoryChunk* chunk)
-{
- llassert_always(!hasElement(chunk)) ;
-
- if(!mFirst)
- {
- mFirst = chunk ;
- }
- else if(!mSecond)
- {
- mSecond = chunk ;
- }
- else
- {
- return true ; //failed
- }
-
- return false ;
-}
-
-void LLPrivateMemoryPool::LLChunkHashElement::remove(LLPrivateMemoryPool::LLMemoryChunk* chunk)
-{
- if(mFirst == chunk)
- {
- mFirst = NULL ;
- }
- else if(mSecond ==chunk)
- {
- mSecond = NULL ;
- }
- else
- {
- LL_ERRS() << "This slot does not contain this chunk!" << LL_ENDL ;
- }
-}
-
-//--------------------------------------------------------------------
-//class LLPrivateMemoryPoolManager
-//--------------------------------------------------------------------
-LLPrivateMemoryPoolManager* LLPrivateMemoryPoolManager::sInstance = NULL ;
-BOOL LLPrivateMemoryPoolManager::sPrivatePoolEnabled = FALSE ;
-std::vector<LLPrivateMemoryPool*> LLPrivateMemoryPoolManager::sDanglingPoolList ;
-
-LLPrivateMemoryPoolManager::LLPrivateMemoryPoolManager(BOOL enabled, U32 max_pool_size)
-{
- mPoolList.resize(LLPrivateMemoryPool::MAX_TYPES) ;
-
- for(S32 i = 0 ; i < LLPrivateMemoryPool::MAX_TYPES; i++)
- {
- mPoolList[i] = NULL ;
- }
-
- sPrivatePoolEnabled = enabled ;
-
- const U32 MAX_POOL_SIZE = 256 * 1024 * 1024 ; //256 MB
- mMaxPrivatePoolSize = llmax(max_pool_size, MAX_POOL_SIZE) ;
-}
-
-LLPrivateMemoryPoolManager::~LLPrivateMemoryPoolManager()
-{
-
-#if __DEBUG_PRIVATE_MEM__
- if(!sMemAllocationTracker.empty())
- {
- LL_WARNS() << "there is potential memory leaking here. The list of not freed memory blocks are from: " <<LL_ENDL ;
-
- S32 k = 0 ;
- for(mem_allocation_info_t::iterator iter = sMemAllocationTracker.begin() ; iter != sMemAllocationTracker.end() ; ++iter)
- {
- LL_INFOS() << k++ << ", " << (uintptr_t)iter->first << " : " << iter->second << LL_ENDL ;
- }
- sMemAllocationTracker.clear() ;
- }
-#endif
-
-#if 0
- //all private pools should be released by their owners before reaching here.
- for(S32 i = 0 ; i < LLPrivateMemoryPool::MAX_TYPES; i++)
- {
- llassert_always(!mPoolList[i]) ;
- }
- mPoolList.clear() ;
-
-#else
- //forcefully release all memory
- for(S32 i = 0 ; i < LLPrivateMemoryPool::MAX_TYPES; i++)
- {
- if(mPoolList[i])
- {
- if(mPoolList[i]->isEmpty())
- {
- delete mPoolList[i] ;
- }
- else
- {
- //can not delete this pool because it has allocated memory to be freed.
- //move it to the dangling list.
- sDanglingPoolList.push_back(mPoolList[i]) ;
- }
-
- mPoolList[i] = NULL ;
- }
- }
- mPoolList.clear() ;
-#endif
-}
-
-//static
-void LLPrivateMemoryPoolManager::initClass(BOOL enabled, U32 max_pool_size)
-{
- llassert_always(!sInstance) ;
-
- sInstance = new LLPrivateMemoryPoolManager(enabled, max_pool_size) ;
-}
-
-//static
-LLPrivateMemoryPoolManager* LLPrivateMemoryPoolManager::getInstance()
-{
- //if(!sInstance)
- //{
- // sInstance = new LLPrivateMemoryPoolManager(FALSE) ;
- //}
- return sInstance ;
-}
-
-//static
-void LLPrivateMemoryPoolManager::destroyClass()
-{
- if(sInstance)
- {
- delete sInstance ;
- sInstance = NULL ;
- }
-}
-
-LLPrivateMemoryPool* LLPrivateMemoryPoolManager::newPool(S32 type)
-{
- if(!sPrivatePoolEnabled)
- {
- return NULL ;
- }
-
- if(!mPoolList[type])
- {
- mPoolList[type] = new LLPrivateMemoryPool(type, mMaxPrivatePoolSize) ;
- }
-
- return mPoolList[type] ;
-}
-
-void LLPrivateMemoryPoolManager::deletePool(LLPrivateMemoryPool* pool)
-{
- if(pool && pool->isEmpty())
- {
- mPoolList[pool->getType()] = NULL ;
- delete pool;
- }
-}
-
-//debug
-void LLPrivateMemoryPoolManager::updateStatistics()
-{
- mTotalReservedSize = 0 ;
- mTotalAllocatedSize = 0 ;
-
- for(U32 i = 0; i < mPoolList.size(); i++)
- {
- if(mPoolList[i])
- {
- mTotalReservedSize += mPoolList[i]->getTotalReservedSize() ;
- mTotalAllocatedSize += mPoolList[i]->getTotalAllocatedSize() ;
- }
- }
-}
-
-#if __DEBUG_PRIVATE_MEM__
-//static
-char* LLPrivateMemoryPoolManager::allocate(LLPrivateMemoryPool* poolp, U32 size, const char* function, const int line)
-{
- char* p ;
-
- if(!poolp)
- {
- p = (char*)ll_aligned_malloc_16(size) ;
- }
- else
- {
- p = poolp->allocate(size) ;
- }
-
- if(p)
- {
- char num[16] ;
- sprintf(num, " line: %d ", line) ;
- std::string str(function) ;
- str += num;
-
- sMemAllocationTracker[p] = str ;
- }
-
- return p ;
-}
-#else
-//static
-char* LLPrivateMemoryPoolManager::allocate(LLPrivateMemoryPool* poolp, U32 size)
-{
- if(poolp)
- {
- return poolp->allocate(size) ;
- }
- else
- {
- return (char*)ll_aligned_malloc_16(size) ;
- }
-}
-#endif
-
-//static
-void LLPrivateMemoryPoolManager::freeMem(LLPrivateMemoryPool* poolp, void* addr)
-{
- if(!addr)
- {
- return ;
- }
-
-#if __DEBUG_PRIVATE_MEM__
- sMemAllocationTracker.erase((char*)addr) ;
-#endif
-
- if(poolp)
- {
- poolp->freeMem(addr) ;
- }
- else
- {
- if(!sPrivatePoolEnabled)
- {
- ll_aligned_free_16(addr) ; //private pool is disabled.
- }
- else if(!sInstance) //the private memory manager is destroyed, try the dangling list
- {
- for(S32 i = 0 ; i < sDanglingPoolList.size(); i++)
- {
- if(sDanglingPoolList[i]->findChunk((char*)addr))
- {
- sDanglingPoolList[i]->freeMem(addr) ;
- if(sDanglingPoolList[i]->isEmpty())
- {
- delete sDanglingPoolList[i] ;
-
- if(i < sDanglingPoolList.size() - 1)
- {
- sDanglingPoolList[i] = sDanglingPoolList[sDanglingPoolList.size() - 1] ;
- }
- sDanglingPoolList.pop_back() ;
- }
-
- addr = NULL ;
- break ;
- }
- }
- llassert_always(!addr) ; //addr should be released before hitting here!
- }
- else
- {
- LL_ERRS() << "private pool is used before initialized.!" << LL_ENDL ;
- }
- }
-}
-
-//--------------------------------------------------------------------
-//class LLPrivateMemoryPoolTester
-//--------------------------------------------------------------------
-#if 0
-LLPrivateMemoryPoolTester* LLPrivateMemoryPoolTester::sInstance = NULL ;
-LLPrivateMemoryPool* LLPrivateMemoryPoolTester::sPool = NULL ;
-LLPrivateMemoryPoolTester::LLPrivateMemoryPoolTester()
-{
-}
-
-LLPrivateMemoryPoolTester::~LLPrivateMemoryPoolTester()
-{
-}
-
-//static
-LLPrivateMemoryPoolTester* LLPrivateMemoryPoolTester::getInstance()
-{
- if(!sInstance)
- {
- sInstance = ::new LLPrivateMemoryPoolTester() ;
- }
- return sInstance ;
-}
-
-//static
-void LLPrivateMemoryPoolTester::destroy()
-{
- if(sInstance)
- {
- ::delete sInstance ;
- sInstance = NULL ;
- }
-
- if(sPool)
- {
- LLPrivateMemoryPoolManager::getInstance()->deletePool(sPool) ;
- sPool = NULL ;
- }
-}
-
-void LLPrivateMemoryPoolTester::run(S32 type)
-{
- if(sPool)
- {
- LLPrivateMemoryPoolManager::getInstance()->deletePool(sPool) ;
- }
- sPool = LLPrivateMemoryPoolManager::getInstance()->newPool(type) ;
-
- //run the test
- correctnessTest() ;
- performanceTest() ;
- //fragmentationtest() ;
-
- //release pool.
- LLPrivateMemoryPoolManager::getInstance()->deletePool(sPool) ;
- sPool = NULL ;
-}
-
-void LLPrivateMemoryPoolTester::test(U32 min_size, U32 max_size, U32 stride, U32 times,
- bool random_deletion, bool output_statistics)
-{
- U32 levels = (max_size - min_size) / stride + 1 ;
- char*** p ;
- U32 i, j ;
- U32 total_allocated_size = 0 ;
-
- //allocate space for p ;
- if(!(p = ::new char**[times]) || !(*p = ::new char*[times * levels]))
- {
- LL_ERRS() << "memory initialization for p failed" << LL_ENDL ;
- }
-
- //init
- for(i = 0 ; i < times; i++)
- {
- p[i] = *p + i * levels ;
- for(j = 0 ; j < levels; j++)
- {
- p[i][j] = NULL ;
- }
- }
-
- //allocation
- U32 size ;
- for(i = 0 ; i < times ; i++)
- {
- for(j = 0 ; j < levels; j++)
- {
- size = min_size + j * stride ;
- p[i][j] = ALLOCATE_MEM(sPool, size) ;
-
- total_allocated_size+= size ;
-
- *(U32*)p[i][j] = i ;
- *((U32*)p[i][j] + 1) = j ;
- //p[i][j][size - 1] = '\0' ; //access the last element to verify the success of the allocation.
-
- //randomly release memory
- if(random_deletion)
- {
- S32 k = rand() % levels ;
-
- if(p[i][k])
- {
- llassert_always(*(U32*)p[i][k] == i && *((U32*)p[i][k] + 1) == k) ;
- FREE_MEM(sPool, p[i][k]) ;
- total_allocated_size -= min_size + k * stride ;
- p[i][k] = NULL ;
- }
- }
- }
- }
-
- //output pool allocation statistics
- if(output_statistics)
- {
- }
-
- //release all memory allocations
- for(i = 0 ; i < times; i++)
- {
- for(j = 0 ; j < levels; j++)
- {
- if(p[i][j])
- {
- llassert_always(*(U32*)p[i][j] == i && *((U32*)p[i][j] + 1) == j) ;
- FREE_MEM(sPool, p[i][j]) ;
- total_allocated_size -= min_size + j * stride ;
- p[i][j] = NULL ;
- }
- }
- }
-
- ::delete[] *p ;
- ::delete[] p ;
-}
-
-void LLPrivateMemoryPoolTester::testAndTime(U32 size, U32 times)
-{
- LLTimer timer ;
-
- LL_INFOS() << " -**********************- " << LL_ENDL ;
- LL_INFOS() << "test size: " << size << " test times: " << times << LL_ENDL ;
-
- timer.reset() ;
- char** p = new char*[times] ;
-
- //using the customized memory pool
- //allocation
- for(U32 i = 0 ; i < times; i++)
- {
- p[i] = ALLOCATE_MEM(sPool, size) ;
- if(!p[i])
- {
- LL_ERRS() << "allocation failed" << LL_ENDL ;
- }
- }
- //de-allocation
- for(U32 i = 0 ; i < times; i++)
- {
- FREE_MEM(sPool, p[i]) ;
- p[i] = NULL ;
- }
- LL_INFOS() << "time spent using customized memory pool: " << timer.getElapsedTimeF32() << LL_ENDL ;
-
- timer.reset() ;
-
- //using the standard allocator/de-allocator:
- //allocation
- for(U32 i = 0 ; i < times; i++)
- {
- p[i] = ::new char[size] ;
- if(!p[i])
- {
- LL_ERRS() << "allocation failed" << LL_ENDL ;
- }
- }
- //de-allocation
- for(U32 i = 0 ; i < times; i++)
- {
- ::delete[] p[i] ;
- p[i] = NULL ;
- }
- LL_INFOS() << "time spent using standard allocator/de-allocator: " << timer.getElapsedTimeF32() << LL_ENDL ;
-
- delete[] p;
-}
-
-void LLPrivateMemoryPoolTester::correctnessTest()
-{
- //try many different-sized allocations and all kinds of edge cases, and access the allocated memory
- //to see if the allocation is right.
-
- //edge case
- char* p = ALLOCATE_MEM(sPool, 0) ;
- FREE_MEM(sPool, p) ;
-
- //small sized
- // [8 bytes, 2KB), each asks for 256 allocations and deallocations
- test(8, 2040, 8, 256, true, true) ;
-
- //medium sized
- //[2KB, 512KB), each asks for 16 allocations and deallocations
- test(2048, 512 * 1024 - 2048, 2048, 16, true, true) ;
-
- //large sized
- //[512KB, 4MB], each asks for 8 allocations and deallocations
- test(512 * 1024, 4 * 1024 * 1024, 64 * 1024, 6, true, true) ;
-}
-
-void LLPrivateMemoryPoolTester::performanceTest()
-{
- U32 test_size[3] = {768, 3* 1024, 3* 1024 * 1024};
-
- //small sized
- testAndTime(test_size[0], 8) ;
-
- //medium sized
- testAndTime(test_size[1], 8) ;
-
- //large sized
- testAndTime(test_size[2], 8) ;
-}
-
-void LLPrivateMemoryPoolTester::fragmentationtest()
-{
- //for internal fragmentation statistics:
- //every time a new chunk is requested during the correctness test and the performance test,
- //print out the chunk usage statistics.
-}
-#endif
//--------------------------------------------------------------------
#if defined(LL_WINDOWS) && defined(LL_DEBUG_BUFFER_OVERRUN)
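The bulk of the removal above is the pool's internal allocator: each chunk was carved into blocks, and each block handed out fixed-size slots tracked by a usage bitmap (LLMemoryBlock::allocate() scans for the first clear bit; freeMem() clears it again). A self-contained toy sketch of that bitmap bookkeeping (a simplified 32-slot illustration, not the removed class):

    #include <cstdint>
    #include <cstddef>

    // One bit per fixed-size slot; first clear bit = next free slot.
    struct SlotBlock
    {
        char*    buffer;      // start of the slot area
        uint32_t slot_size;   // bytes per slot
        uint32_t usage_bits;  // bit i set => slot i is in use (up to 32 slots here)

        char* allocate()
        {
            if (usage_bits == 0xffffffffu)
                return nullptr;                                   // block is full
            int idx = 0;
            for (uint32_t tmp = usage_bits; tmp & 1u; tmp >>= 1)
                ++idx;                                            // find first clear bit
            usage_bits |= (1u << idx);                            // mark the slot used
            return buffer + (size_t)idx * slot_size;
        }

        void free_slot(void* addr)
        {
            size_t idx = ((char*)addr - buffer) / slot_size;      // slot index from address
            usage_bits &= ~(1u << (uint32_t)idx);                 // clear the slot's bit
        }
    };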
diff --git a/indra/llcommon/llmemory.h b/indra/llcommon/llmemory.h
index c37967e10e..5b17d9e3a4 100644
--- a/indra/llcommon/llmemory.h
+++ b/indra/llcommon/llmemory.h
@@ -356,327 +356,6 @@ private:
static BOOL sEnableMemoryFailurePrevention;
};
-//
-//class LLPrivateMemoryPool defines a private memory pool for an application to use, so the application does not
-//need to access the heap directly for each memory allocation. Through this, allocation is faster
-//and the virtual address space fragmentation problem is reduced.
-//Note: this class can be made thread-safe by passing true to the constructor. However, you do not need to do this unless
-//you are sure the memory allocation and de-allocation will happen in different threads. Making the pool thread-safe
-//increases allocation and deallocation cost.
-//
-class LL_COMMON_API LLPrivateMemoryPool
-{
- friend class LLPrivateMemoryPoolManager ;
-
-public:
- class LL_COMMON_API LLMemoryBlock //each block is divided into slots uniformly
- {
- public:
- LLMemoryBlock() ;
- ~LLMemoryBlock() ;
-
- void init(char* buffer, U32 buffer_size, U32 slot_size) ;
- void setBuffer(char* buffer, U32 buffer_size) ;
-
- char* allocate() ;
- void freeMem(void* addr) ;
-
- bool empty() {return !mAllocatedSlots;}
- bool isFull() {return mAllocatedSlots == mTotalSlots;}
- bool isFree() {return !mTotalSlots;}
-
- U32 getSlotSize()const {return mSlotSize;}
- U32 getTotalSlots()const {return mTotalSlots;}
- U32 getBufferSize()const {return mBufferSize;}
- char* getBuffer() const {return mBuffer;}
-
- //debug use
- void resetBitMap() ;
- private:
- char* mBuffer;
- U32 mSlotSize ; //when the block is not initialized, it is the buffer size.
- U32 mBufferSize ;
- U32 mUsageBits ;
- U8 mTotalSlots ;
- U8 mAllocatedSlots ;
- U8 mDummySize ; //size of extra bytes reserved for mUsageBits.
-
- public:
- LLMemoryBlock* mPrev ;
- LLMemoryBlock* mNext ;
- LLMemoryBlock* mSelf ;
-
- struct CompareAddress
- {
- bool operator()(const LLMemoryBlock* const& lhs, const LLMemoryBlock* const& rhs)
- {
- return (uintptr_t)lhs->getBuffer() < (uintptr_t)rhs->getBuffer();
- }
- };
- };
-
- class LL_COMMON_API LLMemoryChunk //is divided into memory blocks.
- {
- public:
- LLMemoryChunk() ;
- ~LLMemoryChunk() ;
-
- void init(char* buffer, U32 buffer_size, U32 min_slot_size, U32 max_slot_size, U32 min_block_size, U32 max_block_size) ;
- void setBuffer(char* buffer, U32 buffer_size) ;
-
- bool empty() ;
-
- char* allocate(U32 size) ;
- void freeMem(void* addr) ;
-
- char* getBuffer() const {return mBuffer;}
- U32 getBufferSize() const {return mBufferSize;}
- U32 getAllocatedSize() const {return mAlloatedSize;}
-
- bool containsAddress(const char* addr) const;
-
- static U32 getMaxOverhead(U32 data_buffer_size, U32 min_slot_size,
- U32 max_slot_size, U32 min_block_size, U32 max_block_size) ;
-
- void dump() ;
-
- private:
- U32 getPageIndex(uintptr_t addr) ;
- U32 getBlockLevel(U32 size) ;
- U16 getPageLevel(U32 size) ;
- LLMemoryBlock* addBlock(U32 blk_idx) ;
- void popAvailBlockList(U32 blk_idx) ;
- void addToFreeSpace(LLMemoryBlock* blk) ;
- void removeFromFreeSpace(LLMemoryBlock* blk) ;
- void removeBlock(LLMemoryBlock* blk) ;
- void addToAvailBlockList(LLMemoryBlock* blk) ;
- U32 calcBlockSize(U32 slot_size);
- LLMemoryBlock* createNewBlock(LLMemoryBlock* blk, U32 buffer_size, U32 slot_size, U32 blk_idx) ;
-
- private:
- LLMemoryBlock** mAvailBlockList ;//256 by mMinSlotSize
- LLMemoryBlock** mFreeSpaceList;
- LLMemoryBlock* mBlocks ; //index of blocks by address.
-
- char* mBuffer ;
- U32 mBufferSize ;
- char* mDataBuffer ;
- char* mMetaBuffer ;
- U32 mMinBlockSize ;
- U32 mMinSlotSize ;
- U32 mMaxSlotSize ;
- U32 mAlloatedSize ;
- U16 mBlockLevels;
- U16 mPartitionLevels;
-
- public:
- //form a linked list
- LLMemoryChunk* mNext ;
- LLMemoryChunk* mPrev ;
- } ;
-
-private:
- LLPrivateMemoryPool(S32 type, U32 max_pool_size) ;
- ~LLPrivateMemoryPool() ;
-
- char *allocate(U32 size) ;
- void freeMem(void* addr) ;
-
- void dump() ;
- U32 getTotalAllocatedSize() ;
- U32 getTotalReservedSize() {return mReservedPoolSize;}
- S32 getType() const {return mType; }
- bool isEmpty() const {return !mNumOfChunks; }
-
-private:
- void lock() ;
- void unlock() ;
- S32 getChunkIndex(U32 size) ;
- LLMemoryChunk* addChunk(S32 chunk_index) ;
- bool checkSize(U32 asked_size) ;
- void removeChunk(LLMemoryChunk* chunk) ;
- U16 findHashKey(const char* addr);
- void addToHashTable(LLMemoryChunk* chunk) ;
- void removeFromHashTable(LLMemoryChunk* chunk) ;
- void rehash() ;
- bool fillHashTable(U16 start, U16 end, LLMemoryChunk* chunk) ;
- LLMemoryChunk* findChunk(const char* addr) ;
-
- void destroyPool() ;
-
-public:
- enum
- {
- SMALL_ALLOCATION = 0, //from 8 bytes to 2KB(exclusive), page size 2KB, max chunk size is 4MB.
- MEDIUM_ALLOCATION, //from 2KB to 512KB(exclusive), page size 32KB, max chunk size 4MB
- LARGE_ALLOCATION, //from 512KB to 4MB(inclusive), page size 64KB, max chunk size 16MB
- SUPER_ALLOCATION //allocation larger than 4MB.
- };
-
- enum
- {
- STATIC = 0 , //static pool (each allocation stays for a long time) without threading support
- VOLATILE, //volatile pool (each allocation stays for a very short time) without threading support
- STATIC_THREADED, //static pool with threading support
- VOLATILE_THREADED, //volatile pool with threading support
- MAX_TYPES
- }; //pool types
-
-private:
- LLMutex* mMutexp ;
- U32 mMaxPoolSize;
- U32 mReservedPoolSize ;
-
- LLMemoryChunk* mChunkList[SUPER_ALLOCATION] ; //all memory chunks reserved by this pool, sorted by address
- U16 mNumOfChunks ;
- U16 mHashFactor ;
-
- S32 mType ;
-
- class LLChunkHashElement
- {
- public:
- LLChunkHashElement() {mFirst = NULL ; mSecond = NULL ;}
-
- bool add(LLMemoryChunk* chunk) ;
- void remove(LLMemoryChunk* chunk) ;
- LLMemoryChunk* findChunk(const char* addr) ;
-
- bool empty() {return !mFirst && !mSecond; }
- bool full() {return mFirst && mSecond; }
- bool hasElement(LLMemoryChunk* chunk) {return mFirst == chunk || mSecond == chunk;}
-
- private:
- LLMemoryChunk* mFirst ;
- LLMemoryChunk* mSecond ;
- };
- std::vector<LLChunkHashElement> mChunkHashList ;
-};
-
-class LL_COMMON_API LLPrivateMemoryPoolManager
-{
-private:
- LLPrivateMemoryPoolManager(BOOL enabled, U32 max_pool_size) ;
- ~LLPrivateMemoryPoolManager() ;
-
-public:
- static LLPrivateMemoryPoolManager* getInstance() ;
- static void initClass(BOOL enabled, U32 pool_size) ;
- static void destroyClass() ;
-
- LLPrivateMemoryPool* newPool(S32 type) ;
- void deletePool(LLPrivateMemoryPool* pool) ;
-
-private:
- std::vector<LLPrivateMemoryPool*> mPoolList ;
- U32 mMaxPrivatePoolSize;
-
- static LLPrivateMemoryPoolManager* sInstance ;
- static BOOL sPrivatePoolEnabled;
- static std::vector<LLPrivateMemoryPool*> sDanglingPoolList ;
-public:
- //debug and statistics info.
- void updateStatistics() ;
-
- U32 mTotalReservedSize ;
- U32 mTotalAllocatedSize ;
-
-public:
-#if __DEBUG_PRIVATE_MEM__
- static char* allocate(LLPrivateMemoryPool* poolp, U32 size, const char* function, const int line) ;
-
- typedef std::map<char*, std::string> mem_allocation_info_t ;
- static mem_allocation_info_t sMemAllocationTracker;
-#else
- static char* allocate(LLPrivateMemoryPool* poolp, U32 size) ;
-#endif
- static void freeMem(LLPrivateMemoryPool* poolp, void* addr) ;
-};
-
-//-------------------------------------------------------------------------------------
-#if __DEBUG_PRIVATE_MEM__
-#define ALLOCATE_MEM(poolp, size) LLPrivateMemoryPoolManager::allocate((poolp), (size), __FUNCTION__, __LINE__)
-#else
-#define ALLOCATE_MEM(poolp, size) LLPrivateMemoryPoolManager::allocate((poolp), (size))
-#endif
-#define FREE_MEM(poolp, addr) LLPrivateMemoryPoolManager::freeMem((poolp), (addr))
-//-------------------------------------------------------------------------------------
-
-//
-//the below singleton is used to test the private memory pool.
-//
-#if 0
-class LL_COMMON_API LLPrivateMemoryPoolTester
-{
-private:
- LLPrivateMemoryPoolTester() ;
- ~LLPrivateMemoryPoolTester() ;
-
-public:
- static LLPrivateMemoryPoolTester* getInstance() ;
- static void destroy() ;
-
- void run(S32 type) ;
-
-private:
- void correctnessTest() ;
- void performanceTest() ;
- void fragmentationtest() ;
-
- void test(U32 min_size, U32 max_size, U32 stride, U32 times, bool random_deletion, bool output_statistics) ;
- void testAndTime(U32 size, U32 times) ;
-
-#if 0
-public:
- void* operator new(size_t size)
- {
- return (void*)sPool->allocate(size) ;
- }
- void operator delete(void* addr)
- {
- sPool->freeMem(addr) ;
- }
- void* operator new[](size_t size)
- {
- return (void*)sPool->allocate(size) ;
- }
- void operator delete[](void* addr)
- {
- sPool->freeMem(addr) ;
- }
-#endif
-
-private:
- static LLPrivateMemoryPoolTester* sInstance;
- static LLPrivateMemoryPool* sPool ;
- static LLPrivateMemoryPool* sThreadedPool ;
-};
-#if 0
-//static
-void* LLPrivateMemoryPoolTester::operator new(size_t size)
-{
- return (void*)sPool->allocate(size) ;
-}
-
-//static
-void LLPrivateMemoryPoolTester::operator delete(void* addr)
-{
- sPool->free(addr) ;
-}
-
-//static
-void* LLPrivateMemoryPoolTester::operator new[](size_t size)
-{
- return (void*)sPool->allocate(size) ;
-}
-
-//static
-void LLPrivateMemoryPoolTester::operator delete[](void* addr)
-{
- sPool->free(addr) ;
-}
-#endif
-#endif
// LLRefCount moved to llrefcount.h
// LLPointer moved to llpointer.h
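Why the one-for-one replacement in the remaining files is safe: the removed ALLOCATE_MEM / FREE_MEM wrappers already fell through to the plain aligned-heap calls whenever no pool was supplied, and private pools were disabled by default (MemoryPrivatePoolEnabled is 0 in settings.xml). A condensed sketch of the removed dispatch, with the debug tracker and dangling-pool handling omitted:

    // Simplified from the removed LLPrivateMemoryPoolManager::allocate()/freeMem().
    char* allocate(LLPrivateMemoryPool* poolp, U32 size)
    {
        return poolp ? poolp->allocate(size)
                     : (char*)ll_aligned_malloc_16(size);   // no pool: plain heap
    }

    void freeMem(LLPrivateMemoryPool* poolp, void* addr)
    {
        if (!addr) return;
        if (poolp) poolp->freeMem(addr);
        else       ll_aligned_free_16(addr);                 // no pool: plain heap
    }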
diff --git a/indra/llimage/llimage.cpp b/indra/llimage/llimage.cpp
index 4a76d15096..dca03cfe04 100644
--- a/indra/llimage/llimage.cpp
+++ b/indra/llimage/llimage.cpp
@@ -588,7 +588,6 @@ std::string LLImage::sLastErrorMessage;
LLMutex* LLImage::sMutex = NULL;
bool LLImage::sUseNewByteRange = false;
S32 LLImage::sMinimalReverseByteRangePercent = 75;
-LLPrivateMemoryPool* LLImageBase::sPrivatePoolp = NULL ;
//static
void LLImage::initClass(bool use_new_byte_range, S32 minimal_reverse_byte_range_percent)
@@ -596,8 +595,6 @@ void LLImage::initClass(bool use_new_byte_range, S32 minimal_reverse_byte_range_
sUseNewByteRange = use_new_byte_range;
sMinimalReverseByteRangePercent = minimal_reverse_byte_range_percent;
sMutex = new LLMutex(NULL);
-
- LLImageBase::createPrivatePool() ;
}
//static
@@ -605,8 +602,6 @@ void LLImage::cleanupClass()
{
delete sMutex;
sMutex = NULL;
-
- LLImageBase::destroyPrivatePool() ;
}
//static
@@ -644,25 +639,6 @@ LLImageBase::~LLImageBase()
deleteData(); // virtual
}
-//static
-void LLImageBase::createPrivatePool()
-{
- if(!sPrivatePoolp)
- {
- sPrivatePoolp = LLPrivateMemoryPoolManager::getInstance()->newPool(LLPrivateMemoryPool::STATIC_THREADED) ;
- }
-}
-
-//static
-void LLImageBase::destroyPrivatePool()
-{
- if(sPrivatePoolp)
- {
- LLPrivateMemoryPoolManager::getInstance()->deletePool(sPrivatePoolp) ;
- sPrivatePoolp = NULL ;
- }
-}
-
// virtual
void LLImageBase::dump()
{
@@ -696,7 +672,7 @@ void LLImageBase::sanityCheck()
// virtual
void LLImageBase::deleteData()
{
- FREE_MEM(sPrivatePoolp, mData) ;
+ ll_aligned_free_16(mData);
disclaimMem(mDataSize);
mDataSize = 0;
mData = NULL;
@@ -736,7 +712,7 @@ U8* LLImageBase::allocateData(S32 size)
if (!mBadBufferAllocation && (!mData || size != mDataSize))
{
deleteData(); // virtual
- mData = (U8*)ALLOCATE_MEM(sPrivatePoolp, size);
+ mData = (U8*)ll_aligned_malloc_16(size);
if (!mData)
{
LL_WARNS() << "Failed to allocate image data size [" << size << "]" << LL_ENDL;
@@ -763,7 +739,7 @@ U8* LLImageBase::allocateData(S32 size)
// virtual
U8* LLImageBase::reallocateData(S32 size)
{
- U8 *new_datap = (U8*)ALLOCATE_MEM(sPrivatePoolp, size);
+ U8 *new_datap = (U8*)ll_aligned_malloc_16(size);
if (!new_datap)
{
LL_WARNS() << "Out of memory in LLImageBase::reallocateData" << LL_ENDL;
@@ -773,7 +749,7 @@ U8* LLImageBase::reallocateData(S32 size)
{
S32 bytes = llmin(mDataSize, size);
memcpy(new_datap, mData, bytes); /* Flawfinder: ignore */
- FREE_MEM(sPrivatePoolp, mData) ;
+ ll_aligned_free_16(mData) ;
}
mData = new_datap;
disclaimMem(mDataSize);
@@ -1470,7 +1446,7 @@ bool LLImageRaw::scale( S32 new_width, S32 new_height, bool scale_image_data )
if (new_data_size > 0)
{
- U8 *new_data = (U8*)ALLOCATE_MEM(LLImageBase::getPrivatePool(), new_data_size);
+ U8 *new_data = (U8*)ll_aligned_malloc_16(new_data_size);
if(NULL == new_data)
{
return false;
@@ -2169,7 +2145,7 @@ void LLImageFormatted::appendData(U8 *data, S32 size)
S32 newsize = cursize + size;
reallocateData(newsize);
memcpy(getData() + cursize, data, size);
- FREE_MEM(LLImageBase::getPrivatePool(), data);
+ ll_aligned_free_16(data);
}
}
}
diff --git a/indra/llimage/llimage.h b/indra/llimage/llimage.h
index 958c9fad3d..8ec49d3f0f 100644
--- a/indra/llimage/llimage.h
+++ b/indra/llimage/llimage.h
@@ -71,7 +71,6 @@ const S32 HTTP_PACKET_SIZE = 1496;
class LLImageFormatted;
class LLImageRaw;
class LLColor4U;
-class LLPrivateMemoryPool;
typedef enum e_image_codec
{
@@ -160,10 +159,6 @@ public:
static F32 calc_download_priority(F32 virtual_size, F32 visible_area, S32 bytes_sent);
static EImageCodec getCodecFromExtension(const std::string& exten);
-
- static void createPrivatePool() ;
- static void destroyPrivatePool() ;
- static LLPrivateMemoryPool* getPrivatePool() {return sPrivatePoolp;}
//static LLTrace::MemStatHandle sMemStat;
@@ -178,8 +173,6 @@ private:
bool mBadBufferAllocation ;
bool mAllowOverSize ;
-
- static LLPrivateMemoryPool* sPrivatePoolp ;
};
// Raw representation of an image (used for textures, and other uncompressed formats
diff --git a/indra/llimage/llimagedxt.cpp b/indra/llimage/llimagedxt.cpp
index 0ec83415a0..3a7319d765 100644
--- a/indra/llimage/llimagedxt.cpp
+++ b/indra/llimage/llimagedxt.cpp
@@ -430,7 +430,7 @@ bool LLImageDXT::convertToDXR()
S32 nmips = calcNumMips(width,height);
S32 total_bytes = getDataSize();
U8* olddata = getData();
- U8* newdata = (U8*)ALLOCATE_MEM(LLImageBase::getPrivatePool(), total_bytes);
+ U8* newdata = (U8*)ll_aligned_malloc_16(total_bytes);
if (!newdata)
{
LL_ERRS() << "Out of memory in LLImageDXT::convertToDXR()" << LL_ENDL;
diff --git a/indra/llimage/llimagej2c.cpp b/indra/llimage/llimagej2c.cpp
index c40df009d8..4bff21610f 100644
--- a/indra/llimage/llimagej2c.cpp
+++ b/indra/llimage/llimagej2c.cpp
@@ -368,7 +368,7 @@ bool LLImageJ2C::loadAndValidate(const std::string &filename)
}
else
{
- U8 *data = (U8*)ALLOCATE_MEM(LLImageBase::getPrivatePool(), file_size);
+ U8 *data = (U8*)ll_aligned_malloc_16(file_size);
if (!data)
{
infile.close();
@@ -383,7 +383,7 @@ bool LLImageJ2C::loadAndValidate(const std::string &filename)
if (s != APR_SUCCESS || (S32)bytes_read != file_size)
{
- FREE_MEM(LLImageBase::getPrivatePool(), data);
+ ll_aligned_free_16(data);
setLastError("Unable to read entire file");
res = false;
}
diff --git a/indra/llrender/llvertexbuffer.cpp b/indra/llrender/llvertexbuffer.cpp
index c06fdd9700..a55ca5ed9c 100644
--- a/indra/llrender/llvertexbuffer.cpp
+++ b/indra/llrender/llvertexbuffer.cpp
@@ -98,7 +98,6 @@ U32 LLVertexBuffer::sCurVAOName = 1;
U32 LLVertexBuffer::sAllocatedIndexBytes = 0;
U32 LLVertexBuffer::sIndexCount = 0;
-LLPrivateMemoryPool* LLVertexBuffer::sPrivatePoolp = NULL;
U32 LLVertexBuffer::sBindCount = 0;
U32 LLVertexBuffer::sSetCount = 0;
S32 LLVertexBuffer::sCount = 0;
@@ -863,11 +862,6 @@ void LLVertexBuffer::initClass(bool use_vbo, bool no_vbo_mapping)
{
sEnableVBOs = use_vbo && gGLManager.mHasVertexBufferObject;
sDisableVBOMapping = sEnableVBOs && no_vbo_mapping;
-
- if (!sPrivatePoolp)
- {
- sPrivatePoolp = LLPrivateMemoryPoolManager::getInstance()->newPool(LLPrivateMemoryPool::STATIC);
- }
}
//static
@@ -910,12 +904,6 @@ void LLVertexBuffer::cleanupClass()
sStreamVBOPool.cleanup();
sDynamicVBOPool.cleanup();
sDynamicCopyVBOPool.cleanup();
-
- if(sPrivatePoolp)
- {
- LLPrivateMemoryPoolManager::getInstance()->deletePool(sPrivatePoolp);
- sPrivatePoolp = NULL;
- }
}
//----------------------------------------------------------------------------
@@ -1206,7 +1194,7 @@ bool LLVertexBuffer::createGLBuffer(U32 size)
{
static int gl_buffer_idx = 0;
mGLBuffer = ++gl_buffer_idx;
- mMappedData = (U8*)ALLOCATE_MEM(sPrivatePoolp, size);
+ mMappedData = (U8*)ll_aligned_malloc_16(size);
disclaimMem(mSize);
mSize = size;
claimMem(mSize);
@@ -1248,7 +1236,7 @@ bool LLVertexBuffer::createGLIndices(U32 size)
}
else
{
- mMappedIndexData = (U8*)ALLOCATE_MEM(sPrivatePoolp, size);
+ mMappedIndexData = (U8*)ll_aligned_malloc_16(size);
static int gl_buffer_idx = 0;
mGLIndices = ++gl_buffer_idx;
mIndicesSize = size;
@@ -1271,7 +1259,7 @@ void LLVertexBuffer::destroyGLBuffer()
}
else
{
- FREE_MEM(sPrivatePoolp, (void*) mMappedData);
+ ll_aligned_free_16((void*)mMappedData);
mMappedData = NULL;
mEmpty = true;
}
@@ -1291,7 +1279,7 @@ void LLVertexBuffer::destroyGLIndices()
}
else
{
- FREE_MEM(sPrivatePoolp, (void*) mMappedIndexData);
+ ll_aligned_free_16((void*)mMappedIndexData);
mMappedIndexData = NULL;
mEmpty = true;
}
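With the STATIC private pool removed, the non-VBO fallback path in llvertexbuffer.cpp simply owns 16-byte-aligned heap blocks for the client-side vertex and index copies. A rough sketch of that ownership pattern follows; the type and member names here are illustrative only, not taken from the commit.

    // Sketch: mirrors the createGLBuffer()/destroyGLBuffer() fallback after the change.
    struct ClientSideBuffer
    {
        U8* mMappedData;

        ClientSideBuffer() : mMappedData(NULL) {}

        void create(U32 size)
        {
            destroy();                                    // never leak a previous block
            mMappedData = (U8*)ll_aligned_malloc_16(size);
        }

        void destroy()
        {
            ll_aligned_free_16(mMappedData);              // free()/_aligned_free() accept NULL
            mMappedData = NULL;
        }
    };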
diff --git a/indra/newview/app_settings/settings.xml b/indra/newview/app_settings/settings.xml
index 4a4f4bfc61..1cf3b31ac0 100644
--- a/indra/newview/app_settings/settings.xml
+++ b/indra/newview/app_settings/settings.xml
@@ -2369,7 +2369,7 @@
<key>Value</key>
<integer>0</integer>
</map>
- <key>DebugShowPrivateMem</key>
+ <key>DebugShowPrivateMem</key> <!-- deprecated (see MAINT-8091) -->
<map>
<key>Comment</key>
<string>Show Private Mem Info</string>
@@ -6408,10 +6408,10 @@
<key>Value</key>
<real>600.0</real>
</map>
- <key>MemoryPrivatePoolEnabled</key>
+ <key>MemoryPrivatePoolEnabled</key> <!-- deprecated (see MAINT-8091) -->
<map>
<key>Comment</key>
- <string>Enable the private memory pool management</string>
+ <string>DEPRECATED: Enable the private memory pool management</string>
<key>Persist</key>
<integer>1</integer>
<key>Type</key>
@@ -6419,10 +6419,10 @@
<key>Value</key>
<integer>0</integer>
</map>
- <key>MemoryPrivatePoolSize</key>
+ <key>MemoryPrivatePoolSize</key> <!-- deprecated (see MAINT-8091) -->
<map>
<key>Comment</key>
- <string>Size of the private memory pool in MB (min. value is 256)</string>
+ <string>DEPRECATED: Size of the private memory pool in MB (min. value is 256)</string>
<key>Persist</key>
<integer>1</integer>
<key>Type</key>
diff --git a/indra/newview/llappviewer.cpp b/indra/newview/llappviewer.cpp
index d48ff458d7..73f70dffe4 100644
--- a/indra/newview/llappviewer.cpp
+++ b/indra/newview/llappviewer.cpp
@@ -799,7 +799,6 @@ bool LLAppViewer::init()
initMaxHeapSize() ;
LLCoros::instance().setStackSize(gSavedSettings.getS32("CoroutineStackSize"));
- LLPrivateMemoryPoolManager::initClass((BOOL)gSavedSettings.getBOOL("MemoryPrivatePoolEnabled"), (U32)gSavedSettings.getU32("MemoryPrivatePoolSize")*1024*1024) ;
// write Google Breakpad minidump files to a per-run dump directory to avoid multiple viewer issues.
std::string logdir = gDirUtilp->getExpandedFilename(LL_PATH_DUMP, "");
mDumpPath = logdir;
@@ -1363,10 +1362,6 @@ bool LLAppViewer::doFrame()
LLEventPump& mainloop(LLEventPumps::instance().obtain("mainloop"));
LLSD newFrame;
- //LLPrivateMemoryPoolTester::getInstance()->run(false) ;
- //LLPrivateMemoryPoolTester::getInstance()->run(true) ;
- //LLPrivateMemoryPoolTester::destroy() ;
-
LL_RECORD_BLOCK_TIME(FTM_FRAME);
LLTrace::BlockTimer::processTimes();
LLTrace::get_frame_recording().nextPeriod();
@@ -2075,9 +2070,6 @@ bool LLAppViewer::cleanup()
LLMainLoopRepeater::instance().stop();
- //release all private memory pools.
- LLPrivateMemoryPoolManager::destroyClass() ;
-
ll_close_fail_log();
LLError::LLCallStacks::cleanup();
diff --git a/indra/newview/lltexturecache.cpp b/indra/newview/lltexturecache.cpp
index dadf2f9701..2a0d961952 100644
--- a/indra/newview/lltexturecache.cpp
+++ b/indra/newview/lltexturecache.cpp
@@ -117,7 +117,7 @@ public:
~LLTextureCacheWorker()
{
llassert_always(!haveWork());
- FREE_MEM(LLImageBase::getPrivatePool(), mReadData);
+ ll_aligned_free_16(mReadData);
}
// override this interface
@@ -237,7 +237,7 @@ bool LLTextureCacheLocalFileWorker::doRead()
// << " Bytes: " << mDataSize << " Offset: " << mOffset
// << " / " << mDataSize << LL_ENDL;
mDataSize = 0; // failed
- FREE_MEM(LLImageBase::getPrivatePool(), mReadData);
+ ll_aligned_free_16(mReadData);
mReadData = NULL;
}
return true;
@@ -252,7 +252,7 @@ bool LLTextureCacheLocalFileWorker::doRead()
{
mDataSize = local_size;
}
- mReadData = (U8*)ALLOCATE_MEM(LLImageBase::getPrivatePool(), mDataSize);
+ mReadData = (U8*)ll_aligned_malloc_16(mDataSize);
S32 bytes_read = LLAPRFile::readEx(mFileName, mReadData, mOffset, mDataSize, mCache->getLocalAPRFilePool());
@@ -262,7 +262,7 @@ bool LLTextureCacheLocalFileWorker::doRead()
// << " Bytes: " << mDataSize << " Offset: " << mOffset
// << " / " << mDataSize << LL_ENDL;
mDataSize = 0;
- FREE_MEM(LLImageBase::getPrivatePool(), mReadData);
+ ll_aligned_free_16(mReadData);
mReadData = NULL;
}
else
@@ -386,7 +386,7 @@ bool LLTextureCacheRemoteWorker::doRead()
mDataSize = local_size;
}
// Allocate read buffer
- mReadData = (U8*)ALLOCATE_MEM(LLImageBase::getPrivatePool(), mDataSize);
+ mReadData = (U8*)ll_aligned_malloc_16(mDataSize);
if (mReadData)
{
@@ -402,7 +402,7 @@ bool LLTextureCacheRemoteWorker::doRead()
<< " Bytes: " << mDataSize << " Offset: " << mOffset
<< " / " << mDataSize << LL_ENDL;
mDataSize = 0;
- FREE_MEM(LLImageBase::getPrivatePool(), mReadData);
+ ll_aligned_free_16(mReadData);
mReadData = NULL;
}
else
@@ -451,7 +451,7 @@ bool LLTextureCacheRemoteWorker::doRead()
S32 size = TEXTURE_CACHE_ENTRY_SIZE - mOffset;
size = llmin(size, mDataSize);
// Allocate the read buffer
- mReadData = (U8*)ALLOCATE_MEM(LLImageBase::getPrivatePool(), size);
+ mReadData = (U8*)ll_aligned_malloc_16(size);
if (mReadData)
{
S32 bytes_read = LLAPRFile::readEx(mCache->mHeaderDataFileName,
@@ -461,7 +461,7 @@ bool LLTextureCacheRemoteWorker::doRead()
LL_WARNS() << "LLTextureCacheWorker: " << mID
<< " incorrect number of bytes read from header: " << bytes_read
<< " / " << size << LL_ENDL;
- FREE_MEM(LLImageBase::getPrivatePool(), mReadData);
+ ll_aligned_free_16(mReadData);
mReadData = NULL;
mDataSize = -1; // failed
done = true;
@@ -500,7 +500,7 @@ bool LLTextureCacheRemoteWorker::doRead()
S32 data_offset, file_size, file_offset;
// Reserve the whole data buffer first
- U8* data = (U8*)ALLOCATE_MEM(LLImageBase::getPrivatePool(), mDataSize);
+ U8* data = (U8*)ll_aligned_malloc_16(mDataSize);
if (data)
{
// Set the data file pointers taking the read offset into account. 2 cases:
@@ -514,7 +514,7 @@ bool LLTextureCacheRemoteWorker::doRead()
// Copy the raw data we've been holding from the header cache into the new sized buffer
llassert_always(mReadData);
memcpy(data, mReadData, data_offset);
- FREE_MEM(LLImageBase::getPrivatePool(), mReadData);
+ ll_aligned_free_16(mReadData);
mReadData = NULL;
}
else
@@ -540,7 +540,7 @@ bool LLTextureCacheRemoteWorker::doRead()
LL_WARNS() << "LLTextureCacheWorker: " << mID
<< " incorrect number of bytes read from body: " << bytes_read
<< " / " << file_size << LL_ENDL;
- FREE_MEM(LLImageBase::getPrivatePool(), mReadData);
+ ll_aligned_free_16(mReadData);
mReadData = NULL;
mDataSize = -1; // failed
done = true;
@@ -550,7 +550,7 @@ bool LLTextureCacheRemoteWorker::doRead()
{
LL_WARNS() << "LLTextureCacheWorker: " << mID
<< " failed to allocate memory for reading: " << mDataSize << LL_ENDL;
- FREE_MEM(LLImageBase::getPrivatePool(), mReadData);
+ ll_aligned_free_16(mReadData);
mReadData = NULL;
mDataSize = -1; // failed
done = true;
@@ -673,11 +673,11 @@ bool LLTextureCacheRemoteWorker::doWrite()
{
// We need to write a full record in the header cache so, if the amount of data is smaller
// than a record, we need to transfer the data to a buffer padded with 0 and write that
- U8* padBuffer = (U8*)ALLOCATE_MEM(LLImageBase::getPrivatePool(), TEXTURE_CACHE_ENTRY_SIZE);
+ U8* padBuffer = (U8*)ll_aligned_malloc_16(TEXTURE_CACHE_ENTRY_SIZE);
memset(padBuffer, 0, TEXTURE_CACHE_ENTRY_SIZE); // Init with zeros
memcpy(padBuffer, mWriteData, mDataSize); // Copy the write buffer
bytes_written = LLAPRFile::writeEx(mCache->mHeaderDataFileName, padBuffer, offset, size, mCache->getLocalAPRFilePool());
- FREE_MEM(LLImageBase::getPrivatePool(), padBuffer);
+ ll_aligned_free_16(padBuffer);
}
else
{
@@ -783,7 +783,7 @@ void LLTextureCacheWorker::finishWork(S32 param, bool completed)
}
else
{
- FREE_MEM(LLImageBase::getPrivatePool(), mReadData);
+ ll_aligned_free_16(mReadData);
mReadData = NULL;
}
}
@@ -845,7 +845,7 @@ LLTextureCache::~LLTextureCache()
writeUpdatedEntries() ;
delete mFastCachep;
delete mFastCachePoolp;
- FREE_MEM(LLImageBase::getPrivatePool(), mFastCachePadBuffer);
+ ll_aligned_free_16(mFastCachePadBuffer);
}
//////////////////////////////////////////////////////////////////////////////
@@ -1983,10 +1983,10 @@ LLPointer<LLImageRaw> LLTextureCache::readFromFastCache(const LLUUID& id, S32& d
}
discardlevel = head[3];
- data = (U8*)ALLOCATE_MEM(LLImageBase::getPrivatePool(), image_size);
+ data = (U8*)ll_aligned_malloc_16(image_size);
if(mFastCachep->read(data, image_size) != image_size)
{
- FREE_MEM(LLImageBase::getPrivatePool(), data);
+ ll_aligned_free_16(data);
closeFastCache();
return NULL;
}
@@ -2078,7 +2078,7 @@ void LLTextureCache::openFastCache(bool first_time)
{
if(!mFastCachePadBuffer)
{
- mFastCachePadBuffer = (U8*)ALLOCATE_MEM(LLImageBase::getPrivatePool(), TEXTURE_FAST_CACHE_ENTRY_SIZE);
+ mFastCachePadBuffer = (U8*)ll_aligned_malloc_16(TEXTURE_FAST_CACHE_ENTRY_SIZE);
}
mFastCachePoolp = new LLVolatileAPRPool();
if (LLAPRFile::isExist(mFastCacheFileName, mFastCachePoolp))
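Every error path in the lltexturecache.cpp hunks frees its read buffer by hand, exactly as the pooled version did. Purely as an illustration of the new allocation contract (not something this commit introduces), a small RAII guard over the same two llmemory.h helpers could collapse that repeated cleanup; the class below is a sketch under that assumption.

    // Sketch only, assuming ll_aligned_malloc_16 / ll_aligned_free_16 as above.
    class AlignedReadBuffer
    {
    public:
        explicit AlignedReadBuffer(S32 size)
            : mData((U8*)ll_aligned_malloc_16(size)) {}
        ~AlignedReadBuffer()      { ll_aligned_free_16(mData); }

        U8*  get() const          { return mData; }
        bool valid() const        { return mData != NULL; }
        U8*  release()            { U8* p = mData; mData = NULL; return p; }  // hand off ownership

    private:
        U8* mData;
        AlignedReadBuffer(const AlignedReadBuffer&);              // non-copyable (C++03 style)
        AlignedReadBuffer& operator=(const AlignedReadBuffer&);
    };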
diff --git a/indra/newview/lltexturefetch.cpp b/indra/newview/lltexturefetch.cpp
index 1f7796e6d0..6fd90e4935 100644
--- a/indra/newview/lltexturefetch.cpp
+++ b/indra/newview/lltexturefetch.cpp
@@ -1761,7 +1761,7 @@ bool LLTextureFetchWorker::doWork(S32 param)
mRequestedOffset += src_offset;
}
- U8 * buffer = (U8 *)ALLOCATE_MEM(LLImageBase::getPrivatePool(), total_size);
+ U8 * buffer = (U8 *)ll_aligned_malloc_16(total_size);
if (!buffer)
{
// abort. If we have no space for packet, we have not enough space to decode image
@@ -2266,7 +2266,7 @@ bool LLTextureFetchWorker::processSimulatorPackets()
if (buffer_size > cur_size)
{
/// We have new data
- U8* buffer = (U8*)ALLOCATE_MEM(LLImageBase::getPrivatePool(), buffer_size);
+ U8* buffer = (U8*)ll_aligned_malloc_16(buffer_size);
S32 offset = 0;
if (cur_size > 0 && mFirstPacket > 0)
{
@@ -5059,7 +5059,7 @@ void LLTextureFetchDebugger::callbackHTTP(FetchEntry & fetch, LLCore::HttpRespon
//LL_INFOS(LOG_TXT) << "Fetch Debugger : got results for " << fetch.mID << ", data_size = " << data_size << ", received = " << fetch.mCurlReceivedSize << ", requested = " << fetch.mRequestedSize << ", partial = " << partial << LL_ENDL;
if ((fetch.mCurlReceivedSize >= fetch.mRequestedSize) || !partial || (fetch.mRequestedSize == 600))
{
- U8* d_buffer = (U8*)ALLOCATE_MEM(LLImageBase::getPrivatePool(), data_size);
+ U8* d_buffer = (U8*)ll_aligned_malloc_16(data_size);
if (ba)
{
ba->read(0, d_buffer, data_size);
diff --git a/indra/newview/llviewerwindow.cpp b/indra/newview/llviewerwindow.cpp
index 74deaffe16..c057954606 100644
--- a/indra/newview/llviewerwindow.cpp
+++ b/indra/newview/llviewerwindow.cpp
@@ -740,16 +740,6 @@ public:
ypos += y_inc;
}
- if (gSavedSettings.getBOOL("DebugShowPrivateMem"))
- {
- LLPrivateMemoryPoolManager::getInstance()->updateStatistics() ;
- addText(xpos, ypos, llformat("Total Reserved(KB): %d", LLPrivateMemoryPoolManager::getInstance()->mTotalReservedSize / 1024));
- ypos += y_inc;
-
- addText(xpos, ypos, llformat("Total Allocated(KB): %d", LLPrivateMemoryPoolManager::getInstance()->mTotalAllocatedSize / 1024));
- ypos += y_inc;
- }
-
// only display these messages if we are actually rendering beacons at this moment
if (LLPipeline::getRenderBeacons() && LLFloaterReg::instanceVisible("beacons"))
{
diff --git a/indra/newview/skins/default/xui/en/menu_viewer.xml b/indra/newview/skins/default/xui/en/menu_viewer.xml
index 92511167c0..fa15fb0657 100644
--- a/indra/newview/skins/default/xui/en/menu_viewer.xml
+++ b/indra/newview/skins/default/xui/en/menu_viewer.xml
@@ -2373,16 +2373,6 @@
function="ToggleControl"
parameter="DebugShowMemory" />
</menu_item_check>
- <menu_item_check
- label="Show Private Mem Info"
- name="Show Private Mem Info">
- <menu_item_check.on_check
- function="CheckControl"
- parameter="DebugShowPrivateMem" />
- <menu_item_check.on_click
- function="ToggleControl"
- parameter="DebugShowPrivateMem" />
- </menu_item_check>
<menu_item_separator/>