-rw-r--r--  indra/llcommon/llmemory.cpp                            | 955
-rw-r--r--  indra/llcommon/llmemory.h                              | 174
-rw-r--r--  indra/llcommon/llsys.cpp                               |   8
-rw-r--r--  indra/llcommon/llsys.h                                 |   2
-rw-r--r--  indra/llrender/llvertexbuffer.cpp                      |   9
-rw-r--r--  indra/llxml/llcontrol.h                                |   3
-rw-r--r--  indra/newview/app_settings/settings.xml                |  22
-rw-r--r--  indra/newview/llappviewer.cpp                          | 128
-rw-r--r--  indra/newview/llappviewer.h                            |  10
-rw-r--r--  indra/newview/lldynamictexture.cpp                     |   6
-rw-r--r--  indra/newview/llfloatermemleak.cpp                     |   5
-rw-r--r--  indra/newview/llviewerdisplay.cpp                      |   7
-rw-r--r--  indra/newview/llviewertexture.cpp                      |  12
-rw-r--r--  indra/newview/llviewertexture.h                        |   2
-rw-r--r--  indra/newview/llviewerwindow.cpp                       |  13
-rw-r--r--  indra/newview/pipeline.cpp                             |  22
-rw-r--r--  indra/newview/pipeline.h                               |   5
-rw-r--r--  indra/newview/skins/default/xui/en/notifications.xml   |  16
18 files changed, 1342 insertions(+), 57 deletions(-)
diff --git a/indra/llcommon/llmemory.cpp b/indra/llcommon/llmemory.cpp
index a502d1a7eb..ca06589611 100644
--- a/indra/llcommon/llmemory.cpp
+++ b/indra/llcommon/llmemory.cpp
@@ -26,8 +26,9 @@
#include "linden_common.h"
+#include "llthread.h"
#if defined(LL_WINDOWS)
-# include <windows.h>
+//# include <windows.h>
# include <psapi.h>
#elif defined(LL_DARWIN)
# include <sys/types.h>
@@ -38,11 +39,19 @@
#endif
#include "llmemory.h"
+#include "llsys.h"
+
//----------------------------------------------------------------------------
//static
char* LLMemory::reserveMem = 0;
+U32 LLMemory::sAvailPhysicalMemInKB = U32_MAX ;
+U32 LLMemory::sMaxPhysicalMemInKB = 0;
+U32 LLMemory::sAllocatedMemInKB = 0;
+U32 LLMemory::sAllocatedPageSizeInKB = 0 ;
+U32 LLMemory::sMaxHeapSizeInKB = U32_MAX ;
+BOOL LLMemory::sEnableMemoryFailurePrevention = FALSE;
//static
void LLMemory::initClass()
@@ -67,6 +76,131 @@ void LLMemory::freeReserve()
reserveMem = NULL;
}
+//static
+void LLMemory::initMaxHeapSizeGB(F32 max_heap_size_gb, BOOL prevent_heap_failure)
+{
+ sMaxHeapSizeInKB = (U32)(max_heap_size_gb * 1024 * 1024) ;
+ sEnableMemoryFailurePrevention = prevent_heap_failure ;
+}
+
+//static
+void LLMemory::updateMemoryInfo()
+{
+#if LL_WINDOWS
+ HANDLE self = GetCurrentProcess();
+ PROCESS_MEMORY_COUNTERS counters;
+
+ if (!GetProcessMemoryInfo(self, &counters, sizeof(counters)))
+ {
+ llwarns << "GetProcessMemoryInfo failed" << llendl;
+ return ;
+ }
+
+ sAllocatedMemInKB = (U32)(counters.WorkingSetSize / 1024) ;
+ sAllocatedPageSizeInKB = (U32)(counters.PagefileUsage / 1024) ;
+ sMaxPhysicalMemInKB = llmin(LLMemoryInfo::getAvailableMemoryKB() + sAllocatedMemInKB, sMaxHeapSizeInKB);
+
+ if(sMaxPhysicalMemInKB > sAllocatedMemInKB)
+ {
+ sAvailPhysicalMemInKB = sMaxPhysicalMemInKB - sAllocatedMemInKB ;
+ }
+ else
+ {
+ sAvailPhysicalMemInKB = 0 ;
+ }
+#else
+ //not valid for other systems for now.
+ sAllocatedMemInKB = (U32)(LLMemory::getCurrentRSS() / 1024) ;
+ sMaxPhysicalMemInKB = U32_MAX ;
+ sAvailPhysicalMemInKB = U32_MAX ;
+#endif
+
+ return ;
+}
+
+//
+//this function tests whether a contiguous block of the given size would fit in the virtual address space.
+//it does not do any real allocation.
+//if successful, it returns the address where the memory chunk can fit;
+//otherwise it returns NULL.
+//
+//static
+void* LLMemory::tryToAlloc(void* address, U32 size)
+{
+#if LL_WINDOWS
+ address = VirtualAlloc(address, size, MEM_RESERVE | MEM_TOP_DOWN, PAGE_NOACCESS) ;
+ if(address)
+ {
+ if(!VirtualFree(address, 0, MEM_RELEASE))
+ {
+ llerrs << "error happens when free some memory reservation." << llendl ;
+ }
+ }
+#else
+#endif
+
+ return address ;
+}
+
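Note: the probe above reserves address space and releases it immediately, so a caller can use it to ask whether a large contiguous allocation would currently fit. A minimal sketch under that reading (the helper name and parameters are illustrative; the snapshot check later in this patch does the same thing inline):

    #include "llmemory.h"

    bool can_fit_snapshot_example(U32 width, U32 height)
    {
        // probe for width * height * 3 bytes of contiguous address space;
        // nothing stays reserved, so this is only a point-in-time answer.
        return LLMemory::tryToAlloc(NULL, width * height * 3) != NULL;
    }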
+//static
+void LLMemory::logMemoryInfo(BOOL update)
+{
+ if(update)
+ {
+ updateMemoryInfo() ;
+ }
+
+ llinfos << "Current allocated physical memory(KB): " << sAllocatedMemInKB << llendl ;
+ llinfos << "Current allocated page size (KB): " << sAllocatedPageSizeInKB << llendl ;
+ llinfos << "Current availabe physical memory(KB): " << sAvailPhysicalMemInKB << llendl ;
+ llinfos << "Current max usable memory(KB): " << sMaxPhysicalMemInKB << llendl ;
+}
+
+//return 0: everything is normal;
+//return 1: the memory pool is low, but not in danger;
+//return -1: the memory pool is in danger, is about to crash.
+//static
+S32 LLMemory::isMemoryPoolLow()
+{
+ static const U32 LOW_MEMORY_POOL_THRESHOLD_KB = 64 * 1024 ; //64 MB for emergency use
+
+ if(!sEnableMemoryFailurePrevention)
+ {
+ return 0 ; //no memory failure prevention.
+ }
+
+ if(sAvailPhysicalMemInKB < (LOW_MEMORY_POOL_THRESHOLD_KB >> 2)) //out of physical memory
+ {
+ return -1 ;
+ }
+
+ if(sAllocatedPageSizeInKB + (LOW_MEMORY_POOL_THRESHOLD_KB >> 2) > sMaxHeapSizeInKB) //out of virtual address space.
+ {
+ return -1 ;
+ }
+
+ return (S32)(sAvailPhysicalMemInKB < LOW_MEMORY_POOL_THRESHOLD_KB ||
+ sAllocatedPageSizeInKB + LOW_MEMORY_POOL_THRESHOLD_KB > sMaxHeapSizeInKB) ;
+}
+
+//static
+U32 LLMemory::getAvailableMemKB()
+{
+ return sAvailPhysicalMemInKB ;
+}
+
+//static
+U32 LLMemory::getMaxMemKB()
+{
+ return sMaxPhysicalMemInKB ;
+}
+
+//static
+U32 LLMemory::getAllocatedMemKB()
+{
+ return sAllocatedMemInKB ;
+}
+
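Taken together, the statics above form a small monitoring API. A minimal sketch of how a caller is expected to drive it; the once-per-frame polling and the reactions shown are illustrative, not part of this patch (LLAppViewer::checkMemory() further down is the real consumer):

    #include "llmemory.h"

    void memory_watchdog_example()
    {
        // once at startup: cap the heap at 1.6GB and enable failure prevention
        LLMemory::initMaxHeapSizeGB(1.6f, TRUE);

        // periodically, e.g. once per frame:
        LLMemory::updateMemoryInfo();
        S32 state = LLMemory::isMemoryPoolLow();  // 0 = normal, 1 = low, -1 = critical
        if (state != 0)
        {
            LLMemory::logMemoryInfo();            // stats were refreshed just above
        }
        if (state < 0)
        {
            // caller's choice: throttle new allocations, or prepare to quit
        }
    }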
void* ll_allocate (size_t size)
{
if (size == 0)
@@ -221,3 +355,822 @@ U64 LLMemory::getCurrentRSS()
}
#endif
+
+//-------------------------------------------------------------
+//class LLPrivateMemoryPool::LLMemoryBlock
+//-------------------------------------------------------------
+//
+//each memory block can fit one of two page sizes: 0.75 * mSlotSize, which starts from the beginning of the block and grows towards the end of
+//the block; the other is mSlotSize, which starts from the end of the block and grows towards the beginning of the block.
+//
+LLPrivateMemoryPool::LLMemoryBlock::LLMemoryBlock()
+{
+ //empty
+}
+
+LLPrivateMemoryPool::LLMemoryBlock::~LLMemoryBlock()
+{
+ //empty
+}
+
+void LLPrivateMemoryPool::LLMemoryBlock::init(char* buffer, U32 buffer_size, U32 slot_size)
+{
+ mBuffer = buffer ;
+ mBufferSize = buffer_size ;
+ mSlotSize = slot_size ;
+ mTotalSlots = buffer_size / mSlotSize ;
+ llassert_always(buffer_size / mSlotSize < 256) ; //the 8-bit slot counters limit a block to 255 slots
+ mAllocatedSlots = 0 ;
+
+ //mark free bits
+ S32 usage_bit_len = (mTotalSlots + 31) / 32 ;
+ mDummySize = usage_bit_len - 1 ;
+ if(mDummySize > 0) //extra space to store mUsageBits
+ {
+ mTotalSlots -= (mDummySize * sizeof(mUsageBits) + mSlotSize - 1) / mSlotSize ;
+ usage_bit_len = (mTotalSlots + 31) / 32 ;
+ mDummySize = usage_bit_len - 1 ;
+
+ if(mDummySize > 0)
+ {
+ mUsageBits = 0 ;
+ for(S32 i = 0 ; i < mDummySize ; i++)
+ {
+ *((U32*)mBuffer + i) = 0 ;
+ }
+ if(mTotalSlots & 31)
+ {
+ *((U32*)mBuffer + mDummySize - 1) = (0xffffffff << (mTotalSlots & 31)) ;
+ }
+ }
+ }
+
+ if(mDummySize < 1)
+ {
+ mUsageBits = 0 ;
+ if(mTotalSlots & 31)
+ {
+ mUsageBits = (0xffffffff << (mTotalSlots & 31)) ;
+ }
+ }
+
+ mSelf = NULL ;
+ mNext = NULL ;
+}
+
+void LLPrivateMemoryPool::LLMemoryBlock::setBuffer(char* buffer, U32 buffer_size)
+{
+ mBuffer = buffer ;
+ mBufferSize = buffer_size ;
+ mTotalSlots = 0 ; //set the block is free.
+}
+
+char* LLPrivateMemoryPool::LLMemoryBlock::allocate()
+{
+ llassert_always(mAllocatedSlots < mTotalSlots) ;
+
+ //find a free slot
+ U32* bits = NULL ;
+ U32 k = 0 ;
+ if(mUsageBits != 0xffffffff)
+ {
+ bits = &mUsageBits ;
+ }
+ else if(mDummySize > 0)//go to extra space
+ {
+ for(S32 i = 0 ; i < mDummySize; i++)
+ {
+ if(*((U32*)mBuffer + i) != 0xffffffff)
+ {
+ bits = (U32*)mBuffer + i ;
+ k = i + 1 ;
+ break ;
+ }
+ }
+ }
+ S32 idx = 0 ;
+ U32 tmp = *bits ;
+ for(; tmp & 1 ; tmp >>= 1, idx++) ;
+
+ //set the slot reserved
+ if(!idx)
+ {
+ *bits |= 1 ;
+ }
+ else
+ {
+ *bits |= (1 << idx) ;
+ }
+
+ mAllocatedSlots++ ;
+
+ return mBuffer + mDummySize * sizeof(U32) + (k * 32 + idx) * mSlotSize ;
+}
+
+void LLPrivateMemoryPool::LLMemoryBlock::free(void* addr)
+{
+ U32 idx = ((char*) addr - mBuffer - mDummySize * sizeof(U32)) / mSlotSize ;
+
+ U32* bits = &mUsageBits ;
+ if(idx >= 32)
+ {
+ bits = (U32*)mBuffer + (idx - 32) / 32 ;
+ }
+ if(idx & 31)
+ {
+ *bits &= ~(1 << (idx & 31)) ;
+ }
+ else
+ {
+ *bits &= ~1 ;
+ }
+
+ mAllocatedSlots-- ;
+}
+
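Before moving on to the chunk layer, the slot-bitmap search performed by allocate()/free() above may be easier to follow in isolation. A standalone sketch limited to a single 32-bit word (plain C++, not the class itself):

    #include <cstdint>
    #include <cassert>

    struct SlotBitmap32              // one U32 tracks up to 32 slots; a set bit means "in use"
    {
        uint32_t bits = 0;

        int allocate()               // returns a slot index, or -1 when the word is full
        {
            if (bits == 0xffffffffu) return -1;
            int idx = 0;
            for (uint32_t tmp = bits; tmp & 1u; tmp >>= 1) ++idx;   // find the first clear bit
            bits |= (1u << idx);
            return idx;
        }

        void free(int idx)
        {
            assert(idx >= 0 && idx < 32);
            bits &= ~(1u << idx);    // clear the bit to mark the slot free again
        }
    };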
+//-------------------------------------------------------------------
+//class LLMemoryChunk
+//--------------------------------------------------------------------
+LLPrivateMemoryPool::LLMemoryChunk::LLMemoryChunk()
+{
+ //empty
+}
+
+LLPrivateMemoryPool::LLMemoryChunk::~LLMemoryChunk()
+{
+ //empty
+}
+
+void LLPrivateMemoryPool::LLMemoryChunk::init(char* buffer, U32 buffer_size, U32 min_slot_size, U32 max_slot_size, U32 min_block_size, U32 max_block_size)
+{
+ mBuffer = buffer ;
+ mBufferSize = buffer_size ;
+
+ mMetaBuffer = mBuffer + sizeof(LLMemoryChunk) ;
+
+ mMinBlockSize = min_block_size;
+ mMaxBlockSize = max_block_size;
+ mMinSlotSize = min_slot_size;
+ mBlockLevels = max_block_size / min_block_size ;
+ mPartitionLevels = mMaxBlockSize / mMinBlockSize + 1 ;
+
+ S32 max_num_blocks = (buffer_size - sizeof(LLMemoryChunk) - mBlockLevels * sizeof(LLMemoryBlock*) - mPartitionLevels * sizeof(LLMemoryBlock*)) /
+ (mMinBlockSize + sizeof(LLMemoryBlock)) ;
+ //meta data space
+ mBlocks = (LLMemoryBlock*)mMetaBuffer ;
+ mAvailBlockList = (LLMemoryBlock**)((char*)mBlocks + sizeof(LLMemoryBlock) * max_num_blocks) ;
+ mFreeSpaceList = (LLMemoryBlock**)((char*)mAvailBlockList + sizeof(LLMemoryBlock*) * mBlockLevels) ;
+
+ //data buffer
+ mDataBuffer = (char*)mFreeSpaceList + sizeof(LLMemoryBlock*) * mPartitionLevels ;
+
+ //init
+ for(U32 i = 0 ; i < mBlockLevels; i++)
+ {
+ mAvailBlockList[i] = NULL ;
+ }
+ for(U32 i = 0 ; i < mPartitionLevels ; i++)
+ {
+ mFreeSpaceList[i] = NULL ;
+ }
+
+ mBlocks[0].setBuffer(mDataBuffer, buffer_size - (mDataBuffer - mBuffer)) ;
+ addToFreeSpace(&mBlocks[0]) ;
+
+ mKey = (U32)mBuffer ;
+ mNext = NULL ;
+ mPrev = NULL ;
+}
+
+//static
+U32 LLPrivateMemoryPool::LLMemoryChunk::getMaxOverhead(U32 data_buffer_size, U32 min_page_size)
+{
+ return 2048 +
+ sizeof(LLMemoryBlock) * (data_buffer_size / min_page_size) ;
+}
+
+char* LLPrivateMemoryPool::LLMemoryChunk::allocate(U32 size)
+{
+ char* p = NULL ;
+ U32 blk_idx = size / mMinSlotSize ;
+ if(mMinSlotSize * blk_idx < size)
+ {
+ blk_idx++ ;
+ }
+
+ //check if there is free block available
+ if(mAvailBlockList[blk_idx])
+ {
+ LLMemoryBlock* blk = mAvailBlockList[blk_idx] ;
+ p = blk->allocate() ;
+
+ if(blk->isFull())
+ {
+ //removeFromFreelist
+ popAvailBlockList(blk_idx) ;
+ }
+ }
+
+ //ask for a new block
+ if(!p)
+ {
+ LLMemoryBlock* blk = addBlock(blk_idx) ;
+ if(blk)
+ {
+ p = blk->allocate() ;
+
+ if(blk->isFull())
+ {
+ //removeFromFreelist
+ popAvailBlockList(blk_idx) ;
+ }
+ }
+ }
+
+ //ask for space from higher level blocks
+ if(!p)
+ {
+ for(S32 i = blk_idx + 1 ; i < mBlockLevels; i++)
+ {
+ if(mAvailBlockList[i])
+ {
+ LLMemoryBlock* blk = mAvailBlockList[i] ;
+ p = blk->allocate() ;
+
+ if(blk->isFull())
+ {
+ //removeFromFreelist
+ popAvailBlockList(i) ;
+ }
+ break ;
+ }
+ }
+ }
+
+ return p ;
+}
+
+void LLPrivateMemoryPool::LLMemoryChunk::free(void* addr)
+{
+ LLMemoryBlock* blk = (LLMemoryBlock*)(mMetaBuffer + (((char*)addr - mDataBuffer) / mMinBlockSize) * sizeof(LLMemoryBlock)) ;
+ blk = blk->mSelf ;
+
+ bool was_full = blk->isFull() ;
+ blk->free(addr) ;
+
+ if(blk->empty())
+ {
+ removeBlock(blk) ;
+ }
+ else if(was_full)
+ {
+ addToAvailBlockList(blk) ;
+ }
+}
+
+LLPrivateMemoryPool::LLMemoryBlock* LLPrivateMemoryPool::LLMemoryChunk::addBlock(U32 blk_idx)
+{
+ U32 slot_size = mMinSlotSize * (blk_idx + 1) ;
+ U32 preferred_block_size = llmax(mMinBlockSize, slot_size * 32) ;
+ preferred_block_size = llmin(preferred_block_size, mMaxBlockSize) ;
+
+ U32 idx = preferred_block_size / mMinBlockSize ;
+ preferred_block_size = idx * mMinBlockSize ; //round to integer times of mMinBlockSize.
+
+ LLMemoryBlock* blk = NULL ;
+
+ if(mFreeSpaceList[idx])//if there is free slot for blk_idx
+ {
+ blk = createNewBlock(&mFreeSpaceList[idx], preferred_block_size, slot_size, blk_idx) ;
+ }
+ else if(mFreeSpaceList[mPartitionLevels - 1]) //search free pool
+ {
+ blk = createNewBlock(&mFreeSpaceList[mPartitionLevels - 1], preferred_block_size, slot_size, blk_idx) ;
+ }
+ else //search for other non-preferred but enough space slot.
+ {
+ for(S32 i = (S32)idx - 1 ; i >= 0 ; i--) //search the small slots first
+ {
+ if(mFreeSpaceList[i])
+ {
+ //create a NEW BLOCK THERE.
+ if(mFreeSpaceList[i]->getBufferSize() >= slot_size) //at least there is space for one slot.
+ {
+ blk = createNewBlock(&mFreeSpaceList[i], preferred_block_size, slot_size, blk_idx) ;
+ }
+ break ;
+ }
+ }
+
+ if(!blk)
+ {
+ for(U16 i = idx + 1 ; i < mPartitionLevels - 1; i++) //search the large slots
+ {
+ if(mFreeSpaceList[i])
+ {
+ //create a NEW BLOCK THERE.
+ blk = createNewBlock(&mFreeSpaceList[i], preferred_block_size, slot_size, blk_idx) ;
+ break ;
+ }
+ }
+ }
+ }
+
+ return blk ;
+}
+
+LLPrivateMemoryPool::LLMemoryBlock* LLPrivateMemoryPool::LLMemoryChunk::createNewBlock(LLMemoryBlock** cur_idxp, U32 buffer_size, U32 slot_size, U32 blk_idx)
+{
+ LLMemoryBlock* blk = *cur_idxp ;
+
+ buffer_size = llmin(buffer_size, blk->getBufferSize()) ;
+ U32 new_free_blk_size = blk->getBufferSize() - buffer_size ;
+ if(new_free_blk_size < mMinBlockSize) //can not partition the memory into size smaller than mMinBlockSize
+ {
+ buffer_size += new_free_blk_size ;
+ new_free_blk_size = 0 ;
+ }
+ blk->init(blk->getBuffer(), buffer_size, slot_size) ;
+
+ if(new_free_blk_size > 0) //cur_idx still has free space
+ {
+ LLMemoryBlock* next_blk = blk + (buffer_size / mMinBlockSize) ;
+ next_blk->setBuffer(blk->getBuffer() + buffer_size, new_free_blk_size) ;
+
+ if(new_free_blk_size > mMaxBlockSize) //stays in the free pool
+ {
+ next_blk->mPrev = NULL ;
+ next_blk->mNext = blk->mNext ;
+ if(next_blk->mNext)
+ {
+ next_blk->mNext->mPrev = next_blk ;
+ }
+ *cur_idxp = next_blk ;
+ }
+ else
+ {
+ *cur_idxp = blk->mNext ; //move to the next slot
+ if(*cur_idxp) (*cur_idxp)->mPrev = NULL ; //the slot list may now be empty
+
+ addToFreeSpace(next_blk) ;
+ }
+ }
+ else //move to the next block
+ {
+ *cur_idxp = blk->mNext ;
+ if(*cur_idxp) (*cur_idxp)->mPrev = NULL ; //the slot list may now be empty
+ }
+
+ //insert to the available block list...
+ blk->mNext = NULL ;
+ blk->mPrev = NULL ;
+ blk->mSelf = blk ;
+ mAvailBlockList[blk_idx] = blk ;
+
+ //mark the address map
+ U32 end = (buffer_size / mMinBlockSize) ;
+ for(U32 i = 1 ; i < end ; i++)
+ {
+ (blk + i)->mSelf = blk ;
+ }
+
+ return blk ;
+}
+
+void LLPrivateMemoryPool::LLMemoryChunk::removeBlock(LLMemoryBlock* blk)
+{
+ //remove from the available block list
+ if(blk->mPrev)
+ {
+ blk->mPrev->mNext = blk->mNext ;
+ }
+ if(blk->mNext)
+ {
+ blk->mNext->mPrev = blk->mPrev ;
+ }
+
+ //mark it free
+ blk->setBuffer(blk->getBuffer(), blk->getBufferSize()) ;
+
+ //merge blk with neighbors if possible
+ if(blk->getBuffer() > mDataBuffer) //has the left neighbor
+ {
+ if((blk - 1)->mSelf->isFree())
+ {
+ removeFromFreeSpace((blk - 1)->mSelf);
+ (blk - 1)->mSelf->setBuffer((blk-1)->mSelf->getBuffer(), (blk-1)->mSelf->getBufferSize() + blk->getBufferSize()) ;
+ blk = (blk - 1)->mSelf ;
+ }
+ }
+ if(blk->getBuffer() + blk->getBufferSize() < mBuffer + mBufferSize) //has the right neighbor
+ {
+ U32 d = blk->getBufferSize() / mMinBlockSize ;
+ if((blk + d)->isFree())
+ {
+ removeFromFreeSpace(blk + d) ;
+ blk->setBuffer(blk->getBuffer(), blk->getBufferSize() + (blk + d)->getBufferSize()) ;
+ }
+ }
+
+ addToFreeSpace(blk) ;
+
+ return ;
+}
+
+//the top block in the list is full, pop it out of the list
+void LLPrivateMemoryPool::LLMemoryChunk::popAvailBlockList(U32 blk_idx)
+{
+ if(mAvailBlockList[blk_idx])
+ {
+ LLMemoryBlock* next = mAvailBlockList[blk_idx]->mNext ;
+ if(next) next->mPrev = NULL ; //the list may become empty
+ mAvailBlockList[blk_idx]->mNext = NULL ;
+ mAvailBlockList[blk_idx] = next ;
+ }
+}
+
+void LLPrivateMemoryPool::LLMemoryChunk::addToFreeSpace(LLMemoryBlock* blk)
+{
+ U16 free_idx = blk->getBufferSize() / mMinBlockSize ;
+ (blk + free_idx)->mSelf = blk ; //mark the end pointing back to the head.
+ free_idx = llmin(free_idx, (U16)(mPartitionLevels - 1)) ;
+
+ blk->mNext = mFreeSpaceList[free_idx] ;
+ if(mFreeSpaceList[free_idx])
+ {
+ mFreeSpaceList[free_idx]->mPrev = blk ;
+ }
+ mFreeSpaceList[free_idx] = blk ;
+ blk->mPrev = NULL ;
+ blk->mSelf = blk ;
+
+ return ;
+}
+
+void LLPrivateMemoryPool::LLMemoryChunk::removeFromFreeSpace(LLMemoryBlock* blk)
+{
+ U16 free_idx = blk->getBufferSize() / mMinBlockSize ;
+ free_idx = llmin(free_idx, (U16)(mPartitionLevels - 1)) ;
+
+ if(mFreeSpaceList[free_idx] == blk)
+ {
+ mFreeSpaceList[free_idx] = blk->mNext ;
+ }
+ if(blk->mPrev)
+ {
+ blk->mPrev->mNext = blk->mNext ;
+ }
+ if(blk->mNext)
+ {
+ blk->mNext->mPrev = blk->mPrev ;
+ }
+
+ return ;
+}
+
+void LLPrivateMemoryPool::LLMemoryChunk::addToAvailBlockList(LLMemoryBlock* blk)
+{
+ U32 blk_idx = blk->getSlotSize() / mMinSlotSize ;
+
+ blk->mNext = mAvailBlockList[blk_idx] ;
+ if(blk->mNext)
+ {
+ blk->mNext->mPrev = blk ;
+ }
+ blk->mPrev = NULL ;
+
+ return ;
+}
+
+//-------------------------------------------------------------------
+//class LLPrivateMemoryPool
+//--------------------------------------------------------------------
+LLPrivateMemoryPool::LLPrivateMemoryPool(U32 max_size, bool threaded) :
+ mMutexp(NULL),
+ mMaxPoolSize(max_size),
+ mReservedPoolSize(0)
+{
+ if(threaded)
+ {
+ mMutexp = new LLMutex(NULL) ;
+ }
+
+ for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++)
+ {
+ mChunkList[i] = NULL ;
+ }
+
+ mChunkVectorCapacity = 128 ;
+ mChunks.resize(mChunkVectorCapacity) ; //at most 128 chunks
+ mNumOfChunks = 0 ;
+}
+
+LLPrivateMemoryPool::~LLPrivateMemoryPool()
+{
+ destroyPool();
+ delete mMutexp ;
+}
+
+char* LLPrivateMemoryPool::allocate(U32 size)
+{
+ const static U32 MAX_BLOCK_SIZE = 4 * 1024 * 1024 ; //4MB
+
+ //if the requested size is no smaller than MAX_BLOCK_SIZE, fetch from the heap directly; the pool does not manage it
+ if(size >= MAX_BLOCK_SIZE)
+ {
+ return new char[size] ;
+ }
+
+ char* p = NULL ;
+
+ //find the appropriate chunk
+ S32 chunk_idx = getChunkIndex(size) ;
+
+ lock() ;
+
+ LLMemoryChunk* chunk = mChunkList[chunk_idx];
+ while(chunk)
+ {
+ if((p = chunk->allocate(size)) != NULL)
+ {
+ break ;
+ }
+ chunk = chunk->mNext ;
+ }
+
+ //fetch new memory chunk
+ if(!p)
+ {
+ chunk = addChunk(chunk_idx) ;
+ if(chunk) //addChunk returns NULL when the heap is exhausted
+ {
+ p = chunk->allocate(size) ;
+ }
+ }
+
+ unlock() ;
+
+ return p ;
+}
+
+void LLPrivateMemoryPool::free(void* addr)
+{
+ lock() ;
+
+ LLMemoryChunk* chunk = mNumOfChunks ? mChunks[findChunk((char*)addr)] : NULL ;
+ if(chunk && ((char*)addr < chunk->getBuffer() || (char*)addr >= chunk->getBuffer() + chunk->getBufferSize()))
+ {
+ chunk = NULL ; //the address is not inside any chunk, so it came from the heap
+ }
+ if(!chunk)
+ {
+ delete[] (char*)addr ; //release from heap
+ }
+ else
+ {
+ chunk->free(addr) ;
+
+ if(chunk->empty())
+ {
+ removeChunk(chunk) ;
+ }
+ }
+
+ unlock() ;
+}
+
+void LLPrivateMemoryPool::dump()
+{
+}
+
+void LLPrivateMemoryPool::lock()
+{
+ if(mMutexp)
+ {
+ mMutexp->lock() ;
+ }
+}
+
+void LLPrivateMemoryPool::unlock()
+{
+ if(mMutexp)
+ {
+ mMutexp->unlock() ;
+ }
+}
+
+S32 LLPrivateMemoryPool::getChunkIndex(U32 size)
+{
+ if(size < 2048)
+ {
+ return 0 ;
+ }
+ else if(size < (512 << 10))
+ {
+ return 1 ;
+ }
+ else
+ {
+ return 2 ;
+ }
+}
+
+//destroy the entire pool
+void LLPrivateMemoryPool::destroyPool()
+{
+ for(U16 i = 0 ; i < mNumOfChunks ; i++)
+ {
+ delete[] mChunks[i]->getBuffer() ;
+ }
+ mNumOfChunks = 0 ;
+ for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++)
+ {
+ mChunkList[i] = NULL ;
+ }
+}
+
+LLPrivateMemoryPool::LLMemoryChunk* LLPrivateMemoryPool::addChunk(S32 chunk_index)
+{
+ static const U32 MIN_BLOCK_SIZES[SUPER_ALLOCATION] = {2 << 10, 32 << 10, 64 << 10} ;
+ static const U32 MAX_BLOCK_SIZES[SUPER_ALLOCATION] = {64 << 10, 1 << 20, 4 << 20} ;
+ static const U32 MIN_SLOT_SIZES[SUPER_ALLOCATION] = {8, 2 << 10, 512 << 10};
+ static const U32 MAX_SLOT_SIZES[SUPER_ALLOCATION] = {(2 << 10) - 8, (512 - 2) << 10, 4 << 20};
+
+ U32 preferred_size ;
+ U32 overhead ;
+ if(chunk_index < LARGE_ALLOCATION)
+ {
+ preferred_size = (4 << 20) ; //4MB
+ overhead = LLMemoryChunk::getMaxOverhead(preferred_size, MIN_BLOCK_SIZES[chunk_index]) ;
+ }
+ else
+ {
+ preferred_size = (16 << 20) ; //16MB
+ overhead = LLMemoryChunk::getMaxOverhead(preferred_size, MIN_BLOCK_SIZES[chunk_index]) ;
+ }
+ char* buffer = new(std::nothrow) char[preferred_size + overhead] ;
+ if(!buffer)
+ {
+ return NULL ;
+ }
+
+ LLMemoryChunk* chunk = new (buffer) LLMemoryChunk() ;
+ chunk->init(buffer, preferred_size + overhead, MIN_SLOT_SIZES[chunk_index],
+ MAX_SLOT_SIZES[chunk_index], MIN_BLOCK_SIZES[chunk_index], MAX_BLOCK_SIZES[chunk_index]) ;
+
+ //add to the head of the linked list
+ chunk->mNext = mChunkList[chunk_index] ;
+ if(mChunkList[chunk_index])
+ {
+ mChunkList[chunk_index]->mPrev = chunk ;
+ }
+ chunk->mPrev = NULL ;
+ mChunkList[chunk_index] = chunk ;
+
+ //insert into the array
+ llassert_always(mNumOfChunks + 1 < mChunkVectorCapacity) ;
+ if(!mNumOfChunks)
+ {
+ mChunks[0] = chunk ;
+ }
+ else
+ {
+ U16 k ;
+ if(mChunks[0]->getBuffer() > chunk->getBuffer())
+ {
+ k = 0 ;
+ }
+ else
+ {
+ k = findChunk(chunk->getBuffer()) + 1 ;
+ }
+ for(U16 i = mNumOfChunks ; i > k ; i--)
+ {
+ mChunks[i] = mChunks[i-1] ;
+ }
+ mChunks[k] = chunk ;
+ }
+ mNumOfChunks++;
+
+ return chunk ;
+}
+
+void LLPrivateMemoryPool::removeChunk(LLMemoryChunk* chunk)
+{
+ //remove from the linked list
+ if(chunk->mPrev)
+ {
+ chunk->mPrev->mNext = chunk->mNext ;
+ }
+ if(chunk->mNext)
+ {
+ chunk->mNext->mPrev = chunk->mPrev ;
+ }
+
+ //remove from the array
+ U16 k = findChunk(chunk->getBuffer()) ;
+ mNumOfChunks--;
+ for(U16 i = k ; i < mNumOfChunks ; i++)
+ {
+ mChunks[i] = mChunks[i+1] ;
+ }
+
+ //release memory
+ delete[] chunk->getBuffer() ;
+}
+
+U16 LLPrivateMemoryPool::findChunk(const char* addr)
+{
+ llassert_always(mNumOfChunks > 0) ;
+
+ U16 s = 0, e = mNumOfChunks;
+ U16 k = (s + e) / 2 ;
+ while(s < e)
+ {
+ if(mChunks[k]->mKey > (U32)addr)
+ {
+ e = k ;
+ }
+ else if(k < mNumOfChunks - 1 && mChunks[k+1]->mKey < (U32)addr)
+ {
+ s = k ;
+ }
+ else
+ {
+ break ;
+ }
+
+ k = (s + e) / 2 ;
+ }
+
+ return k ;
+}
+
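findChunk() is a hand-rolled binary search over mChunks, which addChunk() keeps sorted by start address (mKey). For comparison, the same lookup expressed with the standard library over a stand-in struct (illustrative only, not code from this patch):

    #include <vector>
    #include <algorithm>
    #include <cstdint>

    struct ChunkStub { uintptr_t mKey; };   // stand-in for LLMemoryChunk

    // index of the last chunk whose start address is <= addr (0 if addr precedes all chunks)
    size_t find_chunk_index(const std::vector<ChunkStub*>& chunks, uintptr_t addr)
    {
        auto it = std::upper_bound(chunks.begin(), chunks.end(), addr,
            [](uintptr_t a, const ChunkStub* c) { return a < c->mKey; });
        return (it == chunks.begin()) ? 0 : static_cast<size_t>(it - chunks.begin()) - 1;
    }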
+//--------------------------------------------------------------------
+//class LLPrivateMemoryPoolTester
+LLPrivateMemoryPoolTester* LLPrivateMemoryPoolTester::sInstance = NULL ;
+LLPrivateMemoryPool* LLPrivateMemoryPoolTester::sPool = NULL ;
+LLPrivateMemoryPoolTester::LLPrivateMemoryPoolTester()
+{
+}
+
+LLPrivateMemoryPoolTester::~LLPrivateMemoryPoolTester()
+{
+}
+
+//static
+LLPrivateMemoryPoolTester* LLPrivateMemoryPoolTester::getInstance()
+{
+ if(!sInstance)
+ {
+ sInstance = new LLPrivateMemoryPoolTester() ;
+ }
+ return sInstance ;
+}
+
+//static
+void LLPrivateMemoryPoolTester::destroy()
+{
+ if(sInstance)
+ {
+ delete sInstance ;
+ sInstance = NULL ;
+ }
+
+ if(sPool)
+ {
+ delete sPool ;
+ sPool = NULL ;
+ }
+}
+
+void LLPrivateMemoryPoolTester::run()
+{
+ const U32 max_pool_size = 16 << 20 ;
+ const bool threaded = false ;
+ if(!sPool)
+ {
+ sPool = new LLPrivateMemoryPool(max_pool_size, threaded) ;
+ }
+
+ //run the test
+ correctnessTest() ;
+ reliabilityTest() ;
+ performanceTest() ;
+ fragmentationtest() ;
+}
+
+void LLPrivateMemoryPoolTester::correctnessTest()
+{
+ //try many different-sized allocations and fill the memory fully to see if the allocations are correct.
+
+}
+
+void LLPrivateMemoryPoolTester::reliabilityTest()
+{
+ //not implemented yet.
+}
+
+void LLPrivateMemoryPoolTester::performanceTest()
+{
+ //not implemented yet.
+}
+
+void LLPrivateMemoryPoolTester::fragmentationtest()
+{
+ //not implemented yet.
+}
+
+void* LLPrivateMemoryPoolTester::operator new(size_t size)
+{
+ return (void*)sPool->allocate(size) ;
+}
+
+void LLPrivateMemoryPoolTester::operator delete(void* addr)
+{
+ sPool->free(addr) ;
+}
+
+//--------------------------------------------------------------------
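For reference, client-side usage of the pool as its interface is meant to be called; the pool size, threading flag, and request sizes below are arbitrary examples, not values from this patch:

    #include "llmemory.h"

    void pool_usage_example()
    {
        LLPrivateMemoryPool pool(16 << 20, /*threaded*/ false);

        char* small_buf  = pool.allocate(100);        // small class: 2KB-page chunk
        char* medium_buf = pool.allocate(100 << 10);  // medium class: 32KB-page chunk
        // ... use the buffers ...
        pool.free(medium_buf);
        pool.free(small_buf);
    }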
diff --git a/indra/llcommon/llmemory.h b/indra/llcommon/llmemory.h
index 9bf4248bb7..d9e93d0e96 100644
--- a/indra/llcommon/llmemory.h
+++ b/indra/llcommon/llmemory.h
@@ -26,8 +26,6 @@
#ifndef LLMEMORY_H
#define LLMEMORY_H
-
-
extern S32 gTotalDAlloc;
extern S32 gTotalDAUse;
extern S32 gDACount;
@@ -44,8 +42,180 @@ public:
// Return the resident set size of the current process, in bytes.
// Return value is zero if not known.
static U64 getCurrentRSS();
+
+ static void* tryToAlloc(void* address, U32 size);
+ static void initMaxHeapSizeGB(F32 max_heap_size_gb, BOOL prevent_heap_failure);
+ static void updateMemoryInfo() ;
+ static void logMemoryInfo(BOOL update = FALSE);
+ static S32 isMemoryPoolLow();
+
+ static U32 getAvailableMemKB() ;
+ static U32 getMaxMemKB() ;
+ static U32 getAllocatedMemKB() ;
private:
static char* reserveMem;
+ static U32 sAvailPhysicalMemInKB ;
+ static U32 sMaxPhysicalMemInKB ;
+ static U32 sAllocatedMemInKB;
+ static U32 sAllocatedPageSizeInKB ;
+
+ static U32 sMaxHeapSizeInKB;
+ static BOOL sEnableMemoryFailurePrevention;
+};
+
+class LL_COMMON_API LLPrivateMemoryPool
+{
+public:
+ class LL_COMMON_API LLMemoryBlock //each block is divided into slots uniformly
+ {
+ public:
+ LLMemoryBlock() ;
+ ~LLMemoryBlock() ;
+
+ void init(char* buffer, U32 buffer_size, U32 slot_size) ;
+ void setBuffer(char* buffer, U32 buffer_size) ;
+
+ char* allocate() ;
+ void free(void* addr) ;
+
+ bool empty() {return !mAllocatedSlots;}
+ bool isFull() {return mAllocatedSlots == mTotalSlots;}
+ bool isFree() {return !mTotalSlots;}
+
+ U32 getSlotSize()const {return mSlotSize;}
+ U32 getTotalSlots()const {return mTotalSlots;}
+ U32 getBufferSize()const {return mBufferSize;}
+ char* getBuffer() const {return mBuffer;}
+
+ private:
+ char* mBuffer;
+ U32 mSlotSize ; //when the block is not initialized, it is the buffer size.
+ U32 mBufferSize ;
+ U32 mUsageBits ;
+ U8 mTotalSlots ;
+ U8 mAllocatedSlots ;
+ U8 mDummySize ; //size of extra U32 reserved for mUsageBits.
+
+ public:
+ LLMemoryBlock* mPrev ;
+ LLMemoryBlock* mNext ;
+ LLMemoryBlock* mSelf ;
+ };
+
+ class LL_COMMON_API LLMemoryChunk //is divided into memory blocks.
+ {
+ public:
+ LLMemoryChunk() ;
+ ~LLMemoryChunk() ;
+
+ void init(char* buffer, U32 buffer_size, U32 min_slot_size, U32 max_slot_size, U32 min_block_size, U32 max_block_size) ;
+ void setBuffer(char* buffer, U32 buffer_size) ;
+
+ bool empty() ;
+
+ char* allocate(U32 size) ;
+ void free(void* addr) ;
+
+ const char* getBuffer() const {return mBuffer;}
+ U32 getBufferSize() const {return mBufferSize;}
+
+ static U32 getMaxOverhead(U32 data_buffer_size, U32 min_page_size) ;
+
+ private:
+ LLMemoryBlock* addBlock(U32 blk_idx) ;
+ void popAvailBlockList(U32 blk_idx) ;
+ void addToFreeSpace(LLMemoryBlock* blk) ;
+ void removeFromFreeSpace(LLMemoryBlock* blk) ;
+ void removeBlock(LLMemoryBlock* blk) ;
+ void addToAvailBlockList(LLMemoryBlock* blk) ;
+ LLMemoryBlock* createNewBlock(LLMemoryBlock** cur_idxp, U32 buffer_size, U32 slot_size, U32 blk_idx) ;
+
+ private:
+ LLMemoryBlock** mAvailBlockList ;//256 by mMinSlotSize
+ LLMemoryBlock** mFreeSpaceList;
+ LLMemoryBlock* mBlocks ; //index of blocks by address.
+
+ char* mBuffer ;
+ U32 mBufferSize ;
+ char* mDataBuffer ;
+ char* mMetaBuffer ;
+ U32 mMinBlockSize ;
+ U32 mMaxBlockSize;
+ U32 mMinSlotSize ;
+ U16 mBlockLevels;
+ U16 mPartitionLevels;
+
+ public:
+ //form a linked list
+ LLMemoryChunk* mNext ;
+ LLMemoryChunk* mPrev ;
+
+ U32 mKey ; //= mBuffer
+ } ;
+
+public:
+ LLPrivateMemoryPool(U32 max_size, bool threaded) ;
+ ~LLPrivateMemoryPool() ;
+
+ char *allocate(U32 size) ;
+ void free(void* addr) ;
+ void dump() ;
+
+private:
+ void lock() ;
+ void unlock() ;
+ S32 getChunkIndex(U32 size) ;
+ LLMemoryChunk* addChunk(S32 chunk_index) ;
+ void removeChunk(LLMemoryChunk* chunk) ;
+ U16 findChunk(const char* addr) ;
+ void destroyPool() ;
+
+private:
+ LLMutex* mMutexp ;
+ U32 mMaxPoolSize;
+ U32 mReservedPoolSize ;
+
+ enum
+ {
+ SMALL_ALLOCATION = 0, //from 8 bytes to 2KB(exclusive), page size 2KB, max chunk size is 4MB.
+ MEDIUM_ALLOCATION, //from 2KB to 512KB(exclusive), page size 32KB, max chunk size 4MB
+ LARGE_ALLOCATION, //from 512KB to 4MB(inclusive), page size 64KB, max chunk size 16MB
+ SUPER_ALLOCATION //allocation larger than 4MB.
+ };
+
+ LLMemoryChunk* mChunkList[SUPER_ALLOCATION] ; //all memory chunks reserved by this pool, sorted by address
+ std::vector<LLMemoryChunk*> mChunks ;
+ U16 mNumOfChunks ;
+ U16 mChunkVectorCapacity ;
+};
+
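The allocation classes in the enum above correspond to the size boundaries that getChunkIndex() applies in llmemory.cpp. A small illustrative check of those boundaries (the helper and the assertions are examples, not code from this patch):

    #include <cassert>

    int chunk_class(unsigned size)                  // 0 = small, 1 = medium, 2 = large
    {
        if (size < (2u << 10))        return 0;     // 8B  .. <2KB,   2KB pages
        else if (size < (512u << 10)) return 1;     // 2KB .. <512KB, 32KB pages
        else                          return 2;     // 512KB .. 4MB,  64KB pages
    }

    void check_class_boundaries()
    {
        assert(chunk_class(8) == 0);
        assert(chunk_class(2 << 10) == 1);
        assert(chunk_class(512 << 10) == 2);
        // requests of 4MB or more bypass the pool and go straight to the heap
    }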
+//
+//the below singleton is used to test the private memory pool.
+//
+class LLPrivateMemoryPoolTester
+{
+private:
+ LLPrivateMemoryPoolTester() ;
+ ~LLPrivateMemoryPoolTester() ;
+
+public:
+ static LLPrivateMemoryPoolTester* getInstance() ;
+ static void destroy() ;
+
+ void run() ;
+
+private:
+ void correctnessTest() ;
+ void reliabilityTest() ;
+ void performanceTest() ;
+ void fragmentationtest() ;
+
+ void* operator new(size_t);
+ void operator delete(void*);
+
+private:
+ static LLPrivateMemoryPoolTester* sInstance;
+ static LLPrivateMemoryPool* sPool ;
};
// LLRefCount moved to llrefcount.h
diff --git a/indra/llcommon/llsys.cpp b/indra/llcommon/llsys.cpp
index 10cdc7087b..7968e53c13 100644
--- a/indra/llcommon/llsys.cpp
+++ b/indra/llcommon/llsys.cpp
@@ -636,22 +636,20 @@ U32 LLMemoryInfo::getPhysicalMemoryClamped() const
}
//static
-void LLMemoryInfo::getAvailableMemoryKB(U32& avail_physical_mem_kb, U32& avail_virtual_mem_kb)
+U32 LLMemoryInfo::getAvailableMemoryKB()
{
#if LL_WINDOWS
MEMORYSTATUSEX state;
state.dwLength = sizeof(state);
GlobalMemoryStatusEx(&state);
- avail_physical_mem_kb = (U32)(state.ullAvailPhys/1024) ;
- avail_virtual_mem_kb = (U32)(state.ullAvailVirtual/1024) ;
+ return (U32)(state.ullAvailPhys/1024) ;
#else
//do not know how to collect available memory info for other systems.
//leave it blank here for now.
- avail_physical_mem_kb = -1 ;
- avail_virtual_mem_kb = -1 ;
+ return -1;
#endif
}
diff --git a/indra/llcommon/llsys.h b/indra/llcommon/llsys.h
index 41a4f25000..580eee4e8d 100644
--- a/indra/llcommon/llsys.h
+++ b/indra/llcommon/llsys.h
@@ -116,7 +116,7 @@ public:
U32 getPhysicalMemoryClamped() const; ///< Memory size in clamped bytes
//get the available memory infomation in KiloBytes.
- static void getAvailableMemoryKB(U32& avail_physical_mem_kb, U32& avail_virtual_mem_kb);
+ static U32 getAvailableMemoryKB();
};
diff --git a/indra/llrender/llvertexbuffer.cpp b/indra/llrender/llvertexbuffer.cpp
index 02160b09c4..19269c2a31 100644
--- a/indra/llrender/llvertexbuffer.cpp
+++ b/indra/llrender/llvertexbuffer.cpp
@@ -33,6 +33,7 @@
#include "llglheaders.h"
#include "llmemtype.h"
#include "llrender.h"
+#include "llmemory.h"
//============================================================================
@@ -857,11 +858,8 @@ U8* LLVertexBuffer::mapBuffer(S32 access)
log_glerror();
//check the availability of memory
- U32 avail_phy_mem, avail_vir_mem;
- LLMemoryInfo::getAvailableMemoryKB(avail_phy_mem, avail_vir_mem) ;
- llinfos << "Available physical mwmory(KB): " << avail_phy_mem << llendl ;
- llinfos << "Available virtual memory(KB): " << avail_vir_mem << llendl;
-
+ LLMemory::logMemoryInfo(TRUE) ;
+
//--------------------
//print out more debug info before crash
llinfos << "vertex buffer size: (num verts : num indices) = " << getNumVerts() << " : " << getNumIndices() << llendl ;
@@ -884,6 +882,7 @@ U8* LLVertexBuffer::mapBuffer(S32 access)
if (!mMappedIndexData)
{
log_glerror();
+ LLMemory::logMemoryInfo(TRUE) ;
GLint buff;
glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB, &buff);
diff --git a/indra/llxml/llcontrol.h b/indra/llxml/llcontrol.h
index 93975579cc..70749b8ee9 100644
--- a/indra/llxml/llcontrol.h
+++ b/indra/llxml/llcontrol.h
@@ -385,7 +385,8 @@ class LLCachedControl
{
public:
LLCachedControl(LLControlGroup& group,
- const std::string& name,
+ const std::string& name,
+
const T& default_value,
const std::string& comment = "Declared In Code")
{
diff --git a/indra/newview/app_settings/settings.xml b/indra/newview/app_settings/settings.xml
index 535bc95287..905c683f69 100644
--- a/indra/newview/app_settings/settings.xml
+++ b/indra/newview/app_settings/settings.xml
@@ -5153,6 +5153,17 @@
<key>Value</key>
<real>48.0</real>
</map>
+ <key>MaxHeapSize</key>
+ <map>
+ <key>Comment</key>
+ <string>Maximum heap size (GB)</string>
+ <key>Persist</key>
+ <integer>1</integer>
+ <key>Type</key>
+ <string>F32</string>
+ <key>Value</key>
+ <real>1.6</real>
+ </map>
<key>MaxSelectDistance</key>
<map>
<key>Comment</key>
@@ -5329,6 +5340,17 @@
<key>Value</key>
<integer>1</integer>
</map>
+ <key>MemeoyFailurePreventionEnabled</key>
+ <map>
+ <key>Comment</key>
+ <string>If set, the viewer will quit to avoid a crash when a memory failure happens</string>
+ <key>Persist</key>
+ <integer>0</integer>
+ <key>Type</key>
+ <string>Boolean</string>
+ <key>Value</key>
+ <integer>1</integer>
+ </map>
<key>MemoryLogFrequency</key>
<map>
<key>Comment</key>
diff --git a/indra/newview/llappviewer.cpp b/indra/newview/llappviewer.cpp
index 3a98749c0f..84e36ac3c7 100644
--- a/indra/newview/llappviewer.cpp
+++ b/indra/newview/llappviewer.cpp
@@ -604,7 +604,7 @@ LLAppViewer::~LLAppViewer()
}
bool LLAppViewer::init()
-{
+{
//
// Start of the application
//
@@ -632,6 +632,9 @@ bool LLAppViewer::init()
if (!initConfiguration())
return false;
+ //set the max heap size.
+ initMaxHeapSize() ;
+
// write Google Breakpad minidump files to our log directory
std::string logdir = gDirUtilp->getExpandedFilename(LL_PATH_LOGS, "");
logdir += gDirUtilp->getDirDelimiter();
@@ -949,6 +952,96 @@ bool LLAppViewer::init()
return true;
}
+void LLAppViewer::initMaxHeapSize()
+{
+ //set the max heap size.
+ //here is some info regarding the max heap size:
+ //------------------------------------------------------------------------------------------
+ // OS | setting | SL address bits | max manageable memory space | max heap size
+ // Win 32 | default | 32-bit | 2GB | < 1.7GB
+ // Win 32 | /3G | 32-bit | 3GB | < 1.7GB or 2.7GB
+ //Linux 32 | default | 32-bit | 3GB | < 2.7GB
+ //Linux 32 |HUGEMEM | 32-bit | 4GB | < 3.7GB
+ //64-bit OS |default | 32-bit | 4GB | < 3.7GB
+ //64-bit OS |default | 64-bit | N/A (> 4GB) | N/A (> 4GB)
+ //------------------------------------------------------------------------------------------
+ //currently SL is built as a 32-bit application, so we cap its max heap size at no more than 1.6 GB.
+
+ //F32 max_heap_size_gb = llmin(1.6f, (F32)gSavedSettings.getF32("MaxHeapSize")) ;
+ F32 max_heap_size_gb = gSavedSettings.getF32("MaxHeapSize") ;
+ BOOL enable_mem_failure_prevention = (BOOL)gSavedSettings.getBOOL("MemeoyFailurePreventionEnabled") ;
+
+ LLMemory::initMaxHeapSizeGB(max_heap_size_gb, enable_mem_failure_prevention) ;
+}
+
+void LLAppViewer::checkMemory()
+{
+ const static F32 MEMORY_CHECK_INTERVAL = 1.0f ; //second
+ const static F32 MAX_QUIT_WAIT_TIME = 30.0f ; //seconds
+ const static U32 MAX_SIZE_CHECKED_MEMORY_BLOCK = 64 * 1024 * 1024 ; //64 MB
+ //static F32 force_quit_timer = MAX_QUIT_WAIT_TIME + MEMORY_CHECK_INTERVAL ;
+ static void* last_reserved_address = NULL ;
+
+ if(MEMORY_CHECK_INTERVAL > mMemCheckTimer.getElapsedTimeF32())
+ {
+ return ;
+ }
+ mMemCheckTimer.reset() ;
+
+ if(gGLManager.mDebugGPU)
+ {
+ //update the availability of memory
+ LLMemory::updateMemoryInfo() ;
+ }
+
+ //check the virtual address space fragmentation
+ if(!last_reserved_address)
+ {
+ last_reserved_address = LLMemory::tryToAlloc(last_reserved_address, MAX_SIZE_CHECKED_MEMORY_BLOCK) ;
+ }
+ else
+ {
+ last_reserved_address = LLMemory::tryToAlloc(last_reserved_address, MAX_SIZE_CHECKED_MEMORY_BLOCK) ;
+ if(!last_reserved_address) //failed, try once more
+ {
+ last_reserved_address = LLMemory::tryToAlloc(last_reserved_address, MAX_SIZE_CHECKED_MEMORY_BLOCK) ;
+ }
+ }
+
+ S32 is_low = !last_reserved_address || LLMemory::isMemoryPoolLow() ;
+
+ //if(is_low < 0) //to force quit
+ //{
+ // if(force_quit_timer > MAX_QUIT_WAIT_TIME) //just hit the limit for the first time
+ // {
+ // //send out the notification to tell the viewer is about to quit in 30 seconds.
+ // LLNotification::Params params("ForceQuitDueToLowMemory");
+ // LLNotifications::instance().add(params);
+
+ // force_quit_timer = MAX_QUIT_WAIT_TIME - MEMORY_CHECK_INTERVAL ;
+ // }
+ // else
+ // {
+ // force_quit_timer -= MEMORY_CHECK_INTERVAL ;
+ // if(force_quit_timer < 0.f)
+ // {
+ // forceQuit() ; //quit
+ // }
+ // }
+ //}
+ //else
+ //{
+ // force_quit_timer = MAX_QUIT_WAIT_TIME + MEMORY_CHECK_INTERVAL ;
+ //}
+
+ LLPipeline::throttleNewMemoryAllocation(!is_low ? FALSE : TRUE) ;
+
+ if(is_low)
+ {
+ LLMemory::logMemoryInfo() ;
+ }
+}
+
static LLFastTimer::DeclareTimer FTM_MESSAGES("System Messages");
static LLFastTimer::DeclareTimer FTM_SLEEP("Sleep");
static LLFastTimer::DeclareTimer FTM_TEXTURE_CACHE("Texture Cache");
@@ -983,8 +1076,7 @@ bool LLAppViewer::mainLoop()
LLVoiceChannel::initClass();
LLVoiceClient::getInstance()->init(gServicePump);
LLTimer frameTimer,idleTimer;
- LLTimer debugTime;
- LLFrameTimer memCheckTimer;
+ LLTimer debugTime;
LLViewerJoystick* joystick(LLViewerJoystick::getInstance());
joystick->setNeedsReset(true);
@@ -993,9 +1085,7 @@ bool LLAppViewer::mainLoop()
// with each frame, no need to instantiate a new LLSD event object each
// time. Obviously, if that changes, just instantiate the LLSD at the
// point of posting.
- LLSD newFrame;
-
- const F32 memory_check_interval = 1.0f ; //second
+ LLSD newFrame;
// Handle messages
while (!LLApp::isExiting())
@@ -1006,18 +1096,8 @@ bool LLAppViewer::mainLoop()
llclearcallstacks;
//check memory availability information
- {
- if(memory_check_interval < memCheckTimer.getElapsedTimeF32())
- {
- memCheckTimer.reset() ;
-
- //update the availability of memory
- LLMemoryInfo::getAvailableMemoryKB(mAvailPhysicalMemInKB, mAvailVirtualMemInKB) ;
- }
- llcallstacks << "Available physical mem(KB): " << mAvailPhysicalMemInKB << llcallstacksendl ;
- llcallstacks << "Available virtual mem(KB): " << mAvailVirtualMemInKB << llcallstacksendl ;
- }
-
+ checkMemory() ;
+
try
{
pingMainloopTimeout("Main:MiscNativeWindowEvents");
@@ -1181,7 +1261,7 @@ bool LLAppViewer::mainLoop()
idleTimer.reset();
bool is_slow = (frameTimer.getElapsedTimeF64() > FRAME_SLOW_THRESHOLD) ;
S32 total_work_pending = 0;
- S32 total_io_pending = 0;
+ S32 total_io_pending = 0;
while(!is_slow)//do not unpause threads if the frame rates are very low.
{
S32 work_pending = 0;
@@ -1248,15 +1328,7 @@ bool LLAppViewer::mainLoop()
}
catch(std::bad_alloc)
{
- {
- llinfos << "Availabe physical memory(KB) at the beginning of the frame: " << mAvailPhysicalMemInKB << llendl ;
- llinfos << "Availabe virtual memory(KB) at the beginning of the frame: " << mAvailVirtualMemInKB << llendl ;
-
- LLMemoryInfo::getAvailableMemoryKB(mAvailPhysicalMemInKB, mAvailVirtualMemInKB) ;
-
- llinfos << "Current availabe physical memory(KB): " << mAvailPhysicalMemInKB << llendl ;
- llinfos << "Current availabe virtual memory(KB): " << mAvailVirtualMemInKB << llendl ;
- }
+ LLMemory::logMemoryInfo(TRUE) ;
//stop memory leaking simulation
LLFloaterMemLeak* mem_leak_instance =
diff --git a/indra/newview/llappviewer.h b/indra/newview/llappviewer.h
index a70a727c5d..7761a10f1c 100644
--- a/indra/newview/llappviewer.h
+++ b/indra/newview/llappviewer.h
@@ -166,8 +166,8 @@ public:
// mute/unmute the system's master audio
virtual void setMasterSystemAudioMute(bool mute);
- virtual bool getMasterSystemAudioMute();
-
+ virtual bool getMasterSystemAudioMute();
+
protected:
virtual bool initWindow(); // Initialize the viewer's window.
virtual bool initLogging(); // Initialize log files, logging system, return false on failure.
@@ -184,11 +184,12 @@ protected:
private:
+ void initMaxHeapSize();
bool initThreads(); // Initialize viewer threads, return false on failure.
bool initConfiguration(); // Initialize settings from the command line/config file.
bool initCache(); // Initialize local client cache.
-
+ void checkMemory() ;
// We have switched locations of both Mac and Windows cache, make sure
// files migrate and old cache is cleared out.
@@ -258,8 +259,7 @@ private:
std::set<struct apr_dso_handle_t*> mPlugins;
- U32 mAvailPhysicalMemInKB ;
- U32 mAvailVirtualMemInKB ;
+ LLFrameTimer mMemCheckTimer;
public:
//some information for updater
typedef struct
diff --git a/indra/newview/lldynamictexture.cpp b/indra/newview/lldynamictexture.cpp
index a3d2941114..58eef45935 100644
--- a/indra/newview/lldynamictexture.cpp
+++ b/indra/newview/lldynamictexture.cpp
@@ -40,6 +40,7 @@
#include "llvertexbuffer.h"
#include "llviewerdisplay.h"
#include "llrender.h"
+#include "pipeline.h"
// static
LLViewerDynamicTexture::instance_list_t LLViewerDynamicTexture::sInstances[ LLViewerDynamicTexture::ORDER_COUNT ];
@@ -205,7 +206,7 @@ void LLViewerDynamicTexture::postRender(BOOL success)
BOOL LLViewerDynamicTexture::updateAllInstances()
{
sNumRenders = 0;
- if (gGLManager.mIsDisabled)
+ if (gGLManager.mIsDisabled || LLPipeline::sMemAllocationThrottled)
{
return TRUE;
}
@@ -221,9 +222,8 @@ BOOL LLViewerDynamicTexture::updateAllInstances()
if (dynamicTexture->needsRender())
{
if(gGLManager.mDebugGPU)
- {
+ {
llinfos << "class type: " << (S32)dynamicTexture->getType() << llendl;
- LLGLState::dumpStates() ;
}
glClear(GL_DEPTH_BUFFER_BIT);
diff --git a/indra/newview/llfloatermemleak.cpp b/indra/newview/llfloatermemleak.cpp
index 58931d112e..9edfe1e354 100644
--- a/indra/newview/llfloatermemleak.cpp
+++ b/indra/newview/llfloatermemleak.cpp
@@ -90,6 +90,11 @@ LLFloaterMemLeak::~LLFloaterMemLeak()
void LLFloaterMemLeak::release()
{
+ if(mLeakedMem.empty())
+ {
+ return ;
+ }
+
for(S32 i = 0 ; i < (S32)mLeakedMem.size() ; i++)
{
delete[] mLeakedMem[i] ;
diff --git a/indra/newview/llviewerdisplay.cpp b/indra/newview/llviewerdisplay.cpp
index 7c8b52d0b6..e7153f7ffc 100644
--- a/indra/newview/llviewerdisplay.cpp
+++ b/indra/newview/llviewerdisplay.cpp
@@ -202,6 +202,7 @@ void display_stats()
gMemoryAllocated = LLMemory::getCurrentRSS();
U32 memory = (U32)(gMemoryAllocated / (1024*1024));
llinfos << llformat("MEMORY: %d MB", memory) << llendl;
+ LLMemory::logMemoryInfo() ;
gRecentMemoryTime.reset();
}
}
@@ -672,7 +673,11 @@ void display(BOOL rebuild, F32 zoom_factor, int subfield, BOOL for_snapshot)
glh::matrix4f mod = glh_get_current_modelview();
glViewport(0,0,512,512);
LLVOAvatar::updateFreezeCounter() ;
- LLVOAvatar::updateImpostors();
+
+ if(!LLPipeline::sMemAllocationThrottled)
+ {
+ LLVOAvatar::updateImpostors();
+ }
glh_set_current_projection(proj);
glh_set_current_modelview(mod);
diff --git a/indra/newview/llviewertexture.cpp b/indra/newview/llviewertexture.cpp
index f96b93da4d..260023a802 100644
--- a/indra/newview/llviewertexture.cpp
+++ b/indra/newview/llviewertexture.cpp
@@ -3068,9 +3068,16 @@ void LLViewerLODTexture::processTextureStats()
{
mDesiredDiscardLevel = llmin(mDesiredDiscardLevel, (S8)mDesiredSavedRawDiscardLevel) ;
}
+ else if(LLPipeline::sMemAllocationThrottled)//release memory from large textures by decreasing their resolutions.
+ {
+ if(scaleDown())
+ {
+ mDesiredDiscardLevel = mCachedRawDiscardLevel ;
+ }
+ }
}
-void LLViewerLODTexture::scaleDown()
+bool LLViewerLODTexture::scaleDown()
{
if(hasGLTexture() && mCachedRawDiscardLevel > getDiscardLevel())
{
@@ -3080,7 +3087,10 @@ void LLViewerLODTexture::scaleDown()
{
LLViewerTextureManager::sTesterp->setStablizingTime() ;
}
+
+ return true ;
}
+ return false ;
}
//----------------------------------------------------------------------------------------------
//end of LLViewerLODTexture
diff --git a/indra/newview/llviewertexture.h b/indra/newview/llviewertexture.h
index b779396293..ffcbba3efd 100644
--- a/indra/newview/llviewertexture.h
+++ b/indra/newview/llviewertexture.h
@@ -595,7 +595,7 @@ public:
private:
void init(bool firstinit) ;
- void scaleDown() ;
+ bool scaleDown() ;
private:
F32 mDiscardVirtualSize; // Virtual size used to calculate desired discard
diff --git a/indra/newview/llviewerwindow.cpp b/indra/newview/llviewerwindow.cpp
index 64698ed006..943b5b5886 100644
--- a/indra/newview/llviewerwindow.cpp
+++ b/indra/newview/llviewerwindow.cpp
@@ -3874,6 +3874,19 @@ BOOL LLViewerWindow::rawSnapshot(LLImageRaw *raw, S32 image_width, S32 image_hei
{
return FALSE;
}
+ //check if there is enough memory for the snapshot image
+ if(LLPipeline::sMemAllocationThrottled)
+ {
+ return FALSE ; //snapshot taking is disabled due to memory restriction.
+ }
+ if(image_width * image_height > (1 << 22)) //if snapshot image is larger than 2K by 2K
+ {
+ if(!LLMemory::tryToAlloc(NULL, image_width * image_height * 3))
+ {
+ llwarns << "No enough memory to take the snapshot with size (w : h): " << image_width << " : " << image_height << llendl ;
+ return FALSE ; //there is no enough memory for taking this snapshot.
+ }
+ }
// PRE SNAPSHOT
gDisplaySwapBuffers = FALSE;
diff --git a/indra/newview/pipeline.cpp b/indra/newview/pipeline.cpp
index b4a5777f10..7c69af17f2 100644
--- a/indra/newview/pipeline.cpp
+++ b/indra/newview/pipeline.cpp
@@ -98,6 +98,7 @@
#include "llspatialpartition.h"
#include "llmutelist.h"
#include "lltoolpie.h"
+#include "llnotifications.h"
#ifdef _DEBUG
@@ -281,6 +282,7 @@ BOOL LLPipeline::sRenderAttachedLights = TRUE;
BOOL LLPipeline::sRenderAttachedParticles = TRUE;
BOOL LLPipeline::sRenderDeferred = FALSE;
BOOL LLPipeline::sAllowRebuildPriorityGroup = FALSE ;
+BOOL LLPipeline::sMemAllocationThrottled = FALSE;
S32 LLPipeline::sVisibleLightCount = 0;
F32 LLPipeline::sMinRenderSize = 0.f;
@@ -513,6 +515,24 @@ void LLPipeline::destroyGL()
static LLFastTimer::DeclareTimer FTM_RESIZE_SCREEN_TEXTURE("Resize Screen Texture");
+//static
+void LLPipeline::throttleNewMemoryAllocation(BOOL disable)
+{
+ if(sMemAllocationThrottled != disable)
+ {
+ sMemAllocationThrottled = disable ;
+
+ if(sMemAllocationThrottled)
+ {
+ //send out notification
+ LLNotification::Params params("LowMemory");
+ LLNotifications::instance().add(params);
+
+ //release some memory.
+ }
+ }
+}
+
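The throttle flag set here is consumed by the rendering paths changed later in this patch (impostor updates, dynamic textures, snapshots). Their common pattern, sketched in isolation with an illustrative function name:

    #include "pipeline.h"

    void allocation_heavy_step_example()
    {
        if (LLPipeline::sMemAllocationThrottled)
        {
            return;   // memory is low: skip work that would allocate large buffers
        }
        // ... normal path that may allocate textures, render targets, etc. ...
    }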
void LLPipeline::resizeScreenTexture()
{
LLFastTimer ft(FTM_RESIZE_SCREEN_TEXTURE);
@@ -8792,7 +8812,7 @@ void LLPipeline::renderGroups(LLRenderPass* pass, U32 type, U32 mask, BOOL textu
void LLPipeline::generateImpostor(LLVOAvatar* avatar)
{
- LLMemType mt_gi(LLMemType::MTYPE_PIPELINE_GENERATE_IMPOSTOR);
+ LLMemType mt_gi(LLMemType::MTYPE_PIPELINE_GENERATE_IMPOSTOR);
LLGLState::checkStates();
LLGLState::checkTextureChannels();
LLGLState::checkClientArrays();
diff --git a/indra/newview/pipeline.h b/indra/newview/pipeline.h
index b80765dac6..f4a7dfd38d 100644
--- a/indra/newview/pipeline.h
+++ b/indra/newview/pipeline.h
@@ -334,6 +334,8 @@ public:
static void updateRenderDeferred();
+ static void throttleNewMemoryAllocation(BOOL disable);
+
private:
void unloadShaders();
void addToQuickLookup( LLDrawPool* new_poolp );
@@ -477,8 +479,9 @@ public:
static BOOL sRenderAttachedParticles;
static BOOL sRenderDeferred;
static BOOL sAllowRebuildPriorityGroup;
+ static BOOL sMemAllocationThrottled;
static S32 sVisibleLightCount;
- static F32 sMinRenderSize;
+ static F32 sMinRenderSize;
//screen texture
U32 mScreenWidth;
diff --git a/indra/newview/skins/default/xui/en/notifications.xml b/indra/newview/skins/default/xui/en/notifications.xml
index dfbb408d96..5183788c98 100644
--- a/indra/newview/skins/default/xui/en/notifications.xml
+++ b/indra/newview/skins/default/xui/en/notifications.xml
@@ -6451,6 +6451,20 @@ Mute everyone?
</notification>
<notification
+ icon="alertmodal.tga"
+ name="LowMemory"
+ type="alertmodal">
+ Your memory pool is low. Some functions of SL are disabled to avoid a crash. Please close other applications. Restart SL if this persists.
+ </notification>
+
+ <notification
+ icon="alertmodal.tga"
+ name="ForceQuitDueToLowMemory"
+ type="alertmodal">
+ SL will quit in 30 seconds because it is out of memory.
+ </notification>
+
+ <notification
name="PopupAttempt"
icon="Popup_Caution"
type="browser">
@@ -6504,4 +6518,4 @@ Otherwise, you can look at the Map and find places marked &quot;Infohub&quot;.
You died and have been teleported to your home location.
</global>
-</notifications>
+</notifications>
\ No newline at end of file