author    Oz Linden <oz@lindenlab.com>  2011-10-11 10:58:02 -0400
committer Oz Linden <oz@lindenlab.com>  2011-10-11 10:58:02 -0400
commit    852349218e5cfdd48a1a64c672e9663aab716031 (patch)
tree      fb7fd8f2947639a6a445aed0c72b1cc12ffb6296 /indra/llcommon/llmemory.cpp
parent    baf802e25b1006783a69625e90022db087aabbfb (diff)
parent    616a7b549d21624e6667218efe29c1e552f9b375 (diff)
merge changes for vwr-26864
Diffstat (limited to 'indra/llcommon/llmemory.cpp')
-rw-r--r--  indra/llcommon/llmemory.cpp | 95
1 file changed, 73 insertions(+), 22 deletions(-)
diff --git a/indra/llcommon/llmemory.cpp b/indra/llcommon/llmemory.cpp
index 8c02ad8290..3b27a1639a 100644
--- a/indra/llcommon/llmemory.cpp
+++ b/indra/llcommon/llmemory.cpp
@@ -165,33 +165,60 @@ void LLMemory::logMemoryInfo(BOOL update)
llinfos << "Current allocated page size (KB): " << sAllocatedPageSizeInKB << llendl ;
llinfos << "Current availabe physical memory(KB): " << sAvailPhysicalMemInKB << llendl ;
llinfos << "Current max usable memory(KB): " << sMaxPhysicalMemInKB << llendl ;
+
+ llinfos << "--- private pool information -- " << llendl ;
+ llinfos << "Total reserved (KB): " << LLPrivateMemoryPoolManager::getInstance()->mTotalReservedSize / 1024 << llendl ;
+ llinfos << "Total allocated (KB): " << LLPrivateMemoryPoolManager::getInstance()->mTotalAllocatedSize / 1024 << llendl ;
}
//return false: the memory pool is in a normal state;
//return true: the memory pool is low on physical memory or address space and allocations may soon fail.
//static
-S32 LLMemory::isMemoryPoolLow()
+bool LLMemory::isMemoryPoolLow()
{
static const U32 LOW_MEMEOY_POOL_THRESHOLD_KB = 64 * 1024 ; //64 MB for emergency use
+ const static U32 MAX_SIZE_CHECKED_MEMORY_BLOCK = 64 * 1024 * 1024 ; //64 MB
+ static void* last_reserved_address = NULL ;
if(!sEnableMemoryFailurePrevention)
{
- return 0 ; //no memory failure prevention.
+ return false ; //no memory failure prevention.
}
if(sAvailPhysicalMemInKB < (LOW_MEMEOY_POOL_THRESHOLD_KB >> 2)) //out of physical memory
{
- return -1 ;
+ return true ;
}
if(sAllocatedPageSizeInKB + (LOW_MEMEOY_POOL_THRESHOLD_KB >> 2) > sMaxHeapSizeInKB) //out of virtual address space.
{
- return -1 ;
+ return true ;
}
- return (S32)(sAvailPhysicalMemInKB < LOW_MEMEOY_POOL_THRESHOLD_KB ||
+ bool is_low = (sAvailPhysicalMemInKB < LOW_MEMEOY_POOL_THRESHOLD_KB ||
sAllocatedPageSizeInKB + LOW_MEMEOY_POOL_THRESHOLD_KB > sMaxHeapSizeInKB) ;
+
+ //check the virtual address space fragmentation
+ if(!is_low)
+ {
+ if(!last_reserved_address)
+ {
+ last_reserved_address = LLMemory::tryToAlloc(last_reserved_address, MAX_SIZE_CHECKED_MEMORY_BLOCK) ;
+ }
+ else
+ {
+ last_reserved_address = LLMemory::tryToAlloc(last_reserved_address, MAX_SIZE_CHECKED_MEMORY_BLOCK) ;
+ if(!last_reserved_address) //failed, try once more
+ {
+ last_reserved_address = LLMemory::tryToAlloc(last_reserved_address, MAX_SIZE_CHECKED_MEMORY_BLOCK) ;
+ }
+ }
+
+ is_low = !last_reserved_address ; //allocation failed
+ }
+
+ return is_low ;
}
//static
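
The fragmentation check added above leans on LLMemory::tryToAlloc() to test whether a contiguous 64 MB block of virtual address space can still be reserved. As a hedged sketch only (the viewer's actual tryToAlloc() may work differently), a Windows probe built on VirtualAlloc could look like the following; MEM_RESERVE claims address space without committing physical pages, so a failure points at address-space exhaustion or fragmentation rather than at a lack of RAM.

#include <windows.h>

// Illustrative sketch, not the real LLMemory::tryToAlloc() implementation.
// Tries to (re)reserve 'size' bytes of virtual address space, preferably at 'addr'.
void* probe_reserve(void* addr, SIZE_T size)
{
    if (addr)
    {
        // Drop the previous reservation so the same range can be re-tested.
        VirtualFree(addr, 0, MEM_RELEASE);
    }
    // MEM_RESERVE touches no physical pages; only the address range is claimed.
    void* p = VirtualAlloc(addr, size, MEM_RESERVE, PAGE_NOACCESS);
    if (!p && addr)
    {
        // The preferred range is gone; let the OS choose any free range instead.
        p = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
    }
    return p;
}
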
@@ -1289,15 +1316,13 @@ U16 LLPrivateMemoryPool::LLMemoryChunk::getPageLevel(U32 size)
//--------------------------------------------------------------------
const U32 CHUNK_SIZE = 4 << 20 ; //4 MB
const U32 LARGE_CHUNK_SIZE = 4 * CHUNK_SIZE ; //16 MB
-LLPrivateMemoryPool::LLPrivateMemoryPool(S32 type) :
+LLPrivateMemoryPool::LLPrivateMemoryPool(S32 type, U32 max_pool_size) :
mMutexp(NULL),
mReservedPoolSize(0),
mHashFactor(1),
- mType(type)
+ mType(type),
+ mMaxPoolSize(max_pool_size)
{
- const U32 MAX_POOL_SIZE = 256 * 1024 * 1024 ; //256 MB
-
- mMaxPoolSize = MAX_POOL_SIZE ;
if(type == STATIC_THREADED || type == VOLATILE_THREADED)
{
mMutexp = new LLMutex ;
@@ -1362,16 +1387,31 @@ char* LLPrivateMemoryPool::allocate(U32 size)
chunk = chunk->mNext ;
}
}
-
- chunk = addChunk(chunk_idx) ;
- if(chunk)
+ else
{
- p = chunk->allocate(size) ;
+ chunk = addChunk(chunk_idx) ;
+ if(chunk)
+ {
+ p = chunk->allocate(size) ;
+ }
}
}
unlock() ;
+ if(!p) //getting memory from the private pool failed, try the heap directly
+ {
+ static bool to_log = true ;
+
+ if(to_log)
+ {
+ llwarns << "The memory pool overflows, now using heap directly!" << llendl ;
+ to_log = false ;
+ }
+
+ return (char*)malloc(size) ;
+ }
+
return p ;
}
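
Because of the fallback above, allocate() can now return pointers that came straight from malloc() rather than from a pool chunk, so whatever releases them has to tell the two apart (findChunk(), which appears later in this file, returning NULL is the natural ownership test). The sketch below only illustrates that pattern; PoolRef, ownsAddress() and releaseSlot() are made-up stand-ins, not names from llmemory.cpp.

#include <stdlib.h>

// Hedged sketch of the release path implied by the malloc() fallback.
// PoolRef is a minimal stand-in for LLPrivateMemoryPool: ownsAddress() plays
// the role of findChunk(addr) != NULL, releaseSlot() the pool-side free.
struct PoolRef
{
    const char* mBegin;
    const char* mEnd;

    bool ownsAddress(const char* addr) const { return addr >= mBegin && addr < mEnd; }
    void releaseSlot(char* /*addr*/)         { /* return the slot to its chunk */ }
};

void releaseMemory(PoolRef* pool, char* addr)
{
    if (pool && pool->ownsAddress(addr))
    {
        pool->releaseSlot(addr);   // the pointer came from a pool chunk
    }
    else
    {
        free(addr);                // the pointer came from the heap fallback
    }
}
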
@@ -1472,7 +1512,7 @@ void LLPrivateMemoryPool::destroyPool()
unlock() ;
}
-void LLPrivateMemoryPool::checkSize(U32 asked_size)
+bool LLPrivateMemoryPool::checkSize(U32 asked_size)
{
if(mReservedPoolSize + asked_size > mMaxPoolSize)
{
@@ -1480,8 +1520,12 @@ void LLPrivateMemoryPool::checkSize(U32 asked_size)
llinfos << "Total reserved size: " << mReservedPoolSize + asked_size << llendl ;
llinfos << "Total_allocated Size: " << getTotalAllocatedSize() << llendl ;
- llerrs << "The pool is overflowing..." << llendl ;
+ //llerrs << "The pool is overflowing..." << llendl ;
+
+ return false ;
}
+
+ return true ;
}
LLPrivateMemoryPool::LLMemoryChunk* LLPrivateMemoryPool::addChunk(S32 chunk_index)
@@ -1501,7 +1545,11 @@ LLPrivateMemoryPool::LLMemoryChunk* LLPrivateMemoryPool::addChunk(S32 chunk_inde
MAX_SLOT_SIZES[chunk_index], MIN_BLOCK_SIZES[chunk_index], MAX_BLOCK_SIZES[chunk_index]) ;
}
- checkSize(preferred_size + overhead) ;
+ if(!checkSize(preferred_size + overhead))
+ {
+ return NULL ;
+ }
+
mReservedPoolSize += preferred_size + overhead ;
char* buffer = (char*)malloc(preferred_size + overhead) ;
@@ -1593,7 +1641,7 @@ LLPrivateMemoryPool::LLMemoryChunk* LLPrivateMemoryPool::findChunk(const char* a
void LLPrivateMemoryPool::addToHashTable(LLMemoryChunk* chunk)
{
- static const U16 HASH_FACTORS[] = {41, 83, 193, 317, 419, 523, 0xFFFF};
+ static const U16 HASH_FACTORS[] = {41, 83, 193, 317, 419, 523, 719, 997, 1523, 0xFFFF};
U16 i ;
if(mChunkHashList.empty())
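
The longer factor table above lets the chunk hash grow through more rehash steps before hitting the 0xFFFF sentinel; the new entries (719, 997, 1523) are primes, like the existing ones, which keeps the modulo distribution even. The following is only a rough illustration of that idea, not the exact hashing used in llmemory.cpp:

#include <stdint.h>
#include <stddef.h>

static const uint16_t EXAMPLE_HASH_FACTORS[] = {41, 83, 193, 317, 419, 523, 719, 997, 1523, 0xFFFF};

// Bucket index for a chunk: chunks start on chunk_size boundaries, so dividing
// by chunk_size before the modulo spreads neighbouring chunks across buckets.
static uint16_t chunk_hash(const char* chunk_base, uint32_t chunk_size, uint16_t factor)
{
    return (uint16_t)(((uintptr_t)chunk_base / chunk_size) % factor);
}

// Next factor to rehash to when the table gets crowded; 0xFFFF ends the list.
static uint16_t next_hash_factor(uint16_t current)
{
    for (size_t i = 0; EXAMPLE_HASH_FACTORS[i] != 0xFFFF; ++i)
    {
        if (EXAMPLE_HASH_FACTORS[i] > current)
        {
            return EXAMPLE_HASH_FACTORS[i];
        }
    }
    return current; // already at the largest supported factor
}
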
@@ -1774,7 +1822,7 @@ void LLPrivateMemoryPool::LLChunkHashElement::remove(LLPrivateMemoryPool::LLMemo
//--------------------------------------------------------------------
LLPrivateMemoryPoolManager* LLPrivateMemoryPoolManager::sInstance = NULL ;
-LLPrivateMemoryPoolManager::LLPrivateMemoryPoolManager(BOOL enabled)
+LLPrivateMemoryPoolManager::LLPrivateMemoryPoolManager(BOOL enabled, U32 max_pool_size)
{
mPoolList.resize(LLPrivateMemoryPool::MAX_TYPES) ;
@@ -1784,6 +1832,9 @@ LLPrivateMemoryPoolManager::LLPrivateMemoryPoolManager(BOOL enabled)
}
mPrivatePoolEnabled = enabled ;
+
+ const U32 MAX_POOL_SIZE = 256 * 1024 * 1024 ; //256 MB
+ mMaxPrivatePoolSize = llmax(max_pool_size, MAX_POOL_SIZE) ;
}
LLPrivateMemoryPoolManager::~LLPrivateMemoryPoolManager()
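
Note the direction of the clamp in the constructor above: llmax() makes the 256 MB constant a floor rather than a cap, so mMaxPrivatePoolSize never drops below 256 MB and the max_pool_size argument can only raise it. A tiny, purely illustrative check (using std::max in place of llmax):

#include <algorithm>
#include <stdint.h>

int main()
{
    const uint32_t MAX_POOL_SIZE = 256u * 1024u * 1024u;                       // 256 MB floor
    uint32_t low  = std::max<uint32_t>(128u * 1024u * 1024u, MAX_POOL_SIZE);   // raised to 256 MB
    uint32_t high = std::max<uint32_t>(512u * 1024u * 1024u, MAX_POOL_SIZE);   // stays 512 MB
    return (low == MAX_POOL_SIZE && high == 512u * 1024u * 1024u) ? 0 : 1;
}
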
@@ -1826,11 +1877,11 @@ LLPrivateMemoryPoolManager::~LLPrivateMemoryPoolManager()
}
//static
-void LLPrivateMemoryPoolManager::initClass(BOOL enabled)
+void LLPrivateMemoryPoolManager::initClass(BOOL enabled, U32 max_pool_size)
{
llassert_always(!sInstance) ;
- sInstance = new LLPrivateMemoryPoolManager(enabled) ;
+ sInstance = new LLPrivateMemoryPoolManager(enabled, max_pool_size) ;
}
//static
@@ -1862,7 +1913,7 @@ LLPrivateMemoryPool* LLPrivateMemoryPoolManager::newPool(S32 type)
if(!mPoolList[type])
{
- mPoolList[type] = new LLPrivateMemoryPool(type) ;
+ mPoolList[type] = new LLPrivateMemoryPool(type, mMaxPrivatePoolSize) ;
}
return mPoolList[type] ;
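
Taken together, the hedged sketch below shows how the new max_pool_size parameter flows from initClass() through newPool() to allocate() after this change. It assumes llmemory.h and the usual viewer typedefs (U32, TRUE) are in scope; the 512 MB value and the call site itself are invented for illustration, while initClass(), getInstance(), newPool(), allocate() and STATIC_THREADED are the names that appear in this diff.

#include "llmemory.h"

// Illustrative only; not an actual call site in the viewer.
void example_startup()
{
    // Values below 256 MB are raised to 256 MB by the manager (llmax above).
    const U32 requested_max_pool_size = 512 * 1024 * 1024;
    LLPrivateMemoryPoolManager::initClass(TRUE, requested_max_pool_size);

    LLPrivateMemoryPool* pool =
        LLPrivateMemoryPoolManager::getInstance()->newPool(LLPrivateMemoryPool::STATIC_THREADED);

    // With this change allocate() no longer aborts via llerrs when the pool
    // would exceed mMaxPoolSize; it logs a warning once and falls back to malloc().
    char* buf = pool ? pool->allocate(1024) : NULL;
    (void)buf; // ... use buf, then release it through the pool's free path ...
}
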