path: root/indra/llcommon
author    Xiaohong Bao <bao@lindenlab.com>    2011-07-19 23:48:11 -0600
committer Xiaohong Bao <bao@lindenlab.com>    2011-07-19 23:48:11 -0600
commit    83e48f4a5100ab1adadc27a3e7a0b23b68b8b568 (patch)
tree      68a2b81442389d1f9e54bce3dca5bc0e47b7a3e6 /indra/llcommon
parent    4dc0850da7469906e5ad052da71830b05005295b (diff)
parent    76eca5d0bce3e303f6d77b0d16f114320830ac6a (diff)
Merge
Diffstat (limited to 'indra/llcommon')
-rw-r--r--  indra/llcommon/CMakeLists.txt             |    3
-rw-r--r--  indra/llcommon/llapp.cpp                  |    4
-rw-r--r--  indra/llcommon/llapr.cpp                  |  445
-rw-r--r--  indra/llcommon/llapr.h                    |  107
-rw-r--r--  indra/llcommon/llaprpool.cpp              |  202
-rw-r--r--  indra/llcommon/llaprpool.h                |  256
-rw-r--r--  indra/llcommon/llcommon.cpp               |   13
-rw-r--r--  indra/llcommon/llcommon.h                 |    2
-rw-r--r--  indra/llcommon/llerror.cpp                |    3
-rw-r--r--  indra/llcommon/llerror.h                  |    1
-rw-r--r--  indra/llcommon/llfixedbuffer.cpp          |    3
-rw-r--r--  indra/llcommon/llmemory.cpp               | 1830
-rw-r--r--  indra/llcommon/llmemory.h                 |  323
-rw-r--r--  indra/llcommon/llscopedvolatileaprpool.h  |   52
-rw-r--r--  indra/llcommon/llthread.cpp               |  240
-rw-r--r--  indra/llcommon/llthread.h                 |  133
-rw-r--r--  indra/llcommon/llthreadsafequeue.cpp      |   15
-rw-r--r--  indra/llcommon/llthreadsafequeue.h        |   16
-rw-r--r--  indra/llcommon/llworkerthread.cpp         |    8
-rw-r--r--  indra/llcommon/llworkerthread.h           |    3
20 files changed, 2986 insertions, 673 deletions
diff --git a/indra/llcommon/CMakeLists.txt b/indra/llcommon/CMakeLists.txt
index 9910281b64..b22e41e7e3 100644
--- a/indra/llcommon/CMakeLists.txt
+++ b/indra/llcommon/CMakeLists.txt
@@ -31,6 +31,7 @@ set(llcommon_SOURCE_FILES
llallocator_heap_profile.cpp
llapp.cpp
llapr.cpp
+ llaprpool.cpp
llassettype.cpp
llavatarname.cpp
llbase32.cpp
@@ -80,6 +81,7 @@ set(llcommon_SOURCE_FILES
llrand.cpp
llrefcount.cpp
llrun.cpp
+ llscopedvolatileaprpool.h
llsd.cpp
llsdserialize.cpp
llsdserialize_xml.cpp
@@ -122,6 +124,7 @@ set(llcommon_HEADER_FILES
llavatarname.h
llapp.h
llapr.h
+ llaprpool.h
llassettype.h
llassoclist.h
llavatarconstants.h
diff --git a/indra/llcommon/llapp.cpp b/indra/llcommon/llapp.cpp
index ed192a9975..a8b7106078 100644
--- a/indra/llcommon/llapp.cpp
+++ b/indra/llcommon/llapp.cpp
@@ -137,10 +137,6 @@ void LLApp::commonCtor()
mOptions.append(sd);
}
- // Make sure we clean up APR when we exit
- // Don't need to do this if we're cleaning up APR in the destructor
- //atexit(ll_cleanup_apr);
-
// Set the application to this instance.
sApplication = this;
diff --git a/indra/llcommon/llapr.cpp b/indra/llcommon/llapr.cpp
index d1c44c9403..1e4a51102e 100644
--- a/indra/llcommon/llapr.cpp
+++ b/indra/llcommon/llapr.cpp
@@ -29,212 +29,8 @@
#include "linden_common.h"
#include "llapr.h"
#include "apr_dso.h"
+#include "llscopedvolatileaprpool.h"
-apr_pool_t *gAPRPoolp = NULL; // Global APR memory pool
-LLVolatileAPRPool *LLAPRFile::sAPRFilePoolp = NULL ; //global volatile APR memory pool.
-apr_thread_mutex_t *gLogMutexp = NULL;
-apr_thread_mutex_t *gCallStacksLogMutexp = NULL;
-
-const S32 FULL_VOLATILE_APR_POOL = 1024 ; //number of references to LLVolatileAPRPool
-
-void ll_init_apr()
-{
- if (!gAPRPoolp)
- {
- // Initialize APR and create the global pool
- apr_initialize();
- apr_pool_create(&gAPRPoolp, NULL);
-
- // Initialize the logging mutex
- apr_thread_mutex_create(&gLogMutexp, APR_THREAD_MUTEX_UNNESTED, gAPRPoolp);
- apr_thread_mutex_create(&gCallStacksLogMutexp, APR_THREAD_MUTEX_UNNESTED, gAPRPoolp);
- }
-
- if(!LLAPRFile::sAPRFilePoolp)
- {
- LLAPRFile::sAPRFilePoolp = new LLVolatileAPRPool(FALSE) ;
- }
-}
-
-
-void ll_cleanup_apr()
-{
- LL_INFOS("APR") << "Cleaning up APR" << LL_ENDL;
-
- if (gLogMutexp)
- {
- // Clean up the logging mutex
-
- // All other threads NEED to be done before we clean up APR, so this is okay.
- apr_thread_mutex_destroy(gLogMutexp);
- gLogMutexp = NULL;
- }
- if (gCallStacksLogMutexp)
- {
- // Clean up the logging mutex
-
- // All other threads NEED to be done before we clean up APR, so this is okay.
- apr_thread_mutex_destroy(gCallStacksLogMutexp);
- gCallStacksLogMutexp = NULL;
- }
- if (gAPRPoolp)
- {
- apr_pool_destroy(gAPRPoolp);
- gAPRPoolp = NULL;
- }
- if (LLAPRFile::sAPRFilePoolp)
- {
- delete LLAPRFile::sAPRFilePoolp ;
- LLAPRFile::sAPRFilePoolp = NULL ;
- }
- apr_terminate();
-}
-
-//
-//
-//LLAPRPool
-//
-LLAPRPool::LLAPRPool(apr_pool_t *parent, apr_size_t size, BOOL releasePoolFlag)
- : mParent(parent),
- mReleasePoolFlag(releasePoolFlag),
- mMaxSize(size),
- mPool(NULL)
-{
- createAPRPool() ;
-}
-
-LLAPRPool::~LLAPRPool()
-{
- releaseAPRPool() ;
-}
-
-void LLAPRPool::createAPRPool()
-{
- if(mPool)
- {
- return ;
- }
-
- mStatus = apr_pool_create(&mPool, mParent);
- ll_apr_warn_status(mStatus) ;
-
- if(mMaxSize > 0) //size is the number of blocks (which is usually 4K), NOT bytes.
- {
- apr_allocator_t *allocator = apr_pool_allocator_get(mPool);
- if (allocator)
- {
- apr_allocator_max_free_set(allocator, mMaxSize) ;
- }
- }
-}
-
-void LLAPRPool::releaseAPRPool()
-{
- if(!mPool)
- {
- return ;
- }
-
- if(!mParent || mReleasePoolFlag)
- {
- apr_pool_destroy(mPool) ;
- mPool = NULL ;
- }
-}
-
-//virtual
-apr_pool_t* LLAPRPool::getAPRPool()
-{
- return mPool ;
-}
-
-LLVolatileAPRPool::LLVolatileAPRPool(BOOL is_local, apr_pool_t *parent, apr_size_t size, BOOL releasePoolFlag)
- : LLAPRPool(parent, size, releasePoolFlag),
- mNumActiveRef(0),
- mNumTotalRef(0),
- mMutexPool(NULL),
- mMutexp(NULL)
-{
- //create mutex
- if(!is_local) //not a local apr_pool, that is: shared by multiple threads.
- {
- apr_pool_create(&mMutexPool, NULL); // Create a pool for mutex
- apr_thread_mutex_create(&mMutexp, APR_THREAD_MUTEX_UNNESTED, mMutexPool);
- }
-}
-
-LLVolatileAPRPool::~LLVolatileAPRPool()
-{
- //delete mutex
- if(mMutexp)
- {
- apr_thread_mutex_destroy(mMutexp);
- apr_pool_destroy(mMutexPool);
- }
-}
-
-//
-//define this virtual function to avoid any mistakenly calling LLAPRPool::getAPRPool().
-//
-//virtual
-apr_pool_t* LLVolatileAPRPool::getAPRPool()
-{
- return LLVolatileAPRPool::getVolatileAPRPool() ;
-}
-
-apr_pool_t* LLVolatileAPRPool::getVolatileAPRPool()
-{
- LLScopedLock lock(mMutexp) ;
-
- mNumTotalRef++ ;
- mNumActiveRef++ ;
-
- if(!mPool)
- {
- createAPRPool() ;
- }
-
- return mPool ;
-}
-
-void LLVolatileAPRPool::clearVolatileAPRPool()
-{
- LLScopedLock lock(mMutexp) ;
-
- if(mNumActiveRef > 0)
- {
- mNumActiveRef--;
- if(mNumActiveRef < 1)
- {
- if(isFull())
- {
- mNumTotalRef = 0 ;
-
- //destroy the apr_pool.
- releaseAPRPool() ;
- }
- else
- {
- //This does not actually free the memory,
- //it just allows the pool to re-use this memory for the next allocation.
- apr_pool_clear(mPool) ;
- }
- }
- }
- else
- {
- llassert_always(mNumActiveRef > 0) ;
- }
-
- //paranoia check if the pool is jammed.
- //will remove the check before going to release.
- llassert_always(mNumTotalRef < (FULL_VOLATILE_APR_POOL << 2)) ;
-}
-
-BOOL LLVolatileAPRPool::isFull()
-{
- return mNumTotalRef > FULL_VOLATILE_APR_POOL ;
-}
//---------------------------------------------------------------------
//
// LLScopedLock
@@ -313,15 +109,17 @@ void ll_apr_assert_status(apr_status_t status, apr_dso_handle_t *handle)
//
LLAPRFile::LLAPRFile()
: mFile(NULL),
- mCurrentFilePoolp(NULL)
+ mVolatileFilePoolp(NULL),
+ mRegularFilePoolp(NULL)
{
}
-LLAPRFile::LLAPRFile(const std::string& filename, apr_int32_t flags, LLVolatileAPRPool* pool)
+LLAPRFile::LLAPRFile(std::string const& filename, apr_int32_t flags, S32* sizep, access_t access_type)
: mFile(NULL),
- mCurrentFilePoolp(NULL)
+ mVolatileFilePoolp(NULL),
+ mRegularFilePoolp(NULL)
{
- open(filename, flags, pool);
+ open(filename, flags, access_type, sizep);
}
LLAPRFile::~LLAPRFile()
@@ -338,36 +136,58 @@ apr_status_t LLAPRFile::close()
mFile = NULL ;
}
- if(mCurrentFilePoolp)
+ if (mVolatileFilePoolp)
{
- mCurrentFilePoolp->clearVolatileAPRPool() ;
- mCurrentFilePoolp = NULL ;
+ mVolatileFilePoolp->clearVolatileAPRPool() ;
+ mVolatileFilePoolp = NULL ;
+ }
+
+ if (mRegularFilePoolp)
+ {
+ delete mRegularFilePoolp;
+ mRegularFilePoolp = NULL;
}
return ret ;
}
-apr_status_t LLAPRFile::open(const std::string& filename, apr_int32_t flags, LLVolatileAPRPool* pool, S32* sizep)
+apr_status_t LLAPRFile::open(std::string const& filename, apr_int32_t flags, access_t access_type, S32* sizep)
{
- apr_status_t s ;
-
- //check if already open some file
- llassert_always(!mFile) ;
- llassert_always(!mCurrentFilePoolp) ;
-
- apr_pool_t* apr_pool = pool ? pool->getVolatileAPRPool() : NULL ;
- s = apr_file_open(&mFile, filename.c_str(), flags, APR_OS_DEFAULT, getAPRFilePool(apr_pool));
+ llassert_always(!mFile);
+ llassert_always(!mVolatileFilePoolp && !mRegularFilePoolp);
- if (s != APR_SUCCESS || !mFile)
+ apr_status_t status;
+ {
+ apr_pool_t* apr_file_open_pool; // The use of apr_pool_t is OK here.
+ // This is a temporary variable for a pool that is passed directly to apr_file_open below.
+ if (access_type == short_lived)
+ {
+ // Use a "volatile" thread-local pool.
+ mVolatileFilePoolp = &LLThreadLocalData::tldata().mVolatileAPRPool;
+ // Access the pool and increment its reference count.
+ // The reference count of LLVolatileAPRPool objects will be decremented
+ // again in LLAPRFile::close by calling mVolatileFilePoolp->clearVolatileAPRPool().
+ apr_file_open_pool = mVolatileFilePoolp->getVolatileAPRPool();
+ }
+ else
+ {
+ mRegularFilePoolp = new LLAPRPool(LLThreadLocalData::tldata().mRootPool);
+ apr_file_open_pool = (*mRegularFilePoolp)();
+ }
+ status = apr_file_open(&mFile, filename.c_str(), flags, APR_OS_DEFAULT, apr_file_open_pool);
+ }
+ if (status != APR_SUCCESS || !mFile)
{
mFile = NULL ;
-
+ close() ;
if (sizep)
{
*sizep = 0;
}
+ return status;
}
- else if (sizep)
+
+ if (sizep)
{
S32 file_size = 0;
apr_off_t offset = 0;
@@ -381,49 +201,7 @@ apr_status_t LLAPRFile::open(const std::string& filename, apr_int32_t flags, LLV
*sizep = file_size;
}
- if(!mCurrentFilePoolp)
- {
- mCurrentFilePoolp = pool ;
-
- if(!mFile)
- {
- close() ;
- }
- }
-
- return s ;
-}
-
-//use gAPRPoolp.
-apr_status_t LLAPRFile::open(const std::string& filename, apr_int32_t flags, BOOL use_global_pool)
-{
- apr_status_t s;
-
- //check if already open some file
- llassert_always(!mFile) ;
- llassert_always(!mCurrentFilePoolp) ;
- llassert_always(use_global_pool) ; //be aware of using gAPRPoolp.
-
- s = apr_file_open(&mFile, filename.c_str(), flags, APR_OS_DEFAULT, gAPRPoolp);
- if (s != APR_SUCCESS || !mFile)
- {
- mFile = NULL ;
- close() ;
- return s;
- }
-
- return s;
-}
-
-apr_pool_t* LLAPRFile::getAPRFilePool(apr_pool_t* pool)
-{
- if(!pool)
- {
- mCurrentFilePoolp = sAPRFilePoolp ;
- return mCurrentFilePoolp->getVolatileAPRPool() ;
- }
-
- return pool ;
+ return status;
}
// File I/O
@@ -482,45 +260,6 @@ S32 LLAPRFile::seek(apr_seek_where_t where, S32 offset)
//
//static
-apr_status_t LLAPRFile::close(apr_file_t* file_handle, LLVolatileAPRPool* pool)
-{
- apr_status_t ret = APR_SUCCESS ;
- if(file_handle)
- {
- ret = apr_file_close(file_handle);
- file_handle = NULL ;
- }
-
- if(pool)
- {
- pool->clearVolatileAPRPool() ;
- }
-
- return ret ;
-}
-
-//static
-apr_file_t* LLAPRFile::open(const std::string& filename, LLVolatileAPRPool* pool, apr_int32_t flags)
-{
- apr_status_t s;
- apr_file_t* file_handle ;
-
- pool = pool ? pool : LLAPRFile::sAPRFilePoolp ;
-
- s = apr_file_open(&file_handle, filename.c_str(), flags, APR_OS_DEFAULT, pool->getVolatileAPRPool());
- if (s != APR_SUCCESS || !file_handle)
- {
- ll_apr_warn_status(s);
- LL_WARNS("APR") << " Attempting to open filename: " << filename << LL_ENDL;
- file_handle = NULL ;
- close(file_handle, pool) ;
- return NULL;
- }
-
- return file_handle ;
-}
-
-//static
S32 LLAPRFile::seek(apr_file_t* file_handle, apr_seek_where_t where, S32 offset)
{
if(!file_handle)
@@ -553,13 +292,15 @@ S32 LLAPRFile::seek(apr_file_t* file_handle, apr_seek_where_t where, S32 offset)
}
//static
-S32 LLAPRFile::readEx(const std::string& filename, void *buf, S32 offset, S32 nbytes, LLVolatileAPRPool* pool)
+S32 LLAPRFile::readEx(const std::string& filename, void *buf, S32 offset, S32 nbytes)
{
- //*****************************************
- apr_file_t* file_handle = open(filename, pool, APR_READ|APR_BINARY);
- //*****************************************
- if (!file_handle)
+ apr_file_t* file_handle;
+ LLScopedVolatileAPRPool pool;
+ apr_status_t s = apr_file_open(&file_handle, filename.c_str(), APR_READ|APR_BINARY, APR_OS_DEFAULT, pool);
+ if (s != APR_SUCCESS || !file_handle)
{
+ ll_apr_warn_status(s);
+ LL_WARNS("APR") << " while attempting to open file \"" << filename << '"' << LL_ENDL;
return 0;
}
@@ -589,14 +330,13 @@ S32 LLAPRFile::readEx(const std::string& filename, void *buf, S32 offset, S32 nb
}
}
- //*****************************************
- close(file_handle, pool) ;
- //*****************************************
+ apr_file_close(file_handle);
+
return (S32)bytes_read;
}
//static
-S32 LLAPRFile::writeEx(const std::string& filename, void *buf, S32 offset, S32 nbytes, LLVolatileAPRPool* pool)
+S32 LLAPRFile::writeEx(const std::string& filename, void *buf, S32 offset, S32 nbytes)
{
apr_int32_t flags = APR_CREATE|APR_WRITE|APR_BINARY;
if (offset < 0)
@@ -605,11 +345,13 @@ S32 LLAPRFile::writeEx(const std::string& filename, void *buf, S32 offset, S32 n
offset = 0;
}
- //*****************************************
- apr_file_t* file_handle = open(filename, pool, flags);
- //*****************************************
- if (!file_handle)
+ apr_file_t* file_handle;
+ LLScopedVolatileAPRPool pool;
+ apr_status_t s = apr_file_open(&file_handle, filename.c_str(), flags, APR_OS_DEFAULT, pool);
+ if (s != APR_SUCCESS || !file_handle)
{
+ ll_apr_warn_status(s);
+ LL_WARNS("APR") << " while attempting to open file \"" << filename << '"' << LL_ENDL;
return 0;
}
@@ -639,21 +381,18 @@ S32 LLAPRFile::writeEx(const std::string& filename, void *buf, S32 offset, S32 n
}
}
- //*****************************************
- LLAPRFile::close(file_handle, pool);
- //*****************************************
+ apr_file_close(file_handle);
return (S32)bytes_written;
}
//static
-bool LLAPRFile::remove(const std::string& filename, LLVolatileAPRPool* pool)
+bool LLAPRFile::remove(const std::string& filename)
{
apr_status_t s;
- pool = pool ? pool : LLAPRFile::sAPRFilePoolp ;
- s = apr_file_remove(filename.c_str(), pool->getVolatileAPRPool());
- pool->clearVolatileAPRPool() ;
+ LLScopedVolatileAPRPool pool;
+ s = apr_file_remove(filename.c_str(), pool);
if (s != APR_SUCCESS)
{
@@ -665,13 +404,12 @@ bool LLAPRFile::remove(const std::string& filename, LLVolatileAPRPool* pool)
}
//static
-bool LLAPRFile::rename(const std::string& filename, const std::string& newname, LLVolatileAPRPool* pool)
+bool LLAPRFile::rename(const std::string& filename, const std::string& newname)
{
apr_status_t s;
- pool = pool ? pool : LLAPRFile::sAPRFilePoolp ;
- s = apr_file_rename(filename.c_str(), newname.c_str(), pool->getVolatileAPRPool());
- pool->clearVolatileAPRPool() ;
+ LLScopedVolatileAPRPool pool;
+ s = apr_file_rename(filename.c_str(), newname.c_str(), pool);
if (s != APR_SUCCESS)
{
@@ -683,49 +421,44 @@ bool LLAPRFile::rename(const std::string& filename, const std::string& newname,
}
//static
-bool LLAPRFile::isExist(const std::string& filename, LLVolatileAPRPool* pool, apr_int32_t flags)
+bool LLAPRFile::isExist(const std::string& filename, apr_int32_t flags)
{
- apr_file_t* apr_file;
+ apr_file_t* file_handle;
apr_status_t s;
- pool = pool ? pool : LLAPRFile::sAPRFilePoolp ;
- s = apr_file_open(&apr_file, filename.c_str(), flags, APR_OS_DEFAULT, pool->getVolatileAPRPool());
+ LLScopedVolatileAPRPool pool;
+ s = apr_file_open(&file_handle, filename.c_str(), flags, APR_OS_DEFAULT, pool);
- if (s != APR_SUCCESS || !apr_file)
+ if (s != APR_SUCCESS || !file_handle)
{
- pool->clearVolatileAPRPool() ;
return false;
}
else
{
- apr_file_close(apr_file) ;
- pool->clearVolatileAPRPool() ;
+ apr_file_close(file_handle);
return true;
}
}
//static
-S32 LLAPRFile::size(const std::string& filename, LLVolatileAPRPool* pool)
+S32 LLAPRFile::size(const std::string& filename)
{
- apr_file_t* apr_file;
+ apr_file_t* file_handle;
apr_finfo_t info;
apr_status_t s;
- pool = pool ? pool : LLAPRFile::sAPRFilePoolp ;
- s = apr_file_open(&apr_file, filename.c_str(), APR_READ, APR_OS_DEFAULT, pool->getVolatileAPRPool());
+ LLScopedVolatileAPRPool pool;
+ s = apr_file_open(&file_handle, filename.c_str(), APR_READ, APR_OS_DEFAULT, pool);
- if (s != APR_SUCCESS || !apr_file)
+ if (s != APR_SUCCESS || !file_handle)
{
- pool->clearVolatileAPRPool() ;
-
return 0;
}
else
{
- apr_status_t s = apr_file_info_get(&info, APR_FINFO_SIZE, apr_file);
+ apr_status_t s = apr_file_info_get(&info, APR_FINFO_SIZE, file_handle);
- apr_file_close(apr_file) ;
- pool->clearVolatileAPRPool() ;
+ apr_file_close(file_handle) ;
if (s == APR_SUCCESS)
{
@@ -739,31 +472,29 @@ S32 LLAPRFile::size(const std::string& filename, LLVolatileAPRPool* pool)
}
//static
-bool LLAPRFile::makeDir(const std::string& dirname, LLVolatileAPRPool* pool)
+bool LLAPRFile::makeDir(const std::string& dirname)
{
apr_status_t s;
- pool = pool ? pool : LLAPRFile::sAPRFilePoolp ;
- s = apr_dir_make(dirname.c_str(), APR_FPROT_OS_DEFAULT, pool->getVolatileAPRPool());
- pool->clearVolatileAPRPool() ;
+ LLScopedVolatileAPRPool pool;
+ s = apr_dir_make(dirname.c_str(), APR_FPROT_OS_DEFAULT, pool);
if (s != APR_SUCCESS)
{
ll_apr_warn_status(s);
- LL_WARNS("APR") << " Attempting to make directory: " << dirname << LL_ENDL;
+ LL_WARNS("APR") << " while attempting to make directory: " << dirname << LL_ENDL;
return false;
}
return true;
}
//static
-bool LLAPRFile::removeDir(const std::string& dirname, LLVolatileAPRPool* pool)
+bool LLAPRFile::removeDir(const std::string& dirname)
{
apr_status_t s;
- pool = pool ? pool : LLAPRFile::sAPRFilePoolp ;
- s = apr_file_remove(dirname.c_str(), pool->getVolatileAPRPool());
- pool->clearVolatileAPRPool() ;
+ LLScopedVolatileAPRPool pool;
+ s = apr_file_remove(dirname.c_str(), pool);
if (s != APR_SUCCESS)
{
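
A minimal sketch of what a call site looks like after this refactoring (the file names here are made up; the signatures are the new ones from this diff). Each static helper now creates its pool internally via LLScopedVolatileAPRPool, so callers no longer pass an LLVolatileAPRPool around:

#include "llapr.h"

void copy_file_example()
{
	char buffer[512];
	// Returns bytes read, 0 on failure.
	S32 bytes_read = LLAPRFile::readEx("source.dat", buffer, 0, sizeof(buffer));
	if (bytes_read > 0)
	{
		// offset 0 writes at the start; a negative offset would append.
		LLAPRFile::writeEx("copy.dat", buffer, 0, bytes_read);
	}
	if (LLAPRFile::isExist("copy.dat"))
	{
		LLAPRFile::remove("copy.dat");
	}
}
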
diff --git a/indra/llcommon/llapr.h b/indra/llcommon/llapr.h
index af33ce666f..3f846f1314 100644
--- a/indra/llcommon/llapr.h
+++ b/indra/llcommon/llapr.h
@@ -50,71 +50,9 @@
#include "apr_atomic.h"
#include "llstring.h"
-extern LL_COMMON_API apr_thread_mutex_t* gLogMutexp;
-extern apr_thread_mutex_t* gCallStacksLogMutexp;
-
struct apr_dso_handle_t;
-
-/**
- * @brief initialize the common apr constructs -- apr itself, the
- * global pool, and a mutex.
- */
-void LL_COMMON_API ll_init_apr();
-
-/**
- * @brief Cleanup those common apr constructs.
- */
-void LL_COMMON_API ll_cleanup_apr();
-
-//
-//LL apr_pool
-//manage apr_pool_t, destroy allocated apr_pool in the destruction function.
-//
-class LL_COMMON_API LLAPRPool
-{
-public:
- LLAPRPool(apr_pool_t *parent = NULL, apr_size_t size = 0, BOOL releasePoolFlag = TRUE) ;
- virtual ~LLAPRPool() ;
-
- virtual apr_pool_t* getAPRPool() ;
- apr_status_t getStatus() {return mStatus ; }
-
-protected:
- void releaseAPRPool() ;
- void createAPRPool() ;
-
-protected:
- apr_pool_t* mPool ; //pointing to an apr_pool
- apr_pool_t* mParent ; //parent pool
- apr_size_t mMaxSize ; //max size of mPool, mPool should return memory to system if allocated memory beyond this limit. However it seems not to work.
- apr_status_t mStatus ; //status when creating the pool
- BOOL mReleasePoolFlag ; //if set, mPool is destroyed when LLAPRPool is deleted. default value is true.
-};
-
-//
-//volatile LL apr_pool
-//which clears memory automatically.
-//so it can not hold static data or data after memory is cleared
-//
-class LL_COMMON_API LLVolatileAPRPool : public LLAPRPool
-{
-public:
- LLVolatileAPRPool(BOOL is_local = TRUE, apr_pool_t *parent = NULL, apr_size_t size = 0, BOOL releasePoolFlag = TRUE);
- virtual ~LLVolatileAPRPool();
-
- /*virtual*/ apr_pool_t* getAPRPool() ; //define this virtual function to avoid any mistakenly calling LLAPRPool::getAPRPool().
- apr_pool_t* getVolatileAPRPool() ;
- void clearVolatileAPRPool() ;
-
- BOOL isFull() ;
-
-private:
- S32 mNumActiveRef ; //number of active pointers pointing to the apr_pool.
- S32 mNumTotalRef ; //number of total pointers pointing to the apr_pool since last creating.
-
- apr_thread_mutex_t *mMutexp;
- apr_pool_t *mMutexPool;
-} ;
+class LLAPRPool;
+class LLVolatileAPRPool;
/**
* @class LLScopedLock
@@ -205,15 +143,20 @@ class LL_COMMON_API LLAPRFile : boost::noncopyable
// make this non copyable since a copy closes the file
private:
apr_file_t* mFile ;
- LLVolatileAPRPool *mCurrentFilePoolp ; //currently in use apr_pool, could be one of them: sAPRFilePoolp, or a temp pool.
+ LLVolatileAPRPool* mVolatileFilePoolp; // (Thread local) APR pool currently in use.
+ LLAPRPool* mRegularFilePoolp; // ...or a regular pool.
public:
+ enum access_t {
+ long_lived, // Use a global pool for long-lived file accesses.
+ short_lived // Use a volatile pool for short-lived file accesses.
+ };
+
LLAPRFile() ;
- LLAPRFile(const std::string& filename, apr_int32_t flags, LLVolatileAPRPool* pool = NULL);
+ LLAPRFile(std::string const& filename, apr_int32_t flags, S32* sizep = NULL, access_t access_type = short_lived);
~LLAPRFile() ;
-
- apr_status_t open(const std::string& filename, apr_int32_t flags, LLVolatileAPRPool* pool = NULL, S32* sizep = NULL);
- apr_status_t open(const std::string& filename, apr_int32_t flags, BOOL use_global_pool); //use gAPRPoolp.
+
+ apr_status_t open(const std::string& filename, apr_int32_t flags, access_t access_type, S32* sizep = NULL);
apr_status_t close() ;
// Returns actual offset, -1 if seek fails
@@ -226,32 +169,24 @@ public:
apr_file_t* getFileHandle() {return mFile;}
-private:
- apr_pool_t* getAPRFilePool(apr_pool_t* pool) ;
-
//
//*******************************************************************************************************************************
//static components
//
-public:
- static LLVolatileAPRPool *sAPRFilePoolp ; //a global apr_pool for APRFile, which is used only when local pool does not exist.
-
private:
- static apr_file_t* open(const std::string& filename, LLVolatileAPRPool* pool, apr_int32_t flags);
- static apr_status_t close(apr_file_t* file, LLVolatileAPRPool* pool) ;
static S32 seek(apr_file_t* file, apr_seek_where_t where, S32 offset);
public:
// returns false if failure:
- static bool remove(const std::string& filename, LLVolatileAPRPool* pool = NULL);
- static bool rename(const std::string& filename, const std::string& newname, LLVolatileAPRPool* pool = NULL);
- static bool isExist(const std::string& filename, LLVolatileAPRPool* pool = NULL, apr_int32_t flags = APR_READ);
- static S32 size(const std::string& filename, LLVolatileAPRPool* pool = NULL);
- static bool makeDir(const std::string& dirname, LLVolatileAPRPool* pool = NULL);
- static bool removeDir(const std::string& dirname, LLVolatileAPRPool* pool = NULL);
+ static bool remove(const std::string& filename);
+ static bool rename(const std::string& filename, const std::string& newname);
+ static bool isExist(const std::string& filename, apr_int32_t flags = APR_READ);
+ static S32 size(const std::string& filename);
+ static bool makeDir(const std::string& dirname);
+ static bool removeDir(const std::string& dirname);
// Returns bytes read/written, 0 if read/write fails:
- static S32 readEx(const std::string& filename, void *buf, S32 offset, S32 nbytes, LLVolatileAPRPool* pool = NULL);
- static S32 writeEx(const std::string& filename, void *buf, S32 offset, S32 nbytes, LLVolatileAPRPool* pool = NULL); // offset<0 means append
+ static S32 readEx(const std::string& filename, void *buf, S32 offset, S32 nbytes);
+ static S32 writeEx(const std::string& filename, void *buf, S32 offset, S32 nbytes); // offset<0 means append
//*******************************************************************************************************************************
};
@@ -267,6 +202,4 @@ bool LL_COMMON_API ll_apr_warn_status(apr_status_t status, apr_dso_handle_t* han
void LL_COMMON_API ll_apr_assert_status(apr_status_t status);
void LL_COMMON_API ll_apr_assert_status(apr_status_t status, apr_dso_handle_t* handle);
-extern "C" LL_COMMON_API apr_pool_t* gAPRPoolp; // Global APR memory pool
-
#endif // LL_LLAPR_H
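
For reference, a minimal sketch of the reworked open() path declared above (hypothetical file names): short_lived borrows the thread-local volatile pool, while long_lived gives the file its own LLAPRPool that lives until close().

#include "llapr.h"

void open_example()
{
	// Short-lived access (the default): reference-counts the thread-local volatile pool.
	LLAPRFile file;
	S32 file_size = 0;
	if (file.open("cache.bin", APR_READ|APR_BINARY, LLAPRFile::short_lived, &file_size) == APR_SUCCESS)
	{
		// ... read up to file_size bytes ...
		file.close();	// releases the volatile pool reference
	}

	// Long-lived access: close() (or the destructor) deletes the private LLAPRPool instead.
	LLAPRFile log_file("session.log", APR_CREATE|APR_WRITE|APR_BINARY, NULL, LLAPRFile::long_lived);
}
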
diff --git a/indra/llcommon/llaprpool.cpp b/indra/llcommon/llaprpool.cpp
new file mode 100644
index 0000000000..6f21b61b65
--- /dev/null
+++ b/indra/llcommon/llaprpool.cpp
@@ -0,0 +1,202 @@
+/**
+ * @file llaprpool.cpp
+ *
+ * $LicenseInfo:firstyear=2011&license=viewerlgpl$
+ * Second Life Viewer Source Code
+ * Copyright (C) 2011, Linden Research, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License only.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
+ * $/LicenseInfo$
+ *
+ * CHANGELOG
+ * and additional copyright holders.
+ *
+ * 04/04/2010
+ * - Initial version, written by Aleric Inglewood @ SL
+ *
+ * 10/11/2010
+ * - Added APR_HAS_THREADS #if's to allow creation and destruction
+ * of subpools by threads other than the parent pool owner.
+ */
+
+#include "linden_common.h"
+
+#include "llerror.h"
+#include "llaprpool.h"
+#include "llthread.h"
+
+// Create a subpool from parent.
+void LLAPRPool::create(LLAPRPool& parent)
+{
+ llassert(!mPool); // Must be non-initialized.
+ mParent = &parent;
+ if (!mParent) // Using the default parameter?
+ {
+ // By default use the root pool of the current thread.
+ mParent = &LLThreadLocalData::tldata().mRootPool;
+ }
+ llassert(mParent->mPool); // Parent must be initialized.
+#if APR_HAS_THREADS
+ // As per the documentation of APR (i.e. http://apr.apache.org/docs/apr/1.4/apr__pools_8h.html):
+ //
+ // Note that most operations on pools are not thread-safe: a single pool should only be
+ // accessed by a single thread at any given time. The one exception to this rule is creating
+ // a subpool of a given pool: one or more threads can safely create subpools at the same
+ // time that another thread accesses the parent pool.
+ //
+ // In other words, it's safe for any thread to create a (sub)pool, independent of who
+ // owns the parent pool.
+ mOwner = apr_os_thread_current();
+#else
+ mOwner = mParent->mOwner;
+ llassert(apr_os_thread_equal(mOwner, apr_os_thread_current()));
+#endif
+ apr_status_t const apr_pool_create_status = apr_pool_create(&mPool, mParent->mPool);
+ llassert_always(apr_pool_create_status == APR_SUCCESS);
+ llassert(mPool); // Initialized.
+ apr_pool_cleanup_register(mPool, this, &s_plain_cleanup, &apr_pool_cleanup_null);
+}
+
+// Destroy the (sub)pool, if any.
+void LLAPRPool::destroy(void)
+{
+ // Only do anything if we are not already (being) destroyed.
+ if (mPool)
+ {
+#if !APR_HAS_THREADS
+ // If we are a root pool, then every thread may destruct us: in that case
+ // we have to assume that no other thread will use this pool concurrently,
+ // of course. Otherwise, if we are a subpool, only the thread that owns
+ // the parent may destruct us, since that is the pool that is still alive,
+ // possibly being used by others and being altered here.
+ llassert(!mParent || apr_os_thread_equal(mParent->mOwner, apr_os_thread_current()));
+#endif
+ apr_pool_t* pool = mPool; // The use of apr_pool_t is OK here.
+ // Temporary store before destroying the pool.
+ mPool = NULL; // Mark that we are BEING destructed.
+ apr_pool_cleanup_kill(pool, this, &s_plain_cleanup);
+ apr_pool_destroy(pool);
+ }
+}
+
+bool LLAPRPool::parent_is_being_destructed(void)
+{
+ return mParent && (!mParent->mPool || mParent->parent_is_being_destructed());
+}
+
+LLAPRInitialization::LLAPRInitialization(void)
+{
+ static bool apr_initialized = false;
+
+ if (!apr_initialized)
+ {
+ apr_initialize();
+ }
+
+ apr_initialized = true;
+}
+
+bool LLAPRRootPool::sCountInitialized = false;
+apr_uint32_t volatile LLAPRRootPool::sCount;
+
+apr_thread_mutex_t* gLogMutexp;
+apr_thread_mutex_t* gCallStacksLogMutexp;
+
+LLAPRRootPool::LLAPRRootPool(void) : LLAPRInitialization(), LLAPRPool(0)
+{
+ // sCountInitialized doesn't need locking because when we get here there is still only a single thread.
+ if (!sCountInitialized)
+ {
+ // Initialize the logging mutex
+ apr_thread_mutex_create(&gLogMutexp, APR_THREAD_MUTEX_UNNESTED, mPool);
+ apr_thread_mutex_create(&gCallStacksLogMutexp, APR_THREAD_MUTEX_UNNESTED, mPool);
+
+ apr_status_t status = apr_atomic_init(mPool);
+ llassert_always(status == APR_SUCCESS);
+ apr_atomic_set32(&sCount, 1); // Set to 1 to account for the global root pool.
+ sCountInitialized = true;
+
+ // Initialize thread-local APR pool support.
+ // Because this recursively calls LLAPRRootPool::LLAPRRootPool(void)
+ // it must be done last, so that sCount is already initialized.
+ LLThreadLocalData::init();
+ }
+ apr_atomic_inc32(&sCount);
+}
+
+LLAPRRootPool::~LLAPRRootPool()
+{
+ if (!apr_atomic_dec32(&sCount))
+ {
+ // The last pool was destructed. Cleanup remainder of APR.
+ LL_INFOS("APR") << "Cleaning up APR" << LL_ENDL;
+
+ if (gLogMutexp)
+ {
+ // Clean up the logging mutex
+
+ // All other threads NEED to be done before we clean up APR, so this is okay.
+ apr_thread_mutex_destroy(gLogMutexp);
+ gLogMutexp = NULL;
+ }
+ if (gCallStacksLogMutexp)
+ {
+ // Clean up the logging mutex
+
+ // All other threads NEED to be done before we clean up APR, so this is okay.
+ apr_thread_mutex_destroy(gCallStacksLogMutexp);
+ gCallStacksLogMutexp = NULL;
+ }
+
+ // Must destroy ALL, and therefore this last LLAPRRootPool, before terminating APR.
+ static_cast<LLAPRRootPool*>(this)->destroy();
+
+ apr_terminate();
+ }
+}
+
+//static
+// Return a global root pool that is independent of LLThreadLocalData.
+// Normally you should NOT use this. Only use for early initialization
+// (before main) and deinitialization (after main).
+LLAPRRootPool& LLAPRRootPool::get(void)
+{
+ static LLAPRRootPool global_APRpool(0);
+ return global_APRpool;
+}
+
+void LLVolatileAPRPool::clearVolatileAPRPool()
+{
+ llassert_always(mNumActiveRef > 0);
+ if (--mNumActiveRef == 0)
+ {
+ if (isOld())
+ {
+ destroy();
+ mNumTotalRef = 0 ;
+ }
+ else
+ {
+ // This does not actually free the memory,
+ // it just allows the pool to re-use this memory for the next allocation.
+ clear();
+ }
+ }
+
+ // Paranoia check if the pool is jammed.
+ llassert(mNumTotalRef < (FULL_VOLATILE_APR_POOL << 2)) ;
+}
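
The call sites above use LLScopedVolatileAPRPool, added in llscopedvolatileaprpool.h (listed in the diffstat but not shown here). A plausible minimal sketch of such a guard, assuming only the LLVolatileAPRPool interface from llaprpool.h below; as a friend of LLVolatileAPRPool it may call the private getVolatileAPRPool():

#include "llaprpool.h"
#include "llthread.h"

class LLScopedVolatileAPRPool
{
public:
	LLScopedVolatileAPRPool()
		: mPool(LLThreadLocalData::tldata().mVolatileAPRPool),
		  mScopedAPRpool(mPool.getVolatileAPRPool())	// increments mNumActiveRef and mNumTotalRef
	{ }
	~LLScopedVolatileAPRPool() { mPool.clearVolatileAPRPool(); }	// decrements mNumActiveRef again
	// Implicit conversion so the guard can be passed straight to libapr-1 calls.
	operator apr_pool_t*() const { return mScopedAPRpool; }
private:
	LLVolatileAPRPool& mPool;
	apr_pool_t* mScopedAPRpool;
};
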
diff --git a/indra/llcommon/llaprpool.h b/indra/llcommon/llaprpool.h
new file mode 100644
index 0000000000..bf4102c584
--- /dev/null
+++ b/indra/llcommon/llaprpool.h
@@ -0,0 +1,256 @@
+/**
+ * @file llaprpool.h
+ * @brief Implementation of LLAPRPool
+ *
+ * $LicenseInfo:firstyear=2011&license=viewerlgpl$
+ * Second Life Viewer Source Code
+ * Copyright (C) 2011, Linden Research, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License only.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
+ * $/LicenseInfo$
+ *
+ * CHANGELOG
+ * and additional copyright holders.
+ *
+ * 04/04/2010
+ * - Initial version, written by Aleric Inglewood @ SL
+ *
+ * 10/11/2010
+ * - Added APR_HAS_THREADS #if's to allow creation and destruction
+ * of subpools by threads other than the parent pool owner.
+ *
+ * 05/02/2011
+ * - Fixed compilation on windows: Suppress compile warning 4996
+ * and include <winsock2.h> before including <ws2tcpip.h>,
+ * by Merov Linden @ SL.
+ */
+
+#ifndef LL_LLAPRPOOL_H
+#define LL_LLAPRPOOL_H
+
+#ifdef LL_WINDOWS
+#pragma warning(push)
+#pragma warning(disable:4996)
+#include <winsock2.h>
+#include <ws2tcpip.h> // Needed before including apr_portable.h
+#pragma warning(pop)
+#endif
+
+#include "apr_portable.h"
+#include "apr_pools.h"
+#include "llerror.h"
+
+extern void ll_init_apr();
+
+/**
+ * @brief A wrapper around the APR memory pool API.
+ *
+ * Usage of this class should be restricted to passing it to libapr-1 function calls that need it.
+ *
+ */
+class LL_COMMON_API LLAPRPool
+{
+protected:
+ //! Pointer to the underlying pool. NULL if not initialized.
+ apr_pool_t* mPool; // The use of apr_pool_t is OK here.
+ // This is the wrapped pointer that it is all about!
+ //! Pointer to the parent pool, if any. Only valid when mPool is non-zero.
+ LLAPRPool* mParent;
+ //! The thread that owns this memory pool. Only valid when mPool is non-zero.
+ apr_os_thread_t mOwner;
+
+public:
+ /// Construct an uninitialized (destructed) pool.
+ LLAPRPool(void) : mPool(NULL) { }
+
+ /// Construct a subpool from an existing pool.
+ /// This is not a copy-constructor, this class doesn't have one!
+ LLAPRPool(LLAPRPool& parent) : mPool(NULL) { create(parent); }
+
+ /// Destruct the memory pool (free all of its subpools and allocated memory).
+ ~LLAPRPool() { destroy(); }
+
+protected:
+ /// Create a pool that is allocated from the Operating System. Only used by LLAPRRootPool.
+ LLAPRPool(int) : mPool(NULL), mParent(NULL), mOwner(apr_os_thread_current())
+ {
+ apr_status_t const apr_pool_create_status = apr_pool_create(&mPool, NULL);
+ llassert_always(apr_pool_create_status == APR_SUCCESS);
+ llassert(mPool);
+ apr_pool_cleanup_register(mPool, this, &s_plain_cleanup, &apr_pool_cleanup_null);
+ }
+
+public:
+ /// Create a subpool from parent. May only be called for an uninitialized/destroyed pool.
+ /// The default parameter causes the root pool of the current thread to be used.
+ void create(LLAPRPool& parent = *static_cast<LLAPRPool*>(NULL));
+
+ /// Destroy the (sub)pool, if any.
+ void destroy(void);
+
+ // Use some safebool idiom (http://www.artima.com/cppsource/safebool.html) rather than operator bool.
+ typedef LLAPRPool* const LLAPRPool::* const bool_type;
+ /// Return true if the pool is initialized.
+ operator bool_type() const { return mPool ? &LLAPRPool::mParent : 0; }
+
+ /// Painful, but we have to either provide access to this, or wrap
+ /// every APR function call that needs an apr pool as argument.
+ /// NEVER destroy a pool that is returned by this function!
+ apr_pool_t* operator()(void) const // The use of apr_pool_t is OK here.
+ // This is the accessor for passing the pool to libapr-1 functions.
+ {
+ llassert(mPool);
+ llassert(apr_os_thread_equal(mOwner, apr_os_thread_current()));
+ return mPool;
+ }
+
+ /// Free all memory without destructing the pool.
+ void clear(void)
+ {
+ llassert(mPool);
+ llassert(apr_os_thread_equal(mOwner, apr_os_thread_current()));
+ apr_pool_clear(mPool);
+ }
+
+// These methods would make this class 'complete' (as wrapper around the libapr
+// pool functions), but we don't use memory pools in the viewer (only when
+// we are forced to pass one to a libapr call), so don't define them in order
+// not to encourage people to use them.
+#if 0
+ void* palloc(size_t size)
+ {
+ llassert(mPool);
+ llassert(apr_os_thread_equal(mOwner, apr_os_thread_current()));
+ return apr_palloc(mPool, size);
+ }
+ void* pcalloc(size_t size)
+ {
+ llassert(mPool);
+ llassert(apr_os_thread_equal(mOwner, apr_os_thread_current()));
+ return apr_pcalloc(mPool, size);
+ }
+#endif
+
+private:
+ bool parent_is_being_destructed(void);
+ static apr_status_t s_plain_cleanup(void* userdata) { return static_cast<LLAPRPool*>(userdata)->plain_cleanup(); }
+
+ apr_status_t plain_cleanup(void)
+ {
+ if (mPool && // We are not being destructed,
+ parent_is_being_destructed()) // but our parent is.
+ // This means the pool is being destructed recursively by libapr
+ // because one of its parents is being destructed.
+ {
+ mPool = NULL; // Stop destroy() from destructing the pool again.
+ }
+ return APR_SUCCESS;
+ }
+};
+
+class LLAPRInitialization
+{
+public:
+ LLAPRInitialization(void);
+};
+
+/**
+ * @brief Root memory pool (allocates memory from the operating system).
+ *
+ * This class should only be used by LLThreadLocalData
+ * (and LLMutexRootPool when APR_HAS_THREADS isn't defined).
+ */
+class LL_COMMON_API LLAPRRootPool : public LLAPRInitialization, public LLAPRPool
+{
+private:
+ /// Construct a root memory pool. Should only be used by LLThreadLocalData and LLMutexRootPool.
+ friend class LLThreadLocalData;
+#if !APR_HAS_THREADS
+ friend class LLMutexRootPool;
+#endif
+ /// Construct a root memory pool.
+ /// Should only be used by LLThreadLocalData.
+ LLAPRRootPool(void);
+ ~LLAPRRootPool();
+
+private:
+ // Keep track of how many root pools exist and when the last one is destructed.
+ static bool sCountInitialized;
+ static apr_uint32_t volatile sCount;
+
+public:
+ // Return a global root pool that is independent of LLThreadLocalData.
+ // Normally you should not use this. Only use for early initialization
+ // (before main) and deinitialization (after main).
+ static LLAPRRootPool& get(void);
+
+#if APR_POOL_DEBUG
+ void grab_ownership(void)
+ {
+ // You need a patched libapr to use this.
+ // See http://web.archiveorange.com/archive/v/5XO9y2zoxUOMt6Gmi1OI
+ apr_pool_owner_set(mPool);
+ }
+#endif
+
+private:
+ // Used for constructing the Special Global Root Pool (returned by LLAPRRootPool::get).
+ // It is the same as the default constructor but omits to increment sCount. As a result,
+ // we must be sure that at least one other LLAPRRootPool is created before termination
+ // of the application (which is the case: we create one LLAPRRootPool per thread).
+ LLAPRRootPool(int) : LLAPRInitialization(), LLAPRPool(0) { }
+};
+
+/** Volatile memory pool
+ *
+ * A 'volatile' APR memory pool that normally only clears its memory
+ * and does not destroy the pool (the same pool is reused) for
+ * greater efficiency. As a safeguard, however, the APR pool
+ * is destroyed every FULL_VOLATILE_APR_POOL uses so that system
+ * memory can be allocated more efficiently and does not get
+ * scattered through RAM.
+ */
+class LL_COMMON_API LLVolatileAPRPool : protected LLAPRPool
+{
+public:
+ LLVolatileAPRPool(void) : mNumActiveRef(0), mNumTotalRef(0) { }
+
+ void clearVolatileAPRPool(void);
+
+ bool isOld(void) const { return mNumTotalRef > FULL_VOLATILE_APR_POOL; }
+ bool isUnused() const { return mNumActiveRef == 0; }
+
+private:
+ friend class LLScopedVolatileAPRPool;
+ friend class LLAPRFile;
+ apr_pool_t* getVolatileAPRPool(void) // The use of apr_pool_t is OK here.
+ {
+ if (!mPool) create();
+ ++mNumActiveRef;
+ ++mNumTotalRef;
+ return LLAPRPool::operator()();
+ }
+
+private:
+ S32 mNumActiveRef; // Number of active uses of the pool.
+ S32 mNumTotalRef; // Number of total uses of the pool since last creation.
+
+ // Maximum number of references to LLVolatileAPRPool until the pool is recreated.
+ static S32 const FULL_VOLATILE_APR_POOL = 1024;
+};
+
+#endif // LL_LLAPRPOOL_H
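
To make the intended usage concrete, a minimal sketch (the file name is made up; LLThreadLocalData::tldata().mRootPool is the per-thread root pool used elsewhere in this changeset):

#include "llaprpool.h"
#include "llthread.h"
#include "apr_file_io.h"

void subpool_example()
{
	// A subpool of this thread's root pool, destroyed when 'pool' goes out of scope.
	LLAPRPool pool(LLThreadLocalData::tldata().mRootPool);
	if (pool)	// safebool: true once the pool is initialized
	{
		apr_file_t* fp = NULL;
		// operator() hands the raw apr_pool_t* to libapr-1; never destroy that pointer yourself.
		apr_file_open(&fp, "scratch.tmp", APR_CREATE|APR_WRITE, APR_OS_DEFAULT, pool());
		if (fp)
		{
			apr_file_close(fp);
		}
	}
}
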
diff --git a/indra/llcommon/llcommon.cpp b/indra/llcommon/llcommon.cpp
index 8be9e4f4de..b8a7394852 100644
--- a/indra/llcommon/llcommon.cpp
+++ b/indra/llcommon/llcommon.cpp
@@ -31,17 +31,9 @@
#include "llthread.h"
//static
-BOOL LLCommon::sAprInitialized = FALSE;
-
-//static
void LLCommon::initClass()
{
LLMemory::initClass();
- if (!sAprInitialized)
- {
- ll_init_apr();
- sAprInitialized = TRUE;
- }
LLTimer::initClass();
LLThreadSafeRefCount::initThreadSafeRefCount();
// LLWorkerThread::initClass();
@@ -55,10 +47,5 @@ void LLCommon::cleanupClass()
// LLWorkerThread::cleanupClass();
LLThreadSafeRefCount::cleanupThreadSafeRefCount();
LLTimer::cleanupClass();
- if (sAprInitialized)
- {
- ll_cleanup_apr();
- sAprInitialized = FALSE;
- }
LLMemory::cleanupClass();
}
diff --git a/indra/llcommon/llcommon.h b/indra/llcommon/llcommon.h
index ca9cad5d05..171590f3d8 100644
--- a/indra/llcommon/llcommon.h
+++ b/indra/llcommon/llcommon.h
@@ -35,8 +35,6 @@ class LL_COMMON_API LLCommon
public:
static void initClass();
static void cleanupClass();
-private:
- static BOOL sAprInitialized;
};
#endif
diff --git a/indra/llcommon/llerror.cpp b/indra/llcommon/llerror.cpp
index bb64152407..75048073ca 100644
--- a/indra/llcommon/llerror.cpp
+++ b/indra/llcommon/llerror.cpp
@@ -866,6 +866,9 @@ You get:
*/
+extern apr_thread_mutex_t* gLogMutexp;
+extern apr_thread_mutex_t* gCallStacksLogMutexp;
+
namespace {
bool checkLevelMap(const LevelMap& map, const std::string& key,
LLError::ELevel& level)
diff --git a/indra/llcommon/llerror.h b/indra/llcommon/llerror.h
index 4a42241c4f..15d167c32e 100644
--- a/indra/llcommon/llerror.h
+++ b/indra/llcommon/llerror.h
@@ -296,5 +296,4 @@ typedef LLError::NoClassInfo _LL_CLASS_TO_LOG;
Such computation is done iff the message will be logged.
*/
-
#endif // LL_LLERROR_H
diff --git a/indra/llcommon/llfixedbuffer.cpp b/indra/llcommon/llfixedbuffer.cpp
index d394f179fb..4b5cdbe288 100644
--- a/indra/llcommon/llfixedbuffer.cpp
+++ b/indra/llcommon/llfixedbuffer.cpp
@@ -30,8 +30,7 @@
LLFixedBuffer::LLFixedBuffer(const U32 max_lines)
: LLLineBuffer(),
- mMaxLines(max_lines),
- mMutex(NULL)
+ mMaxLines(max_lines)
{
mTimer.reset();
}
diff --git a/indra/llcommon/llmemory.cpp b/indra/llcommon/llmemory.cpp
index 21d1c84d69..6e804a94b0 100644
--- a/indra/llcommon/llmemory.cpp
+++ b/indra/llcommon/llmemory.cpp
@@ -26,14 +26,13 @@
#include "linden_common.h"
-#include "llmemory.h"
-#if MEM_TRACK_MEM
+//#if MEM_TRACK_MEM
#include "llthread.h"
-#endif
+//#endif
#if defined(LL_WINDOWS)
-# include <windows.h>
+//# include <windows.h>
# include <psapi.h>
#elif defined(LL_DARWIN)
# include <sys/types.h>
@@ -43,10 +42,28 @@
# include <unistd.h>
#endif
+#include "llmemory.h"
+
+#include "llsys.h"
+#include "llframetimer.h"
//----------------------------------------------------------------------------
//static
char* LLMemory::reserveMem = 0;
+U32 LLMemory::sAvailPhysicalMemInKB = U32_MAX ;
+U32 LLMemory::sMaxPhysicalMemInKB = 0;
+U32 LLMemory::sAllocatedMemInKB = 0;
+U32 LLMemory::sAllocatedPageSizeInKB = 0 ;
+U32 LLMemory::sMaxHeapSizeInKB = U32_MAX ;
+BOOL LLMemory::sEnableMemoryFailurePrevention = FALSE;
+
+#if __DEBUG_PRIVATE_MEM__
+LLPrivateMemoryPoolManager::mem_allocation_info_t LLPrivateMemoryPoolManager::sMemAllocationTracker;
+#endif
+
+#ifndef _USE_PRIVATE_MEM_POOL_
+#define _USE_PRIVATE_MEM_POOL_ 1
+#endif
//static
void LLMemory::initClass()
@@ -71,6 +88,145 @@ void LLMemory::freeReserve()
reserveMem = NULL;
}
+//static
+void LLMemory::initMaxHeapSizeGB(F32 max_heap_size_gb, BOOL prevent_heap_failure)
+{
+ sMaxHeapSizeInKB = (U32)(max_heap_size_gb * 1024 * 1024) ;
+ sEnableMemoryFailurePrevention = prevent_heap_failure ;
+}
+
+//static
+void LLMemory::updateMemoryInfo()
+{
+#if LL_WINDOWS
+ HANDLE self = GetCurrentProcess();
+ PROCESS_MEMORY_COUNTERS counters;
+
+ if (!GetProcessMemoryInfo(self, &counters, sizeof(counters)))
+ {
+ llwarns << "GetProcessMemoryInfo failed" << llendl;
+ return ;
+ }
+
+ sAllocatedMemInKB = (U32)(counters.WorkingSetSize / 1024) ;
+ sAllocatedPageSizeInKB = (U32)(counters.PagefileUsage / 1024) ;
+ sMaxPhysicalMemInKB = llmin(LLMemoryInfo::getAvailableMemoryKB() + sAllocatedMemInKB, sMaxHeapSizeInKB);
+
+ if(sMaxPhysicalMemInKB > sAllocatedMemInKB)
+ {
+ sAvailPhysicalMemInKB = sMaxPhysicalMemInKB - sAllocatedMemInKB ;
+ }
+ else
+ {
+ sAvailPhysicalMemInKB = 0 ;
+ }
+#else
+ //not valid for other systems for now.
+ sAllocatedMemInKB = (U32)(LLMemory::getCurrentRSS() / 1024) ;
+ sMaxPhysicalMemInKB = U32_MAX ;
+ sAvailPhysicalMemInKB = U32_MAX ;
+#endif
+
+ return ;
+}
+
+//
+//this function tests whether there is enough space of the given size in the virtual address space.
+//it does not do any real allocation.
+//on success, it returns the address where the memory chunk can fit;
+//otherwise it returns NULL.
+//
+//static
+void* LLMemory::tryToAlloc(void* address, U32 size)
+{
+#if LL_WINDOWS
+ address = VirtualAlloc(address, size, MEM_RESERVE | MEM_TOP_DOWN, PAGE_NOACCESS) ;
+ if(address)
+ {
+ if(!VirtualFree(address, 0, MEM_RELEASE))
+ {
+ llerrs << "Failed to free memory reservation." << llendl ;
+ }
+ }
+ return address ;
+#else
+ return (void*)0x01 ; //skip checking
+#endif
+}
+
+//static
+void LLMemory::logMemoryInfo(BOOL update)
+{
+ if(update)
+ {
+ updateMemoryInfo() ;
+ }
+
+ llinfos << "Current allocated physical memory(KB): " << sAllocatedMemInKB << llendl ;
+ llinfos << "Current allocated page size (KB): " << sAllocatedPageSizeInKB << llendl ;
+ llinfos << "Current available physical memory(KB): " << sAvailPhysicalMemInKB << llendl ;
+ llinfos << "Current max usable memory(KB): " << sMaxPhysicalMemInKB << llendl ;
+}
+
+//return 0: everything is normal;
+//return 1: the memory pool is low, but not in danger;
+//return -1: the memory pool is in danger, is about to crash.
+//static
+S32 LLMemory::isMemoryPoolLow()
+{
+ static const U32 LOW_MEMEOY_POOL_THRESHOLD_KB = 64 * 1024 ; //64 MB for emergency use
+
+ if(!sEnableMemoryFailurePrevention)
+ {
+ return 0 ; //no memory failure prevention.
+ }
+
+ if(sAvailPhysicalMemInKB < (LOW_MEMEOY_POOL_THRESHOLD_KB >> 2)) //out of physical memory
+ {
+ return -1 ;
+ }
+
+ if(sAllocatedPageSizeInKB + (LOW_MEMEOY_POOL_THRESHOLD_KB >> 2) > sMaxHeapSizeInKB) //out of virtual address space.
+ {
+ return -1 ;
+ }
+
+ return (S32)(sAvailPhysicalMemInKB < LOW_MEMEOY_POOL_THRESHOLD_KB ||
+ sAllocatedPageSizeInKB + LOW_MEMEOY_POOL_THRESHOLD_KB > sMaxHeapSizeInKB) ;
+}
+
+//static
+U32 LLMemory::getAvailableMemKB()
+{
+ return sAvailPhysicalMemInKB ;
+}
+
+//static
+U32 LLMemory::getMaxMemKB()
+{
+ return sMaxPhysicalMemInKB ;
+}
+
+//static
+U32 LLMemory::getAllocatedMemKB()
+{
+ return sAllocatedMemInKB ;
+}
+
+void* ll_allocate (size_t size)
+{
+ if (size == 0)
+ {
+ llwarns << "Null allocation" << llendl;
+ }
+ void *p = malloc(size);
+ if (p == NULL)
+ {
+ LLMemory::freeReserve();
+ llerrs << "Out of memory Error" << llendl;
+ }
+ return p;
+}
//----------------------------------------------------------------------------
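
A minimal sketch of how the new accounting entry points above fit together (the 1.5 GB cap is an arbitrary example value):

void memory_accounting_example()
{
	LLMemory::initMaxHeapSizeGB(1.5f, TRUE);	// cap the heap and enable failure prevention

	LLMemory::updateMemoryInfo();	// refresh the cached counters
	LLMemory::logMemoryInfo(FALSE);	// already updated above, so skip the implicit update

	switch (LLMemory::isMemoryPoolLow())
	{
		case -1: /* in danger, about to fail: shed memory immediately */ break;
		case  1: /* low, but not yet in danger */ break;
		default: /* 0: everything is normal */ break;
	}
}
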
@@ -237,7 +393,7 @@ U64 LLMemory::getCurrentRSS()
U32 LLMemory::getWorkingSetSize()
{
- return 0 ;
+ return 0;
}
#endif
@@ -258,7 +414,7 @@ LLMemTracker::LLMemTracker()
mDrawnIndex = 0 ;
mPaused = FALSE ;
- mMutexp = new LLMutex(NULL) ;
+ mMutexp = new LLMutex() ;
mStringBuffer = new char*[128] ;
mStringBuffer[0] = new char[mCapacity * 128] ;
for(S32 i = 1 ; i < mCapacity ; i++)
@@ -376,3 +532,1665 @@ const char* LLMemTracker::getNextLine()
#endif //MEM_TRACK_MEM
//--------------------------------------------------------------------------------------------------
+
+//--------------------------------------------------------------------------------------------------
+//--------------------------------------------------------------------------------------------------
+//minimum slot size and minimal slot size interval
+const U32 ATOMIC_MEM_SLOT = 16 ; //bytes
+
+//minimum block sizes (page size) for small allocation, medium allocation, large allocation
+const U32 MIN_BLOCK_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION] = {2 << 10, 4 << 10, 16 << 10} ; //2KB, 4KB, 16KB
+
+//maximum block sizes for small allocation, medium allocation, large allocation
+const U32 MAX_BLOCK_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION] = {64 << 10, 1 << 20, 4 << 20} ;
+
+//minimum slot sizes for small allocation, medium allocation, large allocation
+const U32 MIN_SLOT_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION] = {ATOMIC_MEM_SLOT, 2 << 10, 512 << 10};
+
+//maximum slot sizes for small allocation, medium allocation, large allocation
+const U32 MAX_SLOT_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION] = {(2 << 10) - ATOMIC_MEM_SLOT, (512 - 2) << 10, 4 << 20};
+
+//the size of a block with multiple slots cannot exceed CUT_OFF_SIZE
+const U32 CUT_OFF_SIZE = (64 << 10) ; //64 KB
+
+//max number of slots in a block
+const U32 MAX_NUM_SLOTS_IN_A_BLOCK = llmin(MIN_BLOCK_SIZES[0] / ATOMIC_MEM_SLOT, ATOMIC_MEM_SLOT * 8) ;
+
+//-------------------------------------------------------------
+//align val to be integer times of ATOMIC_MEM_SLOT
+U32 align(U32 val)
+{
+ U32 aligned = (val / ATOMIC_MEM_SLOT) * ATOMIC_MEM_SLOT ;
+ if(aligned < val)
+ {
+ aligned += ATOMIC_MEM_SLOT ;
+ }
+
+ return aligned ;
+}
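
For instance, with ATOMIC_MEM_SLOT = 16 the rounding behaves as follows (worked values, not part of the commit):

// align(1)   == 16	(rounds up to one slot)
// align(16)  == 16	(already aligned)
// align(17)  == 32	(next multiple of 16)
// align(100) == 112
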
+
+//-------------------------------------------------------------
+//class LLPrivateMemoryPool::LLMemoryBlock
+//-------------------------------------------------------------
+//
+//each memory block can fit two slot sizes: 0.75 * mSlotSize, which starts from the beginning of the block and grows towards the end of
+//the block; another is mSlotSize, which starts from the end of the block and grows towards the beginning of the block.
+//
+LLPrivateMemoryPool::LLMemoryBlock::LLMemoryBlock()
+{
+ //empty
+}
+
+LLPrivateMemoryPool::LLMemoryBlock::~LLMemoryBlock()
+{
+ //empty
+}
+
+//create and initialize a memory block
+void LLPrivateMemoryPool::LLMemoryBlock::init(char* buffer, U32 buffer_size, U32 slot_size)
+{
+ mBuffer = buffer ;
+ mBufferSize = buffer_size ;
+ mSlotSize = slot_size ;
+ mTotalSlots = buffer_size / mSlotSize ;
+
+ llassert_always(buffer_size / mSlotSize <= MAX_NUM_SLOTS_IN_A_BLOCK) ; //max number is 128
+
+ mAllocatedSlots = 0 ;
+ mDummySize = 0 ;
+
+ //init the bit map.
+ //mark free bits
+ if(mTotalSlots > 32) //reserve extra space from mBuffer to store bitmap if needed.
+ {
+ mDummySize = ATOMIC_MEM_SLOT ;
+ mTotalSlots -= (mDummySize + mSlotSize - 1) / mSlotSize ;
+ mUsageBits = 0 ;
+
+ S32 usage_bit_len = (mTotalSlots + 31) / 32 ;
+
+ for(S32 i = 0 ; i < usage_bit_len - 1 ; i++)
+ {
+ *((U32*)mBuffer + i) = 0 ;
+ }
+ for(S32 i = usage_bit_len - 1 ; i < mDummySize / sizeof(U32) ; i++)
+ {
+ *((U32*)mBuffer + i) = 0xffffffff ;
+ }
+
+ if(mTotalSlots & 31)
+ {
+ *((U32*)mBuffer + usage_bit_len - 2) = (0xffffffff << (mTotalSlots & 31)) ;
+ }
+ }
+ else//no extra bitmap space reserved
+ {
+ mUsageBits = 0 ;
+ if(mTotalSlots & 31)
+ {
+ mUsageBits = (0xffffffff << (mTotalSlots & 31)) ;
+ }
+ }
+
+ mSelf = this ;
+ mNext = NULL ;
+ mPrev = NULL ;
+
+ llassert_always(mTotalSlots > 0) ;
+}
+
+//mark this block to be free with the memory [mBuffer, mBuffer + mBufferSize).
+void LLPrivateMemoryPool::LLMemoryBlock::setBuffer(char* buffer, U32 buffer_size)
+{
+ mBuffer = buffer ;
+ mBufferSize = buffer_size ;
+ mSelf = NULL ;
+ mTotalSlots = 0 ; //set the block is free.
+}
+
+//reserve a slot
+char* LLPrivateMemoryPool::LLMemoryBlock::allocate()
+{
+ llassert_always(mAllocatedSlots < mTotalSlots) ;
+
+ //find a free slot
+ U32* bits = NULL ;
+ U32 k = 0 ;
+ if(mUsageBits != 0xffffffff)
+ {
+ bits = &mUsageBits ;
+ }
+ else if(mDummySize > 0)//go to extra space
+ {
+ for(S32 i = 0 ; i < mDummySize / sizeof(U32); i++)
+ {
+ if(*((U32*)mBuffer + i) != 0xffffffff)
+ {
+ bits = (U32*)mBuffer + i ;
+ k = i + 1 ;
+ break ;
+ }
+ }
+ }
+ S32 idx = 0 ;
+ U32 tmp = *bits ;
+ for(; tmp & 1 ; tmp >>= 1, idx++) ;
+
+ //set the slot reserved
+ if(!idx)
+ {
+ *bits |= 1 ;
+ }
+ else
+ {
+ *bits |= (1 << idx) ;
+ }
+
+ mAllocatedSlots++ ;
+
+ return mBuffer + mDummySize + (k * 32 + idx) * mSlotSize ;
+}
+
+//free a slot
+void LLPrivateMemoryPool::LLMemoryBlock::freeMem(void* addr)
+{
+ //bit index
+ U32 idx = ((U32)addr - (U32)mBuffer - mDummySize) / mSlotSize ;
+
+ U32* bits = &mUsageBits ;
+ if(idx >= 32)
+ {
+ bits = (U32*)mBuffer + (idx - 32) / 32 ;
+ }
+
+ //reset the bit
+ if(idx & 31)
+ {
+ *bits &= ~(1 << (idx & 31)) ;
+ }
+ else
+ {
+ *bits &= ~1 ;
+ }
+
+ mAllocatedSlots-- ;
+}
+
+//for debug use: reset the entire bitmap.
+void LLPrivateMemoryPool::LLMemoryBlock::resetBitMap()
+{
+ for(S32 i = 0 ; i < mDummySize / sizeof(U32) ; i++)
+ {
+ *((U32*)mBuffer + i) = 0 ;
+ }
+ mUsageBits = 0 ;
+}
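
A worked example of this bitmap layout (illustrative, not part of the commit): for init(buffer, 2048, 16), mTotalSlots starts at 2048/16 = 128. Since that exceeds 32, one ATOMIC_MEM_SLOT (mDummySize = 16 bytes, i.e. four U32 words) is reserved at the start of the buffer for the spill-over bitmap, reducing mTotalSlots to 127. mUsageBits then tracks slots 0..31, the spill-over words track the remaining slots, and init() pre-marks the unreachable tail bits as used (0xffffffff) so allocate() can never hand them out.
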
+//-------------------------------------------------------------------
+//class LLMemoryChunk
+//--------------------------------------------------------------------
+LLPrivateMemoryPool::LLMemoryChunk::LLMemoryChunk()
+{
+ //empty
+}
+
+LLPrivateMemoryPool::LLMemoryChunk::~LLMemoryChunk()
+{
+ //empty
+}
+
+//create and init a memory chunk
+void LLPrivateMemoryPool::LLMemoryChunk::init(char* buffer, U32 buffer_size, U32 min_slot_size, U32 max_slot_size, U32 min_block_size, U32 max_block_size)
+{
+ mBuffer = buffer ;
+ mBufferSize = buffer_size ;
+ mAlloatedSize = 0 ;
+
+ mMetaBuffer = mBuffer + sizeof(LLMemoryChunk) ;
+
+ mMinBlockSize = min_block_size; //page size
+ mMinSlotSize = min_slot_size;
+ mMaxSlotSize = max_slot_size ;
+ mBlockLevels = mMaxSlotSize / mMinSlotSize ;
+ mPartitionLevels = max_block_size / mMinBlockSize + 1 ;
+
+ S32 max_num_blocks = (buffer_size - sizeof(LLMemoryChunk) - mBlockLevels * sizeof(LLMemoryBlock*) - mPartitionLevels * sizeof(LLMemoryBlock*)) /
+ (mMinBlockSize + sizeof(LLMemoryBlock)) ;
+ //meta data space
+ mBlocks = (LLMemoryBlock*)mMetaBuffer ; //space reserved for all memory blocks.
+ mAvailBlockList = (LLMemoryBlock**)((char*)mBlocks + sizeof(LLMemoryBlock) * max_num_blocks) ;
+ mFreeSpaceList = (LLMemoryBlock**)((char*)mAvailBlockList + sizeof(LLMemoryBlock*) * mBlockLevels) ;
+
+ //data buffer, which can be used for allocation
+ mDataBuffer = (char*)mFreeSpaceList + sizeof(LLMemoryBlock*) * mPartitionLevels ;
+
+ //alignment
+ mDataBuffer = mBuffer + align(mDataBuffer - mBuffer) ;
+
+ //init
+ for(U32 i = 0 ; i < mBlockLevels; i++)
+ {
+ mAvailBlockList[i] = NULL ;
+ }
+ for(U32 i = 0 ; i < mPartitionLevels ; i++)
+ {
+ mFreeSpaceList[i] = NULL ;
+ }
+
+ //assign the entire chunk to the first block
+ mBlocks[0].mPrev = NULL ;
+ mBlocks[0].mNext = NULL ;
+ mBlocks[0].setBuffer(mDataBuffer, buffer_size - (mDataBuffer - mBuffer)) ;
+ addToFreeSpace(&mBlocks[0]) ;
+
+ mHashNext = NULL ;
+ mNext = NULL ;
+ mPrev = NULL ;
+}
+
+//static
+U32 LLPrivateMemoryPool::LLMemoryChunk::getMaxOverhead(U32 data_buffer_size, U32 min_slot_size,
+ U32 max_slot_size, U32 min_block_size, U32 max_block_size)
+{
+ //for large allocations, reserve some extra memory for metadata to avoid wasting too much
+ if(data_buffer_size / min_slot_size < 64) //large allocations
+ {
+ U32 overhead = sizeof(LLMemoryChunk) + (data_buffer_size / min_block_size) * sizeof(LLMemoryBlock) +
+ sizeof(LLMemoryBlock*) * (max_slot_size / min_slot_size) + sizeof(LLMemoryBlock*) * (max_block_size / min_block_size + 1) ;
+
+ //round to integer times of min_block_size
+ overhead = ((overhead + min_block_size - 1) / min_block_size) * min_block_size ;
+ return overhead ;
+ }
+ else
+ {
+ return 0 ; //do not reserve extra overhead if for small allocations
+ }
+}
+
+char* LLPrivateMemoryPool::LLMemoryChunk::allocate(U32 size)
+{
+ if(mMinSlotSize > size)
+ {
+ size = mMinSlotSize ;
+ }
+ if(mAlloatedSize + size > mBufferSize - (mDataBuffer - mBuffer))
+ {
+ return NULL ; //not enough space in this chunk.
+ }
+
+ char* p = NULL ;
+ U32 blk_idx = getBlockLevel(size);
+
+ LLMemoryBlock* blk = NULL ;
+
+ //check if there is free block available
+ if(mAvailBlockList[blk_idx])
+ {
+ blk = mAvailBlockList[blk_idx] ;
+ p = blk->allocate() ;
+
+ if(blk->isFull())
+ {
+ popAvailBlockList(blk_idx) ;
+ }
+ }
+
+ //ask for a new block
+ if(!p)
+ {
+ blk = addBlock(blk_idx) ;
+ if(blk)
+ {
+ p = blk->allocate() ;
+
+ if(blk->isFull())
+ {
+ popAvailBlockList(blk_idx) ;
+ }
+ }
+ }
+
+ //ask for space from larger blocks
+ if(!p)
+ {
+ for(S32 i = blk_idx + 1 ; i < mBlockLevels; i++)
+ {
+ if(mAvailBlockList[i])
+ {
+ blk = mAvailBlockList[i] ;
+ p = blk->allocate() ;
+
+ if(blk->isFull())
+ {
+ popAvailBlockList(i) ;
+ }
+ break ;
+ }
+ }
+ }
+
+ if(p && blk)
+ {
+ mAlloatedSize += blk->getSlotSize() ;
+ }
+ return p ;
+}
+
+void LLPrivateMemoryPool::LLMemoryChunk::freeMem(void* addr)
+{
+ U32 blk_idx = getPageIndex((U32)addr) ;
+ LLMemoryBlock* blk = (LLMemoryBlock*)(mMetaBuffer + blk_idx * sizeof(LLMemoryBlock)) ;
+ blk = blk->mSelf ;
+
+ bool was_full = blk->isFull() ;
+ blk->freeMem(addr) ;
+ mAlloatedSize -= blk->getSlotSize() ;
+
+ if(blk->empty())
+ {
+ removeBlock(blk) ;
+ }
+ else if(was_full)
+ {
+ addToAvailBlockList(blk) ;
+ }
+}
+
+bool LLPrivateMemoryPool::LLMemoryChunk::empty()
+{
+ return !mAlloatedSize ;
+}
+
+bool LLPrivateMemoryPool::LLMemoryChunk::containsAddress(const char* addr) const
+{
+ return (U32)mBuffer <= (U32)addr && (U32)mBuffer + mBufferSize > (U32)addr ;
+}
+
+//debug use
+void LLPrivateMemoryPool::LLMemoryChunk::dump()
+{
+#if 0
+	//sanity check
+	std::vector<LLMemoryBlock*> blk_list ; //declared here so this disabled debug code compiles if re-enabled (needs <vector>/<algorithm>)
+ //for(S32 i = 0 ; i < mBlockLevels ; i++)
+ //{
+ // LLMemoryBlock* blk = mAvailBlockList[i] ;
+ // while(blk)
+ // {
+ // blk_list.push_back(blk) ;
+ // blk = blk->mNext ;
+ // }
+ //}
+ for(S32 i = 0 ; i < mPartitionLevels ; i++)
+ {
+ LLMemoryBlock* blk = mFreeSpaceList[i] ;
+ while(blk)
+ {
+ blk_list.push_back(blk) ;
+ blk = blk->mNext ;
+ }
+ }
+
+ std::sort(blk_list.begin(), blk_list.end(), LLMemoryBlock::CompareAddress());
+
+ U32 total_size = blk_list[0]->getBufferSize() ;
+ for(U32 i = 1 ; i < blk_list.size(); i++)
+ {
+ total_size += blk_list[i]->getBufferSize() ;
+ if((U32)blk_list[i]->getBuffer() < (U32)blk_list[i-1]->getBuffer() + blk_list[i-1]->getBufferSize())
+ {
+ llerrs << "buffer corrupted." << llendl ;
+ }
+ }
+
+ llassert_always(total_size + mMinBlockSize >= mBufferSize - ((U32)mDataBuffer - (U32)mBuffer)) ;
+
+ U32 blk_num = (mBufferSize - (mDataBuffer - mBuffer)) / mMinBlockSize ;
+ for(U32 i = 0 ; i < blk_num ; )
+ {
+ LLMemoryBlock* blk = &mBlocks[i] ;
+ if(blk->mSelf)
+ {
+ U32 end = blk->getBufferSize() / mMinBlockSize ;
+ for(U32 j = 0 ; j < end ; j++)
+ {
+ llassert_always(blk->mSelf == blk || !blk->mSelf) ;
+ }
+ i += end ;
+ }
+ else
+ {
+			llerrs << "gap detected" << llendl ;
+ }
+ }
+#endif
+#if 0
+ llinfos << "---------------------------" << llendl ;
+ llinfos << "Chunk buffer: " << (U32)getBuffer() << " size: " << getBufferSize() << llendl ;
+
+ llinfos << "available blocks ... " << llendl ;
+ for(S32 i = 0 ; i < mBlockLevels ; i++)
+ {
+ LLMemoryBlock* blk = mAvailBlockList[i] ;
+ while(blk)
+ {
+ llinfos << "blk buffer " << (U32)blk->getBuffer() << " size: " << blk->getBufferSize() << llendl ;
+ blk = blk->mNext ;
+ }
+ }
+
+ llinfos << "free blocks ... " << llendl ;
+ for(S32 i = 0 ; i < mPartitionLevels ; i++)
+ {
+ LLMemoryBlock* blk = mFreeSpaceList[i] ;
+ while(blk)
+ {
+ llinfos << "blk buffer " << (U32)blk->getBuffer() << " size: " << blk->getBufferSize() << llendl ;
+ blk = blk->mNext ;
+ }
+ }
+#endif
+}
+
+//compute the size for a block; the size is rounded to an integer multiple of mMinBlockSize.
+U32 LLPrivateMemoryPool::LLMemoryChunk::calcBlockSize(U32 slot_size)
+{
+	//
+	//Note: we try to make a block hold 32 slots when its size does not exceed 32 pages;
+	//32 matches the bit count of mUsageBits (a U32), which tracks slot usage.
+	//
+
+ U32 block_size;
+ U32 cut_off_size = llmin(CUT_OFF_SIZE, (U32)(mMinBlockSize << 5)) ;
+
+ if((slot_size << 5) <= mMinBlockSize)//for small allocations, return one page
+ {
+ block_size = mMinBlockSize ;
+ }
+	else if(slot_size >= cut_off_size)//for large allocations, return a one-slot block
+ {
+ block_size = (slot_size / mMinBlockSize) * mMinBlockSize ;
+ if(block_size < slot_size)
+ {
+ block_size += mMinBlockSize ;
+ }
+ }
+ else //medium allocations
+ {
+ if((slot_size << 5) >= cut_off_size)
+ {
+ block_size = cut_off_size ;
+ }
+ else
+ {
+ block_size = ((slot_size << 5) / mMinBlockSize) * mMinBlockSize ;
+ }
+ }
+
+ llassert_always(block_size >= slot_size) ;
+
+ return block_size ;
+}
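+
+//A worked example of calcBlockSize() (a sketch; the values are illustrative and do not
+//come from the MIN_BLOCK_SIZES/CUT_OFF_SIZE constants defined elsewhere in this file).
+//Assume mMinBlockSize = 2KB, so cut_off_size = llmin(CUT_OFF_SIZE, 64KB) <= 64KB:
+//  slot_size = 32B :   32 << 5 = 1KB <= 2KB           -> block_size = 2KB (one page, many slots).
+//  slot_size = 512B:  512 << 5 = 16KB, medium branch  -> block_size = 16KB (exactly 32 slots),
+//                     assuming cut_off_size >= 16KB.
+//  slot_size = 100KB: 100KB >= cut_off_size           -> block_size = 100KB (a one-slot block,
+//                     already a multiple of 2KB).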
+
+//create a new block in the chunk
+LLPrivateMemoryPool::LLMemoryBlock* LLPrivateMemoryPool::LLMemoryChunk::addBlock(U32 blk_idx)
+{
+ U32 slot_size = mMinSlotSize * (blk_idx + 1) ;
+ U32 preferred_block_size = calcBlockSize(slot_size) ;
+ U16 idx = getPageLevel(preferred_block_size);
+ LLMemoryBlock* blk = NULL ;
+
+ if(mFreeSpaceList[idx])//if there is free slot for blk_idx
+ {
+ blk = createNewBlock(mFreeSpaceList[idx], preferred_block_size, slot_size, blk_idx) ;
+ }
+ else if(mFreeSpaceList[mPartitionLevels - 1]) //search free pool
+ {
+ blk = createNewBlock(mFreeSpaceList[mPartitionLevels - 1], preferred_block_size, slot_size, blk_idx) ;
+ }
+	else //search other non-preferred slots that still have enough space.
+ {
+ S32 min_idx = 0 ;
+ if(slot_size > mMinBlockSize)
+ {
+ min_idx = getPageLevel(slot_size) ;
+ }
+ for(S32 i = (S32)idx - 1 ; i >= min_idx ; i--) //search the small slots first
+ {
+ if(mFreeSpaceList[i])
+ {
+ U32 new_preferred_block_size = mFreeSpaceList[i]->getBufferSize();
+				new_preferred_block_size = (new_preferred_block_size / mMinBlockSize) * mMinBlockSize ; //round down to an integer multiple of mMinBlockSize.
+
+ //create a NEW BLOCK THERE.
+ if(new_preferred_block_size >= slot_size) //at least there is space for one slot.
+ {
+ blk = createNewBlock(mFreeSpaceList[i], new_preferred_block_size, slot_size, blk_idx) ;
+ }
+ break ;
+ }
+ }
+
+ if(!blk)
+ {
+ for(U16 i = idx + 1 ; i < mPartitionLevels - 1; i++) //search the large slots
+ {
+ if(mFreeSpaceList[i])
+ {
+ //create a NEW BLOCK THERE.
+ blk = createNewBlock(mFreeSpaceList[i], preferred_block_size, slot_size, blk_idx) ;
+ break ;
+ }
+ }
+ }
+ }
+
+ return blk ;
+}
+
+//create a new block at the designated location
+LLPrivateMemoryPool::LLMemoryBlock* LLPrivateMemoryPool::LLMemoryChunk::createNewBlock(LLMemoryBlock* blk, U32 buffer_size, U32 slot_size, U32 blk_idx)
+{
+ //unlink from the free space
+ removeFromFreeSpace(blk) ;
+
+ //check the rest space
+ U32 new_free_blk_size = blk->getBufferSize() - buffer_size ;
+	if(new_free_blk_size < mMinBlockSize) //cannot partition memory into pieces smaller than mMinBlockSize
+ {
+ new_free_blk_size = 0 ; //discard the last small extra space.
+ }
+
+ //add the rest space back to the free list
+ if(new_free_blk_size > 0) //blk still has free space
+ {
+ LLMemoryBlock* next_blk = blk + (buffer_size / mMinBlockSize) ;
+ next_blk->mPrev = NULL ;
+ next_blk->mNext = NULL ;
+ next_blk->setBuffer(blk->getBuffer() + buffer_size, new_free_blk_size) ;
+ addToFreeSpace(next_blk) ;
+ }
+
+ blk->init(blk->getBuffer(), buffer_size, slot_size) ;
+ //insert to the available block list...
+ mAvailBlockList[blk_idx] = blk ;
+
+	//mark the address map: all block entries covered by this block's space point back to this block.
+ U32 end = (buffer_size / mMinBlockSize) ;
+ for(U32 i = 1 ; i < end ; i++)
+ {
+ (blk + i)->mSelf = blk ;
+ }
+
+ return blk ;
+}
+
+//delete a block, release the block to the free pool.
+void LLPrivateMemoryPool::LLMemoryChunk::removeBlock(LLMemoryBlock* blk)
+{
+ //remove from the available block list
+ if(blk->mPrev)
+ {
+ blk->mPrev->mNext = blk->mNext ;
+ }
+ if(blk->mNext)
+ {
+ blk->mNext->mPrev = blk->mPrev ;
+ }
+ U32 blk_idx = getBlockLevel(blk->getSlotSize());
+ if(mAvailBlockList[blk_idx] == blk)
+ {
+ mAvailBlockList[blk_idx] = blk->mNext ;
+ }
+
+ blk->mNext = NULL ;
+ blk->mPrev = NULL ;
+
+ //mark it free
+ blk->setBuffer(blk->getBuffer(), blk->getBufferSize()) ;
+
+#if 1
+ //merge blk with neighbors if possible
+ if(blk->getBuffer() > mDataBuffer) //has the left neighbor
+ {
+ if((blk - 1)->mSelf->isFree())
+ {
+ LLMemoryBlock* left_blk = (blk - 1)->mSelf ;
+ removeFromFreeSpace((blk - 1)->mSelf);
+ left_blk->setBuffer(left_blk->getBuffer(), left_blk->getBufferSize() + blk->getBufferSize()) ;
+ blk = left_blk ;
+ }
+ }
+ if(blk->getBuffer() + blk->getBufferSize() <= mBuffer + mBufferSize - mMinBlockSize) //has the right neighbor
+ {
+ U32 d = blk->getBufferSize() / mMinBlockSize ;
+ if((blk + d)->isFree())
+ {
+ LLMemoryBlock* right_blk = blk + d ;
+ removeFromFreeSpace(blk + d) ;
+ blk->setBuffer(blk->getBuffer(), blk->getBufferSize() + right_blk->getBufferSize()) ;
+ }
+ }
+#endif
+
+ addToFreeSpace(blk) ;
+
+ return ;
+}
+
+//the top block in the list is full, pop it out of the list
+void LLPrivateMemoryPool::LLMemoryChunk::popAvailBlockList(U32 blk_idx)
+{
+ if(mAvailBlockList[blk_idx])
+ {
+ LLMemoryBlock* next = mAvailBlockList[blk_idx]->mNext ;
+ if(next)
+ {
+ next->mPrev = NULL ;
+ }
+ mAvailBlockList[blk_idx]->mPrev = NULL ;
+ mAvailBlockList[blk_idx]->mNext = NULL ;
+ mAvailBlockList[blk_idx] = next ;
+ }
+}
+
+//add the block back to the free pool
+void LLPrivateMemoryPool::LLMemoryChunk::addToFreeSpace(LLMemoryBlock* blk)
+{
+ llassert_always(!blk->mPrev) ;
+ llassert_always(!blk->mNext) ;
+
+ U16 free_idx = blk->getBufferSize() / mMinBlockSize - 1;
+
+ (blk + free_idx)->mSelf = blk ; //mark the end pointing back to the head.
+ free_idx = llmin(free_idx, (U16)(mPartitionLevels - 1)) ;
+
+ blk->mNext = mFreeSpaceList[free_idx] ;
+ if(mFreeSpaceList[free_idx])
+ {
+ mFreeSpaceList[free_idx]->mPrev = blk ;
+ }
+ mFreeSpaceList[free_idx] = blk ;
+ blk->mPrev = NULL ;
+ blk->mSelf = blk ;
+
+ return ;
+}
+
+//remove the space from the free pool
+void LLPrivateMemoryPool::LLMemoryChunk::removeFromFreeSpace(LLMemoryBlock* blk)
+{
+ U16 free_idx = blk->getBufferSize() / mMinBlockSize - 1;
+ free_idx = llmin(free_idx, (U16)(mPartitionLevels - 1)) ;
+
+ if(mFreeSpaceList[free_idx] == blk)
+ {
+ mFreeSpaceList[free_idx] = blk->mNext ;
+ }
+ if(blk->mPrev)
+ {
+ blk->mPrev->mNext = blk->mNext ;
+ }
+ if(blk->mNext)
+ {
+ blk->mNext->mPrev = blk->mPrev ;
+ }
+ blk->mNext = NULL ;
+ blk->mPrev = NULL ;
+ blk->mSelf = NULL ;
+
+ return ;
+}
+
+void LLPrivateMemoryPool::LLMemoryChunk::addToAvailBlockList(LLMemoryBlock* blk)
+{
+ llassert_always(!blk->mPrev) ;
+ llassert_always(!blk->mNext) ;
+
+ U32 blk_idx = getBlockLevel(blk->getSlotSize());
+
+ blk->mNext = mAvailBlockList[blk_idx] ;
+ if(blk->mNext)
+ {
+ blk->mNext->mPrev = blk ;
+ }
+ blk->mPrev = NULL ;
+ mAvailBlockList[blk_idx] = blk ;
+
+ return ;
+}
+
+U32 LLPrivateMemoryPool::LLMemoryChunk::getPageIndex(U32 addr)
+{
+ return (addr - (U32)mDataBuffer) / mMinBlockSize ;
+}
+
+//for mAvailBlockList
+U32 LLPrivateMemoryPool::LLMemoryChunk::getBlockLevel(U32 size)
+{
+ llassert(size >= mMinSlotSize && size <= mMaxSlotSize) ;
+
+ //start from 0
+ return (size + mMinSlotSize - 1) / mMinSlotSize - 1 ;
+}
+
+//for mFreeSpaceList
+U16 LLPrivateMemoryPool::LLMemoryChunk::getPageLevel(U32 size)
+{
+ //start from 0
+ U16 level = size / mMinBlockSize - 1 ;
+ if(level >= mPartitionLevels)
+ {
+ level = mPartitionLevels - 1 ;
+ }
+ return level ;
+}
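+
+//Illustrative index math for the two level functions (a sketch; mMinSlotSize = 8 and
+//mMinBlockSize = 2KB are example values only):
+//  getBlockLevel(8) == 0, getBlockLevel(9) == 1, getBlockLevel(16) == 1 :
+//  a size is rounded up to the next multiple of mMinSlotSize, then mapped to a 0-based level.
+//  getPageLevel(2KB) == 0, getPageLevel(4KB) == 1, and any size of
+//  mPartitionLevels * mMinBlockSize or more maps to the last level.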
+
+//-------------------------------------------------------------------
+//class LLPrivateMemoryPool
+//--------------------------------------------------------------------
+const U32 CHUNK_SIZE = 4 << 20 ; //4 MB
+const U32 LARGE_CHUNK_SIZE = 4 * CHUNK_SIZE ; //16 MB
+LLPrivateMemoryPool::LLPrivateMemoryPool(S32 type) :
+ mMutexp(NULL),
+ mReservedPoolSize(0),
+ mHashFactor(1),
+ mType(type)
+{
+ const U32 MAX_POOL_SIZE = 256 * 1024 * 1024 ; //256 MB
+
+ mMaxPoolSize = MAX_POOL_SIZE ;
+ if(type == STATIC_THREADED || type == VOLATILE_THREADED)
+ {
+ mMutexp = new LLMutex ;
+ }
+
+ for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++)
+ {
+ mChunkList[i] = NULL ;
+ }
+
+ mNumOfChunks = 0 ;
+}
+
+LLPrivateMemoryPool::~LLPrivateMemoryPool()
+{
+ destroyPool();
+ delete mMutexp ;
+}
+
+char* LLPrivateMemoryPool::allocate(U32 size)
+{
+ if(!size)
+ {
+ return NULL ;
+ }
+
+	//if the requested size is not smaller than CHUNK_SIZE, fetch it from the heap directly; the pool does not manage it
+ if(size >= CHUNK_SIZE)
+ {
+ return (char*)malloc(size) ;
+ }
+
+ char* p = NULL ;
+
+ //find the appropriate chunk
+ S32 chunk_idx = getChunkIndex(size) ;
+
+ lock() ;
+
+ LLMemoryChunk* chunk = mChunkList[chunk_idx];
+ while(chunk)
+ {
+ if((p = chunk->allocate(size)))
+ {
+ break ;
+ }
+ chunk = chunk->mNext ;
+ }
+
+ //fetch new memory chunk
+ if(!p)
+ {
+ if(mReservedPoolSize + CHUNK_SIZE > mMaxPoolSize)
+ {
+ chunk = mChunkList[chunk_idx];
+ while(chunk)
+ {
+ if((p = chunk->allocate(size)))
+ {
+ break ;
+ }
+ chunk = chunk->mNext ;
+ }
+ }
+
+ chunk = addChunk(chunk_idx) ;
+ if(chunk)
+ {
+ p = chunk->allocate(size) ;
+ }
+ }
+
+ unlock() ;
+
+ return p ;
+}
+
+void LLPrivateMemoryPool::freeMem(void* addr)
+{
+ if(!addr)
+ {
+ return ;
+ }
+
+ lock() ;
+
+ LLMemoryChunk* chunk = findChunk((char*)addr) ;
+
+ if(!chunk)
+ {
+ free(addr) ; //release from heap
+ }
+ else
+ {
+ chunk->freeMem(addr) ;
+
+ if(chunk->empty())
+ {
+ removeChunk(chunk) ;
+ }
+ }
+
+ unlock() ;
+}
+
+void LLPrivateMemoryPool::dump()
+{
+}
+
+U32 LLPrivateMemoryPool::getTotalAllocatedSize()
+{
+ U32 total_allocated = 0 ;
+
+ LLMemoryChunk* chunk ;
+ for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++)
+ {
+ chunk = mChunkList[i];
+ while(chunk)
+ {
+ total_allocated += chunk->getAllocatedSize() ;
+ chunk = chunk->mNext ;
+ }
+ }
+
+ return total_allocated ;
+}
+
+void LLPrivateMemoryPool::lock()
+{
+ if(mMutexp)
+ {
+ mMutexp->lock() ;
+ }
+}
+
+void LLPrivateMemoryPool::unlock()
+{
+ if(mMutexp)
+ {
+ mMutexp->unlock() ;
+ }
+}
+
+S32 LLPrivateMemoryPool::getChunkIndex(U32 size)
+{
+ S32 i ;
+ for(i = 0 ; size > MAX_SLOT_SIZES[i]; i++);
+
+ llassert_always(i < SUPER_ALLOCATION);
+
+ return i ;
+}
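+
+//For example (based on the allocation-class comments in llmemory.h; the actual
+//MAX_SLOT_SIZES values are defined elsewhere in this file): a 1KB request maps to
+//SMALL_ALLOCATION, a 100KB request to MEDIUM_ALLOCATION, and a 1MB request to LARGE_ALLOCATION.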
+
+//destroy the entire pool
+void LLPrivateMemoryPool::destroyPool()
+{
+ lock() ;
+
+#if 0
+ if(mNumOfChunks > 0)
+ {
+		//Warning:
+		//reaching here means memory is leaking; this should crash.
+		//
+
+ for(U32 i = 0 ; i < mHashFactor; i++)
+ {
+ while(mChunkHashList[i])
+ {
+ removeChunk(mChunkHashList[i]) ;
+ }
+ }
+ }
+
+ llassert_always(mNumOfChunks == 0) ;
+ llassert_always(mReservedPoolSize == 0) ;
+#endif
+
+ if(mNumOfChunks > 0)
+ {
+		llwarns << "Some memory was not freed when destroying the memory pool!" << llendl ;
+ }
+
+ mNumOfChunks = 0 ;
+ mChunkHashList.clear() ;
+ mHashFactor = 1 ;
+ for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++)
+ {
+ mChunkList[i] = NULL ;
+ }
+
+ unlock() ;
+}
+
+void LLPrivateMemoryPool::checkSize(U32 asked_size)
+{
+ if(mReservedPoolSize + asked_size > mMaxPoolSize)
+ {
+ llinfos << "Max pool size: " << mMaxPoolSize << llendl ;
+ llinfos << "Total reserved size: " << mReservedPoolSize + asked_size << llendl ;
+		llinfos << "Total allocated size: " << getTotalAllocatedSize() << llendl ;
+
+ llerrs << "The pool is overflowing..." << llendl ;
+ }
+}
+
+LLPrivateMemoryPool::LLMemoryChunk* LLPrivateMemoryPool::addChunk(S32 chunk_index)
+{
+ U32 preferred_size ;
+ U32 overhead ;
+ if(chunk_index < LARGE_ALLOCATION)
+ {
+ preferred_size = CHUNK_SIZE ; //4MB
+ overhead = LLMemoryChunk::getMaxOverhead(preferred_size, MIN_SLOT_SIZES[chunk_index],
+ MAX_SLOT_SIZES[chunk_index], MIN_BLOCK_SIZES[chunk_index], MAX_BLOCK_SIZES[chunk_index]) ;
+ }
+ else
+ {
+ preferred_size = LARGE_CHUNK_SIZE ; //16MB
+ overhead = LLMemoryChunk::getMaxOverhead(preferred_size, MIN_SLOT_SIZES[chunk_index],
+ MAX_SLOT_SIZES[chunk_index], MIN_BLOCK_SIZES[chunk_index], MAX_BLOCK_SIZES[chunk_index]) ;
+ }
+
+ checkSize(preferred_size + overhead) ;
+ mReservedPoolSize += preferred_size + overhead ;
+
+ char* buffer = (char*)malloc(preferred_size + overhead) ;
+ if(!buffer)
+ {
+ return NULL ;
+ }
+
+ LLMemoryChunk* chunk = new (buffer) LLMemoryChunk() ;
+ chunk->init(buffer, preferred_size + overhead, MIN_SLOT_SIZES[chunk_index],
+ MAX_SLOT_SIZES[chunk_index], MIN_BLOCK_SIZES[chunk_index], MAX_BLOCK_SIZES[chunk_index]) ;
+
+ //add to the tail of the linked list
+ {
+ if(!mChunkList[chunk_index])
+ {
+ mChunkList[chunk_index] = chunk ;
+ }
+ else
+ {
+ LLMemoryChunk* cur = mChunkList[chunk_index] ;
+ while(cur->mNext)
+ {
+ cur = cur->mNext ;
+ }
+ cur->mNext = chunk ;
+ chunk->mPrev = cur ;
+ }
+ }
+
+ //insert into the hash table
+ addToHashTable(chunk) ;
+
+ mNumOfChunks++;
+
+ return chunk ;
+}
+
+void LLPrivateMemoryPool::removeChunk(LLMemoryChunk* chunk)
+{
+ if(!chunk)
+ {
+ return ;
+ }
+
+ //remove from the linked list
+ for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++)
+ {
+ if(mChunkList[i] == chunk)
+ {
+ mChunkList[i] = chunk->mNext ;
+ }
+ }
+
+ if(chunk->mPrev)
+ {
+ chunk->mPrev->mNext = chunk->mNext ;
+ }
+ if(chunk->mNext)
+ {
+ chunk->mNext->mPrev = chunk->mPrev ;
+ }
+
+ //remove from the hash table
+ removeFromHashTable(chunk) ;
+
+ mNumOfChunks--;
+ mReservedPoolSize -= chunk->getBufferSize() ;
+
+ //release memory
+ free(chunk->getBuffer()) ;
+}
+
+U16 LLPrivateMemoryPool::findHashKey(const char* addr)
+{
+ return (((U32)addr) / CHUNK_SIZE) % mHashFactor ;
+}
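+
+//Note: chunks are CHUNK_SIZE (4MB) or larger and their addresses come from malloc(),
+//so a single chunk can straddle a 4MB boundary and map to more than one hash key.
+//For example (illustrative addresses): a 4MB chunk starting at 0x006FF000 has
+//start key (0x006FF000 / CHUNK_SIZE) % mHashFactor = 1 % mHashFactor, while its last
+//byte maps to 2 % mHashFactor; addToHashTable() therefore records the chunk under
+//every key in that range.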
+
+LLPrivateMemoryPool::LLMemoryChunk* LLPrivateMemoryPool::findChunk(const char* addr)
+{
+ U16 key = findHashKey(addr) ;
+ if(mChunkHashList.size() <= key)
+ {
+ return NULL ;
+ }
+
+ //check the hash value "key"
+ LLMemoryChunk* chunk = mChunkHashList[key] ;
+ while(chunk && !chunk->containsAddress(addr))
+ {
+ chunk = chunk->mHashNext ;
+ }
+
+ return chunk ;
+}
+
+void LLPrivateMemoryPool::addToHashTable(LLMemoryChunk* chunk)
+{
+ static const U16 HASH_FACTORS[] = {41, 83, 193, 317, 419, 523, 0xFFFF};
+
+ U16 i ;
+ if(mChunkHashList.empty())
+ {
+ mHashFactor = HASH_FACTORS[0] ;
+ rehash() ;
+ }
+
+ U16 start_key = findHashKey(chunk->getBuffer()) ;
+ U16 end_key = findHashKey(chunk->getBuffer() + chunk->getBufferSize() - 1) ;
+ bool need_rehash = false ;
+
+ if(mChunkHashList[start_key])
+ {
+ if(mChunkHashList[start_key] == chunk)
+ {
+ return; //already inserted.
+ }
+
+ llassert_always(!chunk->mHashNext) ;
+
+ chunk->mHashNext = mChunkHashList[start_key] ;
+ mChunkHashList[start_key] = chunk ;
+ }
+ else
+ {
+ mChunkHashList[start_key] = chunk ;
+ }
+ if(start_key == end_key)
+ {
+ return ; //done
+ }
+
+ if(!need_rehash)
+ {
+ if(mChunkHashList[end_key])
+ {
+			llassert_always(mChunkHashList[end_key] != chunk) ;
+
+ need_rehash = mChunkHashList[end_key]->mHashNext != NULL || mChunkHashList[end_key] == chunk->mHashNext;
+ if(!need_rehash)
+ {
+ mChunkHashList[end_key]->mHashNext = chunk ;
+ }
+ }
+ else
+ {
+ mChunkHashList[end_key] = chunk ;
+ }
+ }
+
+ if(!need_rehash)
+ {
+ if(end_key < start_key)
+ {
+ need_rehash = fillHashTable(start_key + 1, mHashFactor, chunk) ;
+ if(!need_rehash)
+ {
+ need_rehash = fillHashTable(0, end_key, chunk) ;
+ }
+ }
+ else
+ {
+ need_rehash = fillHashTable(start_key + 1, end_key, chunk) ;
+ }
+ }
+
+ if(need_rehash)
+ {
+ i = 0 ;
+ while(HASH_FACTORS[i] <= mHashFactor) i++;
+
+ mHashFactor = HASH_FACTORS[i] ;
+		llassert_always(mHashFactor != 0xFFFF) ;//stop point to prevent endless recursion
+
+ rehash() ;
+ }
+}
+
+void LLPrivateMemoryPool::removeFromHashTable(LLMemoryChunk* chunk)
+{
+ U16 start_key = findHashKey(chunk->getBuffer()) ;
+ U16 end_key = findHashKey(chunk->getBuffer() + chunk->getBufferSize() - 1) ;
+
+ mChunkHashList[start_key] = chunk->mHashNext ;
+ chunk->mHashNext = NULL ;
+ if(start_key == end_key)
+ {
+ return ; //done
+ }
+
+ if(mChunkHashList[end_key] != chunk)
+ {
+ mChunkHashList[end_key]->mHashNext = NULL ;
+ }
+ else
+ {
+ mChunkHashList[end_key] = NULL ;
+ }
+
+ if(end_key < start_key)
+ {
+ for(U16 i = start_key + 1 ; i < mHashFactor; i++)
+ {
+ mChunkHashList[i] = NULL ;
+ }
+ for(U16 i = 0 ; i < end_key; i++)
+ {
+ mChunkHashList[i] = NULL ;
+ }
+ }
+ else
+ {
+ for(U16 i = start_key + 1 ; i < end_key; i++)
+ {
+ mChunkHashList[i] = NULL ;
+ }
+ }
+}
+
+void LLPrivateMemoryPool::rehash()
+{
+ llinfos << "new hash factor: " << mHashFactor << llendl ;
+
+ mChunkHashList.clear() ;
+ mChunkHashList.resize(mHashFactor, NULL) ;
+
+ LLMemoryChunk* chunk ;
+ for(U16 i = 0 ; i < SUPER_ALLOCATION ; i++)
+ {
+ chunk = mChunkList[i] ;
+ while(chunk)
+ {
+ chunk->mHashNext = NULL ;
+ addToHashTable(chunk) ;
+ chunk = chunk->mNext ;
+ }
+ }
+}
+
+bool LLPrivateMemoryPool::fillHashTable(U16 start, U16 end, LLMemoryChunk* chunk)
+{
+ for(U16 i = start; i < end; i++)
+ {
+ if(mChunkHashList[i]) //the slot is occupied.
+ {
+ llassert_always(mChunkHashList[i] != chunk) ;
+ return true ;
+ }
+ else
+ {
+ mChunkHashList[i] = chunk ;
+ }
+ }
+
+ return false ;
+}
+
+//--------------------------------------------------------------------
+//class LLPrivateMemoryPoolManager
+//--------------------------------------------------------------------
+LLPrivateMemoryPoolManager* LLPrivateMemoryPoolManager::sInstance = NULL ;
+
+LLPrivateMemoryPoolManager::LLPrivateMemoryPoolManager()
+{
+ mPoolList.resize(LLPrivateMemoryPool::MAX_TYPES) ;
+
+ for(S32 i = 0 ; i < LLPrivateMemoryPool::MAX_TYPES; i++)
+ {
+ mPoolList[i] = NULL ;
+ }
+}
+
+LLPrivateMemoryPoolManager::~LLPrivateMemoryPoolManager()
+{
+
+#if __DEBUG_PRIVATE_MEM__
+ if(!sMemAllocationTracker.empty())
+ {
+		llwarns << "There is a potential memory leak. The unfreed memory blocks were allocated from: " << llendl ;
+
+ S32 k = 0 ;
+ for(mem_allocation_info_t::iterator iter = sMemAllocationTracker.begin() ; iter != sMemAllocationTracker.end() ; ++iter)
+ {
+ llinfos << k++ << ", " << iter->second << llendl ;
+ }
+ sMemAllocationTracker.clear() ;
+ }
+#endif
+
+#if 0
+ //all private pools should be released by their owners before reaching here.
+ for(S32 i = 0 ; i < LLPrivateMemoryPool::MAX_TYPES; i++)
+ {
+ llassert_always(!mPoolList[i]) ;
+ }
+ mPoolList.clear() ;
+
+#else
+ //forcefully release all memory
+ for(S32 i = 0 ; i < LLPrivateMemoryPool::MAX_TYPES; i++)
+ {
+ if(mPoolList[i])
+ {
+ delete mPoolList[i] ;
+ mPoolList[i] = NULL ;
+ }
+ }
+ mPoolList.clear() ;
+#endif
+}
+
+//static
+LLPrivateMemoryPoolManager* LLPrivateMemoryPoolManager::getInstance()
+{
+ if(!sInstance)
+ {
+ sInstance = new LLPrivateMemoryPoolManager() ;
+ }
+ return sInstance ;
+}
+
+//static
+void LLPrivateMemoryPoolManager::destroyClass()
+{
+ if(sInstance)
+ {
+ delete sInstance ;
+ sInstance = NULL ;
+ }
+}
+
+LLPrivateMemoryPool* LLPrivateMemoryPoolManager::newPool(S32 type)
+{
+ if(!mPoolList[type])
+ {
+ mPoolList[type] = new LLPrivateMemoryPool(type) ;
+ }
+
+ return mPoolList[type] ;
+}
+
+void LLPrivateMemoryPoolManager::deletePool(LLPrivateMemoryPool* pool)
+{
+ if(pool->isEmpty())
+ {
+ mPoolList[pool->getType()] = NULL ;
+ delete pool;
+ }
+}
+
+//debug
+void LLPrivateMemoryPoolManager::updateStatistics()
+{
+ mTotalReservedSize = 0 ;
+ mTotalAllocatedSize = 0 ;
+
+ for(U32 i = 0; i < mPoolList.size(); i++)
+ {
+ if(mPoolList[i])
+ {
+ mTotalReservedSize += mPoolList[i]->getTotalReservedSize() ;
+ mTotalAllocatedSize += mPoolList[i]->getTotalAllocatedSize() ;
+ }
+ }
+}
+
+#if __DEBUG_PRIVATE_MEM__
+//static
+char* LLPrivateMemoryPoolManager::allocate(LLPrivateMemoryPool* poolp, U32 size, const char* function, const int line)
+{
+ char* p ;
+
+ if(!poolp)
+ {
+ p = (char*)malloc(size) ;
+ }
+ else
+ {
+ p = poolp->allocate(size) ;
+ }
+
+ if(p)
+ {
+ char num[16] ;
+ sprintf(num, " line: %d ", line) ;
+ std::string str(function) ;
+ str += num;
+
+ sMemAllocationTracker[p] = str ;
+ }
+
+ return p ;
+}
+#else
+//static
+char* LLPrivateMemoryPoolManager::allocate(LLPrivateMemoryPool* poolp, U32 size)
+{
+#if _USE_PRIVATE_MEM_POOL_
+ if(!poolp)
+ {
+ return (char*)malloc(size) ;
+ }
+ else
+ {
+ return poolp->allocate(size) ;
+ }
+#else
+ return (char*)malloc(size) ;
+#endif
+}
+#endif
+
+//static
+void LLPrivateMemoryPoolManager::freeMem(LLPrivateMemoryPool* poolp, void* addr)
+{
+ if(!addr)
+ {
+ return ;
+ }
+
+#if __DEBUG_PRIVATE_MEM__
+ sMemAllocationTracker.erase((char*)addr) ;
+#endif
+
+#if _USE_PRIVATE_MEM_POOL_
+ if(poolp)
+ {
+ poolp->freeMem(addr) ;
+ }
+ else
+ {
+ free(addr) ;
+ }
+#else
+ free(addr) ;
+#endif
+}
+
+//--------------------------------------------------------------------
+//class LLPrivateMemoryPoolTester
+//--------------------------------------------------------------------
+#if 0
+LLPrivateMemoryPoolTester* LLPrivateMemoryPoolTester::sInstance = NULL ;
+LLPrivateMemoryPool* LLPrivateMemoryPoolTester::sPool = NULL ;
+LLPrivateMemoryPoolTester::LLPrivateMemoryPoolTester()
+{
+}
+
+LLPrivateMemoryPoolTester::~LLPrivateMemoryPoolTester()
+{
+}
+
+//static
+LLPrivateMemoryPoolTester* LLPrivateMemoryPoolTester::getInstance()
+{
+ if(!sInstance)
+ {
+ sInstance = ::new LLPrivateMemoryPoolTester() ;
+ }
+ return sInstance ;
+}
+
+//static
+void LLPrivateMemoryPoolTester::destroy()
+{
+ if(sInstance)
+ {
+ ::delete sInstance ;
+ sInstance = NULL ;
+ }
+
+ if(sPool)
+ {
+ LLPrivateMemoryPoolManager::getInstance()->deletePool(sPool) ;
+ sPool = NULL ;
+ }
+}
+
+void LLPrivateMemoryPoolTester::run(S32 type)
+{
+ if(sPool)
+ {
+ LLPrivateMemoryPoolManager::getInstance()->deletePool(sPool) ;
+ }
+ sPool = LLPrivateMemoryPoolManager::getInstance()->newPool(type) ;
+
+ //run the test
+ correctnessTest() ;
+ performanceTest() ;
+ //fragmentationtest() ;
+
+ //release pool.
+ LLPrivateMemoryPoolManager::getInstance()->deletePool(sPool) ;
+ sPool = NULL ;
+}
+
+void LLPrivateMemoryPoolTester::test(U32 min_size, U32 max_size, U32 stride, U32 times,
+ bool random_deletion, bool output_statistics)
+{
+ U32 levels = (max_size - min_size) / stride + 1 ;
+ char*** p ;
+ U32 i, j ;
+ U32 total_allocated_size = 0 ;
+
+ //allocate space for p ;
+ if(!(p = ::new char**[times]) || !(*p = ::new char*[times * levels]))
+ {
+ llerrs << "memory initialization for p failed" << llendl ;
+ }
+
+ //init
+ for(i = 0 ; i < times; i++)
+ {
+ p[i] = *p + i * levels ;
+ for(j = 0 ; j < levels; j++)
+ {
+ p[i][j] = NULL ;
+ }
+ }
+
+ //allocation
+ U32 size ;
+ for(i = 0 ; i < times ; i++)
+ {
+ for(j = 0 ; j < levels; j++)
+ {
+ size = min_size + j * stride ;
+ p[i][j] = ALLOCATE_MEM(sPool, size) ;
+
+ total_allocated_size+= size ;
+
+ *(U32*)p[i][j] = i ;
+ *((U32*)p[i][j] + 1) = j ;
+ //p[i][j][size - 1] = '\0' ; //access the last element to verify the success of the allocation.
+
+ //randomly release memory
+ if(random_deletion)
+ {
+ S32 k = rand() % levels ;
+
+ if(p[i][k])
+ {
+ llassert_always(*(U32*)p[i][k] == i && *((U32*)p[i][k] + 1) == k) ;
+ FREE_MEM(sPool, p[i][k]) ;
+ total_allocated_size -= min_size + k * stride ;
+ p[i][k] = NULL ;
+ }
+ }
+ }
+ }
+
+ //output pool allocation statistics
+ if(output_statistics)
+ {
+ }
+
+ //release all memory allocations
+ for(i = 0 ; i < times; i++)
+ {
+ for(j = 0 ; j < levels; j++)
+ {
+ if(p[i][j])
+ {
+ llassert_always(*(U32*)p[i][j] == i && *((U32*)p[i][j] + 1) == j) ;
+ FREE_MEM(sPool, p[i][j]) ;
+ total_allocated_size -= min_size + j * stride ;
+ p[i][j] = NULL ;
+ }
+ }
+ }
+
+ ::delete[] *p ;
+ ::delete[] p ;
+}
+
+void LLPrivateMemoryPoolTester::testAndTime(U32 size, U32 times)
+{
+ LLTimer timer ;
+
+ llinfos << " -**********************- " << llendl ;
+ llinfos << "test size: " << size << " test times: " << times << llendl ;
+
+ timer.reset() ;
+ char** p = new char*[times] ;
+
+ //using the customized memory pool
+ //allocation
+ for(U32 i = 0 ; i < times; i++)
+ {
+ p[i] = ALLOCATE_MEM(sPool, size) ;
+ if(!p[i])
+ {
+ llerrs << "allocation failed" << llendl ;
+ }
+ }
+ //de-allocation
+ for(U32 i = 0 ; i < times; i++)
+ {
+ FREE_MEM(sPool, p[i]) ;
+ p[i] = NULL ;
+ }
+ llinfos << "time spent using customized memory pool: " << timer.getElapsedTimeF32() << llendl ;
+
+ timer.reset() ;
+
+ //using the standard allocator/de-allocator:
+ //allocation
+ for(U32 i = 0 ; i < times; i++)
+ {
+ p[i] = ::new char[size] ;
+ if(!p[i])
+ {
+ llerrs << "allocation failed" << llendl ;
+ }
+ }
+ //de-allocation
+ for(U32 i = 0 ; i < times; i++)
+ {
+ ::delete[] p[i] ;
+ p[i] = NULL ;
+ }
+ llinfos << "time spent using standard allocator/de-allocator: " << timer.getElapsedTimeF32() << llendl ;
+
+ delete[] p;
+}
+
+void LLPrivateMemoryPoolTester::correctnessTest()
+{
+	//try many different allocation sizes and all kinds of edge cases; access the allocated memory
+	//to verify that each allocation is correct.
+
+ //edge case
+ char* p = ALLOCATE_MEM(sPool, 0) ;
+ FREE_MEM(sPool, p) ;
+
+ //small sized
+ // [8 bytes, 2KB), each asks for 256 allocations and deallocations
+ test(8, 2040, 8, 256, true, true) ;
+
+ //medium sized
+ //[2KB, 512KB), each asks for 16 allocations and deallocations
+ test(2048, 512 * 1024 - 2048, 2048, 16, true, true) ;
+
+ //large sized
+	//[512KB, 4MB], each asks for 6 allocations and deallocations
+ test(512 * 1024, 4 * 1024 * 1024, 64 * 1024, 6, true, true) ;
+}
+
+void LLPrivateMemoryPoolTester::performanceTest()
+{
+	U32 test_size[3] = {768, 3 * 1024, 3 * 1024 * 1024};
+
+ //small sized
+ testAndTime(test_size[0], 8) ;
+
+ //medium sized
+ testAndTime(test_size[1], 8) ;
+
+ //large sized
+ testAndTime(test_size[2], 8) ;
+}
+
+void LLPrivateMemoryPoolTester::fragmentationtest()
+{
+	//for internal fragmentation statistics:
+	//every time a new chunk is requested during the correctness and performance tests,
+	//print out the chunk usage statistics.
+}
+#endif
+//--------------------------------------------------------------------
diff --git a/indra/llcommon/llmemory.h b/indra/llcommon/llmemory.h
index 3bd1403576..26488423a3 100644
--- a/indra/llcommon/llmemory.h
+++ b/indra/llcommon/llmemory.h
@@ -27,7 +27,6 @@
#define LLMEMORY_H
#include "llmemtype.h"
-
#if LL_DEBUG
inline void* ll_aligned_malloc( size_t size, int align )
{
@@ -105,6 +104,10 @@ inline void ll_aligned_free_32(void *p)
#define ll_aligned_free_32 free
#endif // LL_DEBUG
+#ifndef __DEBUG_PRIVATE_MEM__
+#define __DEBUG_PRIVATE_MEM__ 0
+#endif
+
class LL_COMMON_API LLMemory
{
public:
@@ -115,8 +118,24 @@ public:
// Return value is zero if not known.
static U64 getCurrentRSS();
static U32 getWorkingSetSize();
+ static void* tryToAlloc(void* address, U32 size);
+ static void initMaxHeapSizeGB(F32 max_heap_size_gb, BOOL prevent_heap_failure);
+ static void updateMemoryInfo() ;
+ static void logMemoryInfo(BOOL update = FALSE);
+ static S32 isMemoryPoolLow();
+
+ static U32 getAvailableMemKB() ;
+ static U32 getMaxMemKB() ;
+ static U32 getAllocatedMemKB() ;
private:
static char* reserveMem;
+ static U32 sAvailPhysicalMemInKB ;
+ static U32 sMaxPhysicalMemInKB ;
+ static U32 sAllocatedMemInKB;
+ static U32 sAllocatedPageSizeInKB ;
+
+ static U32 sMaxHeapSizeInKB;
+ static BOOL sEnableMemoryFailurePrevention;
};
//----------------------------------------------------------------------------
@@ -163,6 +182,308 @@ private:
//----------------------------------------------------------------------------
+
+//
+//class LLPrivateMemoryPool defines a private memory pool for an application to use, so the application does not
+//need to access the heap directly fro each memory allocation. Throught this, the allocation speed is faster,
+//and reduces virtaul address space gragmentation problem.
+//Note: this class is thread-safe by passing true to the constructor function. However, you do not need to do this unless
+//you are sure the memory allocation and de-allocation will happen in different threads. To make the pool thread safe
+//increases allocation and deallocation cost.
+//
+class LL_COMMON_API LLPrivateMemoryPool
+{
+ friend class LLPrivateMemoryPoolManager ;
+
+public:
+	class LL_COMMON_API LLMemoryBlock //each block is divided into slots uniformly
+ {
+ public:
+ LLMemoryBlock() ;
+ ~LLMemoryBlock() ;
+
+ void init(char* buffer, U32 buffer_size, U32 slot_size) ;
+ void setBuffer(char* buffer, U32 buffer_size) ;
+
+ char* allocate() ;
+ void freeMem(void* addr) ;
+
+ bool empty() {return !mAllocatedSlots;}
+ bool isFull() {return mAllocatedSlots == mTotalSlots;}
+ bool isFree() {return !mTotalSlots;}
+
+ U32 getSlotSize()const {return mSlotSize;}
+ U32 getTotalSlots()const {return mTotalSlots;}
+ U32 getBufferSize()const {return mBufferSize;}
+ char* getBuffer() const {return mBuffer;}
+
+ //debug use
+ void resetBitMap() ;
+ private:
+ char* mBuffer;
+ U32 mSlotSize ; //when the block is not initialized, it is the buffer size.
+ U32 mBufferSize ;
+ U32 mUsageBits ;
+ U8 mTotalSlots ;
+ U8 mAllocatedSlots ;
+ U8 mDummySize ; //size of extra bytes reserved for mUsageBits.
+
+ public:
+ LLMemoryBlock* mPrev ;
+ LLMemoryBlock* mNext ;
+ LLMemoryBlock* mSelf ;
+
+ struct CompareAddress
+ {
+ bool operator()(const LLMemoryBlock* const& lhs, const LLMemoryBlock* const& rhs)
+ {
+ return (U32)lhs->getBuffer() < (U32)rhs->getBuffer();
+ }
+ };
+ };
+
+ class LL_COMMON_API LLMemoryChunk //is divided into memory blocks.
+ {
+ public:
+ LLMemoryChunk() ;
+ ~LLMemoryChunk() ;
+
+ void init(char* buffer, U32 buffer_size, U32 min_slot_size, U32 max_slot_size, U32 min_block_size, U32 max_block_size) ;
+ void setBuffer(char* buffer, U32 buffer_size) ;
+
+ bool empty() ;
+
+ char* allocate(U32 size) ;
+ void freeMem(void* addr) ;
+
+ char* getBuffer() const {return mBuffer;}
+ U32 getBufferSize() const {return mBufferSize;}
+ U32 getAllocatedSize() const {return mAlloatedSize;}
+
+ bool containsAddress(const char* addr) const;
+
+ static U32 getMaxOverhead(U32 data_buffer_size, U32 min_slot_size,
+ U32 max_slot_size, U32 min_block_size, U32 max_block_size) ;
+
+ void dump() ;
+
+ private:
+ U32 getPageIndex(U32 addr) ;
+ U32 getBlockLevel(U32 size) ;
+ U16 getPageLevel(U32 size) ;
+ LLMemoryBlock* addBlock(U32 blk_idx) ;
+ void popAvailBlockList(U32 blk_idx) ;
+ void addToFreeSpace(LLMemoryBlock* blk) ;
+ void removeFromFreeSpace(LLMemoryBlock* blk) ;
+ void removeBlock(LLMemoryBlock* blk) ;
+ void addToAvailBlockList(LLMemoryBlock* blk) ;
+ U32 calcBlockSize(U32 slot_size);
+ LLMemoryBlock* createNewBlock(LLMemoryBlock* blk, U32 buffer_size, U32 slot_size, U32 blk_idx) ;
+
+ private:
+		LLMemoryBlock** mAvailBlockList ;//one list per slot-size level (multiples of mMinSlotSize)
+ LLMemoryBlock** mFreeSpaceList;
+ LLMemoryBlock* mBlocks ; //index of blocks by address.
+
+ char* mBuffer ;
+ U32 mBufferSize ;
+ char* mDataBuffer ;
+ char* mMetaBuffer ;
+ U32 mMinBlockSize ;
+ U32 mMinSlotSize ;
+ U32 mMaxSlotSize ;
+ U32 mAlloatedSize ;
+ U16 mBlockLevels;
+ U16 mPartitionLevels;
+
+ public:
+ //form a linked list
+ LLMemoryChunk* mNext ;
+ LLMemoryChunk* mPrev ;
+
+ LLMemoryChunk* mHashNext ;
+ } ;
+
+private:
+ LLPrivateMemoryPool(S32 type) ;
+ ~LLPrivateMemoryPool() ;
+
+ char *allocate(U32 size) ;
+ void freeMem(void* addr) ;
+
+ void dump() ;
+ U32 getTotalAllocatedSize() ;
+ U32 getTotalReservedSize() {return mReservedPoolSize;}
+ S32 getType() const {return mType; }
+ bool isEmpty() const {return !mNumOfChunks; }
+
+private:
+ void lock() ;
+ void unlock() ;
+ S32 getChunkIndex(U32 size) ;
+ LLMemoryChunk* addChunk(S32 chunk_index) ;
+ void checkSize(U32 asked_size) ;
+ void removeChunk(LLMemoryChunk* chunk) ;
+ U16 findHashKey(const char* addr);
+ void addToHashTable(LLMemoryChunk* chunk) ;
+ void removeFromHashTable(LLMemoryChunk* chunk) ;
+ void rehash() ;
+ bool fillHashTable(U16 start, U16 end, LLMemoryChunk* chunk) ;
+ LLMemoryChunk* findChunk(const char* addr) ;
+
+ void destroyPool() ;
+
+public:
+ enum
+ {
+ SMALL_ALLOCATION = 0, //from 8 bytes to 2KB(exclusive), page size 2KB, max chunk size is 4MB.
+ MEDIUM_ALLOCATION, //from 2KB to 512KB(exclusive), page size 32KB, max chunk size 4MB
+ LARGE_ALLOCATION, //from 512KB to 4MB(inclusive), page size 64KB, max chunk size 16MB
+ SUPER_ALLOCATION //allocation larger than 4MB.
+ };
+
+ enum
+ {
+		STATIC = 0 ,     //static pool (each allocation stays for a long time) without threading support
+		VOLATILE,        //volatile pool (each allocation stays for a very short time) without threading support
+ STATIC_THREADED, //static pool with threading support
+ VOLATILE_THREADED, //volatile pool with threading support
+ MAX_TYPES
+ }; //pool types
+
+private:
+ LLMutex* mMutexp ;
+ U32 mMaxPoolSize;
+ U32 mReservedPoolSize ;
+
+ LLMemoryChunk* mChunkList[SUPER_ALLOCATION] ; //all memory chunks reserved by this pool, sorted by address
+ std::vector<LLMemoryChunk*> mChunkHashList ;
+ U16 mNumOfChunks ;
+ U16 mHashFactor ;
+
+ S32 mType ;
+};
+
+class LL_COMMON_API LLPrivateMemoryPoolManager
+{
+private:
+ LLPrivateMemoryPoolManager() ;
+ ~LLPrivateMemoryPoolManager() ;
+
+public:
+ static LLPrivateMemoryPoolManager* getInstance() ;
+ static void destroyClass() ;
+
+ LLPrivateMemoryPool* newPool(S32 type) ;
+ void deletePool(LLPrivateMemoryPool* pool) ;
+
+private:
+ static LLPrivateMemoryPoolManager* sInstance ;
+ std::vector<LLPrivateMemoryPool*> mPoolList ;
+
+public:
+ //debug and statistics info.
+ void updateStatistics() ;
+
+ U32 mTotalReservedSize ;
+ U32 mTotalAllocatedSize ;
+
+public:
+#if __DEBUG_PRIVATE_MEM__
+ static char* allocate(LLPrivateMemoryPool* poolp, U32 size, const char* function, const int line) ;
+
+ typedef std::map<char*, std::string> mem_allocation_info_t ;
+ static mem_allocation_info_t sMemAllocationTracker;
+#else
+ static char* allocate(LLPrivateMemoryPool* poolp, U32 size) ;
+#endif
+ static void freeMem(LLPrivateMemoryPool* poolp, void* addr) ;
+};
+
+//-------------------------------------------------------------------------------------
+#if __DEBUG_PRIVATE_MEM__
+#define ALLOCATE_MEM(poolp, size) LLPrivateMemoryPoolManager::allocate((poolp), (size), __FUNCTION__, __LINE__)
+#else
+#define ALLOCATE_MEM(poolp, size) LLPrivateMemoryPoolManager::allocate((poolp), (size))
+#endif
+#define FREE_MEM(poolp, addr) LLPrivateMemoryPoolManager::freeMem((poolp), (addr))
+//-------------------------------------------------------------------------------------
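+
+//Typical usage (a sketch; 'poolp' and 'data' are illustrative names, and error
+//handling is omitted):
+//    LLPrivateMemoryPool* poolp =
+//        LLPrivateMemoryPoolManager::getInstance()->newPool(LLPrivateMemoryPool::STATIC) ;
+//    char* data = ALLOCATE_MEM(poolp, 1024) ;
+//    // ... use data ...
+//    FREE_MEM(poolp, data) ;
+//    LLPrivateMemoryPoolManager::getInstance()->deletePool(poolp) ;
+//Passing a NULL pool makes both macros fall back to malloc()/free().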
+
+//
+//the singleton below is used to test the private memory pool.
+//
+#if 0
+class LL_COMMON_API LLPrivateMemoryPoolTester
+{
+private:
+ LLPrivateMemoryPoolTester() ;
+ ~LLPrivateMemoryPoolTester() ;
+
+public:
+ static LLPrivateMemoryPoolTester* getInstance() ;
+ static void destroy() ;
+
+ void run(S32 type) ;
+
+private:
+ void correctnessTest() ;
+ void performanceTest() ;
+ void fragmentationtest() ;
+
+ void test(U32 min_size, U32 max_size, U32 stride, U32 times, bool random_deletion, bool output_statistics) ;
+ void testAndTime(U32 size, U32 times) ;
+
+#if 0
+public:
+ void* operator new(size_t size)
+ {
+ return (void*)sPool->allocate(size) ;
+ }
+ void operator delete(void* addr)
+ {
+ sPool->freeMem(addr) ;
+ }
+ void* operator new[](size_t size)
+ {
+ return (void*)sPool->allocate(size) ;
+ }
+ void operator delete[](void* addr)
+ {
+ sPool->freeMem(addr) ;
+ }
+#endif
+
+private:
+ static LLPrivateMemoryPoolTester* sInstance;
+ static LLPrivateMemoryPool* sPool ;
+ static LLPrivateMemoryPool* sThreadedPool ;
+};
+#if 0
+//static
+void* LLPrivateMemoryPoolTester::operator new(size_t size)
+{
+ return (void*)sPool->allocate(size) ;
+}
+
+//static
+void LLPrivateMemoryPoolTester::operator delete(void* addr)
+{
+ sPool->free(addr) ;
+}
+
+//static
+void* LLPrivateMemoryPoolTester::operator new[](size_t size)
+{
+ return (void*)sPool->allocate(size) ;
+}
+
+//static
+void LLPrivateMemoryPoolTester::operator delete[](void* addr)
+{
+ sPool->free(addr) ;
+}
+#endif
+#endif
// LLRefCount moved to llrefcount.h
// LLPointer moved to llpointer.h
diff --git a/indra/llcommon/llscopedvolatileaprpool.h b/indra/llcommon/llscopedvolatileaprpool.h
new file mode 100644
index 0000000000..dbaf4edcad
--- /dev/null
+++ b/indra/llcommon/llscopedvolatileaprpool.h
@@ -0,0 +1,52 @@
+/**
+ * @file llscopedvolatileaprpool.h
+ * @brief Implementation of LLScopedVolatileAPRPool
+ *
+ * $LicenseInfo:firstyear=2010&license=viewerlgpl$
+ * Second Life Viewer Source Code
+ * Copyright (C) 2011, Linden Research, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License only.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
+ * $/LicenseInfo$
+ */
+
+#ifndef LL_LLSCOPEDVOLATILEAPRPOOL_H
+#define LL_LLSCOPEDVOLATILEAPRPOOL_H
+
+#include "llthread.h"
+
+/** Scoped volatile memory pool.
+ *
+ * As the LLVolatileAPRPool should never keep allocations very
+ * long, its most common use is for allocations with a lifetime
+ * equal to its scope.
+ *
+ * This is a convenience class that makes that just a little easier to type.
+ */
+class LL_COMMON_API LLScopedVolatileAPRPool
+{
+private:
+ LLVolatileAPRPool& mPool;
+ apr_pool_t* mScopedAPRpool; // The use of apr_pool_t is OK here.
+public:
+ LLScopedVolatileAPRPool() : mPool(LLThreadLocalData::tldata().mVolatileAPRPool), mScopedAPRpool(mPool.getVolatileAPRPool()) { }
+ ~LLScopedVolatileAPRPool() { mPool.clearVolatileAPRPool(); }
+	//! @attention Only use this to pass the underlying pointer to a libapr-1 function that requires it.
+ operator apr_pool_t*() const { return mScopedAPRpool; } // The use of apr_pool_t is OK here.
+};
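+
+//Example (a sketch; the file name and open flags are illustrative):
+//    LLScopedVolatileAPRPool scoped_pool;
+//    apr_file_t* filep = NULL;
+//    apr_file_open(&filep, "some_file", APR_READ, APR_OS_DEFAULT, scoped_pool);
+//The implicit conversion passes the underlying apr_pool_t* to libapr, and the
+//volatile pool is released again when 'scoped_pool' goes out of scope.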
+
+#endif
diff --git a/indra/llcommon/llthread.cpp b/indra/llcommon/llthread.cpp
index d9400fb5b3..bdde1b5c48 100644
--- a/indra/llcommon/llthread.cpp
+++ b/indra/llcommon/llthread.cpp
@@ -36,6 +36,12 @@
#include <sched.h>
#endif
+#if !LL_DARWIN
+U32 ll_thread_local local_thread_ID = 0;
+#endif
+
+U32 LLThread::sIDIter = 0;
+
//----------------------------------------------------------------------------
// Usage:
// void run_func(LLThread* thread)
@@ -56,12 +62,6 @@
//
//----------------------------------------------------------------------------
-#if !LL_DARWIN
-U32 ll_thread_local sThreadID = 0;
-#endif
-
-U32 LLThread::sIDIter = 0;
-
LL_COMMON_API void assert_main_thread()
{
static U32 s_thread_id = LLThread::currentID();
@@ -79,9 +79,12 @@ void *APR_THREAD_FUNC LLThread::staticRun(apr_thread_t *apr_threadp, void *datap
LLThread *threadp = (LLThread *)datap;
#if !LL_DARWIN
- sThreadID = threadp->mID;
+ local_thread_ID = threadp->mID;
#endif
+ // Create a thread local data.
+ LLThreadLocalData::create(threadp);
+
// Run the user supplied function
threadp->run();
@@ -94,40 +97,22 @@ void *APR_THREAD_FUNC LLThread::staticRun(apr_thread_t *apr_threadp, void *datap
}
-LLThread::LLThread(const std::string& name, apr_pool_t *poolp) :
- mPaused(FALSE),
+LLThread::LLThread(std::string const& name) :
+ mPaused(false),
mName(name),
mAPRThreadp(NULL),
- mStatus(STOPPED)
+ mStatus(STOPPED),
+ mThreadLocalData(NULL)
{
- mID = ++sIDIter;
+	mID = ++sIDIter; //flaw: assumes this is called only in the main thread!
- // Thread creation probably CAN be paranoid about APR being initialized, if necessary
- if (poolp)
- {
- mIsLocalPool = FALSE;
- mAPRPoolp = poolp;
- }
- else
- {
- mIsLocalPool = TRUE;
- apr_pool_create(&mAPRPoolp, NULL); // Create a subpool for this thread
- }
- mRunCondition = new LLCondition(mAPRPoolp);
-
- mLocalAPRFilePoolp = NULL ;
+ mRunCondition = new LLCondition;
}
LLThread::~LLThread()
{
shutdown();
-
- if(mLocalAPRFilePoolp)
- {
- delete mLocalAPRFilePoolp ;
- mLocalAPRFilePoolp = NULL ;
- }
}
void LLThread::shutdown()
@@ -164,7 +149,7 @@ void LLThread::shutdown()
if (!isStopped())
{
// This thread just wouldn't stop, even though we gave it time
- //llwarns << "LLThread::~LLThread() exiting thread before clean exit!" << llendl;
+ //llwarns << "LLThread::shutdown() exiting thread before clean exit!" << llendl;
// Put a stake in its heart.
apr_thread_exit(mAPRThreadp, -1);
return;
@@ -174,15 +159,8 @@ void LLThread::shutdown()
delete mRunCondition;
mRunCondition = 0;
-
- if (mIsLocalPool && mAPRPoolp)
- {
- apr_pool_destroy(mAPRPoolp);
- mAPRPoolp = 0;
- }
}
-
void LLThread::start()
{
llassert(isStopped());
@@ -191,7 +169,7 @@ void LLThread::start()
mStatus = RUNNING;
apr_status_t status =
- apr_thread_create(&mAPRThreadp, NULL, staticRun, (void *)this, mAPRPoolp);
+ apr_thread_create(&mAPRThreadp, NULL, staticRun, (void *)this, tldata().mRootPool());
if(status == APR_SUCCESS)
{
@@ -216,7 +194,7 @@ void LLThread::pause()
if (!mPaused)
{
// this will cause the thread to stop execution as soon as checkPause() is called
- mPaused = 1; // Does not need to be atomic since this is only set/unset from the main thread
+ mPaused = true; // Does not need to be atomic since this is only set/unset from the main thread
}
}
@@ -224,7 +202,7 @@ void LLThread::unpause()
{
if (mPaused)
{
- mPaused = 0;
+ mPaused = false;
}
wake(); // wake up the thread if necessary
@@ -301,115 +279,76 @@ void LLThread::wakeLocked()
}
}
-//============================================================================
-
-LLMutex::LLMutex(apr_pool_t *poolp) :
- mAPRMutexp(NULL), mCount(0), mLockingThread(NO_THREAD)
-{
- //if (poolp)
- //{
- // mIsLocalPool = FALSE;
- // mAPRPoolp = poolp;
- //}
- //else
- {
- mIsLocalPool = TRUE;
- apr_pool_create(&mAPRPoolp, NULL); // Create a subpool for this thread
- }
- apr_thread_mutex_create(&mAPRMutexp, APR_THREAD_MUTEX_UNNESTED, mAPRPoolp);
-}
+#ifdef SHOW_ASSERT
+// This allows the use of llassert(is_main_thread()) to assert that the current thread is the main thread.
+static apr_os_thread_t main_thread_id;
+LL_COMMON_API bool is_main_thread(void) { return apr_os_thread_equal(main_thread_id, apr_os_thread_current()); }
+#endif
+// The thread private handle to access the LLThreadLocalData instance.
+apr_threadkey_t* LLThreadLocalData::sThreadLocalDataKey;
-LLMutex::~LLMutex()
+//static
+void LLThreadLocalData::init(void)
{
-#if MUTEX_DEBUG
- llassert_always(!isLocked()); // better not be locked!
-#endif
- apr_thread_mutex_destroy(mAPRMutexp);
- mAPRMutexp = NULL;
- if (mIsLocalPool)
+ // Only do this once.
+ if (sThreadLocalDataKey)
{
- apr_pool_destroy(mAPRPoolp);
+ return;
}
-}
+ apr_status_t status = apr_threadkey_private_create(&sThreadLocalDataKey, &LLThreadLocalData::destroy, LLAPRRootPool::get()());
+ ll_apr_assert_status(status); // Or out of memory, or system-imposed limit on the
+ // total number of keys per process {PTHREAD_KEYS_MAX}
+ // has been exceeded.
-void LLMutex::lock()
-{
-#if LL_DARWIN
- if (mLockingThread == LLThread::currentID())
-#else
- if (mLockingThread == sThreadID)
-#endif
- { //redundant lock
- mCount++;
- return;
- }
-
- apr_thread_mutex_lock(mAPRMutexp);
-
-#if MUTEX_DEBUG
- // Have to have the lock before we can access the debug info
- U32 id = LLThread::currentID();
- if (mIsLocked[id] != FALSE)
- llerrs << "Already locked in Thread: " << id << llendl;
- mIsLocked[id] = TRUE;
-#endif
+ // Create the thread-local data for the main thread (this function is called by the main thread).
+ LLThreadLocalData::create(NULL);
-#if LL_DARWIN
- mLockingThread = LLThread::currentID();
-#else
- mLockingThread = sThreadID;
+#ifdef SHOW_ASSERT
+ // This function is called by the main thread.
+ main_thread_id = apr_os_thread_current();
#endif
}
-void LLMutex::unlock()
+// This is called once for every thread when the thread is destructed.
+//static
+void LLThreadLocalData::destroy(void* thread_local_data)
{
- if (mCount > 0)
- { //not the root unlock
- mCount--;
- return;
- }
-
-#if MUTEX_DEBUG
- // Access the debug info while we have the lock
- U32 id = LLThread::currentID();
- if (mIsLocked[id] != TRUE)
- llerrs << "Not locked in Thread: " << id << llendl;
- mIsLocked[id] = FALSE;
-#endif
-
- mLockingThread = NO_THREAD;
- apr_thread_mutex_unlock(mAPRMutexp);
+ delete static_cast<LLThreadLocalData*>(thread_local_data);
}
-bool LLMutex::isLocked()
+//static
+void LLThreadLocalData::create(LLThread* threadp)
{
- apr_status_t status = apr_thread_mutex_trylock(mAPRMutexp);
- if (APR_STATUS_IS_EBUSY(status))
+ LLThreadLocalData* new_tld = new LLThreadLocalData;
+ if (threadp)
{
- return true;
- }
- else
- {
- apr_thread_mutex_unlock(mAPRMutexp);
- return false;
+ threadp->mThreadLocalData = new_tld;
}
+ apr_status_t status = apr_threadkey_private_set(new_tld, sThreadLocalDataKey);
+ llassert_always(status == APR_SUCCESS);
}
-U32 LLMutex::lockingThread() const
+//static
+LLThreadLocalData& LLThreadLocalData::tldata(void)
{
- return mLockingThread;
+ if (!sThreadLocalDataKey)
+ {
+ LLThreadLocalData::init();
+ }
+
+ void* data;
+ apr_status_t status = apr_threadkey_private_get(&data, sThreadLocalDataKey);
+ llassert_always(status == APR_SUCCESS);
+ return *static_cast<LLThreadLocalData*>(data);
}
//============================================================================
-LLCondition::LLCondition(apr_pool_t *poolp) :
- LLMutex(poolp)
+LLCondition::LLCondition(LLAPRPool& parent) : LLMutex(parent)
{
- // base class (LLMutex) has already ensured that mAPRPoolp is set up.
-
- apr_thread_cond_create(&mAPRCondp, mAPRPoolp);
+ apr_thread_cond_create(&mAPRCondp, mPool());
}
@@ -422,15 +361,6 @@ LLCondition::~LLCondition()
void LLCondition::wait()
{
- if (!isLocked())
- { //mAPRMutexp MUST be locked before calling apr_thread_cond_wait
- apr_thread_mutex_lock(mAPRMutexp);
-#if MUTEX_DEBUG
- // avoid asserts on destruction in non-release builds
- U32 id = LLThread::currentID();
- mIsLocked[id] = TRUE;
-#endif
- }
apr_thread_cond_wait(mAPRCondp, mAPRMutexp);
}
@@ -445,6 +375,44 @@ void LLCondition::broadcast()
}
//============================================================================
+LLMutexBase::LLMutexBase() :
+ mLockingThread(NO_THREAD),
+ mCount(0)
+{
+}
+
+void LLMutexBase::lock()
+{
+#if LL_DARWIN
+ if (mLockingThread == LLThread::currentID())
+#else
+ if (mLockingThread == local_thread_ID)
+#endif
+ { //redundant lock
+ mCount++;
+ return;
+ }
+
+ apr_thread_mutex_lock(mAPRMutexp);
+
+#if LL_DARWIN
+ mLockingThread = LLThread::currentID();
+#else
+ mLockingThread = local_thread_ID;
+#endif
+}
+
+void LLMutexBase::unlock()
+{
+ if (mCount > 0)
+ { //not the root unlock
+ mCount--;
+ return;
+ }
+ mLockingThread = NO_THREAD;
+
+ apr_thread_mutex_unlock(mAPRMutexp);
+}
//----------------------------------------------------------------------------
@@ -456,7 +424,7 @@ void LLThreadSafeRefCount::initThreadSafeRefCount()
{
if (!sMutex)
{
- sMutex = new LLMutex(0);
+ sMutex = new LLMutex;
}
}
diff --git a/indra/llcommon/llthread.h b/indra/llcommon/llthread.h
index 40291a2569..c732e3bc77 100644
--- a/indra/llcommon/llthread.h
+++ b/indra/llcommon/llthread.h
@@ -29,7 +29,13 @@
#include "llapp.h"
#include "llapr.h"
+#include "llmemory.h"
#include "apr_thread_cond.h"
+#include "llaprpool.h"
+
+#ifdef SHOW_ASSERT
+extern LL_COMMON_API bool is_main_thread(void);
+#endif
class LLThread;
class LLMutex;
@@ -41,6 +47,22 @@ class LLCondition;
#define ll_thread_local __thread
#endif
+class LL_COMMON_API LLThreadLocalData
+{
+private:
+ static apr_threadkey_t* sThreadLocalDataKey;
+
+public:
+ // Thread-local memory pools.
+ LLAPRRootPool mRootPool;
+ LLVolatileAPRPool mVolatileAPRPool;
+
+ static void init(void);
+ static void destroy(void* thread_local_data);
+ static void create(LLThread* pthread);
+ static LLThreadLocalData& tldata(void);
+};
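+
+// Example (a sketch): any thread can reach its own pools through tldata(); e.g. the
+// default LLMutex constructor below uses LLThread::tldata().mRootPool as the parent
+// pool of its APR mutex.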
+
class LL_COMMON_API LLThread
{
private:
@@ -54,7 +76,7 @@ public:
QUITTING= 2 // Someone wants this thread to quit
} EThreadStatus;
- LLThread(const std::string& name, apr_pool_t *poolp = NULL);
+ LLThread(std::string const& name);
virtual ~LLThread(); // Warning! You almost NEVER want to destroy a thread unless it's in the STOPPED state.
virtual void shutdown(); // stops the thread
@@ -69,7 +91,7 @@ public:
// Called from MAIN THREAD.
void pause();
void unpause();
- bool isPaused() { return isStopped() || mPaused == TRUE; }
+ bool isPaused() { return isStopped() || mPaused; }
// Cause the thread to wake up and check its condition
void wake();
@@ -83,13 +105,11 @@ public:
// this kicks off the apr thread
void start(void);
- apr_pool_t *getAPRPool() { return mAPRPoolp; }
- LLVolatileAPRPool* getLocalAPRFilePool() { return mLocalAPRFilePoolp ; }
-
- U32 getID() const { return mID; }
+ // Return thread-local data for the current thread.
+ static LLThreadLocalData& tldata(void) { return LLThreadLocalData::tldata(); }
private:
- BOOL mPaused;
+ bool mPaused;
// static function passed to APR thread creation routine
static void *APR_THREAD_FUNC staticRun(apr_thread_t *apr_threadp, void *datap);
@@ -99,15 +119,11 @@ protected:
LLCondition* mRunCondition;
apr_thread_t *mAPRThreadp;
- apr_pool_t *mAPRPoolp;
- BOOL mIsLocalPool;
EThreadStatus mStatus;
U32 mID;
-
- //a local apr_pool for APRFile operations in this thread. If it exists, LLAPRFile::sAPRFilePoolp should not be used.
- //Note: this pool is used by APRFile ONLY, do NOT use it for any other purposes.
- // otherwise it will cause severe memory leaking!!! --bao
- LLVolatileAPRPool *mLocalAPRFilePoolp ;
+
+ friend void LLThreadLocalData::create(LLThread* threadp);
+ LLThreadLocalData* mThreadLocalData;
void setQuitting();
@@ -137,7 +153,15 @@ protected:
#define MUTEX_DEBUG (LL_DEBUG || LL_RELEASE_WITH_DEBUG_INFO)
-class LL_COMMON_API LLMutex
+#ifdef MUTEX_DEBUG
+// We really shouldn't be using recursive locks. Make sure of that in debug mode.
+#define MUTEX_FLAG APR_THREAD_MUTEX_UNNESTED
+#else
+// Use the fastest platform-optimal lock behavior (can be recursive or non-recursive).
+#define MUTEX_FLAG APR_THREAD_MUTEX_DEFAULT
+#endif
+
+class LL_COMMON_API LLMutexBase
{
public:
typedef enum
@@ -145,32 +169,73 @@ public:
NO_THREAD = 0xFFFFFFFF
} e_locking_thread;
- LLMutex(apr_pool_t *apr_poolp); // NULL pool constructs a new pool for the mutex
- virtual ~LLMutex();
-
- void lock(); // blocks
- void unlock();
- bool isLocked(); // non-blocking, but does do a lock/unlock so not free
- U32 lockingThread() const; //get ID of locking thread
-
+ LLMutexBase() ;
+
+ void lock() ;
+ void unlock() ;
+ // Returns true if lock was obtained successfully.
+ bool trylock() { return !APR_STATUS_IS_EBUSY(apr_thread_mutex_trylock(mAPRMutexp)); }
+
+ // non-blocking, but does do a lock/unlock so not free
+ bool isLocked() { bool is_not_locked = trylock(); if (is_not_locked) unlock(); return !is_not_locked; }
+
protected:
- apr_thread_mutex_t *mAPRMutexp;
+ // mAPRMutexp is initialized and uninitialized in the derived class.
+ apr_thread_mutex_t* mAPRMutexp;
mutable U32 mCount;
mutable U32 mLockingThread;
-
- apr_pool_t *mAPRPoolp;
- BOOL mIsLocalPool;
-
-#if MUTEX_DEBUG
- std::map<U32, BOOL> mIsLocked;
+};
+
+class LL_COMMON_API LLMutex : public LLMutexBase
+{
+public:
+ LLMutex(LLAPRPool& parent = LLThread::tldata().mRootPool) : mPool(parent)
+ {
+ apr_thread_mutex_create(&mAPRMutexp, MUTEX_FLAG, mPool());
+ }
+ ~LLMutex()
+ {
+ llassert(!isLocked()); // better not be locked!
+ apr_thread_mutex_destroy(mAPRMutexp);
+ mAPRMutexp = NULL;
+ }
+
+protected:
+ LLAPRPool mPool;
+};
+
+#if APR_HAS_THREADS
+// No need to use a root pool in this case.
+typedef LLMutex LLMutexRootPool;
+#else // APR_HAS_THREADS
+class LL_COMMON_API LLMutexRootPool : public LLMutexBase
+{
+public:
+ LLMutexRootPool(void)
+ {
+ apr_thread_mutex_create(&mAPRMutexp, MUTEX_FLAG, mRootPool());
+ }
+ ~LLMutexRootPool()
+ {
+#if APR_POOL_DEBUG
+ // It is allowed to destruct root pools from a different thread.
+ mRootPool.grab_ownership();
#endif
+ llassert(!isLocked());
+ apr_thread_mutex_destroy(mAPRMutexp);
+ mAPRMutexp = NULL;
+ }
+
+protected:
+ LLAPRRootPool mRootPool;
};
+#endif // APR_HAS_THREADS
// Actually a condition/mutex pair (since each condition needs to be associated with a mutex).
class LL_COMMON_API LLCondition : public LLMutex
{
public:
- LLCondition(apr_pool_t *apr_poolp); // Defaults to global pool, could use the thread pool as well.
+ LLCondition(LLAPRPool& parent = LLThread::tldata().mRootPool);
~LLCondition();
void wait(); // blocks
@@ -181,10 +246,10 @@ protected:
apr_thread_cond_t *mAPRCondp;
};
-class LLMutexLock
+class LL_COMMON_API LLMutexLock
{
public:
- LLMutexLock(LLMutex* mutex)
+ LLMutexLock(LLMutexBase* mutex)
{
mMutex = mutex;
mMutex->lock();
@@ -194,7 +259,7 @@ public:
mMutex->unlock();
}
private:
- LLMutex* mMutex;
+ LLMutexBase* mMutex;
};
//============================================================================
diff --git a/indra/llcommon/llthreadsafequeue.cpp b/indra/llcommon/llthreadsafequeue.cpp
index 8a73e632a9..05d24944f3 100644
--- a/indra/llcommon/llthreadsafequeue.cpp
+++ b/indra/llcommon/llthreadsafequeue.cpp
@@ -34,19 +34,11 @@
//-----------------------------------------------------------------------------
-LLThreadSafeQueueImplementation::LLThreadSafeQueueImplementation(apr_pool_t * pool, unsigned int capacity):
- mOwnsPool(pool == 0),
- mPool(pool),
+LLThreadSafeQueueImplementation::LLThreadSafeQueueImplementation(unsigned int capacity):
mQueue(0)
{
- if(mOwnsPool) {
- apr_status_t status = apr_pool_create(&mPool, 0);
- if(status != APR_SUCCESS) throw LLThreadSafeQueueError("failed to allocate pool");
- } else {
- ; // No op.
- }
-
- apr_status_t status = apr_queue_create(&mQueue, capacity, mPool);
+ mPool.create();
+ apr_status_t status = apr_queue_create(&mQueue, capacity, mPool());
if(status != APR_SUCCESS) throw LLThreadSafeQueueError("failed to allocate queue");
}
@@ -59,7 +51,6 @@ LLThreadSafeQueueImplementation::~LLThreadSafeQueueImplementation()
" elements;" << "memory will be leaked" << LL_ENDL;
apr_queue_term(mQueue);
}
- if(mOwnsPool && (mPool != 0)) apr_pool_destroy(mPool);
}
diff --git a/indra/llcommon/llthreadsafequeue.h b/indra/llcommon/llthreadsafequeue.h
index 58cac38769..43d0b396f2 100644
--- a/indra/llcommon/llthreadsafequeue.h
+++ b/indra/llcommon/llthreadsafequeue.h
@@ -30,9 +30,9 @@
#include <string>
#include <stdexcept>
+#include "llaprpool.h"
-struct apr_pool_t; // From apr_pools.h
class LLThreadSafeQueueImplementation; // See below.
@@ -75,7 +75,7 @@ struct apr_queue_t; // From apr_queue.h
class LL_COMMON_API LLThreadSafeQueueImplementation
{
public:
- LLThreadSafeQueueImplementation(apr_pool_t * pool, unsigned int capacity);
+ LLThreadSafeQueueImplementation(unsigned int capacity);
~LLThreadSafeQueueImplementation();
void pushFront(void * element);
bool tryPushFront(void * element);
@@ -84,8 +84,7 @@ public:
size_t size();
private:
- bool mOwnsPool;
- apr_pool_t * mPool;
+ LLAPRPool mPool; // The pool used for mQueue.
apr_queue_t * mQueue;
};
@@ -99,9 +98,8 @@ class LLThreadSafeQueue
public:
typedef ElementT value_type;
- // If the pool is set to NULL one will be allocated and managed by this
- // queue.
- LLThreadSafeQueue(apr_pool_t * pool = 0, unsigned int capacity = 1024);
+ // Constructor.
+ LLThreadSafeQueue(unsigned int capacity = 1024);
// Add an element to the front of queue (will block if the queue has
// reached capacity).
@@ -139,8 +137,8 @@ private:
template<typename ElementT>
-LLThreadSafeQueue<ElementT>::LLThreadSafeQueue(apr_pool_t * pool, unsigned int capacity):
- mImplementation(pool, capacity)
+LLThreadSafeQueue<ElementT>::LLThreadSafeQueue(unsigned int capacity) :
+ mImplementation(capacity)
{
; // No op.
}
diff --git a/indra/llcommon/llworkerthread.cpp b/indra/llcommon/llworkerthread.cpp
index 3ac50832fd..6b308bb917 100644
--- a/indra/llcommon/llworkerthread.cpp
+++ b/indra/llcommon/llworkerthread.cpp
@@ -37,12 +37,7 @@
LLWorkerThread::LLWorkerThread(const std::string& name, bool threaded) :
LLQueuedThread(name, threaded)
{
- mDeleteMutex = new LLMutex(NULL);
-
- if(!mLocalAPRFilePoolp)
- {
- mLocalAPRFilePoolp = new LLVolatileAPRPool() ;
- }
+ mDeleteMutex = new LLMutex;
}
LLWorkerThread::~LLWorkerThread()
@@ -204,7 +199,6 @@ LLWorkerClass::LLWorkerClass(LLWorkerThread* workerthread, const std::string& na
mWorkerClassName(name),
mRequestHandle(LLWorkerThread::nullHandle()),
mRequestPriority(LLWorkerThread::PRIORITY_NORMAL),
- mMutex(NULL),
mWorkFlags(0)
{
if (!mWorkerThread)
diff --git a/indra/llcommon/llworkerthread.h b/indra/llcommon/llworkerthread.h
index 9bff18303e..bef5ef53fe 100644
--- a/indra/llcommon/llworkerthread.h
+++ b/indra/llcommon/llworkerthread.h
@@ -94,7 +94,6 @@ public:
private:
void deleteWorker(LLWorkerClass* workerclass); // schedule for deletion
-
};
//============================================================================
@@ -194,7 +193,7 @@ protected:
U32 mRequestPriority; // last priority set
private:
- LLMutex mMutex;
+ LLMutexRootPool mMutex; // Use LLMutexRootPool since this object is created and destructed by multiple threads.
LLAtomicU32 mWorkFlags;
};