Diffstat (limited to 'indra/llcommon')
38 files changed, 4569 insertions, 927 deletions
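Before the per-file hunks, a minimal before/after sketch of the calling convention this changeset moves llcommon to. This is illustrative only: the class, enum and helper names (LLAPRFile::short_lived, LLScopedVolatileAPRPool) are taken from the llapr.h/llaprpool.h hunks below, while the surrounding variables (filename, path) are assumed.

	// Old style (removed below): callers passed an LLVolatileAPRPool*,
	// or fell back to the shared globals gAPRPoolp / LLAPRFile::sAPRFilePoolp.
	//     LLAPRFile file;
	//     file.open(filename, APR_READ|APR_BINARY, LLAPRFile::sAPRFilePoolp);

	// New style: the caller only states how long the file will live;
	// pool selection and cleanup are handled inside LLAPRFile.
	LLAPRFile file;
	file.open(filename, APR_READ|APR_BINARY, LLAPRFile::short_lived);	// or LLAPRFile::long_lived
	// ... read/write via file ...
	file.close();

	// For one-shot APR calls that need a pool argument, the new
	// llscopedvolatileaprpool.h helper replaces the manual
	// getVolatileAPRPool()/clearVolatileAPRPool() pairs, as in the
	// rewritten LLAPRFile::remove/rename/size below:
	LLScopedVolatileAPRPool pool;	// passed directly where an apr_pool_t* is expected
	apr_status_t s = apr_file_remove(path.c_str(), pool);

The pattern visible throughout the diff is the removal of the shared gAPRPoolp/sAPRFilePoolp globals in favour of per-thread root and volatile pools owned by LLThreadLocalData.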
| diff --git a/indra/llcommon/CMakeLists.txt b/indra/llcommon/CMakeLists.txt index 9910281b64..6f39aba976 100644 --- a/indra/llcommon/CMakeLists.txt +++ b/indra/llcommon/CMakeLists.txt @@ -31,6 +31,7 @@ set(llcommon_SOURCE_FILES      llallocator_heap_profile.cpp      llapp.cpp      llapr.cpp +    llaprpool.cpp      llassettype.cpp      llavatarname.cpp      llbase32.cpp @@ -80,6 +81,7 @@ set(llcommon_SOURCE_FILES      llrand.cpp      llrefcount.cpp      llrun.cpp +    llscopedvolatileaprpool.h      llsd.cpp      llsdserialize.cpp      llsdserialize_xml.cpp @@ -115,13 +117,14 @@ set(llcommon_HEADER_FILES      indra_constants.h      linden_common.h      linked_lists.h -    llaccountingquota.h +    llaccountingcost.h      llallocator.h      llallocator_heap_profile.h      llagentconstants.h      llavatarname.h      llapp.h      llapr.h +    llaprpool.h      llassettype.h      llassoclist.h      llavatarconstants.h @@ -317,7 +320,9 @@ if (LL_TESTS)    LL_ADD_INTEGRATION_TEST(lllazy "" "${test_libs}")    LL_ADD_INTEGRATION_TEST(llprocessor "" "${test_libs}")    LL_ADD_INTEGRATION_TEST(llrand "" "${test_libs}") -  LL_ADD_INTEGRATION_TEST(llsdserialize "" "${test_libs}") +  LL_ADD_INTEGRATION_TEST(llsdserialize "" "${test_libs}" +                          "${PYTHON_EXECUTABLE}" "${CMAKE_CURRENT_SOURCE_DIR}/tests/setpython.py") +  LL_ADD_INTEGRATION_TEST(llsingleton "" "${test_libs}")                              LL_ADD_INTEGRATION_TEST(llstring "" "${test_libs}")    LL_ADD_INTEGRATION_TEST(lltreeiterators "" "${test_libs}")    LL_ADD_INTEGRATION_TEST(lluri "" "${test_libs}") diff --git a/indra/llcommon/indra_constants.h b/indra/llcommon/indra_constants.h index d0f287657e..0745696ef3 100644 --- a/indra/llcommon/indra_constants.h +++ b/indra/llcommon/indra_constants.h @@ -387,8 +387,6 @@ const S32 MAP_SIM_RETURN_NULL_SIMS 	= 0x00010000;  const S32 MAP_SIM_PRELUDE 			= 0x00020000;  // Crash reporter behavior -const char* const CRASH_SETTINGS_FILE = "settings_crash_behavior.xml"; -const char* const CRASH_BEHAVIOR_SETTING = "CrashSubmitBehavior";  const S32 CRASH_BEHAVIOR_ASK = 0;  const S32 CRASH_BEHAVIOR_ALWAYS_SEND = 1;  const S32 CRASH_BEHAVIOR_NEVER_SEND = 2; diff --git a/indra/llcommon/llaccountingquota.h b/indra/llcommon/llaccountingcost.h index 140333de07..0ef3b50c6d 100644 --- a/indra/llcommon/llaccountingquota.h +++ b/indra/llcommon/llaccountingcost.h @@ -1,5 +1,5 @@  /**  - * @file llaccountingquota.h + * @file llaccountingcost.h   * @   *   * $LicenseInfo:firstyear=2001&license=viewerlgpl$ @@ -58,22 +58,28 @@ struct ParcelQuota  	F32 mParcelCapacity;  }; -struct SelectionQuota +//SelectionQuota atm does not require a id +struct SelectionCost  { -	SelectionQuota( LLUUID localId, F32 renderCost, F32 physicsCost, F32 networkCost, F32 simulationCost ) -	: mLocalId( localId) -	, mRenderCost( renderCost ) -	, mPhysicsCost( physicsCost ) +	SelectionCost( /*LLTransactionID transactionId, */ F32 physicsCost, F32 networkCost, F32 simulationCost ) +	//: mTransactionId( transactionId) +	: mPhysicsCost( physicsCost )  	, mNetworkCost( networkCost )  	, mSimulationCost( simulationCost )  	{  	} -	SelectionQuota() {} +	SelectionCost() +	: mPhysicsCost( 0.0f ) +	, mNetworkCost( 0.0f ) +	, mSimulationCost( 0.0f ) +	{} -	F32 mRenderCost, mPhysicsCost, mNetworkCost, mSimulationCost;	 -	LLUUID mLocalId; +	F32 mPhysicsCost, mNetworkCost, mSimulationCost;	 +	//LLTransactionID mTransactionId;  }; +typedef enum { Roots = 0 , Prims } eSelectionType; +  #endif diff --git a/indra/llcommon/llapp.cpp 
b/indra/llcommon/llapp.cpp index ed192a9975..a8b7106078 100644 --- a/indra/llcommon/llapp.cpp +++ b/indra/llcommon/llapp.cpp @@ -137,10 +137,6 @@ void LLApp::commonCtor()  		mOptions.append(sd);  	} -	// Make sure we clean up APR when we exit -	// Don't need to do this if we're cleaning up APR in the destructor -	//atexit(ll_cleanup_apr); -  	// Set the application to this instance.  	sApplication = this; diff --git a/indra/llcommon/llapr.cpp b/indra/llcommon/llapr.cpp index d1c44c9403..1e4a51102e 100644 --- a/indra/llcommon/llapr.cpp +++ b/indra/llcommon/llapr.cpp @@ -29,212 +29,8 @@  #include "linden_common.h"  #include "llapr.h"  #include "apr_dso.h" +#include "llscopedvolatileaprpool.h" -apr_pool_t *gAPRPoolp = NULL; // Global APR memory pool -LLVolatileAPRPool *LLAPRFile::sAPRFilePoolp = NULL ; //global volatile APR memory pool. -apr_thread_mutex_t *gLogMutexp = NULL; -apr_thread_mutex_t *gCallStacksLogMutexp = NULL; - -const S32 FULL_VOLATILE_APR_POOL = 1024 ; //number of references to LLVolatileAPRPool - -void ll_init_apr() -{ -	if (!gAPRPoolp) -	{ -		// Initialize APR and create the global pool -		apr_initialize(); -		apr_pool_create(&gAPRPoolp, NULL); -		 -		// Initialize the logging mutex -		apr_thread_mutex_create(&gLogMutexp, APR_THREAD_MUTEX_UNNESTED, gAPRPoolp); -		apr_thread_mutex_create(&gCallStacksLogMutexp, APR_THREAD_MUTEX_UNNESTED, gAPRPoolp); -	} - -	if(!LLAPRFile::sAPRFilePoolp) -	{ -		LLAPRFile::sAPRFilePoolp = new LLVolatileAPRPool(FALSE) ; -	} -} - - -void ll_cleanup_apr() -{ -	LL_INFOS("APR") << "Cleaning up APR" << LL_ENDL; - -	if (gLogMutexp) -	{ -		// Clean up the logging mutex - -		// All other threads NEED to be done before we clean up APR, so this is okay. -		apr_thread_mutex_destroy(gLogMutexp); -		gLogMutexp = NULL; -	} -	if (gCallStacksLogMutexp) -	{ -		// Clean up the logging mutex - -		// All other threads NEED to be done before we clean up APR, so this is okay. -		apr_thread_mutex_destroy(gCallStacksLogMutexp); -		gCallStacksLogMutexp = NULL; -	} -	if (gAPRPoolp) -	{ -		apr_pool_destroy(gAPRPoolp); -		gAPRPoolp = NULL; -	} -	if (LLAPRFile::sAPRFilePoolp) -	{ -		delete LLAPRFile::sAPRFilePoolp ; -		LLAPRFile::sAPRFilePoolp = NULL ; -	} -	apr_terminate(); -} - -// -// -//LLAPRPool -// -LLAPRPool::LLAPRPool(apr_pool_t *parent, apr_size_t size, BOOL releasePoolFlag) 	 -	: mParent(parent), -	mReleasePoolFlag(releasePoolFlag), -	mMaxSize(size), -	mPool(NULL) -{	 -	createAPRPool() ; -} - -LLAPRPool::~LLAPRPool()  -{ -	releaseAPRPool() ; -} - -void LLAPRPool::createAPRPool() -{ -	if(mPool) -	{ -		return ; -	} - -	mStatus = apr_pool_create(&mPool, mParent); -	ll_apr_warn_status(mStatus) ; - -	if(mMaxSize > 0) //size is the number of blocks (which is usually 4K), NOT bytes. -	{ -		apr_allocator_t *allocator = apr_pool_allocator_get(mPool);  -		if (allocator)  -		{  -			apr_allocator_max_free_set(allocator, mMaxSize) ; -		} -	} -} - -void LLAPRPool::releaseAPRPool() -{ -	if(!mPool) -	{ -		return ; -	} - -	if(!mParent || mReleasePoolFlag) -	{ -		apr_pool_destroy(mPool) ; -		mPool = NULL ; -	} -} - -//virtual -apr_pool_t* LLAPRPool::getAPRPool()  -{	 -	return mPool ;  -} - -LLVolatileAPRPool::LLVolatileAPRPool(BOOL is_local, apr_pool_t *parent, apr_size_t size, BOOL releasePoolFlag)  -				  : LLAPRPool(parent, size, releasePoolFlag), -				  mNumActiveRef(0), -				  mNumTotalRef(0), -				  mMutexPool(NULL), -				  mMutexp(NULL) -{ -	//create mutex -	if(!is_local) //not a local apr_pool, that is: shared by multiple threads. 
-	{ -		apr_pool_create(&mMutexPool, NULL); // Create a pool for mutex -		apr_thread_mutex_create(&mMutexp, APR_THREAD_MUTEX_UNNESTED, mMutexPool); -	} -} - -LLVolatileAPRPool::~LLVolatileAPRPool() -{ -	//delete mutex -	if(mMutexp) -	{ -		apr_thread_mutex_destroy(mMutexp); -		apr_pool_destroy(mMutexPool); -	} -} - -// -//define this virtual function to avoid any mistakenly calling LLAPRPool::getAPRPool(). -// -//virtual  -apr_pool_t* LLVolatileAPRPool::getAPRPool()  -{ -	return LLVolatileAPRPool::getVolatileAPRPool() ; -} - -apr_pool_t* LLVolatileAPRPool::getVolatileAPRPool()  -{	 -	LLScopedLock lock(mMutexp) ; - -	mNumTotalRef++ ; -	mNumActiveRef++ ; - -	if(!mPool) -	{ -		createAPRPool() ; -	} -	 -	return mPool ; -} - -void LLVolatileAPRPool::clearVolatileAPRPool()  -{ -	LLScopedLock lock(mMutexp) ; - -	if(mNumActiveRef > 0) -	{ -		mNumActiveRef--; -		if(mNumActiveRef < 1) -		{ -			if(isFull())  -			{ -				mNumTotalRef = 0 ; - -				//destroy the apr_pool. -				releaseAPRPool() ; -			} -			else  -			{ -				//This does not actually free the memory,  -				//it just allows the pool to re-use this memory for the next allocation.  -				apr_pool_clear(mPool) ; -			} -		} -	} -	else -	{ -		llassert_always(mNumActiveRef > 0) ; -	} - -	//paranoia check if the pool is jammed. -	//will remove the check before going to release. -	llassert_always(mNumTotalRef < (FULL_VOLATILE_APR_POOL << 2)) ; -} - -BOOL LLVolatileAPRPool::isFull() -{ -	return mNumTotalRef > FULL_VOLATILE_APR_POOL ; -}  //---------------------------------------------------------------------  //  // LLScopedLock @@ -313,15 +109,17 @@ void ll_apr_assert_status(apr_status_t status, apr_dso_handle_t *handle)  //  LLAPRFile::LLAPRFile()  	: mFile(NULL), -	  mCurrentFilePoolp(NULL) +	  mVolatileFilePoolp(NULL), +	  mRegularFilePoolp(NULL)  {  } -LLAPRFile::LLAPRFile(const std::string& filename, apr_int32_t flags, LLVolatileAPRPool* pool) +LLAPRFile::LLAPRFile(std::string const& filename, apr_int32_t flags, S32* sizep, access_t access_type)  	: mFile(NULL), -	  mCurrentFilePoolp(NULL) +	  mVolatileFilePoolp(NULL), +	  mRegularFilePoolp(NULL)  { -	open(filename, flags, pool); +	open(filename, flags, access_type, sizep);  }  LLAPRFile::~LLAPRFile() @@ -338,36 +136,58 @@ apr_status_t LLAPRFile::close()  		mFile = NULL ;  	} -	if(mCurrentFilePoolp) +	if (mVolatileFilePoolp)  	{ -		mCurrentFilePoolp->clearVolatileAPRPool() ; -		mCurrentFilePoolp = NULL ; +		mVolatileFilePoolp->clearVolatileAPRPool() ; +		mVolatileFilePoolp = NULL ; +	} + +	if (mRegularFilePoolp) +	{ +		delete mRegularFilePoolp; +		mRegularFilePoolp = NULL;  	}  	return ret ;  } -apr_status_t LLAPRFile::open(const std::string& filename, apr_int32_t flags, LLVolatileAPRPool* pool, S32* sizep) +apr_status_t LLAPRFile::open(std::string const& filename, apr_int32_t flags, access_t access_type, S32* sizep)  { -	apr_status_t s ; - -	//check if already open some file -	llassert_always(!mFile) ; -	llassert_always(!mCurrentFilePoolp) ; -	 -	apr_pool_t* apr_pool = pool ? pool->getVolatileAPRPool() : NULL ; -	s = apr_file_open(&mFile, filename.c_str(), flags, APR_OS_DEFAULT, getAPRFilePool(apr_pool)); +	llassert_always(!mFile); +	llassert_always(!mVolatileFilePoolp && !mRegularFilePoolp); -	if (s != APR_SUCCESS || !mFile) +	apr_status_t status; +	{ +		apr_pool_t* apr_file_open_pool;	// The use of apr_pool_t is OK here. +										// This is a temporary variable for a pool that is passed directly to apr_file_open below. 
+		if (access_type == short_lived) +		{ +			// Use a "volatile" thread-local pool. +			mVolatileFilePoolp = &LLThreadLocalData::tldata().mVolatileAPRPool; +			// Access the pool and increment its reference count. +			// The reference count of LLVolatileAPRPool objects will be decremented +			// again in LLAPRFile::close by calling mVolatileFilePoolp->clearVolatileAPRPool(). +			apr_file_open_pool = mVolatileFilePoolp->getVolatileAPRPool(); +		} +		else +		{ +			mRegularFilePoolp = new LLAPRPool(LLThreadLocalData::tldata().mRootPool); +			apr_file_open_pool = (*mRegularFilePoolp)(); +		} +		status = apr_file_open(&mFile, filename.c_str(), flags, APR_OS_DEFAULT, apr_file_open_pool); +	} +	if (status != APR_SUCCESS || !mFile)  	{  		mFile = NULL ; -		 +		close() ;  		if (sizep)  		{  			*sizep = 0;  		} +		return status;  	} -	else if (sizep) + +	if (sizep)  	{  		S32 file_size = 0;  		apr_off_t offset = 0; @@ -381,49 +201,7 @@ apr_status_t LLAPRFile::open(const std::string& filename, apr_int32_t flags, LLV  		*sizep = file_size;  	} -	if(!mCurrentFilePoolp) -	{ -		mCurrentFilePoolp = pool ; - -		if(!mFile) -		{ -			close() ; -		} -	} - -	return s ; -} - -//use gAPRPoolp. -apr_status_t LLAPRFile::open(const std::string& filename, apr_int32_t flags, BOOL use_global_pool) -{ -	apr_status_t s; - -	//check if already open some file -	llassert_always(!mFile) ; -	llassert_always(!mCurrentFilePoolp) ; -	llassert_always(use_global_pool) ; //be aware of using gAPRPoolp. -	 -	s = apr_file_open(&mFile, filename.c_str(), flags, APR_OS_DEFAULT, gAPRPoolp); -	if (s != APR_SUCCESS || !mFile) -	{ -		mFile = NULL ; -		close() ; -		return s; -	} - -	return s; -} - -apr_pool_t* LLAPRFile::getAPRFilePool(apr_pool_t* pool) -{	 -	if(!pool) -	{ -		mCurrentFilePoolp = sAPRFilePoolp ; -		return mCurrentFilePoolp->getVolatileAPRPool() ; -	} - -	return pool ; +	return status;  }  // File I/O @@ -482,45 +260,6 @@ S32 LLAPRFile::seek(apr_seek_where_t where, S32 offset)  //  //static -apr_status_t LLAPRFile::close(apr_file_t* file_handle, LLVolatileAPRPool* pool)  -{ -	apr_status_t ret = APR_SUCCESS ; -	if(file_handle) -	{ -		ret = apr_file_close(file_handle); -		file_handle = NULL ; -	} - -	if(pool) -	{ -		pool->clearVolatileAPRPool() ; -	} - -	return ret ; -} - -//static -apr_file_t* LLAPRFile::open(const std::string& filename, LLVolatileAPRPool* pool, apr_int32_t flags) -{ -	apr_status_t s; -	apr_file_t* file_handle ; - -	pool = pool ? 
pool : LLAPRFile::sAPRFilePoolp ; - -	s = apr_file_open(&file_handle, filename.c_str(), flags, APR_OS_DEFAULT, pool->getVolatileAPRPool()); -	if (s != APR_SUCCESS || !file_handle) -	{ -		ll_apr_warn_status(s); -		LL_WARNS("APR") << " Attempting to open filename: " << filename << LL_ENDL; -		file_handle = NULL ; -		close(file_handle, pool) ; -		return NULL; -	} - -	return file_handle ; -} - -//static  S32 LLAPRFile::seek(apr_file_t* file_handle, apr_seek_where_t where, S32 offset)  {  	if(!file_handle) @@ -553,13 +292,15 @@ S32 LLAPRFile::seek(apr_file_t* file_handle, apr_seek_where_t where, S32 offset)  }  //static -S32 LLAPRFile::readEx(const std::string& filename, void *buf, S32 offset, S32 nbytes, LLVolatileAPRPool* pool) +S32 LLAPRFile::readEx(const std::string& filename, void *buf, S32 offset, S32 nbytes)  { -	//***************************************** -	apr_file_t* file_handle = open(filename, pool, APR_READ|APR_BINARY);  -	//*****************************************	 -	if (!file_handle) +	apr_file_t* file_handle; +	LLScopedVolatileAPRPool pool; +	apr_status_t s = apr_file_open(&file_handle, filename.c_str(), APR_READ|APR_BINARY, APR_OS_DEFAULT, pool); +	if (s != APR_SUCCESS || !file_handle)  	{ +		ll_apr_warn_status(s); +		LL_WARNS("APR") << " while attempting to open file \"" << filename << '"' << LL_ENDL;  		return 0;  	} @@ -589,14 +330,13 @@ S32 LLAPRFile::readEx(const std::string& filename, void *buf, S32 offset, S32 nb  		}  	} -	//***************************************** -	close(file_handle, pool) ;  -	//***************************************** +	apr_file_close(file_handle); +  	return (S32)bytes_read;  }  //static -S32 LLAPRFile::writeEx(const std::string& filename, void *buf, S32 offset, S32 nbytes, LLVolatileAPRPool* pool) +S32 LLAPRFile::writeEx(const std::string& filename, void *buf, S32 offset, S32 nbytes)  {  	apr_int32_t flags = APR_CREATE|APR_WRITE|APR_BINARY;  	if (offset < 0) @@ -605,11 +345,13 @@ S32 LLAPRFile::writeEx(const std::string& filename, void *buf, S32 offset, S32 n  		offset = 0;  	} -	//***************************************** -	apr_file_t* file_handle = open(filename, pool, flags); -	//***************************************** -	if (!file_handle) +	apr_file_t* file_handle; +	LLScopedVolatileAPRPool pool; +	apr_status_t s = apr_file_open(&file_handle, filename.c_str(), flags, APR_OS_DEFAULT, pool); +	if (s != APR_SUCCESS || !file_handle)  	{ +		ll_apr_warn_status(s); +		LL_WARNS("APR") << " while attempting to open file \"" << filename << '"' << LL_ENDL;  		return 0;  	} @@ -639,21 +381,18 @@ S32 LLAPRFile::writeEx(const std::string& filename, void *buf, S32 offset, S32 n  		}  	} -	//***************************************** -	LLAPRFile::close(file_handle, pool); -	//***************************************** +	apr_file_close(file_handle);  	return (S32)bytes_written;  }  //static -bool LLAPRFile::remove(const std::string& filename, LLVolatileAPRPool* pool) +bool LLAPRFile::remove(const std::string& filename)  {  	apr_status_t s; -	pool = pool ? 
pool : LLAPRFile::sAPRFilePoolp ; -	s = apr_file_remove(filename.c_str(), pool->getVolatileAPRPool()); -	pool->clearVolatileAPRPool() ; +	LLScopedVolatileAPRPool pool; +	s = apr_file_remove(filename.c_str(), pool);  	if (s != APR_SUCCESS)  	{ @@ -665,13 +404,12 @@ bool LLAPRFile::remove(const std::string& filename, LLVolatileAPRPool* pool)  }  //static -bool LLAPRFile::rename(const std::string& filename, const std::string& newname, LLVolatileAPRPool* pool) +bool LLAPRFile::rename(const std::string& filename, const std::string& newname)  {  	apr_status_t s; -	pool = pool ? pool : LLAPRFile::sAPRFilePoolp ; -	s = apr_file_rename(filename.c_str(), newname.c_str(), pool->getVolatileAPRPool()); -	pool->clearVolatileAPRPool() ; +	LLScopedVolatileAPRPool pool; +	s = apr_file_rename(filename.c_str(), newname.c_str(), pool);  	if (s != APR_SUCCESS)  	{ @@ -683,49 +421,44 @@ bool LLAPRFile::rename(const std::string& filename, const std::string& newname,  }  //static -bool LLAPRFile::isExist(const std::string& filename, LLVolatileAPRPool* pool, apr_int32_t flags) +bool LLAPRFile::isExist(const std::string& filename, apr_int32_t flags)  { -	apr_file_t* apr_file; +	apr_file_t* file_handle;  	apr_status_t s; -	pool = pool ? pool : LLAPRFile::sAPRFilePoolp ; -	s = apr_file_open(&apr_file, filename.c_str(), flags, APR_OS_DEFAULT, pool->getVolatileAPRPool());	 +	LLScopedVolatileAPRPool pool; +	s = apr_file_open(&file_handle, filename.c_str(), flags, APR_OS_DEFAULT, pool); -	if (s != APR_SUCCESS || !apr_file) +	if (s != APR_SUCCESS || !file_handle)  	{ -		pool->clearVolatileAPRPool() ;  		return false;  	}  	else  	{ -		apr_file_close(apr_file) ; -		pool->clearVolatileAPRPool() ; +		apr_file_close(file_handle);  		return true;  	}  }  //static -S32 LLAPRFile::size(const std::string& filename, LLVolatileAPRPool* pool) +S32 LLAPRFile::size(const std::string& filename)  { -	apr_file_t* apr_file; +	apr_file_t* file_handle;  	apr_finfo_t info;  	apr_status_t s; -	pool = pool ? pool : LLAPRFile::sAPRFilePoolp ; -	s = apr_file_open(&apr_file, filename.c_str(), APR_READ, APR_OS_DEFAULT, pool->getVolatileAPRPool()); +	LLScopedVolatileAPRPool pool; +	s = apr_file_open(&file_handle, filename.c_str(), APR_READ, APR_OS_DEFAULT, pool); -	if (s != APR_SUCCESS || !apr_file) +	if (s != APR_SUCCESS || !file_handle)  	{		 -		pool->clearVolatileAPRPool() ; -		  		return 0;  	}  	else  	{ -		apr_status_t s = apr_file_info_get(&info, APR_FINFO_SIZE, apr_file);		 +		apr_status_t s = apr_file_info_get(&info, APR_FINFO_SIZE, file_handle); -		apr_file_close(apr_file) ; -		pool->clearVolatileAPRPool() ; +		apr_file_close(file_handle) ;  		if (s == APR_SUCCESS)  		{ @@ -739,31 +472,29 @@ S32 LLAPRFile::size(const std::string& filename, LLVolatileAPRPool* pool)  }  //static -bool LLAPRFile::makeDir(const std::string& dirname, LLVolatileAPRPool* pool) +bool LLAPRFile::makeDir(const std::string& dirname)  {  	apr_status_t s; -	pool = pool ? 
pool : LLAPRFile::sAPRFilePoolp ; -	s = apr_dir_make(dirname.c_str(), APR_FPROT_OS_DEFAULT, pool->getVolatileAPRPool()); -	pool->clearVolatileAPRPool() ; +	LLScopedVolatileAPRPool pool; +	s = apr_dir_make(dirname.c_str(), APR_FPROT_OS_DEFAULT, pool);  	if (s != APR_SUCCESS)  	{  		ll_apr_warn_status(s); -		LL_WARNS("APR") << " Attempting to make directory: " << dirname << LL_ENDL; +		LL_WARNS("APR") << " while attempting to make directory: " << dirname << LL_ENDL;  		return false;  	}  	return true;  }  //static -bool LLAPRFile::removeDir(const std::string& dirname, LLVolatileAPRPool* pool) +bool LLAPRFile::removeDir(const std::string& dirname)  {  	apr_status_t s; -	pool = pool ? pool : LLAPRFile::sAPRFilePoolp ; -	s = apr_file_remove(dirname.c_str(), pool->getVolatileAPRPool()); -	pool->clearVolatileAPRPool() ; +	LLScopedVolatileAPRPool pool; +	s = apr_file_remove(dirname.c_str(), pool);  	if (s != APR_SUCCESS)  	{ diff --git a/indra/llcommon/llapr.h b/indra/llcommon/llapr.h index af33ce666f..3f846f1314 100644 --- a/indra/llcommon/llapr.h +++ b/indra/llcommon/llapr.h @@ -50,71 +50,9 @@  #include "apr_atomic.h"  #include "llstring.h" -extern LL_COMMON_API apr_thread_mutex_t* gLogMutexp; -extern apr_thread_mutex_t* gCallStacksLogMutexp; -  struct apr_dso_handle_t; - -/**  - * @brief initialize the common apr constructs -- apr itself, the - * global pool, and a mutex. - */ -void LL_COMMON_API ll_init_apr(); - -/**  - * @brief Cleanup those common apr constructs. - */ -void LL_COMMON_API ll_cleanup_apr(); - -// -//LL apr_pool -//manage apr_pool_t, destroy allocated apr_pool in the destruction function. -// -class LL_COMMON_API LLAPRPool -{ -public: -	LLAPRPool(apr_pool_t *parent = NULL, apr_size_t size = 0, BOOL releasePoolFlag = TRUE) ; -	virtual ~LLAPRPool() ; - -	virtual apr_pool_t* getAPRPool() ; -	apr_status_t getStatus() {return mStatus ; } - -protected: -	void releaseAPRPool() ; -	void createAPRPool() ; - -protected: -	apr_pool_t*  mPool ;              //pointing to an apr_pool -	apr_pool_t*  mParent ;			  //parent pool -	apr_size_t   mMaxSize ;           //max size of mPool, mPool should return memory to system if allocated memory beyond this limit. However it seems not to work. -	apr_status_t mStatus ;            //status when creating the pool -	BOOL         mReleasePoolFlag ;   //if set, mPool is destroyed when LLAPRPool is deleted. default value is true. -}; - -// -//volatile LL apr_pool -//which clears memory automatically. -//so it can not hold static data or data after memory is cleared -// -class LL_COMMON_API LLVolatileAPRPool : public LLAPRPool -{ -public: -	LLVolatileAPRPool(BOOL is_local = TRUE, apr_pool_t *parent = NULL, apr_size_t size = 0, BOOL releasePoolFlag = TRUE); -	virtual ~LLVolatileAPRPool(); - -	/*virtual*/ apr_pool_t* getAPRPool() ; //define this virtual function to avoid any mistakenly calling LLAPRPool::getAPRPool(). -	apr_pool_t* getVolatileAPRPool() ;	 -	void        clearVolatileAPRPool() ; - -	BOOL        isFull() ; -	 -private: -	S32 mNumActiveRef ; //number of active pointers pointing to the apr_pool. -	S32 mNumTotalRef ;  //number of total pointers pointing to the apr_pool since last creating.   
- -	apr_thread_mutex_t *mMutexp; -	apr_pool_t         *mMutexPool; -} ; +class LLAPRPool; +class LLVolatileAPRPool;  /**    * @class LLScopedLock @@ -205,15 +143,20 @@ class LL_COMMON_API LLAPRFile : boost::noncopyable  	// make this non copyable since a copy closes the file  private:  	apr_file_t* mFile ; -	LLVolatileAPRPool *mCurrentFilePoolp ; //currently in use apr_pool, could be one of them: sAPRFilePoolp, or a temp pool.  +	LLVolatileAPRPool* mVolatileFilePoolp;	// (Thread local) APR pool currently in use. +	LLAPRPool* mRegularFilePoolp;		// ...or a regular pool.  public: +	enum access_t { +		long_lived,		// Use a global pool for long-lived file accesses. +		short_lived		// Use a volatile pool for short-lived file accesses. +	}; +  	LLAPRFile() ; -	LLAPRFile(const std::string& filename, apr_int32_t flags, LLVolatileAPRPool* pool = NULL); +	LLAPRFile(std::string const& filename, apr_int32_t flags, S32* sizep = NULL, access_t access_type = short_lived);  	~LLAPRFile() ; -	 -	apr_status_t open(const std::string& filename, apr_int32_t flags, LLVolatileAPRPool* pool = NULL, S32* sizep = NULL); -	apr_status_t open(const std::string& filename, apr_int32_t flags, BOOL use_global_pool); //use gAPRPoolp. + +	apr_status_t open(const std::string& filename, apr_int32_t flags, access_t access_type, S32* sizep = NULL);  	apr_status_t close() ;  	// Returns actual offset, -1 if seek fails @@ -226,32 +169,24 @@ public:  	apr_file_t* getFileHandle() {return mFile;}	 -private: -	apr_pool_t* getAPRFilePool(apr_pool_t* pool) ;	 -	  //  //*******************************************************************************************************************************  //static components  // -public: -	static LLVolatileAPRPool *sAPRFilePoolp ; //a global apr_pool for APRFile, which is used only when local pool does not exist. 
-  private: -	static apr_file_t* open(const std::string& filename, LLVolatileAPRPool* pool, apr_int32_t flags); -	static apr_status_t close(apr_file_t* file, LLVolatileAPRPool* pool) ;  	static S32 seek(apr_file_t* file, apr_seek_where_t where, S32 offset);  public:  	// returns false if failure: -	static bool remove(const std::string& filename, LLVolatileAPRPool* pool = NULL); -	static bool rename(const std::string& filename, const std::string& newname, LLVolatileAPRPool* pool = NULL); -	static bool isExist(const std::string& filename, LLVolatileAPRPool* pool = NULL, apr_int32_t flags = APR_READ); -	static S32 size(const std::string& filename, LLVolatileAPRPool* pool = NULL); -	static bool makeDir(const std::string& dirname, LLVolatileAPRPool* pool = NULL); -	static bool removeDir(const std::string& dirname, LLVolatileAPRPool* pool = NULL); +	static bool remove(const std::string& filename); +	static bool rename(const std::string& filename, const std::string& newname); +	static bool isExist(const std::string& filename, apr_int32_t flags = APR_READ); +	static S32 size(const std::string& filename); +	static bool makeDir(const std::string& dirname); +	static bool removeDir(const std::string& dirname);  	// Returns bytes read/written, 0 if read/write fails: -	static S32 readEx(const std::string& filename, void *buf, S32 offset, S32 nbytes, LLVolatileAPRPool* pool = NULL);	 -	static S32 writeEx(const std::string& filename, void *buf, S32 offset, S32 nbytes, LLVolatileAPRPool* pool = NULL); // offset<0 means append +	static S32 readEx(const std::string& filename, void *buf, S32 offset, S32 nbytes);	 +	static S32 writeEx(const std::string& filename, void *buf, S32 offset, S32 nbytes); // offset<0 means append  //*******************************************************************************************************************************  }; @@ -267,6 +202,4 @@ bool LL_COMMON_API ll_apr_warn_status(apr_status_t status, apr_dso_handle_t* han  void LL_COMMON_API ll_apr_assert_status(apr_status_t status);  void LL_COMMON_API ll_apr_assert_status(apr_status_t status, apr_dso_handle_t* handle); -extern "C" LL_COMMON_API apr_pool_t* gAPRPoolp; // Global APR memory pool -  #endif // LL_LLAPR_H diff --git a/indra/llcommon/llaprpool.cpp b/indra/llcommon/llaprpool.cpp new file mode 100644 index 0000000000..6f21b61b65 --- /dev/null +++ b/indra/llcommon/llaprpool.cpp @@ -0,0 +1,202 @@ +/** + * @file llaprpool.cpp + * + * $LicenseInfo:firstyear=2011&license=viewerlgpl$ + * Second Life Viewer Source Code + * Copyright (C) 2011, Linden Research, Inc. + *  + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. + *  + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * Lesser General Public License for more details. + *  + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA + *  + * Linden Research, Inc., 945 Battery Street, San Francisco, CA  94111  USA + * $/LicenseInfo$ + * + * CHANGELOG + *   and additional copyright holders. 
+ * + *   04/04/2010 + *   - Initial version, written by Aleric Inglewood @ SL + * + *   10/11/2010 + *   - Added APR_HAS_THREADS #if's to allow creation and destruction + *     of subpools by threads other than the parent pool owner. + */ + +#include "linden_common.h" + +#include "llerror.h" +#include "llaprpool.h" +#include "llthread.h" + +// Create a subpool from parent. +void LLAPRPool::create(LLAPRPool& parent) +{ +	llassert(!mPool);			// Must be non-initialized. +	mParent = &parent; +	if (!mParent)				// Using the default parameter? +	{ +		// By default use the root pool of the current thread. +		mParent = &LLThreadLocalData::tldata().mRootPool; +	} +	llassert(mParent->mPool);	// Parent must be initialized. +#if APR_HAS_THREADS +	// As per the documentation of APR (ie http://apr.apache.org/docs/apr/1.4/apr__pools_8h.html): +	// +	// Note that most operations on pools are not thread-safe: a single pool should only be +	// accessed by a single thread at any given time. The one exception to this rule is creating +	// a subpool of a given pool: one or more threads can safely create subpools at the same +	// time that another thread accesses the parent pool. +	// +	// In other words, it's safe for any thread to create a (sub)pool, independent of who +	// owns the parent pool. +	mOwner = apr_os_thread_current(); +#else +	mOwner = mParent->mOwner; +	llassert(apr_os_thread_equal(mOwner, apr_os_thread_current())); +#endif +	apr_status_t const apr_pool_create_status = apr_pool_create(&mPool, mParent->mPool); +	llassert_always(apr_pool_create_status == APR_SUCCESS); +	llassert(mPool);			// Initialized. +	apr_pool_cleanup_register(mPool, this, &s_plain_cleanup, &apr_pool_cleanup_null); +} + +// Destroy the (sub)pool, if any. +void LLAPRPool::destroy(void) +{ +	// Only do anything if we are not already (being) destroyed. +	if (mPool) +	{ +#if !APR_HAS_THREADS +		// If we are a root pool, then every thread may destruct us: in that case +		// we have to assume that no other thread will use this pool concurrently, +		// of course. Otherwise, if we are a subpool, only the thread that owns +		// the parent may destruct us, since that is the pool that is still alive, +		// possibly being used by others and being altered here. +		llassert(!mParent || apr_os_thread_equal(mParent->mOwner, apr_os_thread_current())); +#endif +		apr_pool_t* pool = mPool;	// The use of apr_pool_t is OK here. +									// Temporary store before destroying the pool. +		mPool = NULL;				// Mark that we are BEING destructed. +		apr_pool_cleanup_kill(pool, this, &s_plain_cleanup); +		apr_pool_destroy(pool); +	} +} + +bool LLAPRPool::parent_is_being_destructed(void) +{ +	return mParent && (!mParent->mPool || mParent->parent_is_being_destructed()); +} + +LLAPRInitialization::LLAPRInitialization(void) +{ +	static bool apr_initialized = false; + +	if (!apr_initialized) +	{ +		apr_initialize(); +	} + +	apr_initialized = true; +} + +bool LLAPRRootPool::sCountInitialized = false; +apr_uint32_t volatile LLAPRRootPool::sCount; + +apr_thread_mutex_t* gLogMutexp; +apr_thread_mutex_t* gCallStacksLogMutexp; + +LLAPRRootPool::LLAPRRootPool(void) : LLAPRInitialization(), LLAPRPool(0) +{ +	// sCountInitialized don't need locking because when we get here there is still only a single thread. 
+	if (!sCountInitialized) +	{ +		// Initialize the logging mutex +		apr_thread_mutex_create(&gLogMutexp, APR_THREAD_MUTEX_UNNESTED, mPool); +		apr_thread_mutex_create(&gCallStacksLogMutexp, APR_THREAD_MUTEX_UNNESTED, mPool); + +		apr_status_t status = apr_atomic_init(mPool); +		llassert_always(status == APR_SUCCESS); +		apr_atomic_set32(&sCount, 1);	// Set to 1 to account for the global root pool. +		sCountInitialized = true; + +		// Initialize thread-local APR pool support. +		// Because this recursively calls LLAPRRootPool::LLAPRRootPool(void) +		// it must be done last, so that sCount is already initialized. +		LLThreadLocalData::init(); +	} +	apr_atomic_inc32(&sCount); +} + +LLAPRRootPool::~LLAPRRootPool() +{ +	if (!apr_atomic_dec32(&sCount)) +	{ +		// The last pool was destructed. Cleanup remainder of APR. +		LL_INFOS("APR") << "Cleaning up APR" << LL_ENDL; + +		if (gLogMutexp) +		{ +			// Clean up the logging mutex + +			// All other threads NEED to be done before we clean up APR, so this is okay. +			apr_thread_mutex_destroy(gLogMutexp); +			gLogMutexp = NULL; +		} +		if (gCallStacksLogMutexp) +		{ +			// Clean up the logging mutex + +			// All other threads NEED to be done before we clean up APR, so this is okay. +			apr_thread_mutex_destroy(gCallStacksLogMutexp); +			gCallStacksLogMutexp = NULL; +		} + +		// Must destroy ALL, and therefore this last LLAPRRootPool, before terminating APR. +		static_cast<LLAPRRootPool*>(this)->destroy(); + +		apr_terminate(); +	} +} + +//static +// Return a global root pool that is independent of LLThreadLocalData. +// Normally you should NOT use this. Only use for early initialization +// (before main) and deinitialization (after main). +LLAPRRootPool& LLAPRRootPool::get(void) +{ +  static LLAPRRootPool global_APRpool(0); +  return global_APRpool; +} + +void LLVolatileAPRPool::clearVolatileAPRPool() +{ +	llassert_always(mNumActiveRef > 0); +	if (--mNumActiveRef == 0) +	{ +		if (isOld()) +		{ +			destroy(); +			mNumTotalRef = 0 ; +		} +		else +		{ +			// This does not actually free the memory, +			// it just allows the pool to re-use this memory for the next allocation. +			clear(); +		} +	} + +	// Paranoia check if the pool is jammed. +	llassert(mNumTotalRef < (FULL_VOLATILE_APR_POOL << 2)) ; +} diff --git a/indra/llcommon/llaprpool.h b/indra/llcommon/llaprpool.h new file mode 100644 index 0000000000..bf4102c584 --- /dev/null +++ b/indra/llcommon/llaprpool.h @@ -0,0 +1,256 @@ +/** + * @file llaprpool.h + * @brief Implementation of LLAPRPool + * + * $LicenseInfo:firstyear=2011&license=viewerlgpl$ + * Second Life Viewer Source Code + * Copyright (C) 2011, Linden Research, Inc. + *  + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. + *  + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * Lesser General Public License for more details. + *  + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA + *  + * Linden Research, Inc., 945 Battery Street, San Francisco, CA  94111  USA + * $/LicenseInfo$ + * + * CHANGELOG + *   and additional copyright holders. 
+ * + *   04/04/2010 + *   - Initial version, written by Aleric Inglewood @ SL + * + *   10/11/2010 + *   - Added APR_HAS_THREADS #if's to allow creation and destruction + *     of subpools by threads other than the parent pool owner. + * + *   05/02/2011 + *   - Fixed compilation on windows: Suppress compile warning 4996 + *     and include <winsock2.h> before including <ws2tcpip.h>, + *     by Merov Linden @ SL. + */ + +#ifndef LL_LLAPRPOOL_H +#define LL_LLAPRPOOL_H + +#ifdef LL_WINDOWS +#pragma warning(push) +#pragma warning(disable:4996) +#include <winsock2.h> +#include <ws2tcpip.h>		// Needed before including apr_portable.h +#pragma warning(pop) +#endif + +#include "apr_portable.h" +#include "apr_pools.h" +#include "llerror.h" + +extern void ll_init_apr(); + +/** + * @brief A wrapper around the APR memory pool API. + * + * Usage of this class should be restricted to passing it to libapr-1 function calls that need it. + * + */ +class LL_COMMON_API LLAPRPool +{ +protected: +	//! Pointer to the underlaying pool. NULL if not initialized. +	apr_pool_t* mPool;		// The use of apr_pool_t is OK here. +							// This is the wrapped pointer that it is all about! +	//! Pointer to the parent pool, if any. Only valid when mPool is non-zero. +	LLAPRPool* mParent; +	//! The thread that owns this memory pool. Only valid when mPool is non-zero. +	apr_os_thread_t mOwner; + +public: +	/// Construct an uninitialized (destructed) pool. +	LLAPRPool(void) : mPool(NULL) { } + +	/// Construct a subpool from an existing pool. +	/// This is not a copy-constructor, this class doesn't have one! +	LLAPRPool(LLAPRPool& parent) : mPool(NULL) { create(parent); } + +	/// Destruct the memory pool (free all of its subpools and allocated memory). +	~LLAPRPool() { destroy(); } + +protected: +	/// Create a pool that is allocated from the Operating System. Only used by LLAPRRootPool. +	LLAPRPool(int) : mPool(NULL), mParent(NULL), mOwner(apr_os_thread_current()) +	{ +		apr_status_t const apr_pool_create_status = apr_pool_create(&mPool, NULL); +		llassert_always(apr_pool_create_status == APR_SUCCESS); +		llassert(mPool); +		apr_pool_cleanup_register(mPool, this, &s_plain_cleanup, &apr_pool_cleanup_null); +	} + +public: +	/// Create a subpool from parent. May only be called for an uninitialized/destroyed pool. +	/// The default parameter causes the root pool of the current thread to be used. +	void create(LLAPRPool& parent = *static_cast<LLAPRPool*>(NULL)); + +	/// Destroy the (sub)pool, if any. +	void destroy(void); + +	// Use some safebool idiom (http://www.artima.com/cppsource/safebool.html) rather than operator bool. +	typedef LLAPRPool* const LLAPRPool::* const bool_type; +	/// Return true if the pool is initialized. +	operator bool_type() const { return mPool ? &LLAPRPool::mParent : 0; } + +	/// Painful, but we have to either provide access to this, or wrap +	/// every APR function call that needs an apr pool as argument. +	/// NEVER destroy a pool that is returned by this function! +	apr_pool_t* operator()(void) const		// The use of apr_pool_t is OK here. +	  										// This is the accessor for passing the pool to libapr-1 functions. +	{ +		llassert(mPool); +		llassert(apr_os_thread_equal(mOwner, apr_os_thread_current())); +		return mPool; +	} + +	/// Free all memory without destructing the pool. 
+	void clear(void) +	{ +		llassert(mPool); +		llassert(apr_os_thread_equal(mOwner, apr_os_thread_current())); +		apr_pool_clear(mPool); +	} + +// These methods would make this class 'complete' (as wrapper around the libapr +// pool functions), but we don't use memory pools in the viewer (only when +// we are forced to pass one to a libapr call), so don't define them in order +// not to encourage people to use them. +#if 0 +	void* palloc(size_t size) +	{ +		llassert(mPool); +		llassert(apr_os_thread_equal(mOwner, apr_os_thread_current())); +		return apr_palloc(mPool, size); +	} +	void* pcalloc(size_t size) +	{ +		llassert(mPool); +		llassert(apr_os_thread_equal(mOwner, apr_os_thread_current())); +		return apr_pcalloc(mPool, size); +	} +#endif + +private: +	bool parent_is_being_destructed(void); +	static apr_status_t s_plain_cleanup(void* userdata) { return static_cast<LLAPRPool*>(userdata)->plain_cleanup(); } + +	apr_status_t plain_cleanup(void) +	{ +		if (mPool && 						// We are not being destructed, +			parent_is_being_destructed())	// but our parent is. +		  // This means the pool is being destructed recursively by libapr +		  // because one of its parents is being destructed. +		{ +			mPool = NULL;	// Stop destroy() from destructing the pool again. +		} +		return APR_SUCCESS; +	} +}; + +class LLAPRInitialization +{ +public: +	LLAPRInitialization(void); +}; + +/** + * @brief Root memory pool (allocates memory from the operating system). + * + * This class should only be used by LLThreadLocalData + * (and LLMutexRootPool when APR_HAS_THREADS isn't defined). + */ +class LL_COMMON_API LLAPRRootPool : public LLAPRInitialization, public LLAPRPool +{ +private: +	/// Construct a root memory pool. Should only be used by LLThreadLocalData and LLMutexRootPool. +	friend class LLThreadLocalData; +#if !APR_HAS_THREADS +	friend class LLMutexRootPool; +#endif +	/// Construct a root memory pool. +	/// Should only be used by LLThreadLocalData. +	LLAPRRootPool(void); +	~LLAPRRootPool(); + +private: +	// Keep track of how many root pools exist and when the last one is destructed. +	static bool sCountInitialized; +	static apr_uint32_t volatile sCount; + +public: +	// Return a global root pool that is independent of LLThreadLocalData. +	// Normally you should not use this. Only use for early initialization +	// (before main) and deinitialization (after main). +	static LLAPRRootPool& get(void); + +#if APR_POOL_DEBUG +	void grab_ownership(void) +	{ +		// You need a patched libapr to use this. +		// See http://web.archiveorange.com/archive/v/5XO9y2zoxUOMt6Gmi1OI +		apr_pool_owner_set(mPool); +	} +#endif + +private: +	// Used for constructing the Special Global Root Pool (returned by LLAPRRootPool::get). +	// It is the same as the default constructor but omits to increment sCount. As a result, +	// we must be sure that at least one other LLAPRRootPool is created before termination +	// of the application (which is the case: we create one LLAPRRootPool per thread). +	LLAPRRootPool(int) : LLAPRInitialization(), LLAPRPool(0) { } +}; + +/** Volatile memory pool + * + * 'Volatile' APR memory pool which normally only clears memory, + * and does not destroy the pool (the same pool is reused) for + * greater efficiency. However, as a safe guard the apr pool + * is destructed every FULL_VOLATILE_APR_POOL uses to allow + * the system memory to be allocated more efficiently and not + * get scattered through RAM. 
+ */ +class LL_COMMON_API LLVolatileAPRPool : protected LLAPRPool +{ +public: +	LLVolatileAPRPool(void) : mNumActiveRef(0), mNumTotalRef(0) { } + +	void clearVolatileAPRPool(void); + +	bool isOld(void) const { return mNumTotalRef > FULL_VOLATILE_APR_POOL; } +	bool isUnused() const { return mNumActiveRef == 0; } + +private: +	friend class LLScopedVolatileAPRPool; +	friend class LLAPRFile; +	apr_pool_t* getVolatileAPRPool(void)	// The use of apr_pool_t is OK here. +	{ +		if (!mPool) create(); +		++mNumActiveRef; +		++mNumTotalRef; +		return LLAPRPool::operator()(); +	} + +private: +	S32 mNumActiveRef;	// Number of active uses of the pool. +	S32 mNumTotalRef;	// Number of total uses of the pool since last creation. + +	// Maximum number of references to LLVolatileAPRPool until the pool is recreated. +	static S32 const FULL_VOLATILE_APR_POOL = 1024; +}; + +#endif // LL_LLAPRPOOL_H diff --git a/indra/llcommon/llcommon.cpp b/indra/llcommon/llcommon.cpp index 8be9e4f4de..b8a7394852 100644 --- a/indra/llcommon/llcommon.cpp +++ b/indra/llcommon/llcommon.cpp @@ -31,17 +31,9 @@  #include "llthread.h"  //static -BOOL LLCommon::sAprInitialized = FALSE; - -//static  void LLCommon::initClass()  {  	LLMemory::initClass(); -	if (!sAprInitialized) -	{ -		ll_init_apr(); -		sAprInitialized = TRUE; -	}  	LLTimer::initClass();  	LLThreadSafeRefCount::initThreadSafeRefCount();  // 	LLWorkerThread::initClass(); @@ -55,10 +47,5 @@ void LLCommon::cleanupClass()  // 	LLWorkerThread::cleanupClass();  	LLThreadSafeRefCount::cleanupThreadSafeRefCount();  	LLTimer::cleanupClass(); -	if (sAprInitialized) -	{ -		ll_cleanup_apr(); -		sAprInitialized = FALSE; -	}  	LLMemory::cleanupClass();  } diff --git a/indra/llcommon/llcommon.h b/indra/llcommon/llcommon.h index ca9cad5d05..171590f3d8 100644 --- a/indra/llcommon/llcommon.h +++ b/indra/llcommon/llcommon.h @@ -35,8 +35,6 @@ class LL_COMMON_API LLCommon  public:  	static void initClass();  	static void cleanupClass(); -private: -	static BOOL sAprInitialized;  };  #endif diff --git a/indra/llcommon/llerror.cpp b/indra/llcommon/llerror.cpp index bb64152407..bda9d7c177 100644 --- a/indra/llcommon/llerror.cpp +++ b/indra/llcommon/llerror.cpp @@ -379,7 +379,7 @@ namespace  	{  		/* This pattern, of returning a reference to a static function  		   variable, is to ensure that this global is constructed before -		   it is used, no matter what the global initializeation sequence +		   it is used, no matter what the global initialization sequence  		   is.  		   
See C++ FAQ Lite, sections 10.12 through 10.14  		*/ @@ -866,6 +866,9 @@ You get:  */ +extern apr_thread_mutex_t* gLogMutexp; +extern apr_thread_mutex_t* gCallStacksLogMutexp; +  namespace {  	bool checkLevelMap(const LevelMap& map, const std::string& key,  						LLError::ELevel& level) diff --git a/indra/llcommon/llerror.h b/indra/llcommon/llerror.h index 4a42241c4f..369f2a7a97 100644 --- a/indra/llcommon/llerror.h +++ b/indra/llcommon/llerror.h @@ -39,7 +39,7 @@  	Information for most users: -	Code can log messages with constuctions like this: +	Code can log messages with constructions like this:  		LL_INFOS("StringTag") << "request to fizzbip agent " << agent_id  			<< " denied due to timeout" << LL_ENDL; @@ -47,9 +47,9 @@  	Messages can be logged to one of four increasing levels of concern,  	using one of four "streams": -		LL_DEBUGS("StringTag")	- debug messages that are normally supressed -		LL_INFOS("StringTag")	- informational messages that are normall shown -		LL_WARNS("StringTag")	- warning messages that singal a problem +		LL_DEBUGS("StringTag")	- debug messages that are normally suppressed +		LL_INFOS("StringTag")	- informational messages that are normal shown +		LL_WARNS("StringTag")	- warning messages that signal a problem  		LL_ERRS("StringTag")	- error messages that are major, unrecoverable failures  	The later (LL_ERRS("StringTag")) automatically crashes the process after the message @@ -90,7 +90,7 @@  		WARN: LLFoo::doSomething: called with a big value for i: 283 -	Which messages are logged and which are supressed can be controled at run +	Which messages are logged and which are suppressed can be controlled at run  	time from the live file logcontrol.xml based on function, class and/or   	source file.  See etc/logcontrol-dev.xml for details. @@ -106,7 +106,7 @@ namespace LLError  	enum ELevel  	{  		LEVEL_ALL = 0, -			// used to indicate that all messagess should be logged +			// used to indicate that all messages should be logged  		LEVEL_DEBUG = 0,  		LEVEL_INFO = 1, @@ -220,7 +220,7 @@ namespace LLError  	// See top of file for example of how to use this  typedef LLError::NoClassInfo _LL_CLASS_TO_LOG; -	// Outside a class declartion, or in class without LOG_CLASS(), this +	// Outside a class declaration, or in class without LOG_CLASS(), this  	// typedef causes the messages to not be associated with any class. @@ -296,5 +296,4 @@ typedef LLError::NoClassInfo _LL_CLASS_TO_LOG;  		Such computation is done iff the message will be logged.  	
*/ -  #endif // LL_LLERROR_H diff --git a/indra/llcommon/lleventtimer.cpp b/indra/llcommon/lleventtimer.cpp index 7743826c60..0d96e03da4 100644 --- a/indra/llcommon/lleventtimer.cpp +++ b/indra/llcommon/lleventtimer.cpp @@ -58,19 +58,15 @@ LLEventTimer::~LLEventTimer()  void LLEventTimer::updateClass()   {  	std::list<LLEventTimer*> completed_timers; - +	for (instance_iter iter = beginInstances(); iter != endInstances(); )   	{ -		LLInstanceTrackerScopedGuard guard; -		for (instance_iter iter = guard.beginInstances(); iter != guard.endInstances(); )  -		{ -			LLEventTimer& timer = *iter++; -			F32 et = timer.mEventTimer.getElapsedTimeF32(); -			if (timer.mEventTimer.getStarted() && et > timer.mPeriod) { -				timer.mEventTimer.reset(); -				if ( timer.tick() ) -				{ -					completed_timers.push_back( &timer ); -				} +		LLEventTimer& timer = *iter++; +		F32 et = timer.mEventTimer.getElapsedTimeF32(); +		if (timer.mEventTimer.getStarted() && et > timer.mPeriod) { +			timer.mEventTimer.reset(); +			if ( timer.tick() ) +			{ +				completed_timers.push_back( &timer );  			}  		}  	} diff --git a/indra/llcommon/llfasttimer_class.cpp b/indra/llcommon/llfasttimer_class.cpp index bd594b06cf..463f558c2c 100644 --- a/indra/llcommon/llfasttimer_class.cpp +++ b/indra/llcommon/llfasttimer_class.cpp @@ -219,15 +219,20 @@ LLFastTimer::DeclareTimer::DeclareTimer(const std::string& name)  // static  void LLFastTimer::DeclareTimer::updateCachedPointers()  { -	DeclareTimer::LLInstanceTrackerScopedGuard guard;  	// propagate frame state pointers to timer declarations -	for (DeclareTimer::instance_iter it = guard.beginInstances(); -		it != guard.endInstances(); -		++it) +	for (instance_iter it = beginInstances(); it != endInstances(); ++it)  	{  		// update cached pointer  		it->mFrameState = &it->mTimer.getFrameState();  	} + +	// also update frame states of timers on stack +	LLFastTimer* cur_timerp = LLFastTimer::sCurTimerData.mCurTimer; +	while(cur_timerp->mLastTimerData.mCurTimer != cur_timerp)	 +	{ +		cur_timerp->mFrameState = &cur_timerp->mFrameState->mTimer->getFrameState(); +		cur_timerp = cur_timerp->mLastTimerData.mCurTimer; +	}  }  //static @@ -298,14 +303,15 @@ LLFastTimer::NamedTimer::~NamedTimer()  std::string LLFastTimer::NamedTimer::getToolTip(S32 history_idx)  { +	F64 ms_multiplier = 1000.0 / (F64)LLFastTimer::countsPerSecond();  	if (history_idx < 0)  	{ -		// by default, show average number of calls -		return llformat("%s (%d calls)", getName().c_str(), (S32)getCallAverage()); +		// by default, show average number of call +		return llformat("%s (%d ms, %d calls)", getName().c_str(), (S32)(getCountAverage() * ms_multiplier), (S32)getCallAverage());  	}  	else  	{ -		return llformat("%s (%d calls)", getName().c_str(), (S32)getHistoricalCalls(history_idx)); +		return llformat("%s (%d ms, %d calls)", getName().c_str(), (S32)(getHistoricalCount(history_idx) * ms_multiplier), (S32)getHistoricalCalls(history_idx));  	}  } @@ -388,10 +394,7 @@ void LLFastTimer::NamedTimer::buildHierarchy()  	// set up initial tree  	{ -		NamedTimer::LLInstanceTrackerScopedGuard guard; -		for (instance_iter it = guard.beginInstances(); -		     it != guard.endInstances(); -		     ++it) +		for (instance_iter it = beginInstances(); it != endInstances(); ++it)  		{  			NamedTimer& timer = *it;  			if (&timer == NamedTimerFactory::instance().getRootTimer()) continue; @@ -519,10 +522,7 @@ void LLFastTimer::NamedTimer::resetFrame()  		LLSD sd;  		{ -			NamedTimer::LLInstanceTrackerScopedGuard guard; -			for 
(NamedTimer::instance_iter it = guard.beginInstances(); -			     it != guard.endInstances(); -			     ++it) +			for (instance_iter it = beginInstances(); it != endInstances(); ++it)  			{  				NamedTimer& timer = *it;  				FrameState& info = timer.getFrameState(); @@ -559,7 +559,7 @@ void LLFastTimer::NamedTimer::resetFrame()  		llassert_always(timerp->mFrameStateIndex < (S32)getFrameStateList().size());  	} -	// sort timers by dfs traversal order to improve cache coherency +	// sort timers by DFS traversal order to improve cache coherency  	std::sort(getFrameStateList().begin(), getFrameStateList().end(), SortTimersDFS());  	// update pointers into framestatelist now that we've sorted it @@ -567,10 +567,7 @@ void LLFastTimer::NamedTimer::resetFrame()  	// reset for next frame  	{ -		NamedTimer::LLInstanceTrackerScopedGuard guard; -		for (NamedTimer::instance_iter it = guard.beginInstances(); -		     it != guard.endInstances(); -		     ++it) +		for (instance_iter it = beginInstances(); it != endInstances(); ++it)  		{  			NamedTimer& timer = *it; @@ -614,10 +611,7 @@ void LLFastTimer::NamedTimer::reset()  	// reset all history  	{ -		NamedTimer::LLInstanceTrackerScopedGuard guard; -		for (NamedTimer::instance_iter it = guard.beginInstances(); -		     it != guard.endInstances(); -		     ++it) +		for (instance_iter it = beginInstances(); it != endInstances(); ++it)  		{  			NamedTimer& timer = *it;  			if (&timer != NamedTimerFactory::instance().getRootTimer())  @@ -700,17 +694,7 @@ void LLFastTimer::nextFrame()  		llinfos << "Slow frame, fast timers inaccurate" << llendl;  	} -	if (sPauseHistory) -	{ -		sResetHistory = true; -	} -	else if (sResetHistory) -	{ -		sLastFrameIndex = 0; -		sCurFrameIndex = 0; -		sResetHistory = false; -	} -	else // not paused +	if (!sPauseHistory)  	{  		NamedTimer::processTimes();  		sLastFrameIndex = sCurFrameIndex++; @@ -865,7 +849,7 @@ std::string LLFastTimer::sClockType = "rdtsc";  #else  //LL_COMMON_API U64 get_clock_count(); // in lltimer.cpp -// These use QueryPerformanceCounter, which is arguably fine and also works on amd architectures. +// These use QueryPerformanceCounter, which is arguably fine and also works on AMD architectures.  
U32 LLFastTimer::getCPUClockCount32()  {  	return (U32)(get_clock_count()>>8); diff --git a/indra/llcommon/llfasttimer_class.h b/indra/llcommon/llfasttimer_class.h index 827747f0c6..f481e968a6 100644 --- a/indra/llcommon/llfasttimer_class.h +++ b/indra/llcommon/llfasttimer_class.h @@ -66,7 +66,7 @@ public:  	public:  		~NamedTimer(); -		enum { HISTORY_NUM = 60 }; +		enum { HISTORY_NUM = 300 };  		const std::string& getName() const { return mName; }  		NamedTimer* getParent() const { return mParent; } diff --git a/indra/llcommon/llfixedbuffer.cpp b/indra/llcommon/llfixedbuffer.cpp index d394f179fb..4b5cdbe288 100644 --- a/indra/llcommon/llfixedbuffer.cpp +++ b/indra/llcommon/llfixedbuffer.cpp @@ -30,8 +30,7 @@  LLFixedBuffer::LLFixedBuffer(const U32 max_lines)  	: LLLineBuffer(), -	  mMaxLines(max_lines), -	  mMutex(NULL) +	  mMaxLines(max_lines)  {  	mTimer.reset();  } diff --git a/indra/llcommon/llinstancetracker.cpp b/indra/llcommon/llinstancetracker.cpp index f576204511..5dc3ea5d7b 100644 --- a/indra/llcommon/llinstancetracker.cpp +++ b/indra/llcommon/llinstancetracker.cpp @@ -35,14 +35,15 @@  //static   void * & LLInstanceTrackerBase::getInstances(std::type_info const & info)  { -	static std::map<std::string, void *> instances; +	typedef std::map<std::string, void *> InstancesMap; +	static InstancesMap instances; -	std::string k = info.name(); -	if(instances.find(k) == instances.end()) -	{ -		instances[k] = NULL; -	} - -	return instances[k]; +	// std::map::insert() is just what we want here. You attempt to insert a +	// (key, value) pair. If the specified key doesn't yet exist, it inserts +	// the pair and returns a std::pair of (iterator, true). If the specified +	// key DOES exist, insert() simply returns (iterator, false). One lookup +	// handles both cases. +	return instances.insert(InstancesMap::value_type(info.name(), +													 InstancesMap::mapped_type())) +		.first->second;  } - diff --git a/indra/llcommon/llinstancetracker.h b/indra/llcommon/llinstancetracker.h index b971b2f914..5a3990a8df 100644 --- a/indra/llcommon/llinstancetracker.h +++ b/indra/llcommon/llinstancetracker.h @@ -29,6 +29,7 @@  #define LL_LLINSTANCETRACKER_H  #include <map> +#include <typeinfo>  #include "string_table.h"  #include <boost/utility.hpp> @@ -37,10 +38,40 @@  #include <boost/iterator/transform_iterator.hpp>  #include <boost/iterator/indirect_iterator.hpp> +/** + * Base class manages "class-static" data that must actually have singleton + * semantics: one instance per process, rather than one instance per module as + * sometimes happens with data simply declared static. + */  class LL_COMMON_API LLInstanceTrackerBase : public boost::noncopyable  { -	protected: -		static void * & getInstances(std::type_info const & info); +protected: +	/// Get a process-unique void* pointer slot for the specified type_info +	static void * & getInstances(std::type_info const & info); + +	/// Find or create a STATICDATA instance for the specified TRACKED class. +	/// STATICDATA must be default-constructible. +	template<typename STATICDATA, class TRACKED> +	static STATICDATA& getStatic() +	{ +		void *& instances = getInstances(typeid(TRACKED)); +		if (! instances) +		{ +			instances = new STATICDATA; +		} +		return *static_cast<STATICDATA*>(instances); +	} + +    /// It's not essential to derive your STATICDATA (for use with +    /// getStatic()) from StaticBase; it's just that both known +    /// implementations do. 
+    struct StaticBase +    { +        StaticBase(): +            sIterationNestDepth(0) +        {} +        S32 sIterationNestDepth; +    };  };  /// This mix-in class adds support for tracking all instances of the specified class parameter T @@ -50,15 +81,89 @@ class LL_COMMON_API LLInstanceTrackerBase : public boost::noncopyable  template<typename T, typename KEY = T*>  class LLInstanceTracker : public LLInstanceTrackerBase  { -	typedef typename std::map<KEY, T*> InstanceMap;  	typedef LLInstanceTracker<T, KEY> MyT; -	typedef boost::function<const KEY&(typename InstanceMap::value_type&)> KeyGetter; -	typedef boost::function<T*(typename InstanceMap::value_type&)> InstancePtrGetter; +	typedef typename std::map<KEY, T*> InstanceMap; +	struct StaticData: public StaticBase +	{ +		InstanceMap sMap; +	}; +	static StaticData& getStatic() { return LLInstanceTrackerBase::getStatic<StaticData, MyT>(); } +	static InstanceMap& getMap_() { return getStatic().sMap; } +  public: -	/// Dereferencing key_iter gives you a const KEY& -	typedef boost::transform_iterator<KeyGetter, typename InstanceMap::iterator> key_iter; -	/// Dereferencing instance_iter gives you a T& -	typedef boost::indirect_iterator< boost::transform_iterator<InstancePtrGetter, typename InstanceMap::iterator> > instance_iter; +	class instance_iter : public boost::iterator_facade<instance_iter, T, boost::forward_traversal_tag> +	{ +	public: +		typedef boost::iterator_facade<instance_iter, T, boost::forward_traversal_tag> super_t; +		 +		instance_iter(const typename InstanceMap::iterator& it) +		:	mIterator(it) +		{ +			++getStatic().sIterationNestDepth; +		} + +		~instance_iter() +		{ +			--getStatic().sIterationNestDepth; +		} + + +	private: +		friend class boost::iterator_core_access; + +		void increment() { mIterator++; } +		bool equal(instance_iter const& other) const +		{ +			return mIterator == other.mIterator; +		} + +		T& dereference() const +		{ +			return *(mIterator->second); +		} + +		typename InstanceMap::iterator mIterator; +	}; + +	class key_iter : public boost::iterator_facade<key_iter, KEY, boost::forward_traversal_tag> +	{ +	public: +		typedef boost::iterator_facade<key_iter, KEY, boost::forward_traversal_tag> super_t; + +		key_iter(typename InstanceMap::iterator it) +			:	mIterator(it) +		{ +			++getStatic().sIterationNestDepth; +		} + +		key_iter(const key_iter& other) +			:	mIterator(other.mIterator) +		{ +			++getStatic().sIterationNestDepth; +		} + +		~key_iter() +		{ +			--getStatic().sIterationNestDepth; +		} + + +	private: +		friend class boost::iterator_core_access; + +		void increment() { mIterator++; } +		bool equal(key_iter const& other) const +		{ +			return mIterator == other.mIterator; +		} + +		KEY& dereference() const +		{ +			return const_cast<KEY&>(mIterator->first); +		} + +		typename InstanceMap::iterator mIterator; +	};  	static T* getInstance(const KEY& k)  	{ @@ -66,57 +171,51 @@ public:  		return (found == getMap_().end()) ? 
NULL : found->second;  	} -	static key_iter beginKeys() -	{ -		return boost::make_transform_iterator(getMap_().begin(), -											  boost::bind(&InstanceMap::value_type::first, _1)); +	static instance_iter beginInstances()  +	{	 +		return instance_iter(getMap_().begin());   	} -	static key_iter endKeys() + +	static instance_iter endInstances()   	{ -		return boost::make_transform_iterator(getMap_().end(), -											  boost::bind(&InstanceMap::value_type::first, _1)); +		return instance_iter(getMap_().end());  	} -	static instance_iter beginInstances() + +	static S32 instanceCount() { return getMap_().size(); } + +	static key_iter beginKeys()  	{ -		return instance_iter(boost::make_transform_iterator(getMap_().begin(), -															boost::bind(&InstanceMap::value_type::second, _1))); +		return key_iter(getMap_().begin());  	} -	static instance_iter endInstances() +	static key_iter endKeys()  	{ -		return instance_iter(boost::make_transform_iterator(getMap_().end(), -															boost::bind(&InstanceMap::value_type::second, _1))); +		return key_iter(getMap_().end());  	} -	static S32 instanceCount() { return getMap_().size(); } +  protected:  	LLInstanceTracker(KEY key) { add_(key); } -	virtual ~LLInstanceTracker() { remove_(); } +	virtual ~LLInstanceTracker()  +	{  +		// it's unsafe to delete instances of this type while all instances are being iterated over. +		llassert_always(getStatic().sIterationNestDepth == 0); +		remove_();		 +	}  	virtual void setKey(KEY key) { remove_(); add_(key); } -	virtual const KEY& getKey() const { return mKey; } +	virtual const KEY& getKey() const { return mInstanceKey; }  private:  	void add_(KEY key)   	{  -		mKey = key;  +		mInstanceKey = key;   		getMap_()[key] = static_cast<T*>(this);   	}  	void remove_()  	{ -		getMap_().erase(mKey); +		getMap_().erase(mInstanceKey);  	} -    static InstanceMap& getMap_() -    { -		void * & instances = getInstances(typeid(MyT)); -        if (! instances) -        { -            instances = new InstanceMap; -        } -        return * static_cast<InstanceMap*>(instances); -    } -  private: - -	KEY mKey; +	KEY mInstanceKey;  };  /// explicit specialization for default case where KEY is T* @@ -124,73 +223,78 @@ private:  template<typename T>  class LLInstanceTracker<T, T*> : public LLInstanceTrackerBase  { -	typedef typename std::set<T*> InstanceSet;  	typedef LLInstanceTracker<T, T*> MyT; +	typedef typename std::set<T*> InstanceSet; +	struct StaticData: public StaticBase +	{ +		InstanceSet sSet; +	}; +	static StaticData& getStatic() { return LLInstanceTrackerBase::getStatic<StaticData, MyT>(); } +	static InstanceSet& getSet_() { return getStatic().sSet; } +  public: -	/// Dereferencing key_iter gives you a T* (since T* is the key) -	typedef typename InstanceSet::iterator key_iter; -	/// Dereferencing instance_iter gives you a T& -	typedef boost::indirect_iterator<key_iter> instance_iter;  	/// for completeness of analogy with the generic implementation  	static T* getInstance(T* k) { return k; }  	static S32 instanceCount() { return getSet_().size(); } -	// Instantiate this to get access to iterators for this type.  It's a 'guard' in the sense -	// that it treats deletes of this type as errors as long as there is an instance of -	// this class alive in scope somewhere (i.e. deleting while iterating is bad). 
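(Editor's aside.) The instance_iter and key_iter constructors and destructors above increment and decrement a shared sIterationNestDepth, so destroying a tracked object while any iterator is alive trips llassert_always in the tracker's destructor. A stripped-down sketch of that reference-counted iteration guard, using plain assert() and hypothetical names rather than the LLInstanceTracker machinery:

#include <cassert>
#include <cstddef>
#include <set>

struct Tracked;

struct Registry
{
    std::set<Tracked*> instances;
    int iterationNestDepth;        // > 0 while any iterator/guard is alive
    Registry() : iterationNestDepth(0) {}
};

static Registry& registry() { static Registry r; return r; }

struct Tracked
{
    Tracked()  { registry().instances.insert(this); }
    ~Tracked()
    {
        // Destroying an instance mid-iteration would invalidate live iterators.
        assert(registry().iterationNestDepth == 0);
        registry().instances.erase(this);
    }
};

// Stand-in for instance_iter: while one exists, iteration is in progress.
struct IterationGuard
{
    IterationGuard()  { ++registry().iterationNestDepth; }
    ~IterationGuard() { --registry().iterationNestDepth; }
};

int main()
{
    Tracked* t = new Tracked;
    {
        IterationGuard guard;      // pretend we are walking registry().instances
        // delete t;               // would trip the assert in ~Tracked
    }
    delete t;                      // guard destroyed, deletion is safe again
    return 0;
}

Tying the counter to the iterator's own lifetime is what lets this change retire the old explicit LLInstanceTrackerScopedGuard shown in the removed lines below.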
-	class LLInstanceTrackerScopedGuard +	class instance_iter : public boost::iterator_facade<instance_iter, T, boost::forward_traversal_tag>  	{  	public: -		LLInstanceTrackerScopedGuard() +		instance_iter(const typename InstanceSet::iterator& it) +		:	mIterator(it) +		{ +			++getStatic().sIterationNestDepth; +		} + +		instance_iter(const instance_iter& other) +		:	mIterator(other.mIterator) +		{ +			++getStatic().sIterationNestDepth; +		} + +		~instance_iter()  		{ -			++sIterationNestDepth; +			--getStatic().sIterationNestDepth;  		} -		~LLInstanceTrackerScopedGuard() +	private: +		friend class boost::iterator_core_access; + +		void increment() { mIterator++; } +		bool equal(instance_iter const& other) const  		{ -			--sIterationNestDepth; +			return mIterator == other.mIterator;  		} -		static instance_iter beginInstances() {	return instance_iter(getSet_().begin()); } -		static instance_iter endInstances() { return instance_iter(getSet_().end()); } -		static key_iter beginKeys() { return getSet_().begin(); } -		static key_iter endKeys()   { return getSet_().end(); } +		T& dereference() const +		{ +			return **mIterator; +		} + +		typename InstanceSet::iterator mIterator;  	}; +	static instance_iter beginInstances() {	return instance_iter(getSet_().begin()); } +	static instance_iter endInstances() { return instance_iter(getSet_().end()); } +  protected:  	LLInstanceTracker()  	{ -		// it's safe but unpredictable to create instances of this type while all instances are being iterated over.  I hate unpredictable.  This assert will probably be turned on early in the next development cycle. -		//llassert(sIterationNestDepth == 0); +		// it's safe but unpredictable to create instances of this type while all instances are being iterated over.  I hate unpredictable.	 This assert will probably be turned on early in the next development cycle.  		getSet_().insert(static_cast<T*>(this));  	}  	virtual ~LLInstanceTracker()  	{  		// it's unsafe to delete instances of this type while all instances are being iterated over. -		llassert(sIterationNestDepth == 0); +		llassert_always(getStatic().sIterationNestDepth == 0);  		getSet_().erase(static_cast<T*>(this));  	}  	LLInstanceTracker(const LLInstanceTracker& other)  	{ -		//llassert(sIterationNestDepth == 0);  		getSet_().insert(static_cast<T*>(this));  	} - -	static InstanceSet& getSet_() -	{ -		void * & instances = getInstances(typeid(MyT)); -		if (! 
instances) -		{ -			instances = new InstanceSet; -		} -		return * static_cast<InstanceSet *>(instances); -	} - -	static S32 sIterationNestDepth;  }; -template <typename T> S32 LLInstanceTracker<T, T*>::sIterationNestDepth = 0; -  #endif diff --git a/indra/llcommon/llmemory.cpp b/indra/llcommon/llmemory.cpp index 21d1c84d69..8c02ad8290 100644 --- a/indra/llcommon/llmemory.cpp +++ b/indra/llcommon/llmemory.cpp @@ -26,14 +26,13 @@  #include "linden_common.h" -#include "llmemory.h" -#if MEM_TRACK_MEM +//#if MEM_TRACK_MEM  #include "llthread.h" -#endif +//#endif  #if defined(LL_WINDOWS) -# include <windows.h> +//# include <windows.h>  # include <psapi.h>  #elif defined(LL_DARWIN)  # include <sys/types.h> @@ -43,10 +42,24 @@  # include <unistd.h>  #endif +#include "llmemory.h" + +#include "llsys.h" +#include "llframetimer.h"  //----------------------------------------------------------------------------  //static  char* LLMemory::reserveMem = 0; +U32 LLMemory::sAvailPhysicalMemInKB = U32_MAX ; +U32 LLMemory::sMaxPhysicalMemInKB = 0; +U32 LLMemory::sAllocatedMemInKB = 0; +U32 LLMemory::sAllocatedPageSizeInKB = 0 ; +U32 LLMemory::sMaxHeapSizeInKB = U32_MAX ; +BOOL LLMemory::sEnableMemoryFailurePrevention = FALSE; + +#if __DEBUG_PRIVATE_MEM__ +LLPrivateMemoryPoolManager::mem_allocation_info_t LLPrivateMemoryPoolManager::sMemAllocationTracker; +#endif  //static  void LLMemory::initClass() @@ -71,6 +84,148 @@ void LLMemory::freeReserve()  	reserveMem = NULL;  } +//static  +void LLMemory::initMaxHeapSizeGB(F32 max_heap_size_gb, BOOL prevent_heap_failure) +{ +	sMaxHeapSizeInKB = (U32)(max_heap_size_gb * 1024 * 1024) ; +	sEnableMemoryFailurePrevention = prevent_heap_failure ; +} + +//static  +void LLMemory::updateMemoryInfo()  +{ +#if LL_WINDOWS	 +	HANDLE self = GetCurrentProcess(); +	PROCESS_MEMORY_COUNTERS counters; +	 +	if (!GetProcessMemoryInfo(self, &counters, sizeof(counters))) +	{ +		llwarns << "GetProcessMemoryInfo failed" << llendl; +		return ; +	} + +	sAllocatedMemInKB = (U32)(counters.WorkingSetSize / 1024) ; +	sAllocatedPageSizeInKB = (U32)(counters.PagefileUsage / 1024) ; + +	U32 avail_phys, avail_virtual; +	LLMemoryInfo::getAvailableMemoryKB(avail_phys, avail_virtual) ; +	sMaxPhysicalMemInKB = llmin(avail_phys + sAllocatedMemInKB, sMaxHeapSizeInKB); + +	if(sMaxPhysicalMemInKB > sAllocatedMemInKB) +	{ +		sAvailPhysicalMemInKB = sMaxPhysicalMemInKB - sAllocatedMemInKB ; +	} +	else +	{ +		sAvailPhysicalMemInKB = 0 ; +	} +#else +	//not valid for other systems for now. +	sAllocatedMemInKB = (U32)(LLMemory::getCurrentRSS() / 1024) ; +	sMaxPhysicalMemInKB = U32_MAX ; +	sAvailPhysicalMemInKB = U32_MAX ; +#endif + +	return ; +} + +// +//this function is to test if there is enough space with the size in the virtual address space. +//it does not do any real allocation +//if success, it returns the address where the memory chunk can fit in; +//otherwise it returns NULL. +// +//static  +void* LLMemory::tryToAlloc(void* address, U32 size) +{ +#if LL_WINDOWS +	address = VirtualAlloc(address, size, MEM_RESERVE | MEM_TOP_DOWN, PAGE_NOACCESS) ; +	if(address) +	{ +		if(!VirtualFree(address, 0, MEM_RELEASE)) +		{ +			llerrs << "error happens when free some memory reservation." 
<< llendl ; +		} +	} +	return address ; +#else +	return (void*)0x01 ; //skip checking +#endif	 +} + +//static  +void LLMemory::logMemoryInfo(BOOL update) +{ +	if(update) +	{ +		updateMemoryInfo() ; +	} + +	llinfos << "Current allocated physical memory(KB): " << sAllocatedMemInKB << llendl ; +	llinfos << "Current allocated page size (KB): " << sAllocatedPageSizeInKB << llendl ; +	llinfos << "Current availabe physical memory(KB): " << sAvailPhysicalMemInKB << llendl ; +	llinfos << "Current max usable memory(KB): " << sMaxPhysicalMemInKB << llendl ; +} + +//return 0: everything is normal; +//return 1: the memory pool is low, but not in danger; +//return -1: the memory pool is in danger, is about to crash. +//static  +S32 LLMemory::isMemoryPoolLow() +{ +	static const U32 LOW_MEMEOY_POOL_THRESHOLD_KB = 64 * 1024 ; //64 MB for emergency use + +	if(!sEnableMemoryFailurePrevention) +	{ +		return 0 ; //no memory failure prevention. +	} + +	if(sAvailPhysicalMemInKB < (LOW_MEMEOY_POOL_THRESHOLD_KB >> 2)) //out of physical memory +	{ +		return -1 ; +	} + +	if(sAllocatedPageSizeInKB + (LOW_MEMEOY_POOL_THRESHOLD_KB >> 2) > sMaxHeapSizeInKB) //out of virtual address space. +	{ +		return -1 ; +	} + +	return (S32)(sAvailPhysicalMemInKB < LOW_MEMEOY_POOL_THRESHOLD_KB ||  +		sAllocatedPageSizeInKB + LOW_MEMEOY_POOL_THRESHOLD_KB > sMaxHeapSizeInKB) ; +} + +//static  +U32 LLMemory::getAvailableMemKB()  +{ +	return sAvailPhysicalMemInKB ; +} + +//static  +U32 LLMemory::getMaxMemKB()  +{ +	return sMaxPhysicalMemInKB ; +} + +//static  +U32 LLMemory::getAllocatedMemKB()  +{ +	return sAllocatedMemInKB ; +} + +void* ll_allocate (size_t size) +{ +	if (size == 0) +	{ +		llwarns << "Null allocation" << llendl; +	} +	void *p = malloc(size); +	if (p == NULL) +	{ +		LLMemory::freeReserve(); +		llerrs << "Out of memory Error" << llendl; +	} +	return p; +}  //---------------------------------------------------------------------------- @@ -237,7 +392,7 @@ U64 LLMemory::getCurrentRSS()  U32 LLMemory::getWorkingSetSize()  { -	return 0 ; +	return 0;  }  #endif @@ -258,7 +413,7 @@ LLMemTracker::LLMemTracker()  	mDrawnIndex = 0 ;  	mPaused = FALSE ; -	mMutexp = new LLMutex(NULL) ; +	mMutexp = new LLMutex() ;  	mStringBuffer = new char*[128] ;  	mStringBuffer[0] = new char[mCapacity * 128] ;  	for(S32 i = 1 ; i < mCapacity ; i++) @@ -376,3 +531,1661 @@ const char* LLMemTracker::getNextLine()  #endif //MEM_TRACK_MEM  //-------------------------------------------------------------------------------------------------- + +//-------------------------------------------------------------------------------------------------- +//-------------------------------------------------------------------------------------------------- +//minimum slot size and minimal slot size interval +const U32 ATOMIC_MEM_SLOT = 16 ; //bytes + +//minimum block sizes (page size) for small allocation, medium allocation, large allocation  +const U32 MIN_BLOCK_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION] = {2 << 10, 4 << 10, 16 << 10} ; // + +//maximum block sizes for small allocation, medium allocation, large allocation  +const U32 MAX_BLOCK_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION] = {64 << 10, 1 << 20, 4 << 20} ; + +//minimum slot sizes for small allocation, medium allocation, large allocation  +const U32 MIN_SLOT_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION]  = {ATOMIC_MEM_SLOT, 2 << 10, 512 << 10}; + +//maximum slot sizes for small allocation, medium allocation, large allocation  +const U32 MAX_SLOT_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION]  = {(2 << 10) - 
ATOMIC_MEM_SLOT, (512 - 2) << 10, 4 << 20}; + +//size of a block with multiple slots can not exceed CUT_OFF_SIZE +const U32 CUT_OFF_SIZE = (64 << 10) ; //64 KB + +//max number of slots in a block +const U32 MAX_NUM_SLOTS_IN_A_BLOCK = llmin(MIN_BLOCK_SIZES[0] / ATOMIC_MEM_SLOT, ATOMIC_MEM_SLOT * 8) ; + +//------------------------------------------------------------- +//align val to be integer times of ATOMIC_MEM_SLOT +U32 align(U32 val) +{ +	U32 aligned = (val / ATOMIC_MEM_SLOT) * ATOMIC_MEM_SLOT ; +	if(aligned < val) +	{ +		aligned += ATOMIC_MEM_SLOT ; +	} + +	return aligned ; +} + +//------------------------------------------------------------- +//class LLPrivateMemoryPool::LLMemoryBlock +//------------------------------------------------------------- +// +//each memory block could fit for two page sizes: 0.75 * mSlotSize, which starts from the beginning of the memory chunk and grow towards the end of the +//the block; another is mSlotSize, which starts from the end of the block and grows towards the beginning of the block. +// +LLPrivateMemoryPool::LLMemoryBlock::LLMemoryBlock() +{ +	//empty +} +		 +LLPrivateMemoryPool::LLMemoryBlock::~LLMemoryBlock()  +{ +	//empty +} + +//create and initialize a memory block +void LLPrivateMemoryPool::LLMemoryBlock::init(char* buffer, U32 buffer_size, U32 slot_size) +{ +	mBuffer = buffer ; +	mBufferSize = buffer_size ; +	mSlotSize = slot_size ; +	mTotalSlots = buffer_size / mSlotSize ;	 +	 +	llassert_always(buffer_size / mSlotSize <= MAX_NUM_SLOTS_IN_A_BLOCK) ; //max number is 128 +	 +	mAllocatedSlots = 0 ; +	mDummySize = 0 ; + +	//init the bit map. +	//mark free bits	 +	if(mTotalSlots > 32) //reserve extra space from mBuffer to store bitmap if needed. +	{ +		mDummySize = ATOMIC_MEM_SLOT ;		 +		mTotalSlots -= (mDummySize + mSlotSize - 1) / mSlotSize ; +		mUsageBits = 0 ; + +		S32 usage_bit_len = (mTotalSlots + 31) / 32 ; +		 +		for(S32 i = 0 ; i < usage_bit_len - 1 ; i++) +		{ +			*((U32*)mBuffer + i) = 0 ; +		} +		for(S32 i = usage_bit_len - 1 ; i < mDummySize / sizeof(U32) ; i++) +		{ +			*((U32*)mBuffer + i) = 0xffffffff ; +		} + +		if(mTotalSlots & 31) +		{ +			*((U32*)mBuffer + usage_bit_len - 2) = (0xffffffff << (mTotalSlots & 31)) ; +		}		 +	}	 +	else//no extra bitmap space reserved +	{ +		mUsageBits = 0 ; +		if(mTotalSlots & 31) +		{ +			mUsageBits = (0xffffffff << (mTotalSlots & 31)) ; +		} +	} + +	mSelf = this ; +	mNext = NULL ; +	mPrev = NULL ; + +	llassert_always(mTotalSlots > 0) ; +} + +//mark this block to be free with the memory [mBuffer, mBuffer + mBufferSize). +void LLPrivateMemoryPool::LLMemoryBlock::setBuffer(char* buffer, U32 buffer_size) +{ +	mBuffer = buffer ; +	mBufferSize = buffer_size ; +	mSelf = NULL ; +	mTotalSlots = 0 ; //set the block is free. 
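(Editor's aside on the align() helper above.) Assuming the 16-byte ATOMIC_MEM_SLOT used in this file, rounding up to a slot multiple can also be written as a branch-free bit mask because the slot size is a power of two; a small self-contained check of both forms:

#include <cassert>

typedef unsigned int U32;
const U32 SLOT = 16;   // mirrors ATOMIC_MEM_SLOT (16 bytes)

// Divide-then-multiply form, as in align() above.
U32 alignUp(U32 val)
{
    U32 aligned = (val / SLOT) * SLOT;
    if (aligned < val) aligned += SLOT;
    return aligned;
}

// Equivalent bit-mask form, valid only because SLOT is a power of two.
U32 alignUpMask(U32 val) { return (val + SLOT - 1) & ~(SLOT - 1); }

int main()
{
    assert(alignUp(1)  == 16 && alignUpMask(1)  == 16);
    assert(alignUp(16) == 16 && alignUpMask(16) == 16);
    assert(alignUp(17) == 32 && alignUpMask(17) == 32);
    return 0;
}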
+} + +//reserve a slot +char* LLPrivateMemoryPool::LLMemoryBlock::allocate()  +{ +	llassert_always(mAllocatedSlots < mTotalSlots) ; +	 +	//find a free slot +	U32* bits = NULL ; +	U32  k = 0 ; +	if(mUsageBits != 0xffffffff) +	{ +		bits = &mUsageBits ; +	} +	else if(mDummySize > 0)//go to extra space +	{		 +		for(S32 i = 0 ; i < mDummySize / sizeof(U32); i++) +		{ +			if(*((U32*)mBuffer + i) != 0xffffffff) +			{ +				bits = (U32*)mBuffer + i ; +				k = i + 1 ; +				break ; +			} +		} +	}	 +	S32 idx = 0 ; +	U32 tmp = *bits ; +	for(; tmp & 1 ; tmp >>= 1, idx++) ; + +	//set the slot reserved +	if(!idx) +	{ +		*bits |= 1 ; +	} +	else +	{ +		*bits |= (1 << idx) ; +	} + +	mAllocatedSlots++ ; +	 +	return mBuffer + mDummySize + (k * 32 + idx) * mSlotSize ; +} + +//free a slot +void  LLPrivateMemoryPool::LLMemoryBlock::freeMem(void* addr)  +{ +	//bit index +	U32 idx = ((U32)addr - (U32)mBuffer - mDummySize) / mSlotSize ; + +	U32* bits = &mUsageBits ; +	if(idx >= 32) +	{ +		bits = (U32*)mBuffer + (idx - 32) / 32 ; +	} + +	//reset the bit +	if(idx & 31) +	{ +		*bits &= ~(1 << (idx & 31)) ; +	} +	else +	{ +		*bits &= ~1 ; +	} + +	mAllocatedSlots-- ; +} + +//for debug use: reset the entire bitmap. +void  LLPrivateMemoryPool::LLMemoryBlock::resetBitMap() +{ +	for(S32 i = 0 ; i < mDummySize / sizeof(U32) ; i++) +	{ +		*((U32*)mBuffer + i) = 0 ; +	} +	mUsageBits = 0 ; +} +//------------------------------------------------------------------- +//class LLMemoryChunk +//-------------------------------------------------------------------- +LLPrivateMemoryPool::LLMemoryChunk::LLMemoryChunk() +{ +	//empty +} + +LLPrivateMemoryPool::LLMemoryChunk::~LLMemoryChunk() +{ +	//empty +} + +//create and init a memory chunk +void LLPrivateMemoryPool::LLMemoryChunk::init(char* buffer, U32 buffer_size, U32 min_slot_size, U32 max_slot_size, U32 min_block_size, U32 max_block_size)  +{ +	mBuffer = buffer ; +	mBufferSize = buffer_size ; +	mAlloatedSize = 0 ; + +	mMetaBuffer = mBuffer + sizeof(LLMemoryChunk) ; + +	mMinBlockSize = min_block_size; //page size +	mMinSlotSize = min_slot_size; +	mMaxSlotSize = max_slot_size ; +	mBlockLevels = mMaxSlotSize / mMinSlotSize ; +	mPartitionLevels = max_block_size / mMinBlockSize + 1 ; + +	S32 max_num_blocks = (buffer_size - sizeof(LLMemoryChunk) - mBlockLevels * sizeof(LLMemoryBlock*) - mPartitionLevels * sizeof(LLMemoryBlock*)) /  +		                 (mMinBlockSize + sizeof(LLMemoryBlock)) ; +	//meta data space +	mBlocks = (LLMemoryBlock*)mMetaBuffer ; //space reserved for all memory blocks. 
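(Editor's aside.) Stepping back to LLMemoryBlock::allocate() above: it finds a free slot by scanning a 32-bit usage word for the first clear bit and setting it. A self-contained sketch of that bit scan, on a standalone word rather than the block's real bitmap:

#include <cassert>

typedef unsigned int U32;

// Returns the index of the lowest clear bit in 'bits' and marks it used.
// The caller must guarantee at least one bit is still clear, matching the
// llassert_always(mAllocatedSlots < mTotalSlots) precondition above.
int claimFirstFreeBit(U32& bits)
{
    int idx = 0;
    for (U32 tmp = bits; tmp & 1; tmp >>= 1, ++idx)
        ;                        // walk past the already-used low bits
    bits |= (1u << idx);         // mark the slot reserved
    return idx;
}

int main()
{
    U32 usage = 0x0000000B;      // bits 0, 1, 3 used; bit 2 is the first free slot
    assert(claimFirstFreeBit(usage) == 2);
    assert(usage == 0x0000000F);
    assert(claimFirstFreeBit(usage) == 4);
    return 0;
}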
+	mAvailBlockList = (LLMemoryBlock**)((char*)mBlocks + sizeof(LLMemoryBlock) * max_num_blocks) ;  +	mFreeSpaceList = (LLMemoryBlock**)((char*)mAvailBlockList + sizeof(LLMemoryBlock*) * mBlockLevels) ;  +	 +	//data buffer, which can be used for allocation +	mDataBuffer = (char*)mFreeSpaceList + sizeof(LLMemoryBlock*) * mPartitionLevels ; +	 +	//alignmnet +	mDataBuffer = mBuffer + align(mDataBuffer - mBuffer) ; +	 +	//init +	for(U32 i = 0 ; i < mBlockLevels; i++) +	{ +		mAvailBlockList[i] = NULL ; +	} +	for(U32 i = 0 ; i < mPartitionLevels ; i++) +	{ +		mFreeSpaceList[i] = NULL ; +	} + +	//assign the entire chunk to the first block +	mBlocks[0].mPrev = NULL ; +	mBlocks[0].mNext = NULL ; +	mBlocks[0].setBuffer(mDataBuffer, buffer_size - (mDataBuffer - mBuffer)) ; +	addToFreeSpace(&mBlocks[0]) ; + +	mNext = NULL ; +	mPrev = NULL ; +} + +//static  +U32 LLPrivateMemoryPool::LLMemoryChunk::getMaxOverhead(U32 data_buffer_size, U32 min_slot_size,  +													   U32 max_slot_size, U32 min_block_size, U32 max_block_size) +{ +	//for large allocations, reserve some extra memory for meta data to avoid wasting much  +	if(data_buffer_size / min_slot_size < 64) //large allocations +	{ +		U32 overhead = sizeof(LLMemoryChunk) + (data_buffer_size / min_block_size) * sizeof(LLMemoryBlock) + +			sizeof(LLMemoryBlock*) * (max_slot_size / min_slot_size) + sizeof(LLMemoryBlock*) * (max_block_size / min_block_size + 1) ; + +		//round to integer times of min_block_size +		overhead = ((overhead + min_block_size - 1) / min_block_size) * min_block_size ; +		return overhead ; +	} +	else +	{ +		return 0 ; //do not reserve extra overhead if for small allocations +	} +} + +char* LLPrivateMemoryPool::LLMemoryChunk::allocate(U32 size) +{ +	if(mMinSlotSize > size) +	{ +		size = mMinSlotSize ; +	} +	if(mAlloatedSize + size  > mBufferSize - (mDataBuffer - mBuffer)) +	{ +		return NULL ; //no enough space in this chunk. 
+	} + +	char* p = NULL ; +	U32 blk_idx = getBlockLevel(size); + +	LLMemoryBlock* blk = NULL ; + +	//check if there is free block available +	if(mAvailBlockList[blk_idx]) +	{ +		blk = mAvailBlockList[blk_idx] ; +		p = blk->allocate() ; +		 +		if(blk->isFull()) +		{ +			popAvailBlockList(blk_idx) ; +		} +	} + +	//ask for a new block +	if(!p) +	{ +		blk = addBlock(blk_idx) ; +		if(blk) +		{ +			p = blk->allocate() ; + +			if(blk->isFull()) +			{ +				popAvailBlockList(blk_idx) ; +			} +		} +	} + +	//ask for space from larger blocks +	if(!p) +	{ +		for(S32 i = blk_idx + 1 ; i < mBlockLevels; i++) +		{ +			if(mAvailBlockList[i]) +			{ +				blk = mAvailBlockList[i] ; +				p = blk->allocate() ; + +				if(blk->isFull()) +				{ +					popAvailBlockList(i) ; +				} +				break ; +			} +		} +	} + +	if(p && blk) +	{		 +		mAlloatedSize += blk->getSlotSize() ; +	} +	return p ; +} + +void LLPrivateMemoryPool::LLMemoryChunk::freeMem(void* addr) +{	 +	U32 blk_idx = getPageIndex((U32)addr) ; +	LLMemoryBlock* blk = (LLMemoryBlock*)(mMetaBuffer + blk_idx * sizeof(LLMemoryBlock)) ; +	blk = blk->mSelf ; + +	bool was_full = blk->isFull() ; +	blk->freeMem(addr) ; +	mAlloatedSize -= blk->getSlotSize() ; + +	if(blk->empty()) +	{ +		removeBlock(blk) ; +	} +	else if(was_full) +	{ +		addToAvailBlockList(blk) ; +	}	 +} + +bool LLPrivateMemoryPool::LLMemoryChunk::empty() +{ +	return !mAlloatedSize ; +} + +bool LLPrivateMemoryPool::LLMemoryChunk::containsAddress(const char* addr) const +{ +	return (U32)mBuffer <= (U32)addr && (U32)mBuffer + mBufferSize > (U32)addr ; +} + +//debug use +void LLPrivateMemoryPool::LLMemoryChunk::dump() +{ +#if 0 +	//sanity check +	//for(S32 i = 0 ; i < mBlockLevels ; i++) +	//{ +	//	LLMemoryBlock* blk = mAvailBlockList[i] ; +	//	while(blk) +	//	{ +	//		blk_list.push_back(blk) ; +	//		blk = blk->mNext ; +	//	} +	//} +	for(S32 i = 0 ; i < mPartitionLevels ; i++) +	{ +		LLMemoryBlock* blk = mFreeSpaceList[i] ; +		while(blk) +		{ +			blk_list.push_back(blk) ; +			blk = blk->mNext ; +		} +	} + +	std::sort(blk_list.begin(), blk_list.end(), LLMemoryBlock::CompareAddress()); + +	U32 total_size = blk_list[0]->getBufferSize() ; +	for(U32 i = 1 ; i < blk_list.size(); i++) +	{ +		total_size += blk_list[i]->getBufferSize() ; +		if((U32)blk_list[i]->getBuffer() < (U32)blk_list[i-1]->getBuffer() + blk_list[i-1]->getBufferSize()) +		{ +			llerrs << "buffer corrupted." << llendl ; +		} +	} + +	llassert_always(total_size + mMinBlockSize >= mBufferSize - ((U32)mDataBuffer - (U32)mBuffer)) ; + +	U32 blk_num = (mBufferSize - (mDataBuffer - mBuffer)) / mMinBlockSize ; +	for(U32 i = 0 ; i < blk_num ; ) +	{ +		LLMemoryBlock* blk = &mBlocks[i] ; +		if(blk->mSelf) +		{ +			U32 end = blk->getBufferSize() / mMinBlockSize ; +			for(U32 j = 0 ; j < end ; j++) +			{ +				llassert_always(blk->mSelf == blk || !blk->mSelf) ; +			} +			i += end ; +		} +		else +		{ +			llerrs << "gap happens" << llendl ; +		} +	} +#endif +#if 0 +	llinfos << "---------------------------" << llendl ; +	llinfos << "Chunk buffer: " << (U32)getBuffer() << " size: " << getBufferSize() << llendl ; + +	llinfos << "available blocks ... " << llendl ; +	for(S32 i = 0 ; i < mBlockLevels ; i++) +	{ +		LLMemoryBlock* blk = mAvailBlockList[i] ; +		while(blk) +		{ +			llinfos << "blk buffer " << (U32)blk->getBuffer() << " size: " << blk->getBufferSize() << llendl ; +			blk = blk->mNext ; +		} +	} + +	llinfos << "free blocks ... 
" << llendl ; +	for(S32 i = 0 ; i < mPartitionLevels ; i++) +	{ +		LLMemoryBlock* blk = mFreeSpaceList[i] ; +		while(blk) +		{ +			llinfos << "blk buffer " << (U32)blk->getBuffer() << " size: " << blk->getBufferSize() << llendl ; +			blk = blk->mNext ; +		} +	} +#endif +} + +//compute the size for a block, the size is round to integer times of mMinBlockSize. +U32 LLPrivateMemoryPool::LLMemoryChunk::calcBlockSize(U32 slot_size) +{ +	// +	//Note: we try to make a block to have 32 slots if the size is not over 32 pages +	//32 is the number of bits of an integer in a 32-bit system +	// + +	U32 block_size; +	U32 cut_off_size = llmin(CUT_OFF_SIZE, (U32)(mMinBlockSize << 5)) ; + +	if((slot_size << 5) <= mMinBlockSize)//for small allocations, return one page  +	{ +		block_size = mMinBlockSize ; +	} +	else if(slot_size >= cut_off_size)//for large allocations, return one-slot block +	{ +		block_size = (slot_size / mMinBlockSize) * mMinBlockSize ; +		if(block_size < slot_size) +		{ +			block_size += mMinBlockSize ; +		} +	} +	else //medium allocations +	{ +		if((slot_size << 5) >= cut_off_size) +		{ +			block_size = cut_off_size ; +		} +		else +		{ +			block_size = ((slot_size << 5) / mMinBlockSize) * mMinBlockSize ; +		} +	} + +	llassert_always(block_size >= slot_size) ; + +	return block_size ; +} + +//create a new block in the chunk +LLPrivateMemoryPool::LLMemoryBlock* LLPrivateMemoryPool::LLMemoryChunk::addBlock(U32 blk_idx) +{	 +	U32 slot_size = mMinSlotSize * (blk_idx + 1) ; +	U32 preferred_block_size = calcBlockSize(slot_size) ;	 +	U16 idx = getPageLevel(preferred_block_size);  +	LLMemoryBlock* blk = NULL ; +	 +	if(mFreeSpaceList[idx])//if there is free slot for blk_idx +	{ +		blk = createNewBlock(mFreeSpaceList[idx], preferred_block_size, slot_size, blk_idx) ; +	} +	else if(mFreeSpaceList[mPartitionLevels - 1]) //search free pool +	{		 +		blk = createNewBlock(mFreeSpaceList[mPartitionLevels - 1], preferred_block_size, slot_size, blk_idx) ; +	} +	else //search for other non-preferred but enough space slot. +	{ +		S32 min_idx = 0 ; +		if(slot_size > mMinBlockSize) +		{ +			min_idx = getPageLevel(slot_size) ; +		} +		for(S32 i = (S32)idx - 1 ; i >= min_idx ; i--) //search the small slots first +		{ +			if(mFreeSpaceList[i]) +			{ +				U32 new_preferred_block_size = mFreeSpaceList[i]->getBufferSize(); +				new_preferred_block_size = (new_preferred_block_size / mMinBlockSize) * mMinBlockSize ; //round to integer times of mMinBlockSize. + +				//create a NEW BLOCK THERE. +				if(new_preferred_block_size >= slot_size) //at least there is space for one slot. +				{ +					 +					blk = createNewBlock(mFreeSpaceList[i], new_preferred_block_size, slot_size, blk_idx) ; +				} +				break ; +			}  +		} + +		if(!blk) +		{ +			for(U16 i = idx + 1 ; i < mPartitionLevels - 1; i++) //search the large slots  +			{ +				if(mFreeSpaceList[i]) +				{ +					//create a NEW BLOCK THERE. 
+					blk = createNewBlock(mFreeSpaceList[i], preferred_block_size, slot_size, blk_idx) ; +					break ; +				}  +			} +		} +	} + +	return blk ; +} + +//create a new block at the designed location +LLPrivateMemoryPool::LLMemoryBlock* LLPrivateMemoryPool::LLMemoryChunk::createNewBlock(LLMemoryBlock* blk, U32 buffer_size, U32 slot_size, U32 blk_idx) +{ +	//unlink from the free space +	removeFromFreeSpace(blk) ; + +	//check the rest space +	U32 new_free_blk_size = blk->getBufferSize() - buffer_size ;	 +	if(new_free_blk_size < mMinBlockSize) //can not partition the memory into size smaller than mMinBlockSize +	{ +		new_free_blk_size = 0 ; //discard the last small extra space. +	}			 + +	//add the rest space back to the free list +	if(new_free_blk_size > 0) //blk still has free space +	{ +		LLMemoryBlock* next_blk = blk + (buffer_size / mMinBlockSize) ; +		next_blk->mPrev = NULL ; +		next_blk->mNext = NULL ; +		next_blk->setBuffer(blk->getBuffer() + buffer_size, new_free_blk_size) ; +		addToFreeSpace(next_blk) ; +	} + +	blk->init(blk->getBuffer(), buffer_size, slot_size) ; +	//insert to the available block list... +	mAvailBlockList[blk_idx] = blk ; + +	//mark the address map: all blocks covered by this block space pointing back to this block. +	U32 end = (buffer_size / mMinBlockSize) ; +	for(U32 i = 1 ; i < end ; i++) +	{ +		(blk + i)->mSelf = blk ; +	} + +	return blk ; +} + +//delete a block, release the block to the free pool. +void LLPrivateMemoryPool::LLMemoryChunk::removeBlock(LLMemoryBlock* blk) +{ +	//remove from the available block list +	if(blk->mPrev) +	{ +		blk->mPrev->mNext = blk->mNext ; +	} +	if(blk->mNext) +	{ +		blk->mNext->mPrev = blk->mPrev ; +	} +	U32 blk_idx = getBlockLevel(blk->getSlotSize()); +	if(mAvailBlockList[blk_idx] == blk) +	{ +		mAvailBlockList[blk_idx] = blk->mNext ; +	} + +	blk->mNext = NULL ; +	blk->mPrev = NULL ; +	 +	//mark it free +	blk->setBuffer(blk->getBuffer(), blk->getBufferSize()) ; + +#if 1 +	//merge blk with neighbors if possible +	if(blk->getBuffer() > mDataBuffer) //has the left neighbor +	{ +		if((blk - 1)->mSelf->isFree()) +		{ +			LLMemoryBlock* left_blk = (blk - 1)->mSelf ; +			removeFromFreeSpace((blk - 1)->mSelf); +			left_blk->setBuffer(left_blk->getBuffer(), left_blk->getBufferSize() + blk->getBufferSize()) ; +			blk = left_blk ; +		} +	} +	if(blk->getBuffer() + blk->getBufferSize() <= mBuffer + mBufferSize - mMinBlockSize) //has the right neighbor +	{ +		U32 d = blk->getBufferSize() / mMinBlockSize ; +		if((blk + d)->isFree()) +		{ +			LLMemoryBlock* right_blk = blk + d ; +			removeFromFreeSpace(blk + d) ; +			blk->setBuffer(blk->getBuffer(), blk->getBufferSize() + right_blk->getBufferSize()) ; +		} +	} +#endif +	 +	addToFreeSpace(blk) ; + +	return ; +} + +//the top block in the list is full, pop it out of the list +void LLPrivateMemoryPool::LLMemoryChunk::popAvailBlockList(U32 blk_idx)  +{ +	if(mAvailBlockList[blk_idx]) +	{ +		LLMemoryBlock* next = mAvailBlockList[blk_idx]->mNext ; +		if(next) +		{ +			next->mPrev = NULL ; +		} +		mAvailBlockList[blk_idx]->mPrev = NULL ; +		mAvailBlockList[blk_idx]->mNext = NULL ; +		mAvailBlockList[blk_idx] = next ; +	} +} + +//add the block back to the free pool +void LLPrivateMemoryPool::LLMemoryChunk::addToFreeSpace(LLMemoryBlock* blk)  +{ +	llassert_always(!blk->mPrev) ; +	llassert_always(!blk->mNext) ; + +	U16 free_idx = blk->getBufferSize() / mMinBlockSize - 1; + +	(blk + free_idx)->mSelf = blk ; //mark the end pointing back to the head. 
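(Editor's aside.) addToFreeSpace()/removeFromFreeSpace() here, and addToAvailBlockList() below, are head-insert and unlink operations on intrusive doubly linked lists whose heads live in the per-level arrays. A generic sketch of that pattern with a hypothetical Node type:

#include <cassert>
#include <cstddef>

struct Node
{
    Node* mPrev;
    Node* mNext;
    Node() : mPrev(NULL), mNext(NULL) {}
};

// Push 'n' at the head of the list rooted at 'head' (head may be NULL).
void pushFront(Node*& head, Node* n)
{
    n->mNext = head;
    n->mPrev = NULL;
    if (head) head->mPrev = n;
    head = n;
}

// Unlink 'n' from the list, fixing up the head if 'n' was at the front.
void unlink(Node*& head, Node* n)
{
    if (head == n) head = n->mNext;
    if (n->mPrev)  n->mPrev->mNext = n->mNext;
    if (n->mNext)  n->mNext->mPrev = n->mPrev;
    n->mPrev = n->mNext = NULL;
}

int main()
{
    Node a, b;
    Node* freeList = NULL;
    pushFront(freeList, &a);
    pushFront(freeList, &b);        // list is now b <-> a
    assert(freeList == &b && b.mNext == &a && a.mPrev == &b);
    unlink(freeList, &a);           // list is now just b
    assert(freeList == &b && b.mNext == NULL);
    return 0;
}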
+	free_idx = llmin(free_idx, (U16)(mPartitionLevels - 1)) ; + +	blk->mNext = mFreeSpaceList[free_idx] ; +	if(mFreeSpaceList[free_idx]) +	{ +		mFreeSpaceList[free_idx]->mPrev = blk ; +	} +	mFreeSpaceList[free_idx] = blk ; +	blk->mPrev = NULL ; +	blk->mSelf = blk ; +	 +	return ; +} + +//remove the space from the free pool +void LLPrivateMemoryPool::LLMemoryChunk::removeFromFreeSpace(LLMemoryBlock* blk)  +{ +	U16 free_idx = blk->getBufferSize() / mMinBlockSize - 1; +	free_idx = llmin(free_idx, (U16)(mPartitionLevels - 1)) ; + +	if(mFreeSpaceList[free_idx] == blk) +	{ +		mFreeSpaceList[free_idx] = blk->mNext ; +	} +	if(blk->mPrev) +	{ +		blk->mPrev->mNext = blk->mNext ; +	} +	if(blk->mNext) +	{ +		blk->mNext->mPrev = blk->mPrev ; +	} +	blk->mNext = NULL ; +	blk->mPrev = NULL ; +	blk->mSelf = NULL ; + +	return ; +} + +void LLPrivateMemoryPool::LLMemoryChunk::addToAvailBlockList(LLMemoryBlock* blk)  +{ +	llassert_always(!blk->mPrev) ; +	llassert_always(!blk->mNext) ; + +	U32 blk_idx = getBlockLevel(blk->getSlotSize()); + +	blk->mNext = mAvailBlockList[blk_idx] ; +	if(blk->mNext) +	{ +		blk->mNext->mPrev = blk ; +	} +	blk->mPrev = NULL ; +	mAvailBlockList[blk_idx] = blk ; + +	return ; +} + +U32 LLPrivateMemoryPool::LLMemoryChunk::getPageIndex(U32 addr) +{ +	return (addr - (U32)mDataBuffer) / mMinBlockSize ; +} + +//for mAvailBlockList +U32 LLPrivateMemoryPool::LLMemoryChunk::getBlockLevel(U32 size) +{ +	llassert(size >= mMinSlotSize && size <= mMaxSlotSize) ; + +	//start from 0 +	return (size + mMinSlotSize - 1) / mMinSlotSize - 1 ; +} + +//for mFreeSpaceList +U16 LLPrivateMemoryPool::LLMemoryChunk::getPageLevel(U32 size) +{ +	//start from 0 +	U16 level = size / mMinBlockSize - 1 ; +	if(level >= mPartitionLevels) +	{ +		level = mPartitionLevels - 1 ; +	} +	return level ; +} + +//------------------------------------------------------------------- +//class LLPrivateMemoryPool +//-------------------------------------------------------------------- +const U32 CHUNK_SIZE = 4 << 20 ; //4 MB +const U32 LARGE_CHUNK_SIZE = 4 * CHUNK_SIZE ; //16 MB +LLPrivateMemoryPool::LLPrivateMemoryPool(S32 type) : +	mMutexp(NULL),	 +	mReservedPoolSize(0), +	mHashFactor(1), +	mType(type) +{ +	const U32 MAX_POOL_SIZE = 256 * 1024 * 1024 ; //256 MB + +	mMaxPoolSize = MAX_POOL_SIZE ; +	if(type == STATIC_THREADED || type == VOLATILE_THREADED) +	{ +		mMutexp = new LLMutex ; +	} + +	for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++) +	{ +		mChunkList[i] = NULL ; +	}	 +	 +	mNumOfChunks = 0 ; +} + +LLPrivateMemoryPool::~LLPrivateMemoryPool() +{ +	destroyPool(); +	delete mMutexp ; +} + +char* LLPrivateMemoryPool::allocate(U32 size) +{	 +	if(!size) +	{ +		return NULL ; +	} + +	//if the asked size larger than MAX_BLOCK_SIZE, fetch from heap directly, the pool does not manage it +	if(size >= CHUNK_SIZE) +	{ +		return (char*)malloc(size) ; +	} + +	char* p = NULL ; + +	//find the appropriate chunk +	S32 chunk_idx = getChunkIndex(size) ; +	 +	lock() ; + +	LLMemoryChunk* chunk = mChunkList[chunk_idx]; +	while(chunk) +	{ +		if((p = chunk->allocate(size))) +		{ +			break ; +		} +		chunk = chunk->mNext ; +	} +	 +	//fetch new memory chunk +	if(!p) +	{ +		if(mReservedPoolSize + CHUNK_SIZE > mMaxPoolSize) +		{ +			chunk = mChunkList[chunk_idx]; +			while(chunk) +			{ +				if((p = chunk->allocate(size))) +				{ +					break ; +				} +				chunk = chunk->mNext ; +			} +		} + +		chunk = addChunk(chunk_idx) ; +		if(chunk) +		{ +			p = chunk->allocate(size) ; +		} +	} + +	unlock() ; + +	return p ; +} + +void LLPrivateMemoryPool::freeMem(void* addr) +{ +	
if(!addr) +	{ +		return ; +	} +	 +	lock() ; +	 +	LLMemoryChunk* chunk = findChunk((char*)addr) ; +	 +	if(!chunk) +	{ +		free(addr) ; //release from heap +	} +	else +	{ +		chunk->freeMem(addr) ; + +		if(chunk->empty()) +		{ +			removeChunk(chunk) ; +		} +	} +	 +	unlock() ; +} + +void LLPrivateMemoryPool::dump() +{ +} + +U32 LLPrivateMemoryPool::getTotalAllocatedSize() +{ +	U32 total_allocated = 0 ; + +	LLMemoryChunk* chunk ; +	for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++) +	{ +		chunk = mChunkList[i]; +		while(chunk) +		{ +			total_allocated += chunk->getAllocatedSize() ; +			chunk = chunk->mNext ; +		} +	} + +	return total_allocated ; +} + +void LLPrivateMemoryPool::lock() +{ +	if(mMutexp) +	{ +		mMutexp->lock() ; +	} +} + +void LLPrivateMemoryPool::unlock() +{ +	if(mMutexp) +	{ +		mMutexp->unlock() ; +	} +} + +S32  LLPrivateMemoryPool::getChunkIndex(U32 size)  +{ +	S32 i ; +	for(i = 0 ; size > MAX_SLOT_SIZES[i]; i++); +	 +	llassert_always(i < SUPER_ALLOCATION); + +	return i ; +} + +//destroy the entire pool +void  LLPrivateMemoryPool::destroyPool() +{ +	lock() ; + +	if(mNumOfChunks > 0) +	{ +		llwarns << "There is some memory not freed when destroy the memory pool!" << llendl ; +	} + +	mNumOfChunks = 0 ; +	mChunkHashList.clear() ; +	mHashFactor = 1 ; +	for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++) +	{ +		mChunkList[i] = NULL ; +	} + +	unlock() ; +} + +void  LLPrivateMemoryPool::checkSize(U32 asked_size) +{ +	if(mReservedPoolSize + asked_size > mMaxPoolSize) +	{ +		llinfos << "Max pool size: " << mMaxPoolSize << llendl ; +		llinfos << "Total reserved size: " << mReservedPoolSize + asked_size << llendl ; +		llinfos << "Total_allocated Size: " << getTotalAllocatedSize() << llendl ; + +		llerrs << "The pool is overflowing..." << llendl ; +	} +} + +LLPrivateMemoryPool::LLMemoryChunk* LLPrivateMemoryPool::addChunk(S32 chunk_index) +{ +	U32 preferred_size ; +	U32 overhead ; +	if(chunk_index < LARGE_ALLOCATION) +	{ +		preferred_size = CHUNK_SIZE ; //4MB +		overhead = LLMemoryChunk::getMaxOverhead(preferred_size, MIN_SLOT_SIZES[chunk_index], +			MAX_SLOT_SIZES[chunk_index], MIN_BLOCK_SIZES[chunk_index], MAX_BLOCK_SIZES[chunk_index]) ; +	} +	else +	{ +		preferred_size = LARGE_CHUNK_SIZE ; //16MB +		overhead = LLMemoryChunk::getMaxOverhead(preferred_size, MIN_SLOT_SIZES[chunk_index],  +			MAX_SLOT_SIZES[chunk_index], MIN_BLOCK_SIZES[chunk_index], MAX_BLOCK_SIZES[chunk_index]) ; +	} + +	checkSize(preferred_size + overhead) ; +	mReservedPoolSize += preferred_size + overhead ; + +	char* buffer = (char*)malloc(preferred_size + overhead) ; +	if(!buffer) +	{ +		return NULL ; +	} +	 +	LLMemoryChunk* chunk = new (buffer) LLMemoryChunk() ; +	chunk->init(buffer, preferred_size + overhead, MIN_SLOT_SIZES[chunk_index], +		MAX_SLOT_SIZES[chunk_index], MIN_BLOCK_SIZES[chunk_index], MAX_BLOCK_SIZES[chunk_index]) ; + +	//add to the tail of the linked list +	{ +		if(!mChunkList[chunk_index]) +		{ +			mChunkList[chunk_index] = chunk ; +		} +		else +		{ +			LLMemoryChunk* cur = mChunkList[chunk_index] ; +			while(cur->mNext) +			{ +				cur = cur->mNext ; +			} +			cur->mNext = chunk ; +			chunk->mPrev = cur ; +		} +	} + +	//insert into the hash table +	addToHashTable(chunk) ; +	 +	mNumOfChunks++; + +	return chunk ; +} + +void LLPrivateMemoryPool::removeChunk(LLMemoryChunk* chunk)  +{ +	if(!chunk) +	{ +		return ; +	} + +	//remove from the linked list +	for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++) +	{ +		if(mChunkList[i] == chunk) +		{ +			mChunkList[i] = chunk->mNext ; +		} +	} + +	if(chunk->mPrev) +	{ +		
chunk->mPrev->mNext = chunk->mNext ; +	} +	if(chunk->mNext) +	{ +		chunk->mNext->mPrev = chunk->mPrev ; +	} + +	//remove from the hash table +	removeFromHashTable(chunk) ; +	 +	mNumOfChunks--; +	mReservedPoolSize -= chunk->getBufferSize() ; +	 +	//release memory +	free(chunk->getBuffer()) ; +} + +U16 LLPrivateMemoryPool::findHashKey(const char* addr) +{ +	return (((U32)addr) / CHUNK_SIZE) % mHashFactor ; +} + +LLPrivateMemoryPool::LLMemoryChunk* LLPrivateMemoryPool::findChunk(const char* addr) +{ +	U16 key = findHashKey(addr) ;	 +	if(mChunkHashList.size() <= key) +	{ +		return NULL ; +	} + +	return mChunkHashList[key].findChunk(addr) ;	 +} + +void LLPrivateMemoryPool::addToHashTable(LLMemoryChunk* chunk)  +{ +	static const U16 HASH_FACTORS[] = {41, 83, 193, 317, 419, 523, 0xFFFF};  +	 +	U16 i ; +	if(mChunkHashList.empty()) +	{ +		mHashFactor = HASH_FACTORS[0] ; +		rehash() ;		 +	} + +	U16 start_key = findHashKey(chunk->getBuffer()) ; +	U16 end_key = findHashKey(chunk->getBuffer() + chunk->getBufferSize() - 1) ; +	bool need_rehash = false ; +	 +	if(mChunkHashList[start_key].hasElement(chunk)) +	{ +		return; //already inserted. +	} +	need_rehash = mChunkHashList[start_key].add(chunk) ; +	 +	if(start_key == end_key && !need_rehash) +	{ +		return ; //done +	} + +	if(!need_rehash) +	{ +		need_rehash = mChunkHashList[end_key].add(chunk) ; +	} + +	if(!need_rehash) +	{ +		if(end_key < start_key) +		{ +			need_rehash = fillHashTable(start_key + 1, mHashFactor, chunk) ; +			if(!need_rehash) +			{ +				need_rehash = fillHashTable(0, end_key, chunk) ; +			} +		} +		else +		{ +			need_rehash = fillHashTable(start_key + 1, end_key, chunk) ; +		} +	} +	 +	if(need_rehash) +	{ +		i = 0 ; +		while(HASH_FACTORS[i] <= mHashFactor) i++; + +		mHashFactor = HASH_FACTORS[i] ; +		llassert_always(mHashFactor != 0xFFFF) ;//stop point to prevent endlessly recursive calls + +		rehash() ; +	} +} + +void LLPrivateMemoryPool::removeFromHashTable(LLMemoryChunk* chunk)  +{ +	U16 start_key = findHashKey(chunk->getBuffer()) ; +	U16 end_key = findHashKey(chunk->getBuffer() + chunk->getBufferSize() - 1) ; +	 +	mChunkHashList[start_key].remove(chunk) ; +	if(start_key == end_key) +	{ +		return ; //done +	} + +	mChunkHashList[end_key].remove(chunk) ; +	 +	if(end_key < start_key) +	{ +		for(U16 i = start_key + 1 ; i < mHashFactor; i++) +		{ +			mChunkHashList[i].remove(chunk) ; +		} +		for(U16 i = 0 ; i < end_key; i++) +		{ +			mChunkHashList[i].remove(chunk) ; +		} +	} +	else +	{ +		for(U16 i = start_key + 1 ; i < end_key; i++) +		{ +			mChunkHashList[i].remove(chunk) ; +		} +	} +} + +void LLPrivateMemoryPool::rehash() +{ +	llinfos << "new hash factor: " << mHashFactor << llendl ; + +	mChunkHashList.clear() ; +	mChunkHashList.resize(mHashFactor) ; + +	LLMemoryChunk* chunk ; +	for(U16 i = 0 ; i < SUPER_ALLOCATION ; i++) +	{ +		chunk = mChunkList[i] ;  +		while(chunk) +		{ +			addToHashTable(chunk) ; +			chunk = chunk->mNext ; +		} +	} +} + +bool LLPrivateMemoryPool::fillHashTable(U16 start, U16 end, LLMemoryChunk* chunk) +{ +	for(U16 i = start; i < end; i++) +	{ +		if(mChunkHashList[i].add(chunk)) +		{			 +			return true ; +		}		 +	} + +	return false ; +} + +//-------------------------------------------------------------------- +// class LLChunkHashElement +//-------------------------------------------------------------------- +LLPrivateMemoryPool::LLMemoryChunk* LLPrivateMemoryPool::LLChunkHashElement::findChunk(const char* addr) +{ +	if(mFirst && mFirst->containsAddress(addr)) +	{ +		return mFirst ; +	} +	else if(mSecond && 
mSecond->containsAddress(addr)) +	{ +		return mSecond ; +	} + +	return NULL ; +} + +//return false if successfully inserted to the hash slot. +bool LLPrivateMemoryPool::LLChunkHashElement::add(LLPrivateMemoryPool::LLMemoryChunk* chunk) +{ +	llassert_always(!hasElement(chunk)) ; + +	if(!mFirst) +	{ +		mFirst = chunk ; +	} +	else if(!mSecond) +	{ +		mSecond = chunk ; +	} +	else +	{ +		return true ; //failed +	} + +	return false ; +} + +void LLPrivateMemoryPool::LLChunkHashElement::remove(LLPrivateMemoryPool::LLMemoryChunk* chunk) +{ +	if(mFirst == chunk) +	{ +		mFirst = NULL ; +	} +	else if(mSecond ==chunk) +	{ +		mSecond = NULL ; +	} +	else +	{ +		llerrs << "This slot does not contain this chunk!" << llendl ; +	} +} + +//-------------------------------------------------------------------- +//class LLPrivateMemoryPoolManager +//-------------------------------------------------------------------- +LLPrivateMemoryPoolManager* LLPrivateMemoryPoolManager::sInstance = NULL ; + +LLPrivateMemoryPoolManager::LLPrivateMemoryPoolManager(BOOL enabled)  +{ +	mPoolList.resize(LLPrivateMemoryPool::MAX_TYPES) ; + +	for(S32 i = 0 ; i < LLPrivateMemoryPool::MAX_TYPES; i++) +	{ +		mPoolList[i] = NULL ; +	} + +	mPrivatePoolEnabled = enabled ; +} + +LLPrivateMemoryPoolManager::~LLPrivateMemoryPoolManager()  +{ + +#if __DEBUG_PRIVATE_MEM__ +	if(!sMemAllocationTracker.empty()) +	{ +		llwarns << "there is potential memory leaking here. The list of not freed memory blocks are from: " <<llendl ; + +		S32 k = 0 ; +		for(mem_allocation_info_t::iterator iter = sMemAllocationTracker.begin() ; iter != sMemAllocationTracker.end() ; ++iter) +		{ +			llinfos << k++ << ", " << iter->second << llendl ; +		} +		sMemAllocationTracker.clear() ; +	} +#endif + +#if 0 +	//all private pools should be released by their owners before reaching here. 
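(Editor's aside on the chunk hash table a bit earlier, findHashKey()/addToHashTable().) The key is the address divided by CHUNK_SIZE, modulo the current hash factor, and a chunk is registered under every key its address range touches, which is why addToHashTable() walks from the start key to the end key. A tiny worked check of that key arithmetic, assuming the 4 MB CHUNK_SIZE defined in this file and the initial hash factor of 41:

#include <cassert>

typedef unsigned int U32;
typedef unsigned short U16;

const U32 CHUNK_SIZE  = 4 << 20;   // 4 MB, as in this file
const U16 HASH_FACTOR = 41;        // first entry of HASH_FACTORS[]

U16 findHashKey(U32 addr) { return (U16)((addr / CHUNK_SIZE) % HASH_FACTOR); }

int main()
{
    // A 4 MB chunk whose buffer starts 1 MB before a 4 MB boundary spans two
    // CHUNK_SIZE-aligned windows, so it must be registered under two keys.
    U32 start = 7 * CHUNK_SIZE - (1 << 20);   // inside window 6
    U32 end   = start + CHUNK_SIZE - 1;       // reaches into window 7
    assert(findHashKey(start) == 6);
    assert(findHashKey(end)   == 7);
    // Any address inside the chunk hashes to one of those keys, so freeMem()
    // only has to probe the slot the address itself maps to.
    assert(findHashKey(start + (2 << 20)) == 7);
    return 0;
}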
+	for(S32 i = 0 ; i < LLPrivateMemoryPool::MAX_TYPES; i++) +	{ +		llassert_always(!mPoolList[i]) ; +	} +	mPoolList.clear() ; + +#else +	//forcefully release all memory +	for(S32 i = 0 ; i < LLPrivateMemoryPool::MAX_TYPES; i++) +	{ +		if(mPoolList[i]) +		{ +			delete mPoolList[i] ; +			mPoolList[i] = NULL ; +		} +	} +	mPoolList.clear() ; +#endif +} + +//static  +void LLPrivateMemoryPoolManager::initClass(BOOL enabled)  +{ +	llassert_always(!sInstance) ; + +	sInstance = new LLPrivateMemoryPoolManager(enabled) ; +} + +//static  +LLPrivateMemoryPoolManager* LLPrivateMemoryPoolManager::getInstance()  +{ +	//if(!sInstance) +	//{ +	//	sInstance = new LLPrivateMemoryPoolManager(FALSE) ; +	//} +	return sInstance ; +} +	 +//static  +void LLPrivateMemoryPoolManager::destroyClass()  +{ +	if(sInstance) +	{ +		delete sInstance ; +		sInstance = NULL ; +	} +} + +LLPrivateMemoryPool* LLPrivateMemoryPoolManager::newPool(S32 type)  +{ +	if(!mPrivatePoolEnabled) +	{ +		return NULL ; +	} + +	if(!mPoolList[type]) +	{ +		mPoolList[type] = new LLPrivateMemoryPool(type) ; +	} + +	return mPoolList[type] ; +} + +void LLPrivateMemoryPoolManager::deletePool(LLPrivateMemoryPool* pool)  +{ +	if(pool && pool->isEmpty()) +	{ +		mPoolList[pool->getType()] = NULL ; +		delete pool; +	} +} + +//debug +void LLPrivateMemoryPoolManager::updateStatistics() +{ +	mTotalReservedSize = 0 ; +	mTotalAllocatedSize = 0 ; + +	for(U32 i = 0; i < mPoolList.size(); i++) +	{ +		if(mPoolList[i]) +		{ +			mTotalReservedSize += mPoolList[i]->getTotalReservedSize() ; +			mTotalAllocatedSize += mPoolList[i]->getTotalAllocatedSize() ; +		} +	} +} + +#if __DEBUG_PRIVATE_MEM__ +//static  +char* LLPrivateMemoryPoolManager::allocate(LLPrivateMemoryPool* poolp, U32 size, const char* function, const int line)  +{ +	char* p ; + +	if(!poolp) +	{ +		p = (char*)malloc(size) ; +	} +	else +	{ +		p = poolp->allocate(size) ; +	} +	 +	if(p) +	{ +		char num[16] ; +		sprintf(num, " line: %d ", line) ; +		std::string str(function) ; +		str += num;  + +		sMemAllocationTracker[p] = str ; +	} + +	return p ; +}	 +#else +//static  +char* LLPrivateMemoryPoolManager::allocate(LLPrivateMemoryPool* poolp, U32 size)  +{ +	if(poolp) +	{ +		return poolp->allocate(size) ;		 +	} +	else +	{ +		return (char*)malloc(size) ; +	} +} +#endif + +//static  +void  LLPrivateMemoryPoolManager::freeMem(LLPrivateMemoryPool* poolp, void* addr)  +{ +	if(!addr) +	{ +		return ; +	} + +#if __DEBUG_PRIVATE_MEM__ +	sMemAllocationTracker.erase((char*)addr) ; +#endif + +	if(poolp) +	{ +		poolp->freeMem(addr) ; +	} +	else +	{ +		free(addr) ; +	}	 +} + +//-------------------------------------------------------------------- +//class LLPrivateMemoryPoolTester +//-------------------------------------------------------------------- +#if 0 +LLPrivateMemoryPoolTester* LLPrivateMemoryPoolTester::sInstance = NULL ; +LLPrivateMemoryPool* LLPrivateMemoryPoolTester::sPool = NULL ; +LLPrivateMemoryPoolTester::LLPrivateMemoryPoolTester() +{	 +} +	 +LLPrivateMemoryPoolTester::~LLPrivateMemoryPoolTester()  +{	 +} + +//static  +LLPrivateMemoryPoolTester* LLPrivateMemoryPoolTester::getInstance()  +{ +	if(!sInstance) +	{ +		sInstance = ::new LLPrivateMemoryPoolTester() ; +	} +	return sInstance ; +} + +//static  +void LLPrivateMemoryPoolTester::destroy() +{ +	if(sInstance) +	{ +		::delete sInstance ; +		sInstance = NULL ; +	} + +	if(sPool) +	{ +		LLPrivateMemoryPoolManager::getInstance()->deletePool(sPool) ; +		sPool = NULL ; +	} +} + +void LLPrivateMemoryPoolTester::run(S32 type)  +{ +	if(sPool) +	{ +		
LLPrivateMemoryPoolManager::getInstance()->deletePool(sPool) ; +	} +	sPool = LLPrivateMemoryPoolManager::getInstance()->newPool(type) ; + +	//run the test +	correctnessTest() ; +	performanceTest() ; +	//fragmentationtest() ; + +	//release pool. +	LLPrivateMemoryPoolManager::getInstance()->deletePool(sPool) ; +	sPool = NULL ; +} + +void LLPrivateMemoryPoolTester::test(U32 min_size, U32 max_size, U32 stride, U32 times,  +									 bool random_deletion, bool output_statistics) +{ +	U32 levels = (max_size - min_size) / stride + 1 ; +	char*** p ; +	U32 i, j ; +	U32 total_allocated_size = 0 ; + +	//allocate space for p ; +	if(!(p = ::new char**[times]) || !(*p = ::new char*[times * levels])) +	{ +		llerrs << "memory initialization for p failed" << llendl ; +	} + +	//init +	for(i = 0 ; i < times; i++) +	{ +		p[i] = *p + i * levels ; +		for(j = 0 ; j < levels; j++) +		{ +			p[i][j] = NULL ; +		} +	} + +	//allocation +	U32 size ; +	for(i = 0 ; i < times ; i++) +	{ +		for(j = 0 ; j < levels; j++)  +		{ +			size = min_size + j * stride ; +			p[i][j] = ALLOCATE_MEM(sPool, size) ; + +			total_allocated_size+= size ; + +			*(U32*)p[i][j] = i ; +			*((U32*)p[i][j] + 1) = j ; +			//p[i][j][size - 1] = '\0' ; //access the last element to verify the success of the allocation. + +			//randomly release memory +			if(random_deletion) +			{ +				S32 k = rand() % levels ; + +				if(p[i][k]) +				{ +					llassert_always(*(U32*)p[i][k] == i && *((U32*)p[i][k] + 1) == k) ; +					FREE_MEM(sPool, p[i][k]) ; +					total_allocated_size -= min_size + k * stride ; +					p[i][k] = NULL ; +				} +			} +		} +	} + +	//output pool allocation statistics +	if(output_statistics) +	{ +	} + +	//release all memory allocations +	for(i = 0 ; i < times; i++) +	{ +		for(j = 0 ; j < levels; j++) +		{ +			if(p[i][j]) +			{ +				llassert_always(*(U32*)p[i][j] == i && *((U32*)p[i][j] + 1) == j) ; +				FREE_MEM(sPool, p[i][j]) ; +				total_allocated_size -= min_size + j * stride ; +				p[i][j] = NULL ; +			} +		} +	} + +	::delete[] *p ; +	::delete[] p ; +} + +void LLPrivateMemoryPoolTester::testAndTime(U32 size, U32 times) +{ +	LLTimer timer ; + +	llinfos << " -**********************- " << llendl ; +	llinfos << "test size: " << size << " test times: " << times << llendl ; + +	timer.reset() ; +	char** p = new char*[times] ; +		 +	//using the customized memory pool +	//allocation +	for(U32 i = 0 ; i < times; i++) +	{ +		p[i] = ALLOCATE_MEM(sPool, size) ; +		if(!p[i]) +		{ +			llerrs << "allocation failed" << llendl ; +		} +	} +	//de-allocation +	for(U32 i = 0 ; i < times; i++) +	{ +		FREE_MEM(sPool, p[i]) ; +		p[i] = NULL ; +	} +	llinfos << "time spent using customized memory pool: " << timer.getElapsedTimeF32() << llendl ; + +	timer.reset() ; + +	//using the standard allocator/de-allocator: +	//allocation +	for(U32 i = 0 ; i < times; i++) +	{ +		p[i] = ::new char[size] ; +		if(!p[i]) +		{ +			llerrs << "allocation failed" << llendl ; +		} +	} +	//de-allocation +	for(U32 i = 0 ; i < times; i++) +	{ +		::delete[] p[i] ; +		p[i] = NULL ; +	} +	llinfos << "time spent using standard allocator/de-allocator: " << timer.getElapsedTimeF32() << llendl ; + +	delete[] p; +} + +void LLPrivateMemoryPoolTester::correctnessTest()  +{ +	//try many different sized allocation, and all kinds of edge cases, access the allocated memory  +	//to see if allocation is right. 
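(Editor's aside.) The test() routine above stamps each allocation with its (i, j) indices and re-checks the stamps before freeing, so overlapping or corrupted allocations show up as mismatches. A minimal standalone version of that trick against plain malloc/free, with made-up sizes and no pool involved:

#include <cassert>
#include <cstdlib>

typedef unsigned int U32;

int main()
{
    const U32 ROWS = 4, COLS = 8, SIZE = 64;   // arbitrary test dimensions
    char* p[ROWS][COLS];

    // Allocate and stamp each block with its own indices.
    for (U32 i = 0; i < ROWS; i++)
        for (U32 j = 0; j < COLS; j++)
        {
            p[i][j] = (char*)malloc(SIZE);
            ((U32*)p[i][j])[0] = i;
            ((U32*)p[i][j])[1] = j;
        }

    // If any two allocations overlapped, at least one stamp would have been
    // overwritten; verify every block before releasing it.
    for (U32 i = 0; i < ROWS; i++)
        for (U32 j = 0; j < COLS; j++)
        {
            assert(((U32*)p[i][j])[0] == i && ((U32*)p[i][j])[1] == j);
            free(p[i][j]);
        }
    return 0;
}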
+	 +	//edge case +	char* p = ALLOCATE_MEM(sPool, 0) ; +	FREE_MEM(sPool, p) ; + +	//small sized +	// [8 bytes, 2KB), each asks for 256 allocations and deallocations +	test(8, 2040, 8, 256, true, true) ; +	 +	//medium sized +	//[2KB, 512KB), each asks for 16 allocations and deallocations +	test(2048, 512 * 1024 - 2048, 2048, 16, true, true) ; + +	//large sized +	//[512KB, 4MB], each asks for 8 allocations and deallocations +	test(512 * 1024, 4 * 1024 * 1024, 64 * 1024, 6, true, true) ; +} + +void LLPrivateMemoryPoolTester::performanceTest()  +{ +	U32 test_size[3] = {768, 3* 1024, 3* 1024 * 1024}; +	 +	//small sized +	testAndTime(test_size[0], 8) ; +	 +	//medium sized +	testAndTime(test_size[1], 8) ; + +	//large sized +	testAndTime(test_size[2], 8) ; +} + +void LLPrivateMemoryPoolTester::fragmentationtest()  +{ +	//for internal fragmentation statistics: +	//every time when asking for a new chunk during correctness test, and performance test, +	//print out the chunk usage statistices. +} +#endif +//-------------------------------------------------------------------- diff --git a/indra/llcommon/llmemory.h b/indra/llcommon/llmemory.h index 3bd1403576..db753f0d8b 100644 --- a/indra/llcommon/llmemory.h +++ b/indra/llcommon/llmemory.h @@ -27,7 +27,6 @@  #define LLMEMORY_H  #include "llmemtype.h" -  #if LL_DEBUG  inline void* ll_aligned_malloc( size_t size, int align )  { @@ -105,6 +104,10 @@ inline void ll_aligned_free_32(void *p)  #define ll_aligned_free_32 free  #endif // LL_DEBUG +#ifndef __DEBUG_PRIVATE_MEM__ +#define __DEBUG_PRIVATE_MEM__  0 +#endif +  class LL_COMMON_API LLMemory  {  public: @@ -115,8 +118,24 @@ public:  	// Return value is zero if not known.  	static U64 getCurrentRSS();  	static U32 getWorkingSetSize(); +	static void* tryToAlloc(void* address, U32 size); +	static void initMaxHeapSizeGB(F32 max_heap_size_gb, BOOL prevent_heap_failure); +	static void updateMemoryInfo() ; +	static void logMemoryInfo(BOOL update = FALSE); +	static S32  isMemoryPoolLow(); + +	static U32 getAvailableMemKB() ; +	static U32 getMaxMemKB() ; +	static U32 getAllocatedMemKB() ;  private:  	static char* reserveMem; +	static U32 sAvailPhysicalMemInKB ; +	static U32 sMaxPhysicalMemInKB ; +	static U32 sAllocatedMemInKB; +	static U32 sAllocatedPageSizeInKB ; + +	static U32 sMaxHeapSizeInKB; +	static BOOL sEnableMemoryFailurePrevention;  };  //---------------------------------------------------------------------------- @@ -163,6 +182,326 @@ private:  //---------------------------------------------------------------------------- + +// +//class LLPrivateMemoryPool defines a private memory pool for an application to use, so the application does not +//need to access the heap directly fro each memory allocation. Throught this, the allocation speed is faster,  +//and reduces virtaul address space gragmentation problem. +//Note: this class is thread-safe by passing true to the constructor function. However, you do not need to do this unless +//you are sure the memory allocation and de-allocation will happen in different threads. To make the pool thread safe +//increases allocation and deallocation cost. 
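(Editor's aside.) A sketch of how client code is expected to use such a pool, going through LLPrivateMemoryPoolManager and the ALLOCATE_MEM/FREE_MEM macros declared further down in this header. The setup, pool type, and sizes are made up for illustration; note that newPool() returns NULL when private pools are disabled, in which case the macros simply fall back to malloc/free:

#include "linden_common.h"
#include "llmemory.h"

void examplePoolUse()
{
    // One-time setup, normally done during application start-up.
    LLPrivateMemoryPoolManager::initClass(TRUE);

    // Ask the manager for a pool; a NULL pool is legal and just means the
    // macros below degrade to the plain heap.
    LLPrivateMemoryPool* pool =
        LLPrivateMemoryPoolManager::getInstance()->newPool(LLPrivateMemoryPool::STATIC);

    char* buf = ALLOCATE_MEM(pool, 1024);   // 1 KB from the pool (or the heap)
    // ... use buf ...
    FREE_MEM(pool, buf);

    // Pools are handed back to the manager, never deleted directly.
    LLPrivateMemoryPoolManager::getInstance()->deletePool(pool);
    LLPrivateMemoryPoolManager::destroyClass();
}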
+// +class LL_COMMON_API LLPrivateMemoryPool +{ +	friend class LLPrivateMemoryPoolManager ; + +public: +	class LL_COMMON_API LLMemoryBlock //each block is devided into slots uniformly +	{ +	public:  +		LLMemoryBlock() ; +		~LLMemoryBlock() ; + +		void init(char* buffer, U32 buffer_size, U32 slot_size) ; +		void setBuffer(char* buffer, U32 buffer_size) ; + +		char* allocate() ; +		void  freeMem(void* addr) ; + +		bool empty() {return !mAllocatedSlots;} +		bool isFull() {return mAllocatedSlots == mTotalSlots;} +		bool isFree() {return !mTotalSlots;} + +		U32  getSlotSize()const {return mSlotSize;} +		U32  getTotalSlots()const {return mTotalSlots;} +		U32  getBufferSize()const {return mBufferSize;} +		char* getBuffer() const {return mBuffer;} + +		//debug use +		void resetBitMap() ; +	private: +		char* mBuffer; +		U32   mSlotSize ; //when the block is not initialized, it is the buffer size. +		U32   mBufferSize ; +		U32   mUsageBits ; +		U8    mTotalSlots ; +		U8    mAllocatedSlots ; +		U8    mDummySize ; //size of extra bytes reserved for mUsageBits. + +	public: +		LLMemoryBlock* mPrev ; +		LLMemoryBlock* mNext ; +		LLMemoryBlock* mSelf ; + +		struct CompareAddress +		{ +			bool operator()(const LLMemoryBlock* const& lhs, const LLMemoryBlock* const& rhs) +			{ +				return (U32)lhs->getBuffer() < (U32)rhs->getBuffer(); +			} +		}; +	}; + +	class LL_COMMON_API LLMemoryChunk //is divided into memory blocks. +	{ +	public: +		LLMemoryChunk() ; +		~LLMemoryChunk() ; + +		void init(char* buffer, U32 buffer_size, U32 min_slot_size, U32 max_slot_size, U32 min_block_size, U32 max_block_size) ; +		void setBuffer(char* buffer, U32 buffer_size) ; + +		bool empty() ; +		 +		char* allocate(U32 size) ; +		void  freeMem(void* addr) ; + +		char* getBuffer() const {return mBuffer;} +		U32 getBufferSize() const {return mBufferSize;} +		U32 getAllocatedSize() const {return mAlloatedSize;} + +		bool containsAddress(const char* addr) const; + +		static U32 getMaxOverhead(U32 data_buffer_size, U32 min_slot_size,  +													   U32 max_slot_size, U32 min_block_size, U32 max_block_size) ; +	 +		void dump() ; + +	private: +		U32 getPageIndex(U32 addr) ; +		U32 getBlockLevel(U32 size) ; +		U16 getPageLevel(U32 size) ; +		LLMemoryBlock* addBlock(U32 blk_idx) ; +		void popAvailBlockList(U32 blk_idx) ; +		void addToFreeSpace(LLMemoryBlock* blk) ; +		void removeFromFreeSpace(LLMemoryBlock* blk) ; +		void removeBlock(LLMemoryBlock* blk) ; +		void addToAvailBlockList(LLMemoryBlock* blk) ; +		U32  calcBlockSize(U32 slot_size); +		LLMemoryBlock* createNewBlock(LLMemoryBlock* blk, U32 buffer_size, U32 slot_size, U32 blk_idx) ; + +	private: +		LLMemoryBlock** mAvailBlockList ;//256 by mMinSlotSize +		LLMemoryBlock** mFreeSpaceList; +		LLMemoryBlock*  mBlocks ; //index of blocks by address. 
+		 +		char* mBuffer ; +		U32   mBufferSize ; +		char* mDataBuffer ; +		char* mMetaBuffer ; +		U32   mMinBlockSize ; +		U32   mMinSlotSize ; +		U32   mMaxSlotSize ; +		U32   mAlloatedSize ; +		U16   mBlockLevels; +		U16   mPartitionLevels; + +	public: +		//form a linked list +		LLMemoryChunk* mNext ; +		LLMemoryChunk* mPrev ; +	} ; + +private: +	LLPrivateMemoryPool(S32 type) ; +	~LLPrivateMemoryPool() ; + +	char *allocate(U32 size) ; +	void  freeMem(void* addr) ; +	 +	void  dump() ; +	U32   getTotalAllocatedSize() ; +	U32   getTotalReservedSize() {return mReservedPoolSize;} +	S32   getType() const {return mType; } +	bool  isEmpty() const {return !mNumOfChunks; } + +private: +	void lock() ; +	void unlock() ;	 +	S32 getChunkIndex(U32 size) ; +	LLMemoryChunk*  addChunk(S32 chunk_index) ; +	void checkSize(U32 asked_size) ; +	void removeChunk(LLMemoryChunk* chunk) ; +	U16  findHashKey(const char* addr); +	void addToHashTable(LLMemoryChunk* chunk) ; +	void removeFromHashTable(LLMemoryChunk* chunk) ; +	void rehash() ; +	bool fillHashTable(U16 start, U16 end, LLMemoryChunk* chunk) ; +	LLMemoryChunk* findChunk(const char* addr) ; + +	void destroyPool() ; + +public: +	enum +	{ +		SMALL_ALLOCATION = 0, //from 8 bytes to 2KB(exclusive), page size 2KB, max chunk size is 4MB. +		MEDIUM_ALLOCATION,    //from 2KB to 512KB(exclusive), page size 32KB, max chunk size 4MB +		LARGE_ALLOCATION,     //from 512KB to 4MB(inclusive), page size 64KB, max chunk size 16MB +		SUPER_ALLOCATION      //allocation larger than 4MB. +	}; + +	enum +	{ +		STATIC = 0 ,       //static pool(each alllocation stays for a long time) without threading support +		VOLATILE,          //Volatile pool(each allocation stays for a very short time) without threading support +		STATIC_THREADED,   //static pool with threading support +		VOLATILE_THREADED, //volatile pool with threading support +		MAX_TYPES +	}; //pool types + +private: +	LLMutex* mMutexp ; +	U32  mMaxPoolSize; +	U32  mReservedPoolSize ;	 + +	LLMemoryChunk* mChunkList[SUPER_ALLOCATION] ; //all memory chunks reserved by this pool, sorted by address	 +	U16 mNumOfChunks ; +	U16 mHashFactor ; + +	S32 mType ; + +	class LLChunkHashElement +	{ +	public: +		LLChunkHashElement() {mFirst = NULL ; mSecond = NULL ;} + +		bool add(LLMemoryChunk* chunk) ; +		void remove(LLMemoryChunk* chunk) ; +		LLMemoryChunk* findChunk(const char* addr) ; + +		bool empty() {return !mFirst && !mSecond; } +		bool full()  {return mFirst && mSecond; } +		bool hasElement(LLMemoryChunk* chunk) {return mFirst == chunk || mSecond == chunk;} + +	private: +		LLMemoryChunk* mFirst ; +		LLMemoryChunk* mSecond ; +	}; +	std::vector<LLChunkHashElement> mChunkHashList ; +}; + +class LL_COMMON_API LLPrivateMemoryPoolManager +{ +private: +	LLPrivateMemoryPoolManager(BOOL enabled) ; +	~LLPrivateMemoryPoolManager() ; + +public:	 +	static LLPrivateMemoryPoolManager* getInstance() ; +	static void initClass(BOOL enabled) ; +	static void destroyClass() ; + +	LLPrivateMemoryPool* newPool(S32 type) ; +	void deletePool(LLPrivateMemoryPool* pool) ; + +private: +	static LLPrivateMemoryPoolManager* sInstance ; +	std::vector<LLPrivateMemoryPool*> mPoolList ; +	BOOL mPrivatePoolEnabled; + +public: +	//debug and statistics info. 
+	void updateStatistics() ; + +	U32 mTotalReservedSize ; +	U32 mTotalAllocatedSize ; + +public: +#if __DEBUG_PRIVATE_MEM__ +	static char* allocate(LLPrivateMemoryPool* poolp, U32 size, const char* function, const int line) ;	 +	 +	typedef std::map<char*, std::string> mem_allocation_info_t ; +	static mem_allocation_info_t sMemAllocationTracker; +#else +	static char* allocate(LLPrivateMemoryPool* poolp, U32 size) ;	 +#endif +	static void  freeMem(LLPrivateMemoryPool* poolp, void* addr) ; +}; + +//------------------------------------------------------------------------------------- +#if __DEBUG_PRIVATE_MEM__ +#define ALLOCATE_MEM(poolp, size) LLPrivateMemoryPoolManager::allocate((poolp), (size), __FUNCTION__, __LINE__) +#else +#define ALLOCATE_MEM(poolp, size) LLPrivateMemoryPoolManager::allocate((poolp), (size)) +#endif +#define FREE_MEM(poolp, addr) LLPrivateMemoryPoolManager::freeMem((poolp), (addr)) +//------------------------------------------------------------------------------------- + +// +//the below singleton is used to test the private memory pool. +// +#if 0 +class LL_COMMON_API LLPrivateMemoryPoolTester +{ +private: +	LLPrivateMemoryPoolTester() ; +	~LLPrivateMemoryPoolTester() ; + +public: +	static LLPrivateMemoryPoolTester* getInstance() ; +	static void destroy() ; + +	void run(S32 type) ;	 + +private: +	void correctnessTest() ; +	void performanceTest() ; +	void fragmentationtest() ; + +	void test(U32 min_size, U32 max_size, U32 stride, U32 times, bool random_deletion, bool output_statistics) ; +	void testAndTime(U32 size, U32 times) ; + +#if 0 +public: +	void* operator new(size_t size) +	{ +		return (void*)sPool->allocate(size) ; +	} +    void  operator delete(void* addr) +	{ +		sPool->freeMem(addr) ; +	} +	void* operator new[](size_t size) +	{ +		return (void*)sPool->allocate(size) ; +	} +    void  operator delete[](void* addr) +	{ +		sPool->freeMem(addr) ; +	} +#endif + +private: +	static LLPrivateMemoryPoolTester* sInstance; +	static LLPrivateMemoryPool* sPool ; +	static LLPrivateMemoryPool* sThreadedPool ; +}; +#if 0 +//static +void* LLPrivateMemoryPoolTester::operator new(size_t size) +{ +	return (void*)sPool->allocate(size) ; +} + +//static +void  LLPrivateMemoryPoolTester::operator delete(void* addr) +{ +	sPool->free(addr) ; +} + +//static +void* LLPrivateMemoryPoolTester::operator new[](size_t size) +{ +	return (void*)sPool->allocate(size) ; +} + +//static +void  LLPrivateMemoryPoolTester::operator delete[](void* addr) +{ +	sPool->free(addr) ; +} +#endif +#endif  // LLRefCount moved to llrefcount.h  // LLPointer moved to llpointer.h diff --git a/indra/llcommon/llscopedvolatileaprpool.h b/indra/llcommon/llscopedvolatileaprpool.h new file mode 100644 index 0000000000..dbaf4edcad --- /dev/null +++ b/indra/llcommon/llscopedvolatileaprpool.h @@ -0,0 +1,52 @@ +/** + * @file llscopedvolatileaprpool.h + * @brief Implementation of LLScopedVolatileAPRPool + * + * $LicenseInfo:firstyear=2010&license=viewerlgpl$ + * Second Life Viewer Source Code + * Copyright (C) 2011, Linden Research, Inc. + *  + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. + *  + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * Lesser General Public License for more details. 
+ * 
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ * 
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA  94111  USA
+ * $/LicenseInfo$
+ */
+
+#ifndef LL_LLSCOPEDVOLATILEAPRPOOL_H
+#define LL_LLSCOPEDVOLATILEAPRPOOL_H
+
+#include "llthread.h"
+
+/** Scoped volatile memory pool.
+ *
+ * As the LLVolatileAPRPool should never keep allocations very
+ * long, its most common use is for allocations with a lifetime
+ * equal to its scope.
+ *
+ * This is a convenience class that makes it just a little easier to type.
+ */
+class LL_COMMON_API LLScopedVolatileAPRPool
+{
+private:
+	LLVolatileAPRPool& mPool;
+	apr_pool_t* mScopedAPRpool;		// The use of apr_pool_t is OK here.
+public:
+	LLScopedVolatileAPRPool() : mPool(LLThreadLocalData::tldata().mVolatileAPRPool), mScopedAPRpool(mPool.getVolatileAPRPool()) { }
+	~LLScopedVolatileAPRPool() { mPool.clearVolatileAPRPool(); }
+	//! @attention Only use this to pass the underlying pointer to a libapr-1 function that requires it.
+	operator apr_pool_t*() const { return mScopedAPRpool; }		// The use of apr_pool_t is OK here.
+};
+
+#endif
diff --git a/indra/llcommon/llsdserialize_xml.cpp b/indra/llcommon/llsdserialize_xml.cpp
index c5a7c6fc15..bf216d41bf 100644
--- a/indra/llcommon/llsdserialize_xml.cpp
+++ b/indra/llcommon/llsdserialize_xml.cpp
@@ -354,6 +354,7 @@ static unsigned get_till_eol(std::istream& input, char *buf, unsigned bufsize)
 	return count;
 }
 
+LLFastTimer::DeclareTimer FTM_SD_PARSE_READ_STREAM("LLSD Read Stream");
 S32 LLSDXMLParser::Impl::parse(std::istream& input, LLSD& data)
 {
 	XML_Status status;
@@ -373,10 +374,13 @@ S32 LLSDXMLParser::Impl::parse(std::istream& input, LLSD& data)
 		{
 			break;
 		}
-		count = get_till_eol(input, (char *)buffer, BUFFER_SIZE);
-		if (!count)
-		{
-			break;
+		{ LLFastTimer _(FTM_SD_PARSE_READ_STREAM);
+		
+			count = get_till_eol(input, (char *)buffer, BUFFER_SIZE);
+			if (!count)
+			{
+				break;
+			}
 		}
 
 		status = XML_ParseBuffer(mParser, count, false);
@@ -716,6 +720,7 @@ void LLSDXMLParser::Impl::endElementHandler(const XML_Char* name)
 		case ELEMENT_INTEGER:
 			{
 				S32 i;
+				// sscanf okay here with different locales - ints don't change for different locale settings like floats do.
 				if ( sscanf(mCurrentContent.c_str(), "%d", &i ) == 1 )
 				{	// See if sscanf works - it's faster
 					value = i;
@@ -729,15 +734,19 @@ void LLSDXMLParser::Impl::endElementHandler(const XML_Char* name)
 		case ELEMENT_REAL:
 			{
-				F64 r;
-				if ( sscanf(mCurrentContent.c_str(), "%lf", &r ) == 1 )
-				{	// See if sscanf works - it's faster
-					value = r;
-				}
-				else
-				{
-					value = LLSD(mCurrentContent).asReal();
-				}
+				value = LLSD(mCurrentContent).asReal();
+				// removed since the sscanf path breaks when the locale's decimal separator isn't '.'
+				// investigated changing the locale to something compatible each time, but deemed that higher
+				// risk than just using LLSD::asReal() each time.
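For context on the locale comment just above: sscanf("%lf") honors the current C locale, so a value like "3.14" stops parsing at the '.' when the decimal separator is ','. A locale-independent fast path would have to pin the classic "C" locale explicitly, along these lines; this is a sketch only (the helper name is hypothetical), and the patch deliberately sticks with LLSD::asReal() instead.

#include <sstream>
#include <locale>

// Parse a real from 'text' regardless of the user's locale settings.
F64 parse_real_classic_locale(const std::string& text)
{
    std::istringstream iss(text);
    iss.imbue(std::locale::classic());   // always treat '.' as the decimal point
    F64 r = 0.0;
    iss >> r;                            // r stays 0.0 if the parse fails
    return r;
}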
+				//F64 r; +				//if ( sscanf(mCurrentContent.c_str(), "%lf", &r ) == 1 ) +				//{	// See if sscanf works - it's faster +				//	value = r; +				//} +				//else +				//{ +				//	value = LLSD(mCurrentContent).asReal(); +				//}  			}  			break; diff --git a/indra/llcommon/llsingleton.h b/indra/llcommon/llsingleton.h index 7aee1bb85f..49d99f2cd0 100644 --- a/indra/llcommon/llsingleton.h +++ b/indra/llcommon/llsingleton.h @@ -100,12 +100,6 @@ private:  		DELETED  	} EInitState; -	static void deleteSingleton() -	{ -		delete getData().mSingletonInstance; -		getData().mSingletonInstance = NULL; -	} -	  	// stores pointer to singleton instance  	// and tracks initialization state of singleton  	struct SingletonInstanceData @@ -120,7 +114,10 @@ private:  		~SingletonInstanceData()  		{ -			deleteSingleton(); +			if (mInitState != DELETED) +			{ +				deleteSingleton(); +			}  		}  	}; @@ -132,6 +129,33 @@ public:  		data.mInitState = DELETED;  	} +	/** +	 * @brief Immediately delete the singleton. +	 * +	 * A subsequent call to LLProxy::getInstance() will construct a new +	 * instance of the class. +	 * +	 * LLSingletons are normally destroyed after main() has exited and the C++ +	 * runtime is cleaning up statically-constructed objects. Some classes +	 * derived from LLSingleton have objects that are part of a runtime system +	 * that is terminated before main() exits. Calling the destructor of those +	 * objects after the termination of their respective systems can cause +	 * crashes and other problems during termination of the project. Using this +	 * method to destroy the singleton early can prevent these crashes. +	 * +	 * An example where this is needed is for a LLSingleton that has an APR +	 * object as a member that makes APR calls on destruction. The APR system is +	 * shut down explicitly before main() exits. This causes a crash on exit. +	 * Using this method before the call to apr_terminate() and NOT calling +	 * getInstance() again will prevent the crash. +	 */ +	static void deleteSingleton() +	{ +		delete getData().mSingletonInstance; +		getData().mSingletonInstance = NULL; +		getData().mInitState = DELETED; +	} +  	static SingletonInstanceData& getData()  	{  		// this is static to cache the lookup results diff --git a/indra/llcommon/llstring.cpp b/indra/llcommon/llstring.cpp index f3b48b0156..e7fe656808 100644 --- a/indra/llcommon/llstring.cpp +++ b/indra/llcommon/llstring.cpp @@ -936,13 +936,18 @@ LLStringUtil::size_type LLStringUtil::getSubstitution(const std::string& instr,  {  	const std::string delims (","); -	// Find the first ] -	size_type pos2 = instr.find(']', start); +	// Find the first [ +	size_type pos1 = instr.find('[', start); +	if (pos1 == std::string::npos) +		return std::string::npos; + +	//Find the first ] after the initial [ +	size_type pos2 = instr.find(']', pos1);  	if (pos2 == std::string::npos)  		return std::string::npos; -	// Find the last [ before ] -	size_type pos1 = instr.find_last_of('[', pos2-1); +	// Find the last [ before ] in case of nested [[]] +	pos1 = instr.find_last_of('[', pos2-1);  	if (pos1 == std::string::npos || pos1 < start)  		return std::string::npos; diff --git a/indra/llcommon/llsys.cpp b/indra/llcommon/llsys.cpp index e8616a9be6..d781687175 100644 --- a/indra/llcommon/llsys.cpp +++ b/indra/llcommon/llsys.cpp @@ -1,6 +1,6 @@  /**    * @file llsys.cpp - * @brief Impelementation of the basic system query functions. + * @brief Implementation of the basic system query functions.   
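The deleteSingleton() documentation above is easiest to follow with a concrete shutdown sequence. LLAudioService and shutdown_runtime() below are hypothetical; the only point is the ordering relative to apr_terminate().

class LLAudioService : public LLSingleton<LLAudioService>
{
    // ... members whose destructors make APR calls ...
};

void shutdown_runtime()
{
    // Destroy the instance while APR is still alive; mInitState becomes
    // DELETED, so the static cleanup at program exit won't delete it twice.
    LLAudioService::deleteSingleton();

    apr_terminate();
    // From here on, never call LLAudioService::getInstance() again.
}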
*   * $LicenseInfo:firstyear=2002&license=viewerlgpl$   * Second Life Viewer Source Code @@ -24,6 +24,10 @@   * $/LicenseInfo$   */ +#if LL_WINDOWS +#pragma warning (disable : 4355) // 'this' used in initializer list: yes, intentionally +#endif +  #include "linden_common.h"  #include "llsys.h" @@ -36,22 +40,45 @@  #endif  #include "llprocessor.h" +#include "llerrorcontrol.h" +#include "llevents.h" +#include "lltimer.h" +#include "llsdserialize.h" +#include "llsdutil.h" +#include <boost/bind.hpp> +#include <boost/circular_buffer.hpp> +#include <boost/regex.hpp> +#include <boost/foreach.hpp> +#include <boost/lexical_cast.hpp> +#include <boost/range.hpp> +#include <boost/utility/enable_if.hpp> +#include <boost/type_traits/is_integral.hpp> +#include <boost/type_traits/is_float.hpp> + +using namespace llsd;  #if LL_WINDOWS  #	define WIN32_LEAN_AND_MEAN  #	include <winsock2.h>  #	include <windows.h> +#   include <psapi.h>               // GetPerformanceInfo() et al.  #elif LL_DARWIN  #	include <errno.h>  #	include <sys/sysctl.h>  #	include <sys/utsname.h>  #	include <stdint.h>  #	include <Carbon/Carbon.h> +#   include <stdexcept> +#	include <mach/host_info.h> +#	include <mach/mach_host.h> +#	include <mach/task.h> +#	include <mach/task_info.h>  #elif LL_LINUX  #	include <errno.h>  #	include <sys/utsname.h>  #	include <unistd.h>  #	include <sys/sysinfo.h> +#   include <stdexcept>  const char MEMINFO_FILE[] = "/proc/meminfo";  #elif LL_SOLARIS  #	include <stdio.h> @@ -70,6 +97,15 @@ extern int errno;  static const S32 CPUINFO_BUFFER_SIZE = 16383;  LLCPUInfo gSysCPU; +// Don't log memory info any more often than this. It also serves as our +// framerate sample size. +static const F32 MEM_INFO_THROTTLE = 20; +// Sliding window of samples. We intentionally limit the length of time we +// remember "the slowest" framerate because framerate is very slow at login. +// If we only triggered FrameWatcher logging when the session framerate +// dropped below the login framerate, we'd have very little additional data. +static const F32 MEM_INFO_WINDOW = 10*60; +  #if LL_WINDOWS  #ifndef DLLVERSIONINFO  typedef struct _DllVersionInfo @@ -613,8 +649,78 @@ void LLCPUInfo::stream(std::ostream& s) const  	s << "->mCPUString:  " << mCPUString << std::endl;  } +// Helper class for LLMemoryInfo: accumulate stats in the form we store for +// LLMemoryInfo::getStatsMap(). +class Stats +{ +public: +	Stats(): +		mStats(LLSD::emptyMap()) +	{} + +	// Store every integer type as LLSD::Integer. +	template <class T> +	void add(const LLSD::String& name, const T& value, +			 typename boost::enable_if<boost::is_integral<T> >::type* = 0) +	{ +		mStats[name] = LLSD::Integer(value); +	} + +	// Store every floating-point type as LLSD::Real. +	template <class T> +	void add(const LLSD::String& name, const T& value, +			 typename boost::enable_if<boost::is_float<T> >::type* = 0) +	{ +		mStats[name] = LLSD::Real(value); +	} + +	// Hope that LLSD::Date values are sufficiently unambiguous. +	void add(const LLSD::String& name, const LLSD::Date& value) +	{ +		mStats[name] = value; +	} + +	LLSD get() const { return mStats; } + +private: +	LLSD mStats; +}; + +// Wrap boost::regex_match() with a function that doesn't throw. 
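The Stats helper above uses boost::enable_if so the right overload is picked from the argument type alone, which keeps every stat in one of three LLSD types. A usage sketch (the values are made up):

Stats stats;
stats.add("timestamp", LLDate::now());         // LLSD::Date overload
stats.add("Percent Memory use", 37);           // integral -> stored as LLSD::Integer
stats.add("Total Physical KB", U32(4194304));  // still integral, still LLSD::Integer
stats.add("Frames per second", 42.5);          // floating point -> LLSD::Real
LLSD map = stats.get();                        // one flat LLSD map, ready for mStatsMap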
+template <typename S, typename M, typename R> +static bool regex_match_no_exc(const S& string, M& match, const R& regex) +{ +    try +    { +        return boost::regex_match(string, match, regex); +    } +    catch (const std::runtime_error& e) +    { +        LL_WARNS("LLMemoryInfo") << "error matching with '" << regex.str() << "': " +                                 << e.what() << ":\n'" << string << "'" << LL_ENDL; +        return false; +    } +} + +// Wrap boost::regex_search() with a function that doesn't throw. +template <typename S, typename M, typename R> +static bool regex_search_no_exc(const S& string, M& match, const R& regex) +{ +    try +    { +        return boost::regex_search(string, match, regex); +    } +    catch (const std::runtime_error& e) +    { +        LL_WARNS("LLMemoryInfo") << "error searching with '" << regex.str() << "': " +                                 << e.what() << ":\n'" << string << "'" << LL_ENDL; +        return false; +    } +} +  LLMemoryInfo::LLMemoryInfo()  { +	refresh();  }  #if LL_WINDOWS @@ -638,11 +744,7 @@ static U32 LLMemoryAdjustKBResult(U32 inKB)  U32 LLMemoryInfo::getPhysicalMemoryKB() const  {  #if LL_WINDOWS -	MEMORYSTATUSEX state; -	state.dwLength = sizeof(state); -	GlobalMemoryStatusEx(&state); - -	return LLMemoryAdjustKBResult((U32)(state.ullTotalPhys >> 10)); +	return LLMemoryAdjustKBResult(mStatsMap["Total Physical KB"].asInteger());  #elif LL_DARWIN  	// This might work on Linux as well.  Someone check... @@ -690,12 +792,82 @@ U32 LLMemoryInfo::getPhysicalMemoryClamped() const  void LLMemoryInfo::getAvailableMemoryKB(U32& avail_physical_mem_kb, U32& avail_virtual_mem_kb)  {  #if LL_WINDOWS -	MEMORYSTATUSEX state; -	state.dwLength = sizeof(state); -	GlobalMemoryStatusEx(&state); +	// Sigh, this shouldn't be a static method, then we wouldn't have to +	// reload this data separately from refresh() +	LLSD statsMap(loadStatsMap()); + +	avail_physical_mem_kb = statsMap["Avail Physical KB"].asInteger(); +	avail_virtual_mem_kb  = statsMap["Avail Virtual KB"].asInteger(); -	avail_physical_mem_kb = (U32)(state.ullAvailPhys/1024) ; -	avail_virtual_mem_kb = (U32)(state.ullAvailVirtual/1024) ; +#elif LL_DARWIN +	// mStatsMap is derived from vm_stat, look for (e.g.) "kb free": +	// $ vm_stat +	// Mach Virtual Memory Statistics: (page size of 4096 bytes) +	// Pages free:                   462078. +	// Pages active:                 142010. +	// Pages inactive:               220007. +	// Pages wired down:             159552. +	// "Translation faults":      220825184. +	// Pages copy-on-write:         2104153. +	// Pages zero filled:         167034876. +	// Pages reactivated:             65153. +	// Pageins:                     2097212. +	// Pageouts:                      41759. 
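regex_match_no_exc() above is what the /proc/meminfo parser further down relies on; it behaves like boost::regex_match() but logs and returns false instead of throwing. For example, mirroring one meminfo line:

boost::regex stat_rx("(.+): +([0-9]+)( kB)?");
boost::smatch matched;
std::string line("MemTotal:        4108424 kB");
if (regex_match_no_exc(line, matched, stat_rx))
{
    std::string key(matched[1].first, matched[1].second);        // "MemTotal"
    std::string value_str(matched[2].first, matched[2].second);  // "4108424"
}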
+	// Object cache: 841598 hits of 7629869 lookups (11% hit rate) +	avail_physical_mem_kb = -1 ; +	avail_virtual_mem_kb = -1 ; + +#elif LL_LINUX +	// mStatsMap is derived from MEMINFO_FILE: +	// $ cat /proc/meminfo +	// MemTotal:        4108424 kB +	// MemFree:         1244064 kB +	// Buffers:           85164 kB +	// Cached:          1990264 kB +	// SwapCached:            0 kB +	// Active:          1176648 kB +	// Inactive:        1427532 kB +	// Active(anon):     529152 kB +	// Inactive(anon):    15924 kB +	// Active(file):     647496 kB +	// Inactive(file):  1411608 kB +	// Unevictable:          16 kB +	// Mlocked:              16 kB +	// HighTotal:       3266316 kB +	// HighFree:         721308 kB +	// LowTotal:         842108 kB +	// LowFree:          522756 kB +	// SwapTotal:       6384632 kB +	// SwapFree:        6384632 kB +	// Dirty:                28 kB +	// Writeback:             0 kB +	// AnonPages:        528820 kB +	// Mapped:            89472 kB +	// Shmem:             16324 kB +	// Slab:             159624 kB +	// SReclaimable:     145168 kB +	// SUnreclaim:        14456 kB +	// KernelStack:        2560 kB +	// PageTables:         5560 kB +	// NFS_Unstable:          0 kB +	// Bounce:                0 kB +	// WritebackTmp:          0 kB +	// CommitLimit:     8438844 kB +	// Committed_AS:    1271596 kB +	// VmallocTotal:     122880 kB +	// VmallocUsed:       65252 kB +	// VmallocChunk:      52356 kB +	// HardwareCorrupted:     0 kB +	// HugePages_Total:       0 +	// HugePages_Free:        0 +	// HugePages_Rsvd:        0 +	// HugePages_Surp:        0 +	// Hugepagesize:       2048 kB +	// DirectMap4k:      434168 kB +	// DirectMap2M:      477184 kB +	// (could also run 'free', but easier to read a file than run a program) +	avail_physical_mem_kb = -1 ; +	avail_virtual_mem_kb = -1 ;  #else  	//do not know how to collect available memory info for other systems. @@ -708,56 +880,283 @@ void LLMemoryInfo::getAvailableMemoryKB(U32& avail_physical_mem_kb, U32& avail_v  void LLMemoryInfo::stream(std::ostream& s) const  { +	// We want these memory stats to be easy to grep from the log, along with +	// the timestamp. So preface each line with the timestamp and a +	// distinctive marker. Without that, we'd have to search the log for the +	// introducer line, then read subsequent lines, etc... +	std::string pfx(LLError::utcTime() + " <mem> "); + +	// Max key length +	size_t key_width(0); +	BOOST_FOREACH(const MapEntry& pair, inMap(mStatsMap)) +	{ +		size_t len(pair.first.length()); +		if (len > key_width) +		{ +			key_width = len; +		} +	} + +	// Now stream stats +	BOOST_FOREACH(const MapEntry& pair, inMap(mStatsMap)) +	{ +		s << pfx << std::setw(key_width+1) << (pair.first + ':') << ' '; +		LLSD value(pair.second); +		if (value.isInteger()) +			s << std::setw(12) << value.asInteger(); +		else if (value.isReal()) +			s << std::fixed << std::setprecision(1) << value.asReal(); +		else if (value.isDate()) +			value.asDate().toStream(s); +		else +			s << value;           // just use default LLSD formatting +		s << std::endl; +	} +} + +LLSD LLMemoryInfo::getStatsMap() const +{ +	return mStatsMap; +} + +LLMemoryInfo& LLMemoryInfo::refresh() +{ +	mStatsMap = loadStatsMap(); + +	LL_DEBUGS("LLMemoryInfo") << "Populated mStatsMap:\n"; +	LLSDSerialize::toPrettyXML(mStatsMap, LL_CONT); +	LL_ENDL; + +	return *this; +} + +LLSD LLMemoryInfo::loadStatsMap() +{ +	// This implementation is derived from stream() code (as of 2011-06-29). 
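With refresh() and getStatsMap() in place, callers can take a fresh snapshot and index into it. The keys are platform-dependent; the one shown is the Windows spelling used in the code, and the "Example" log tag is arbitrary.

LLMemoryInfo info;                          // snapshots stats at construction
// ... some time later ...
LLSD stats = info.refresh().getStatsMap();  // re-fetch, then read the map
U32 avail_phys_kb = stats["Avail Physical KB"].asInteger();
LL_INFOS("Example") << "available physical: " << avail_phys_kb << " KB" << LL_ENDL;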
+	Stats stats; + +	// associate timestamp for analysis over time +	stats.add("timestamp", LLDate::now()); +  #if LL_WINDOWS  	MEMORYSTATUSEX state;  	state.dwLength = sizeof(state);  	GlobalMemoryStatusEx(&state); -	s << "Percent Memory use: " << (U32)state.dwMemoryLoad << '%' << std::endl; -	s << "Total Physical KB:  " << (U32)(state.ullTotalPhys/1024) << std::endl; -	s << "Avail Physical KB:  " << (U32)(state.ullAvailPhys/1024) << std::endl; -	s << "Total page KB:      " << (U32)(state.ullTotalPageFile/1024) << std::endl; -	s << "Avail page KB:      " << (U32)(state.ullAvailPageFile/1024) << std::endl; -	s << "Total Virtual KB:   " << (U32)(state.ullTotalVirtual/1024) << std::endl; -	s << "Avail Virtual KB:   " << (U32)(state.ullAvailVirtual/1024) << std::endl; +	stats.add("Percent Memory use", state.dwMemoryLoad); +	stats.add("Total Physical KB",  state.ullTotalPhys/1024); +	stats.add("Avail Physical KB",  state.ullAvailPhys/1024); +	stats.add("Total page KB",      state.ullTotalPageFile/1024); +	stats.add("Avail page KB",      state.ullAvailPageFile/1024); +	stats.add("Total Virtual KB",   state.ullTotalVirtual/1024); +	stats.add("Avail Virtual KB",   state.ullAvailVirtual/1024); + +	PERFORMANCE_INFORMATION perf; +	perf.cb = sizeof(perf); +	GetPerformanceInfo(&perf, sizeof(perf)); + +	SIZE_T pagekb(perf.PageSize/1024); +	stats.add("CommitTotal KB",     perf.CommitTotal * pagekb); +	stats.add("CommitLimit KB",     perf.CommitLimit * pagekb); +	stats.add("CommitPeak KB",      perf.CommitPeak * pagekb); +	stats.add("PhysicalTotal KB",   perf.PhysicalTotal * pagekb); +	stats.add("PhysicalAvail KB",   perf.PhysicalAvailable * pagekb); +	stats.add("SystemCache KB",     perf.SystemCache * pagekb); +	stats.add("KernelTotal KB",     perf.KernelTotal * pagekb); +	stats.add("KernelPaged KB",     perf.KernelPaged * pagekb); +	stats.add("KernelNonpaged KB",  perf.KernelNonpaged * pagekb); +	stats.add("PageSize KB",        pagekb); +	stats.add("HandleCount",        perf.HandleCount); +	stats.add("ProcessCount",       perf.ProcessCount); +	stats.add("ThreadCount",        perf.ThreadCount); + +	PROCESS_MEMORY_COUNTERS_EX pmem; +	pmem.cb = sizeof(pmem); +	// GetProcessMemoryInfo() is documented to accept either +	// PROCESS_MEMORY_COUNTERS* or PROCESS_MEMORY_COUNTERS_EX*, presumably +	// using the redundant size info to distinguish. But its prototype +	// specifically accepts PROCESS_MEMORY_COUNTERS*, and since this is a +	// classic-C API, PROCESS_MEMORY_COUNTERS_EX isn't a subclass. Cast the +	// pointer. 
+	GetProcessMemoryInfo(GetCurrentProcess(), PPROCESS_MEMORY_COUNTERS(&pmem), sizeof(pmem)); + +	stats.add("Page Fault Count",              pmem.PageFaultCount); +	stats.add("PeakWorkingSetSize KB",         pmem.PeakWorkingSetSize/1024); +	stats.add("WorkingSetSize KB",             pmem.WorkingSetSize/1024); +	stats.add("QutaPeakPagedPoolUsage KB",     pmem.QuotaPeakPagedPoolUsage/1024); +	stats.add("QuotaPagedPoolUsage KB",        pmem.QuotaPagedPoolUsage/1024); +	stats.add("QuotaPeakNonPagedPoolUsage KB", pmem.QuotaPeakNonPagedPoolUsage/1024); +	stats.add("QuotaNonPagedPoolUsage KB",     pmem.QuotaNonPagedPoolUsage/1024); +	stats.add("PagefileUsage KB",              pmem.PagefileUsage/1024); +	stats.add("PeakPagefileUsage KB",          pmem.PeakPagefileUsage/1024); +	stats.add("PrivateUsage KB",               pmem.PrivateUsage/1024); +  #elif LL_DARWIN -	uint64_t phys = 0; -	size_t len = sizeof(phys);	 +	const vm_size_t pagekb(vm_page_size / 1024); +	 +	// +	// Collect the vm_stat's +	// -	if(sysctlbyname("hw.memsize", &phys, &len, NULL, 0) == 0)  	{ -		s << "Total Physical KB:  " << phys/1024 << std::endl; -	} -	else +		vm_statistics_data_t vmstat; +		mach_msg_type_number_t vmstatCount = HOST_VM_INFO_COUNT; + +		if (host_statistics(mach_host_self(), HOST_VM_INFO, (host_info_t) &vmstat, &vmstatCount) != KERN_SUCCESS)  	{ -		s << "Unable to collect memory information"; +			LL_WARNS("LLMemoryInfo") << "Unable to collect memory information" << LL_ENDL; +		} +		else +		{ +			stats.add("Pages free KB",		pagekb * vmstat.free_count); +			stats.add("Pages active KB",	pagekb * vmstat.active_count); +			stats.add("Pages inactive KB",	pagekb * vmstat.inactive_count); +			stats.add("Pages wired KB",		pagekb * vmstat.wire_count); + +			stats.add("Pages zero fill",		vmstat.zero_fill_count); +			stats.add("Page reactivations",		vmstat.reactivations); +			stats.add("Page-ins",				vmstat.pageins); +			stats.add("Page-outs",				vmstat.pageouts); +			 +			stats.add("Faults",					vmstat.faults); +			stats.add("Faults copy-on-write",	vmstat.cow_faults); +			 +			stats.add("Cache lookups",			vmstat.lookups); +			stats.add("Cache hits",				vmstat.hits); +			 +			stats.add("Page purgeable count",	vmstat.purgeable_count); +			stats.add("Page purges",			vmstat.purges); +			 +			stats.add("Page speculative reads",	vmstat.speculative_count); +		}  	} + +	// +	// Collect the misc task info +	// + +		{ +		task_events_info_data_t taskinfo; +		unsigned taskinfoSize = sizeof(taskinfo); +		 +		if (task_info(mach_task_self(), TASK_EVENTS_INFO, (task_info_t) &taskinfo, &taskinfoSize) != KERN_SUCCESS) +					{ +			LL_WARNS("LLMemoryInfo") << "Unable to collect task information" << LL_ENDL; +			} +			else +			{ +			stats.add("Task page-ins",					taskinfo.pageins); +			stats.add("Task copy-on-write faults",		taskinfo.cow_faults); +			stats.add("Task messages sent",				taskinfo.messages_sent); +			stats.add("Task messages received",			taskinfo.messages_received); +			stats.add("Task mach system call count",	taskinfo.syscalls_mach); +			stats.add("Task unix system call count",	taskinfo.syscalls_unix); +			stats.add("Task context switch count",		taskinfo.csw); +			} +	}	 +	 +	// +	// Collect the basic task info +	// + +		{ +		task_basic_info_64_data_t taskinfo; +		unsigned taskinfoSize = sizeof(taskinfo); +		 +		if (task_info(mach_task_self(), TASK_BASIC_INFO_64, (task_info_t) &taskinfo, &taskinfoSize) != KERN_SUCCESS) +			{ +			LL_WARNS("LLMemoryInfo") << "Unable to collect task information" << LL_ENDL; +				} +				else +				{ +		
	stats.add("Basic suspend count",					taskinfo.suspend_count); +			stats.add("Basic virtual memory KB",				taskinfo.virtual_size / 1024); +			stats.add("Basic resident memory KB",				taskinfo.resident_size / 1024); +			stats.add("Basic new thread policy",				taskinfo.policy); +		} +	} +  #elif LL_SOLARIS -        U64 phys = 0; +	U64 phys = 0; -        phys = (U64)(sysconf(_SC_PHYS_PAGES)) * (U64)(sysconf(_SC_PAGESIZE)/1024); +	phys = (U64)(sysconf(_SC_PHYS_PAGES)) * (U64)(sysconf(_SC_PAGESIZE)/1024); -        s << "Total Physical KB:  " << phys << std::endl; -#else -	// *NOTE: This works on linux. What will it do on other systems? -	LLFILE* meminfo = LLFile::fopen(MEMINFO_FILE,"rb"); -	if(meminfo) +	stats.add("Total Physical KB", phys); + +#elif LL_LINUX +	std::ifstream meminfo(MEMINFO_FILE); +	if (meminfo.is_open())  	{ -		char line[MAX_STRING];		/* Flawfinder: ignore */ -		memset(line, 0, MAX_STRING); -		while(fgets(line, MAX_STRING, meminfo)) +		// MemTotal:		4108424 kB +		// MemFree:			1244064 kB +		// Buffers:			  85164 kB +		// Cached:			1990264 kB +		// SwapCached:			  0 kB +		// Active:			1176648 kB +		// Inactive:		1427532 kB +		// ... +		// VmallocTotal:	 122880 kB +		// VmallocUsed:		  65252 kB +		// VmallocChunk:	  52356 kB +		// HardwareCorrupted:	  0 kB +		// HugePages_Total:		  0 +		// HugePages_Free:		  0 +		// HugePages_Rsvd:		  0 +		// HugePages_Surp:		  0 +		// Hugepagesize:	   2048 kB +		// DirectMap4k:		 434168 kB +		// DirectMap2M:		 477184 kB + +		// Intentionally don't pass the boost::no_except flag. This +		// boost::regex object is constructed with a string literal, so it +		// should be valid every time. If it becomes invalid, we WANT an +		// exception, hopefully even before the dev checks in. +		boost::regex stat_rx("(.+): +([0-9]+)( kB)?"); +		boost::smatch matched; + +		std::string line; +		while (std::getline(meminfo, line))  		{ -			line[strlen(line)-1] = ' ';		 /*Flawfinder: ignore*/ -			s << line; +			LL_DEBUGS("LLMemoryInfo") << line << LL_ENDL; +			if (regex_match_no_exc(line, matched, stat_rx)) +			{ +				// e.g. "MemTotal:		4108424 kB" +				LLSD::String key(matched[1].first, matched[1].second); +				LLSD::String value_str(matched[2].first, matched[2].second); +				LLSD::Integer value(0); +				try +				{ +					value = boost::lexical_cast<LLSD::Integer>(value_str); +				} +				catch (const boost::bad_lexical_cast&) +				{ +					LL_WARNS("LLMemoryInfo") << "couldn't parse '" << value_str +											 << "' in " << MEMINFO_FILE << " line: " +											 << line << LL_ENDL; +					continue; +				} +				// Store this statistic. +				stats.add(key, value); +			} +			else +			{ +				LL_WARNS("LLMemoryInfo") << "unrecognized " << MEMINFO_FILE << " line: " +										 << line << LL_ENDL; +			}  		} -		fclose(meminfo);  	}  	else  	{ -		s << "Unable to collect memory information"; +		LL_WARNS("LLMemoryInfo") << "Unable to collect memory information" << LL_ENDL;  	} + +#else +	LL_WARNS("LLMemoryInfo") << "Unknown system; unable to collect memory information" << LL_ENDL; +  #endif + +	return stats.get();  }  std::ostream& operator<<(std::ostream& s, const LLOSInfo& info) @@ -778,6 +1177,143 @@ std::ostream& operator<<(std::ostream& s, const LLMemoryInfo& info)  	return s;  } +class FrameWatcher +{ +public: +    FrameWatcher(): +        // Hooking onto the "mainloop" event pump gets us one call per frame. 
+        mConnection(LLEventPumps::instance() +                    .obtain("mainloop") +                    .listen("FrameWatcher", boost::bind(&FrameWatcher::tick, this, _1))), +        // Initializing mSampleStart to an invalid timestamp alerts us to skip +        // trying to compute framerate on the first call. +        mSampleStart(-1), +        // Initializing mSampleEnd to 0 ensures that we treat the first call +        // as the completion of a sample window. +        mSampleEnd(0), +        mFrames(0), +        // Both MEM_INFO_WINDOW and MEM_INFO_THROTTLE are in seconds. We need +        // the number of integer MEM_INFO_THROTTLE sample slots that will fit +        // in MEM_INFO_WINDOW. Round up. +        mSamples(int((MEM_INFO_WINDOW / MEM_INFO_THROTTLE) + 0.7)), +        // Initializing to F32_MAX means that the first real frame will become +        // the slowest ever, which sounds like a good idea. +        mSlowest(F32_MAX) +    {} + +    bool tick(const LLSD&) +    { +        F32 timestamp(mTimer.getElapsedTimeF32()); + +        // Count this frame in the interval just completed. +        ++mFrames; + +        // Have we finished a sample window yet? +        if (timestamp < mSampleEnd) +        { +            // no, just keep waiting +            return false; +        } + +        // Set up for next sample window. Capture values for previous frame in +        // local variables and reset data members. +        U32 frames(mFrames); +        F32 sampleStart(mSampleStart); +        // No frames yet in next window +        mFrames = 0; +        // which starts right now +        mSampleStart = timestamp; +        // and ends MEM_INFO_THROTTLE seconds in the future +        mSampleEnd = mSampleStart + MEM_INFO_THROTTLE; + +        // On the very first call, that's all we can do, no framerate +        // computation is possible. +        if (sampleStart < 0) +        { +            return false; +        } + +        // How long did this actually take? As framerate slows, the duration +        // of the frame we just finished could push us WELL beyond our desired +        // sample window size. +        F32 elapsed(timestamp - sampleStart); +        F32 framerate(frames/elapsed); + +        // Remember previous slowest framerate because we're just about to +        // update it. +        F32 slowest(mSlowest); +        // Remember previous number of samples. +        boost::circular_buffer<F32>::size_type prevSize(mSamples.size()); + +        // Capture new framerate in our samples buffer. Once the buffer is +        // full (after MEM_INFO_WINDOW seconds), this will displace the oldest +        // sample. ("So they all rolled over, and one fell out...") +        mSamples.push_back(framerate); + +        // Calculate the new minimum framerate. I know of no way to update a +        // rolling minimum without ever rescanning the buffer. But since there +        // are only a few tens of items in this buffer, rescanning it is +        // probably cheaper (and certainly easier to reason about) than +        // attempting to optimize away some of the scans. +        mSlowest = framerate;       // pick an arbitrary entry to start +        for (boost::circular_buffer<F32>::const_iterator si(mSamples.begin()), send(mSamples.end()); +             si != send; ++si) +        { +            if (*si < mSlowest) +            { +                mSlowest = *si; +            } +        } + +        // We're especially interested in memory as framerate drops. 
Only log +        // when framerate drops below the slowest framerate we remember. +        // (Should always be true for the end of the very first sample +        // window.) +        if (framerate >= slowest) +        { +            return false; +        } +        // Congratulations, we've hit a new low.  :-P + +        LL_INFOS("FrameWatcher") << ' '; +        if (! prevSize) +        { +            LL_CONT << "initial framerate "; +        } +        else +        { +            LL_CONT << "slowest framerate for last " << int(prevSize * MEM_INFO_THROTTLE) +                    << " seconds "; +        } +        LL_CONT << std::fixed << std::setprecision(1) << framerate << '\n' +                << LLMemoryInfo() << LL_ENDL; + +        return false; +    } + +private: +    // Storing the connection in an LLTempBoundListener ensures it will be +    // disconnected when we're destroyed. +    LLTempBoundListener mConnection; +    // Track elapsed time +    LLTimer mTimer; +    // Some of what you see here is in fact redundant with functionality you +    // can get from LLTimer. Unfortunately the LLTimer API is missing the +    // feature we need: has at least the stated interval elapsed, and if so, +    // exactly how long has passed? So we have to do it by hand, sigh. +    // Time at start, end of sample window +    F32 mSampleStart, mSampleEnd; +    // Frames this sample window +    U32 mFrames; +    // Sliding window of framerate samples +    boost::circular_buffer<F32> mSamples; +    // Slowest framerate in mSamples +    F32 mSlowest; +}; + +// Need an instance of FrameWatcher before it does any good +static FrameWatcher sFrameWatcher; +  BOOL gunzip_file(const std::string& srcfile, const std::string& dstfile)  {  	std::string tmpfile; diff --git a/indra/llcommon/llsys.h b/indra/llcommon/llsys.h index 41a4f25000..739e795d3a 100644 --- a/indra/llcommon/llsys.h +++ b/indra/llcommon/llsys.h @@ -36,6 +36,7 @@  //  llinfos << info << llendl;  // +#include "llsd.h"  #include <iosfwd>  #include <string> @@ -117,6 +118,27 @@ public:  	//get the available memory infomation in KiloBytes.  	static void getAvailableMemoryKB(U32& avail_physical_mem_kb, U32& avail_virtual_mem_kb); + +	// Retrieve a map of memory statistics. The keys of the map are platform- +	// dependent. The values are in kilobytes to try to avoid integer overflow. +	LLSD getStatsMap() const; + +	// Re-fetch memory data (as reported by stream() and getStatsMap()) from the +	// system. Normally this is fetched at construction time. Return (*this) +	// to permit usage of the form: +	// @code +	// LLMemoryInfo info; +	// ... +	// info.refresh().getStatsMap(); +	// @endcode +	LLMemoryInfo& refresh(); + +private: +	// set mStatsMap +	static LLSD loadStatsMap(); + +	// Memory stats for getStatsMap(). 
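To put numbers on the FrameWatcher above: with MEM_INFO_THROTTLE = 20 seconds and MEM_INFO_WINDOW = 600 seconds, mSamples holds int(600/20 + 0.7) = 30 entries, so a report can fire at most every 20 seconds and "slowest" covers roughly the last ten minutes. The rescan loop in tick() is equivalent to a std::min_element() call, shown here only as a cross-check:

#include <algorithm>

// mSamples is never empty at this point because a sample was just pushed.
F32 slowest_rescan = *std::min_element(mSamples.begin(), mSamples.end());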
+	LLSD mStatsMap;  }; diff --git a/indra/llcommon/llthread.cpp b/indra/llcommon/llthread.cpp index d9400fb5b3..bdde1b5c48 100644 --- a/indra/llcommon/llthread.cpp +++ b/indra/llcommon/llthread.cpp @@ -36,6 +36,12 @@  #include <sched.h>  #endif +#if !LL_DARWIN +U32 ll_thread_local local_thread_ID = 0; +#endif  + +U32 LLThread::sIDIter = 0; +  //----------------------------------------------------------------------------  // Usage:  // void run_func(LLThread* thread) @@ -56,12 +62,6 @@  //   //---------------------------------------------------------------------------- -#if !LL_DARWIN -U32 ll_thread_local sThreadID = 0; -#endif  - -U32 LLThread::sIDIter = 0; -  LL_COMMON_API void assert_main_thread()  {  	static U32 s_thread_id = LLThread::currentID(); @@ -79,9 +79,12 @@ void *APR_THREAD_FUNC LLThread::staticRun(apr_thread_t *apr_threadp, void *datap  	LLThread *threadp = (LLThread *)datap;  #if !LL_DARWIN -	sThreadID = threadp->mID; +	local_thread_ID = threadp->mID;  #endif +	// Create a thread local data. +	LLThreadLocalData::create(threadp); +  	// Run the user supplied function  	threadp->run(); @@ -94,40 +97,22 @@ void *APR_THREAD_FUNC LLThread::staticRun(apr_thread_t *apr_threadp, void *datap  } -LLThread::LLThread(const std::string& name, apr_pool_t *poolp) : -	mPaused(FALSE), +LLThread::LLThread(std::string const& name) : +	mPaused(false),  	mName(name),  	mAPRThreadp(NULL), -	mStatus(STOPPED) +	mStatus(STOPPED), +	mThreadLocalData(NULL)  { -	mID = ++sIDIter; +	mID = ++sIDIter; //flaw: assume this is called only in the main thread! -	// Thread creation probably CAN be paranoid about APR being initialized, if necessary -	if (poolp) -	{ -		mIsLocalPool = FALSE; -		mAPRPoolp = poolp; -	} -	else -	{ -		mIsLocalPool = TRUE; -		apr_pool_create(&mAPRPoolp, NULL); // Create a subpool for this thread -	} -	mRunCondition = new LLCondition(mAPRPoolp); - -	mLocalAPRFilePoolp = NULL ; +	mRunCondition = new LLCondition;  }  LLThread::~LLThread()  {  	shutdown(); - -	if(mLocalAPRFilePoolp) -	{ -		delete mLocalAPRFilePoolp ; -		mLocalAPRFilePoolp = NULL ; -	}  }  void LLThread::shutdown() @@ -164,7 +149,7 @@ void LLThread::shutdown()  		if (!isStopped())  		{  			// This thread just wouldn't stop, even though we gave it time -			//llwarns << "LLThread::~LLThread() exiting thread before clean exit!" << llendl; +			//llwarns << "LLThread::shutdown() exiting thread before clean exit!" << llendl;  			// Put a stake in its heart.  			
apr_thread_exit(mAPRThreadp, -1);  			return; @@ -174,15 +159,8 @@ void LLThread::shutdown()  	delete mRunCondition;  	mRunCondition = 0; -	 -	if (mIsLocalPool && mAPRPoolp) -	{ -		apr_pool_destroy(mAPRPoolp); -		mAPRPoolp = 0; -	}  } -  void LLThread::start()  {  	llassert(isStopped()); @@ -191,7 +169,7 @@ void LLThread::start()  	mStatus = RUNNING;  	apr_status_t status = -		apr_thread_create(&mAPRThreadp, NULL, staticRun, (void *)this, mAPRPoolp); +		apr_thread_create(&mAPRThreadp, NULL, staticRun, (void *)this, tldata().mRootPool());  	if(status == APR_SUCCESS)  	{	 @@ -216,7 +194,7 @@ void LLThread::pause()  	if (!mPaused)  	{  		// this will cause the thread to stop execution as soon as checkPause() is called -		mPaused = 1;		// Does not need to be atomic since this is only set/unset from the main thread +		mPaused = true;		// Does not need to be atomic since this is only set/unset from the main thread  	}	  } @@ -224,7 +202,7 @@ void LLThread::unpause()  {  	if (mPaused)  	{ -		mPaused = 0; +		mPaused = false;  	}  	wake(); // wake up the thread if necessary @@ -301,115 +279,76 @@ void LLThread::wakeLocked()  	}  } -//============================================================================ - -LLMutex::LLMutex(apr_pool_t *poolp) : -	mAPRMutexp(NULL), mCount(0), mLockingThread(NO_THREAD) -{ -	//if (poolp) -	//{ -	//	mIsLocalPool = FALSE; -	//	mAPRPoolp = poolp; -	//} -	//else -	{ -		mIsLocalPool = TRUE; -		apr_pool_create(&mAPRPoolp, NULL); // Create a subpool for this thread -	} -	apr_thread_mutex_create(&mAPRMutexp, APR_THREAD_MUTEX_UNNESTED, mAPRPoolp); -} +#ifdef SHOW_ASSERT +// This allows the use of llassert(is_main_thread()) to assure the current thread is the main thread. +static apr_os_thread_t main_thread_id; +LL_COMMON_API bool is_main_thread(void) { return apr_os_thread_equal(main_thread_id, apr_os_thread_current()); } +#endif +// The thread private handle to access the LLThreadLocalData instance. +apr_threadkey_t* LLThreadLocalData::sThreadLocalDataKey; -LLMutex::~LLMutex() +//static +void LLThreadLocalData::init(void)  { -#if MUTEX_DEBUG -	llassert_always(!isLocked()); // better not be locked! -#endif -	apr_thread_mutex_destroy(mAPRMutexp); -	mAPRMutexp = NULL; -	if (mIsLocalPool) +	// Only do this once. +	if (sThreadLocalDataKey)  	{ -		apr_pool_destroy(mAPRPoolp); +		return;  	} -} +	apr_status_t status = apr_threadkey_private_create(&sThreadLocalDataKey, &LLThreadLocalData::destroy, LLAPRRootPool::get()()); +	ll_apr_assert_status(status);   // Or out of memory, or system-imposed limit on the +									// total number of keys per process {PTHREAD_KEYS_MAX} +									// has been exceeded. -void LLMutex::lock() -{ -#if LL_DARWIN -	if (mLockingThread == LLThread::currentID()) -#else -	if (mLockingThread == sThreadID) -#endif -	{ //redundant lock -		mCount++; -		return; -	} -	 -	apr_thread_mutex_lock(mAPRMutexp); -	 -#if MUTEX_DEBUG -	// Have to have the lock before we can access the debug info -	U32 id = LLThread::currentID(); -	if (mIsLocked[id] != FALSE) -		llerrs << "Already locked in Thread: " << id << llendl; -	mIsLocked[id] = TRUE; -#endif +	// Create the thread-local data for the main thread (this function is called by the main thread). +	LLThreadLocalData::create(NULL); -#if LL_DARWIN -	mLockingThread = LLThread::currentID(); -#else -	mLockingThread = sThreadID; +#ifdef SHOW_ASSERT +	// This function is called by the main thread. 
+	main_thread_id = apr_os_thread_current();  #endif  } -void LLMutex::unlock() +// This is called once for every thread when the thread is destructed. +//static +void LLThreadLocalData::destroy(void* thread_local_data)  { -	if (mCount > 0) -	{ //not the root unlock -		mCount--; -		return; -	} -	 -#if MUTEX_DEBUG -	// Access the debug info while we have the lock -	U32 id = LLThread::currentID(); -	if (mIsLocked[id] != TRUE) -		llerrs << "Not locked in Thread: " << id << llendl;	 -	mIsLocked[id] = FALSE; -#endif - -	mLockingThread = NO_THREAD; -	apr_thread_mutex_unlock(mAPRMutexp); +	delete static_cast<LLThreadLocalData*>(thread_local_data);  } -bool LLMutex::isLocked() +//static +void LLThreadLocalData::create(LLThread* threadp)  { -	apr_status_t status = apr_thread_mutex_trylock(mAPRMutexp); -	if (APR_STATUS_IS_EBUSY(status)) +	LLThreadLocalData* new_tld = new LLThreadLocalData; +	if (threadp)  	{ -		return true; -	} -	else -	{ -		apr_thread_mutex_unlock(mAPRMutexp); -		return false; +		threadp->mThreadLocalData = new_tld;  	} +	apr_status_t status = apr_threadkey_private_set(new_tld, sThreadLocalDataKey); +	llassert_always(status == APR_SUCCESS);  } -U32 LLMutex::lockingThread() const +//static +LLThreadLocalData& LLThreadLocalData::tldata(void)  { -	return mLockingThread; +	if (!sThreadLocalDataKey) +	{ +		LLThreadLocalData::init(); +	} + +	void* data; +	apr_status_t status = apr_threadkey_private_get(&data, sThreadLocalDataKey); +	llassert_always(status == APR_SUCCESS); +	return *static_cast<LLThreadLocalData*>(data);  }  //============================================================================ -LLCondition::LLCondition(apr_pool_t *poolp) : -	LLMutex(poolp) +LLCondition::LLCondition(LLAPRPool& parent) : LLMutex(parent)  { -	// base class (LLMutex) has already ensured that mAPRPoolp is set up. 
- -	apr_thread_cond_create(&mAPRCondp, mAPRPoolp); +	apr_thread_cond_create(&mAPRCondp, mPool());  } @@ -422,15 +361,6 @@ LLCondition::~LLCondition()  void LLCondition::wait()  { -	if (!isLocked()) -	{ //mAPRMutexp MUST be locked before calling apr_thread_cond_wait -		apr_thread_mutex_lock(mAPRMutexp); -#if MUTEX_DEBUG -		// avoid asserts on destruction in non-release builds -		U32 id = LLThread::currentID(); -		mIsLocked[id] = TRUE; -#endif -	}  	apr_thread_cond_wait(mAPRCondp, mAPRMutexp);  } @@ -445,6 +375,44 @@ void LLCondition::broadcast()  }  //============================================================================ +LLMutexBase::LLMutexBase() : +	mLockingThread(NO_THREAD), +	mCount(0) +{ +} + +void LLMutexBase::lock()  +{  +#if LL_DARWIN +	if (mLockingThread == LLThread::currentID()) +#else +	if (mLockingThread == local_thread_ID) +#endif +	{ //redundant lock +		mCount++; +		return; +	} + +	apr_thread_mutex_lock(mAPRMutexp);  + +#if LL_DARWIN +	mLockingThread = LLThread::currentID(); +#else +	mLockingThread = local_thread_ID; +#endif +} + +void LLMutexBase::unlock()  +{  +	if (mCount > 0) +	{ //not the root unlock +		mCount--; +		return; +	} +	mLockingThread = NO_THREAD; + +	apr_thread_mutex_unlock(mAPRMutexp);  +}  //---------------------------------------------------------------------------- @@ -456,7 +424,7 @@ void LLThreadSafeRefCount::initThreadSafeRefCount()  {  	if (!sMutex)  	{ -		sMutex = new LLMutex(0); +		sMutex = new LLMutex;  	}  } diff --git a/indra/llcommon/llthread.h b/indra/llcommon/llthread.h index 40291a2569..b631b96252 100644 --- a/indra/llcommon/llthread.h +++ b/indra/llcommon/llthread.h @@ -29,7 +29,13 @@  #include "llapp.h"  #include "llapr.h" +#include "llmemory.h"  #include "apr_thread_cond.h" +#include "llaprpool.h" + +#ifdef SHOW_ASSERT +extern LL_COMMON_API bool is_main_thread(void); +#endif  class LLThread;  class LLMutex; @@ -41,6 +47,22 @@ class LLCondition;  #define ll_thread_local __thread  #endif +class LL_COMMON_API LLThreadLocalData +{ +private: +	static apr_threadkey_t* sThreadLocalDataKey; + +public: +	// Thread-local memory pools. +	LLAPRRootPool mRootPool; +	LLVolatileAPRPool mVolatileAPRPool; + +	static void init(void); +	static void destroy(void* thread_local_data); +	static void create(LLThread* pthread); +	static LLThreadLocalData& tldata(void); +}; +  class LL_COMMON_API LLThread  {  private: @@ -54,7 +76,7 @@ public:  		QUITTING= 2 	// Someone wants this thread to quit  	} EThreadStatus; -	LLThread(const std::string& name, apr_pool_t *poolp = NULL); +	LLThread(std::string const& name);  	virtual ~LLThread(); // Warning!  You almost NEVER want to destroy a thread unless it's in the STOPPED state.  	virtual void shutdown(); // stops the thread @@ -69,7 +91,7 @@ public:  	// Called from MAIN THREAD.  	void pause();  	void unpause(); -	bool isPaused() { return isStopped() || mPaused == TRUE; } +	bool isPaused() { return isStopped() || mPaused; }  	// Cause the thread to wake up and check its condition  	void wake(); @@ -83,13 +105,11 @@ public:  	// this kicks off the apr thread  	void start(void); -	apr_pool_t *getAPRPool() { return mAPRPoolp; } -	LLVolatileAPRPool* getLocalAPRFilePool() { return mLocalAPRFilePoolp ; } - -	U32 getID() const { return mID; } +	// Return thread-local data for the current thread. 
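The lock()/unlock() pair above makes LLMutexBase tolerant of redundant locking from the thread that already owns the mutex: only the first lock touches the APR mutex, later ones just bump mCount. For example:

LLMutex mutex;     // by default built on LLThread::tldata().mRootPool (see the header below)
mutex.lock();      // takes the underlying apr_thread_mutex_t
mutex.lock();      // same thread again: increments mCount only, no deadlock
mutex.unlock();    // decrements mCount back to zero
mutex.unlock();    // now actually releases the APR mutex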
+	static LLThreadLocalData& tldata(void) { return LLThreadLocalData::tldata(); }  private: -	BOOL				mPaused; +	bool				mPaused;  	// static function passed to APR thread creation routine  	static void *APR_THREAD_FUNC staticRun(apr_thread_t *apr_threadp, void *datap); @@ -99,15 +119,11 @@ protected:  	LLCondition*		mRunCondition;  	apr_thread_t		*mAPRThreadp; -	apr_pool_t			*mAPRPoolp; -	BOOL				mIsLocalPool;  	EThreadStatus		mStatus;  	U32					mID; - -	//a local apr_pool for APRFile operations in this thread. If it exists, LLAPRFile::sAPRFilePoolp should not be used. -	//Note: this pool is used by APRFile ONLY, do NOT use it for any other purposes. -	//      otherwise it will cause severe memory leaking!!! --bao -	LLVolatileAPRPool  *mLocalAPRFilePoolp ;  +	 +	friend void LLThreadLocalData::create(LLThread* threadp); +	LLThreadLocalData*	mThreadLocalData;  	void setQuitting(); @@ -137,7 +153,15 @@ protected:  #define MUTEX_DEBUG (LL_DEBUG || LL_RELEASE_WITH_DEBUG_INFO) -class LL_COMMON_API LLMutex +#ifdef MUTEX_DEBUG +// We really shouldn't be using recursive locks. Make sure of that in debug mode. +#define MUTEX_FLAG APR_THREAD_MUTEX_UNNESTED +#else +// Use the fastest platform-optimal lock behavior (can be recursive or non-recursive). +#define MUTEX_FLAG APR_THREAD_MUTEX_DEFAULT +#endif + +class LL_COMMON_API LLMutexBase  {  public:  	typedef enum @@ -145,32 +169,74 @@ public:  		NO_THREAD = 0xFFFFFFFF  	} e_locking_thread; -	LLMutex(apr_pool_t *apr_poolp); // NULL pool constructs a new pool for the mutex -	virtual ~LLMutex(); -	 -	void lock();		// blocks -	void unlock(); -	bool isLocked(); 	// non-blocking, but does do a lock/unlock so not free -	U32 lockingThread() const; //get ID of locking thread -	 +	LLMutexBase() ; + +	void lock() ; +	void unlock() ; +	// Returns true if lock was obtained successfully. +	bool trylock() { return !APR_STATUS_IS_EBUSY(apr_thread_mutex_trylock(mAPRMutexp)); } + +	// non-blocking, but does do a lock/unlock so not free +	bool isLocked() { bool is_not_locked = trylock(); if (is_not_locked) unlock(); return !is_not_locked; } +  protected: -	apr_thread_mutex_t *mAPRMutexp; +	// mAPRMutexp is initialized and uninitialized in the derived class. +	apr_thread_mutex_t* mAPRMutexp;  	mutable U32			mCount;  	mutable U32			mLockingThread; -	 -	apr_pool_t			*mAPRPoolp; -	BOOL				mIsLocalPool; -	 -#if MUTEX_DEBUG -	std::map<U32, BOOL> mIsLocked; +}; + +class LL_COMMON_API LLMutex : public LLMutexBase +{ +public: +	LLMutex(LLAPRPool& parent = LLThread::tldata().mRootPool) : mPool(parent) +	{ +		apr_thread_mutex_create(&mAPRMutexp, MUTEX_FLAG, mPool()); +	} +	~LLMutex() +	{ +		//this assertion erroneously triggers whenever an LLCondition is destroyed +		//llassert(!isLocked()); // better not be locked! +		apr_thread_mutex_destroy(mAPRMutexp); +		mAPRMutexp = NULL; +	} + +protected: +	LLAPRPool mPool; +}; + +#if APR_HAS_THREADS +// No need to use a root pool in this case. +typedef LLMutex LLMutexRootPool; +#else // APR_HAS_THREADS +class LL_COMMON_API LLMutexRootPool : public LLMutexBase +{ +public: +	LLMutexRootPool(void) +	{ +		apr_thread_mutex_create(&mAPRMutexp, MUTEX_FLAG, mRootPool()); +	} +	~LLMutexRootPool() +	{ +#if APR_POOL_DEBUG +		// It is allowed to destruct root pools from a different thread. 
+		mRootPool.grab_ownership();  #endif +		llassert(!isLocked()); +		apr_thread_mutex_destroy(mAPRMutexp); +		mAPRMutexp = NULL; +	} + +protected: +	LLAPRRootPool mRootPool;  }; +#endif // APR_HAS_THREADS  // Actually a condition/mutex pair (since each condition needs to be associated with a mutex).  class LL_COMMON_API LLCondition : public LLMutex  {  public: -	LLCondition(apr_pool_t *apr_poolp); // Defaults to global pool, could use the thread pool as well. +	LLCondition(LLAPRPool& parent = LLThread::tldata().mRootPool);  	~LLCondition();  	void wait();		// blocks @@ -181,10 +247,10 @@ protected:  	apr_thread_cond_t *mAPRCondp;  }; -class LLMutexLock +class LL_COMMON_API LLMutexLock  {  public: -	LLMutexLock(LLMutex* mutex) +	LLMutexLock(LLMutexBase* mutex)  	{  		mMutex = mutex;  		mMutex->lock(); @@ -194,7 +260,7 @@ public:  		mMutex->unlock();  	}  private: -	LLMutex* mMutex; +	LLMutexBase* mMutex;  };  //============================================================================ diff --git a/indra/llcommon/llthreadsafequeue.cpp b/indra/llcommon/llthreadsafequeue.cpp index 8a73e632a9..05d24944f3 100644 --- a/indra/llcommon/llthreadsafequeue.cpp +++ b/indra/llcommon/llthreadsafequeue.cpp @@ -34,19 +34,11 @@  //----------------------------------------------------------------------------- -LLThreadSafeQueueImplementation::LLThreadSafeQueueImplementation(apr_pool_t * pool, unsigned int capacity): -	mOwnsPool(pool == 0), -	mPool(pool), +LLThreadSafeQueueImplementation::LLThreadSafeQueueImplementation(unsigned int capacity):  	mQueue(0)  { -	if(mOwnsPool) { -		apr_status_t status = apr_pool_create(&mPool, 0); -		if(status != APR_SUCCESS) throw LLThreadSafeQueueError("failed to allocate pool"); -	} else { -		; // No op. -	} -	 -	apr_status_t status = apr_queue_create(&mQueue, capacity, mPool); +	mPool.create(); +	apr_status_t status = apr_queue_create(&mQueue, capacity, mPool());  	if(status != APR_SUCCESS) throw LLThreadSafeQueueError("failed to allocate queue");  } @@ -59,7 +51,6 @@ LLThreadSafeQueueImplementation::~LLThreadSafeQueueImplementation()  			" elements;" << "memory will be leaked" << LL_ENDL;  		apr_queue_term(mQueue);  	} -	if(mOwnsPool && (mPool != 0)) apr_pool_destroy(mPool);  } diff --git a/indra/llcommon/llthreadsafequeue.h b/indra/llcommon/llthreadsafequeue.h index 58cac38769..43d0b396f2 100644 --- a/indra/llcommon/llthreadsafequeue.h +++ b/indra/llcommon/llthreadsafequeue.h @@ -30,9 +30,9 @@  #include <string>  #include <stdexcept> +#include "llaprpool.h" -struct apr_pool_t; // From apr_pools.h  class LLThreadSafeQueueImplementation; // See below. @@ -75,7 +75,7 @@ struct apr_queue_t; // From apr_queue.h  class LL_COMMON_API LLThreadSafeQueueImplementation  {  public: -	LLThreadSafeQueueImplementation(apr_pool_t * pool, unsigned int capacity); +	LLThreadSafeQueueImplementation(unsigned int capacity);  	~LLThreadSafeQueueImplementation();  	void pushFront(void * element);  	bool tryPushFront(void * element); @@ -84,8 +84,7 @@ public:  	size_t size();  private: -	bool mOwnsPool; -	apr_pool_t * mPool; +	LLAPRPool mPool;			// The pool used for mQueue.  	apr_queue_t * mQueue;  }; @@ -99,9 +98,8 @@ class LLThreadSafeQueue  public:  	typedef ElementT value_type; -	// If the pool is set to NULL one will be allocated and managed by this -	// queue. -	LLThreadSafeQueue(apr_pool_t * pool = 0, unsigned int capacity = 1024); +	// Constructor. 
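Since LLMutexLock now takes an LLMutexBase*, the same RAII guard works for LLMutex, LLMutexRootPool and LLCondition alike:

LLMutex mutex;
{
    LLMutexLock lock(&mutex);   // locks in the constructor
    // ... critical section ...
}                               // unlocks automatically when 'lock' goes out of scope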
+	LLThreadSafeQueue(unsigned int capacity = 1024);  	// Add an element to the front of queue (will block if the queue has  	// reached capacity). @@ -139,8 +137,8 @@ private:  template<typename ElementT> -LLThreadSafeQueue<ElementT>::LLThreadSafeQueue(apr_pool_t * pool, unsigned int capacity): -	mImplementation(pool, capacity) +LLThreadSafeQueue<ElementT>::LLThreadSafeQueue(unsigned int capacity) : +	mImplementation(capacity)  {  	; // No op.  } diff --git a/indra/llcommon/llversionviewer.h b/indra/llcommon/llversionviewer.h index 0018b8e844..a4b2e06908 100644 --- a/indra/llcommon/llversionviewer.h +++ b/indra/llcommon/llversionviewer.h @@ -27,9 +27,9 @@  #ifndef LL_LLVERSIONVIEWER_H  #define LL_LLVERSIONVIEWER_H -const S32 LL_VERSION_MAJOR = 2; -const S32 LL_VERSION_MINOR = 8; -const S32 LL_VERSION_PATCH = 1; +const S32 LL_VERSION_MAJOR = 3; +const S32 LL_VERSION_MINOR = 0; +const S32 LL_VERSION_PATCH = 6;  const S32 LL_VERSION_BUILD = 0;  const char * const LL_CHANNEL = "Second Life Developer"; diff --git a/indra/llcommon/llworkerthread.cpp b/indra/llcommon/llworkerthread.cpp index 3ac50832fd..6b308bb917 100644 --- a/indra/llcommon/llworkerthread.cpp +++ b/indra/llcommon/llworkerthread.cpp @@ -37,12 +37,7 @@  LLWorkerThread::LLWorkerThread(const std::string& name, bool threaded) :  	LLQueuedThread(name, threaded)  { -	mDeleteMutex = new LLMutex(NULL); - -	if(!mLocalAPRFilePoolp) -	{ -		mLocalAPRFilePoolp = new LLVolatileAPRPool() ; -	} +	mDeleteMutex = new LLMutex;  }  LLWorkerThread::~LLWorkerThread() @@ -204,7 +199,6 @@ LLWorkerClass::LLWorkerClass(LLWorkerThread* workerthread, const std::string& na  	  mWorkerClassName(name),  	  mRequestHandle(LLWorkerThread::nullHandle()),  	  mRequestPriority(LLWorkerThread::PRIORITY_NORMAL), -	  mMutex(NULL),  	  mWorkFlags(0)  {  	if (!mWorkerThread) diff --git a/indra/llcommon/llworkerthread.h b/indra/llcommon/llworkerthread.h index 9bff18303e..bef5ef53fe 100644 --- a/indra/llcommon/llworkerthread.h +++ b/indra/llcommon/llworkerthread.h @@ -94,7 +94,6 @@ public:  private:  	void deleteWorker(LLWorkerClass* workerclass); // schedule for deletion -	  };  //============================================================================ @@ -194,7 +193,7 @@ protected:  	U32 mRequestPriority; // last priority set  private: -	LLMutex mMutex; +	LLMutexRootPool mMutex;		// Use LLMutexRootPool since this object is created and destructed by multiple threads.  	
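The pool parameter is gone from LLThreadSafeQueue as shown above; the implementation now creates its own LLAPRPool, so construction reduces to a capacity choice. The old-style call in the comment is only for contrast.

// Previously: LLThreadSafeQueue<S32> queue(some_apr_pool, 1024);
LLThreadSafeQueue<S32> queue;            // default capacity of 1024
LLThreadSafeQueue<S32> small_queue(16);  // or pick an explicit capacity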
LLAtomicU32 mWorkFlags;  }; diff --git a/indra/llcommon/tests/llinstancetracker_test.cpp b/indra/llcommon/tests/llinstancetracker_test.cpp index c7cb488ca1..b34d1c5fd3 100644 --- a/indra/llcommon/tests/llinstancetracker_test.cpp +++ b/indra/llcommon/tests/llinstancetracker_test.cpp @@ -40,6 +40,7 @@  #include <boost/scoped_ptr.hpp>  // other Linden headers  #include "../test/lltut.h" +#include "wrapllerrs.h"  struct Keyed: public LLInstanceTracker<Keyed, std::string>  { @@ -151,33 +152,81 @@ namespace tut      {          Unkeyed one, two, three;          typedef std::set<Unkeyed*> KeySet; -        KeySet keys; -        keys.insert(&one); -        keys.insert(&two); -        keys.insert(&three); -	{ -		Unkeyed::LLInstanceTrackerScopedGuard guard; -		for (Unkeyed::key_iter ki(guard.beginKeys()), kend(guard.endKeys()); -		     ki != kend; ++ki) -		{ -			ensure_equals("spurious key", keys.erase(*ki), 1); -		} -	} -        ensure_equals("unreported key", keys.size(), 0); - +              KeySet instances;          instances.insert(&one);          instances.insert(&two);          instances.insert(&three); -	{ -		Unkeyed::LLInstanceTrackerScopedGuard guard; -		for (Unkeyed::instance_iter ii(guard.beginInstances()), iend(guard.endInstances()); -		     ii != iend; ++ii) + +		for (Unkeyed::instance_iter ii(Unkeyed::beginInstances()), iend(Unkeyed::endInstances()); ii != iend; ++ii)  		{  			Unkeyed& ref = *ii;  			ensure_equals("spurious instance", instances.erase(&ref), 1);  		} -	} +          ensure_equals("unreported instance", instances.size(), 0);      } + +    template<> template<> +    void object::test<5>() +    { +        set_test_name("delete Keyed with outstanding instance_iter"); +        std::string what; +        Keyed* keyed = new Keyed("one"); +        { +            WrapLL_ERRS wrapper; +            Keyed::instance_iter i(Keyed::beginInstances()); +            try +            { +                delete keyed; +            } +            catch (const WrapLL_ERRS::FatalException& e) +            { +                what = e.what(); +            } +        } +        ensure(! what.empty()); +    } + +    template<> template<> +    void object::test<6>() +    { +        set_test_name("delete Keyed with outstanding key_iter"); +        std::string what; +        Keyed* keyed = new Keyed("one"); +        { +            WrapLL_ERRS wrapper; +            Keyed::key_iter i(Keyed::beginKeys()); +            try +            { +                delete keyed; +            } +            catch (const WrapLL_ERRS::FatalException& e) +            { +                what = e.what(); +            } +        } +        ensure(! what.empty()); +    } + +    template<> template<> +    void object::test<7>() +    { +        set_test_name("delete Unkeyed with outstanding instance_iter"); +        std::string what; +        Unkeyed* unkeyed = new Unkeyed; +        { +            WrapLL_ERRS wrapper; +            Unkeyed::instance_iter i(Unkeyed::beginInstances()); +            try +            { +                delete unkeyed; +            } +            catch (const WrapLL_ERRS::FatalException& e) +            { +                what = e.what(); +            } +        } +        ensure(! 
what.empty()); +    }  } // namespace tut diff --git a/indra/llcommon/tests/llsdserialize_test.cpp b/indra/llcommon/tests/llsdserialize_test.cpp index 7b4c7d6a48..72322c3b72 100644 --- a/indra/llcommon/tests/llsdserialize_test.cpp +++ b/indra/llcommon/tests/llsdserialize_test.cpp @@ -25,35 +25,293 @@   * $/LicenseInfo$   */ -#if !LL_WINDOWS + +#include "linden_common.h" + +#if LL_WINDOWS +#include <winsock2.h> +typedef U32 uint32_t; +#include <process.h> +#include <io.h> +#else +#include <unistd.h>  #include <netinet/in.h> +#include <errno.h> +#include <fcntl.h> +#include <sys/stat.h> +#include <sys/wait.h> +#include "llprocesslauncher.h"  #endif -#include "linden_common.h" +#include <sstream> + +/*==========================================================================*| +// Whoops, seems Linden's Boost package and the viewer are built with +// different settings of VC's /Zc:wchar_t switch! Using Boost.Filesystem +// pathname operations produces Windows link errors: +// unresolved external symbol "private: static class std::codecvt<unsigned short, +// char,int> const * & __cdecl boost::filesystem3::path::wchar_t_codecvt_facet()" +// unresolved external symbol "void __cdecl boost::filesystem3::path_traits::convert()" +// See: +// http://boost.2283326.n4.nabble.com/filesystem-v3-unicode-and-std-codecvt-linker-error-td3455549.html +// which points to: +// http://msdn.microsoft.com/en-us/library/dh8che7s%28v=VS.100%29.aspx + +// As we're not trying to preserve compatibility with old Boost.Filesystem +// code, but rather writing brand-new code, use the newest available +// Filesystem API. +#define BOOST_FILESYSTEM_VERSION 3 +#include "boost/filesystem.hpp" +#include "boost/filesystem/v3/fstream.hpp" +|*==========================================================================*/ +#include "boost/range.hpp" +#include "boost/foreach.hpp" +#include "boost/function.hpp" +#include "boost/lambda/lambda.hpp" +#include "boost/lambda/bind.hpp" +namespace lambda = boost::lambda; +/*==========================================================================*| +// Aaaarrgh, Linden's Boost package doesn't even include Boost.Iostreams! +#include "boost/iostreams/stream.hpp" +#include "boost/iostreams/device/file_descriptor.hpp" +|*==========================================================================*/ +  #include "../llsd.h"  #include "../llsdserialize.h" +#include "llsdutil.h"  #include "../llformat.h"  #include "../test/lltut.h" +#include "stringize.h" +std::vector<U8> string_to_vector(const std::string& str) +{ +	return std::vector<U8>(str.begin(), str.end()); +} -#if LL_WINDOWS -#include <winsock2.h> -typedef U32 uint32_t; -#endif +#if ! LL_WINDOWS +// We want to call strerror_r(), but alarmingly, there are two different +// variants. The one that returns int always populates the passed buffer +// (except in case of error), whereas the other one always returns a valid +// char* but might or might not populate the passed buffer. How do we know +// which one we're getting? Define adapters for each and let the compiler +// select the applicable adapter. -std::vector<U8> string_to_vector(std::string str) +// strerror_r() returns char* +std::string message_from(int /*orig_errno*/, const char* /*buffer*/, const char* strerror_ret)  { -	// bc LLSD can't... 
-	size_t len = (size_t)str.length(); -	std::vector<U8> v(len); -	for (size_t i = 0; i < len ; i++) -	{ -		v[i] = str[i]; -	} -	return v; +    return strerror_ret;  } +// strerror_r() returns int +std::string message_from(int orig_errno, const char* buffer, int strerror_ret) +{ +    if (strerror_ret == 0) +    { +        return buffer; +    } +    // Here strerror_r() has set errno. Since strerror_r() has already failed, +    // seems like a poor bet to call it again to diagnose its own error... +    int stre_errno = errno; +    if (stre_errno == ERANGE) +    { +        return STRINGIZE("strerror_r() can't explain errno " << orig_errno +                         << " (buffer too small)"); +    } +    if (stre_errno == EINVAL) +    { +        return STRINGIZE("unknown errno " << orig_errno); +    } +    // Here we don't even understand the errno from strerror_r()! +    return STRINGIZE("strerror_r() can't explain errno " << orig_errno +                     << " (error " << stre_errno << ')'); +} +#endif  // ! LL_WINDOWS + +// boost::filesystem::temp_directory_path() isn't yet in Boost 1.45! :-( +std::string temp_directory_path() +{ +#if LL_WINDOWS +    char buffer[4096]; +    GetTempPathA(sizeof(buffer), buffer); +    return buffer; + +#else  // LL_DARWIN, LL_LINUX +    static const char* vars[] = { "TMPDIR", "TMP", "TEMP", "TEMPDIR" }; +    BOOST_FOREACH(const char* var, vars) +    { +        const char* found = getenv(var); +        if (found) +            return found; +    } +    return "/tmp"; +#endif // LL_DARWIN, LL_LINUX +} + +// Windows presents a kinda sorta compatibility layer. Code to the yucky +// Windows names because they're less likely than the Posix names to collide +// with any other names in this source. +#if LL_WINDOWS +#define _remove   DeleteFileA +#else  // ! LL_WINDOWS +#define _open     open +#define _write    write +#define _close    close +#define _remove   remove +#endif  // ! LL_WINDOWS + +// Create a text file with specified content "somewhere in the +// filesystem," cleaning up when it goes out of scope. +class NamedTempFile +{ +public: +    // Function that accepts an ostream ref and (presumably) writes stuff to +    // it, e.g.: +    // (lambda::_1 << "the value is " << 17 << '\n') +    typedef boost::function<void(std::ostream&)> Streamer; + +    NamedTempFile(const std::string& ext, const std::string& content): +        mPath(temp_directory_path()) +    { +        createFile(ext, lambda::_1 << content); +    } + +    // Disambiguate when passing string literal +    NamedTempFile(const std::string& ext, const char* content): +        mPath(temp_directory_path()) +    { +        createFile(ext, lambda::_1 << content); +    } + +    NamedTempFile(const std::string& ext, const Streamer& func): +        mPath(temp_directory_path()) +    { +        createFile(ext, func); +    } + +    ~NamedTempFile() +    { +        _remove(mPath.c_str()); +    } + +    std::string getName() const { return mPath; } + +private: +    void createFile(const std::string& ext, const Streamer& func) +    { +        // Silly maybe, but use 'ext' as the name prefix. Strip off a leading +        // '.' if present. +        int pfx_offset = ((! ext.empty()) && ext[0] == '.')? 1 : 0; + +#if ! LL_WINDOWS +        // Make sure mPath ends with a directory separator, if it doesn't already. +        if (mPath.empty() || +            ! 
(mPath[mPath.length() - 1] == '\\' || mPath[mPath.length() - 1] == '/')) +        { +            mPath.append("/"); +        } + +        // mkstemp() accepts and modifies a char* template string. Generate +        // the template string, then copy to modifiable storage. +        // mkstemp() requires its template string to end in six X's. +        mPath += ext.substr(pfx_offset) + "XXXXXX"; +        // Copy to vector<char> +        std::vector<char> pathtemplate(mPath.begin(), mPath.end()); +        // append a nul byte for classic-C semantics +        pathtemplate.push_back('\0'); +        // std::vector promises that a pointer to the 0th element is the same +        // as a pointer to a contiguous classic-C array +        int fd(mkstemp(&pathtemplate[0])); +        if (fd == -1) +        { +            // The documented errno values (http://linux.die.net/man/3/mkstemp) +            // are used in a somewhat unusual way, so provide context-specific +            // errors. +            if (errno == EEXIST) +            { +                LL_ERRS("NamedTempFile") << "mkstemp(\"" << mPath +                                         << "\") could not create unique file " << LL_ENDL; +            } +            if (errno == EINVAL) +            { +                LL_ERRS("NamedTempFile") << "bad mkstemp() file path template '" +                                         << mPath << "'" << LL_ENDL; +            } +            // Shrug, something else +            int mkst_errno = errno; +            char buffer[256]; +            LL_ERRS("NamedTempFile") << "mkstemp(\"" << mPath << "\") failed: " +                                     << message_from(mkst_errno, buffer, +                                                     strerror_r(mkst_errno, buffer, sizeof(buffer))) +                                     << LL_ENDL; +        } +        // mkstemp() seems to have worked! Capture the modified filename. +        // Avoid the nul byte we appended. +        mPath.assign(pathtemplate.begin(), (pathtemplate.end()-1)); + +/*==========================================================================*| +        // Define an ostream on the open fd. Tell it to close fd on destruction. +        boost::iostreams::stream<boost::iostreams::file_descriptor_sink> +            out(fd, boost::iostreams::close_handle); +|*==========================================================================*/ + +        // Write desired content. +        std::ostringstream out; +        // Stream stuff to it. +        func(out); + +        std::string data(out.str()); +        int written(_write(fd, data.c_str(), data.length())); +        int closed(_close(fd)); +        llassert_always(written == data.length() && closed == 0); + +#else // LL_WINDOWS +        // GetTempFileName() is documented to require a MAX_PATH buffer. +        char tempname[MAX_PATH]; +        // Use 'ext' as filename prefix, but skip leading '.' if any. +        // The 0 param is very important: requests iterating until we get a +        // unique name. +        if (0 == GetTempFileNameA(mPath.c_str(), ext.c_str() + pfx_offset, 0, tempname)) +        { +            // I always have to look up this call...  
:-P +            LPSTR msgptr; +            FormatMessageA( +                FORMAT_MESSAGE_ALLOCATE_BUFFER |  +                FORMAT_MESSAGE_FROM_SYSTEM | +                FORMAT_MESSAGE_IGNORE_INSERTS, +                NULL, +                GetLastError(), +                MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), +                LPSTR(&msgptr),     // have to cast (char**) to (char*) +                0, NULL ); +            LL_ERRS("NamedTempFile") << "GetTempFileName(\"" << mPath << "\", \"" +                                     << (ext.c_str() + pfx_offset) << "\") failed: " +                                     << msgptr << LL_ENDL; +            LocalFree(msgptr); +        } +        // GetTempFileName() appears to have worked! Capture the actual +        // filename. +        mPath = tempname; +        // Open the file and stream content to it. Destructor will close. +        std::ofstream out(tempname); +        func(out); + +#endif  // LL_WINDOWS +    } + +    void peep() +    { +        std::cout << "File '" << mPath << "' contains:\n"; +        std::ifstream reader(mPath.c_str()); +        std::string line; +        while (std::getline(reader, line)) +            std::cout << line << '\n'; +        std::cout << "---\n"; +    } + +    std::string mPath; +}; +  namespace tut  {  	struct sd_xml_data @@ -1494,5 +1752,223 @@ namespace tut  		ensureBinaryAndNotation("map", test);  		ensureBinaryAndXML("map", test);  	} -} +    struct TestPythonCompatible +    { +        TestPythonCompatible(): +            // Note the peculiar insertion of __FILE__ into this string. Since +            // this script is being written into a platform-dependent temp +            // directory, we can't locate indra/lib/python relative to +            // Python's __file__. Use __FILE__ instead, navigating relative +            // to this C++ source file. Use Python raw-string syntax so +            // Windows pathname backslashes won't mislead Python's string +            // scanner. +            import_llsd("import os.path\n" +                        "import sys\n" +                        "sys.path.insert(0,\n" +                        "    os.path.join(os.path.dirname(r'" __FILE__ "'),\n" +                        "                 os.pardir, os.pardir, 'lib', 'python'))\n" +                        "try:\n" +                        "    from llbase import llsd\n" +                        "except ImportError:\n" +                        "    from indra.base import llsd\n") +        {} +        ~TestPythonCompatible() {} + +        std::string import_llsd; + +        template <typename CONTENT> +        void python(const std::string& desc, const CONTENT& script, int expect=0) +        { +            const char* PYTHON(getenv("PYTHON")); +            ensure("Set $PYTHON to the Python interpreter", PYTHON); + +            NamedTempFile scriptfile(".py", script); + +#if LL_WINDOWS +            std::string q("\""); +            std::string qPYTHON(q + PYTHON + q); +            std::string qscript(q + scriptfile.getName() + q); +            int rc = _spawnl(_P_WAIT, PYTHON, qPYTHON.c_str(), qscript.c_str(), NULL); +            if (rc == -1) +            { +                char buffer[256]; +                strerror_s(buffer, errno); // C++ can infer the buffer size!  
:-O +                ensure(STRINGIZE("Couldn't run Python " << desc << "script: " << buffer), false); +            } +            else +            { +                ensure_equals(STRINGIZE(desc << " script terminated with rc " << rc), rc, expect); +            } + +#else  // LL_DARWIN, LL_LINUX +            LLProcessLauncher py; +            py.setExecutable(PYTHON); +            py.addArgument(scriptfile.getName()); +            ensure_equals(STRINGIZE("Couldn't launch " << desc << " script"), py.launch(), 0); +            // Implementing timeout would mean messing with alarm() and +            // catching SIGALRM... later maybe... +            int status(0); +            if (waitpid(py.getProcessID(), &status, 0) == -1) +            { +                int waitpid_errno(errno); +                ensure_equals(STRINGIZE("Couldn't retrieve rc from " << desc << " script: " +                                        "waitpid() errno " << waitpid_errno), +                              waitpid_errno, ECHILD); +            } +            else +            { +                if (WIFEXITED(status)) +                { +                    int rc(WEXITSTATUS(status)); +                    ensure_equals(STRINGIZE(desc << " script terminated with rc " << rc), +                                  rc, expect); +                } +                else if (WIFSIGNALED(status)) +                { +                    ensure(STRINGIZE(desc << " script terminated by signal " << WTERMSIG(status)), +                           false); +                } +                else +                { +                    ensure(STRINGIZE(desc << " script produced impossible status " << status), +                           false); +                } +            } +#endif +        } +    }; + +    typedef tut::test_group<TestPythonCompatible> TestPythonCompatibleGroup; +    typedef TestPythonCompatibleGroup::object TestPythonCompatibleObject; +    TestPythonCompatibleGroup pycompat("LLSD serialize Python compatibility"); + +    template<> template<> +    void TestPythonCompatibleObject::test<1>() +    { +        set_test_name("verify python()"); +        python("hello", +               "import sys\n" +               "sys.exit(17)\n", +               17);                 // expect nonzero rc +    } + +    template<> template<> +    void TestPythonCompatibleObject::test<2>() +    { +        set_test_name("verify NamedTempFile"); +        python("platform", +               "import sys\n" +               "print 'Running on', sys.platform\n"); +    } + +    template<> template<> +    void TestPythonCompatibleObject::test<3>() +    { +        set_test_name("verify sequence to Python"); + +        LLSD cdata(LLSDArray(17)(3.14) +                  ("This string\n" +                   "has several\n" +                   "lines.")); + +        const char pydata[] = +            "def verify(iterable):\n" +            "    it = iter(iterable)\n" +            "    assert it.next() == 17\n" +            "    assert abs(it.next() - 3.14) < 0.01\n" +            "    assert it.next() == '''\\\n" +            "This string\n" +            "has several\n" +            "lines.'''\n" +            "    try:\n" +            "        it.next()\n" +            "    except StopIteration:\n" +            "        pass\n" +            "    else:\n" +            "        assert False, 'Too many data items'\n"; + +        // Create a something.llsd file containing 'data' serialized to +        // notation. 
It's important to separate with newlines because Python's +        // llsd module doesn't support parsing from a file stream, only from a +        // string, so we have to know how much of the file to read into a +        // string. +        NamedTempFile file(".llsd", +                           // NamedTempFile's boost::function constructor +                           // takes a callable. To this callable it passes the +                           // std::ostream with which it's writing the +                           // NamedTempFile. This lambda-based expression +                           // first calls LLSD::Serialize() with that ostream, +                           // then streams a newline to it, etc. +                           (lambda::bind(LLSDSerialize::toNotation, cdata[0], lambda::_1), +                            lambda::_1 << '\n', +                            lambda::bind(LLSDSerialize::toNotation, cdata[1], lambda::_1), +                            lambda::_1 << '\n', +                            lambda::bind(LLSDSerialize::toNotation, cdata[2], lambda::_1), +                            lambda::_1 << '\n')); + +        python("read C++ notation", +               lambda::_1 << +               import_llsd << +               "def parse_each(iterable):\n" +               "    for item in iterable:\n" +               "        yield llsd.parse(item)\n" << +               pydata << +               // Don't forget raw-string syntax for Windows pathnames. +               "verify(parse_each(open(r'" << file.getName() << "')))\n"); +    } + +    template<> template<> +    void TestPythonCompatibleObject::test<4>() +    { +        set_test_name("verify sequence from Python"); + +        // Create an empty data file. This is just a placeholder for our +        // script to write into. Create it to establish a unique name that +        // we know. +        NamedTempFile file(".llsd", ""); + +        python("write Python notation", +               lambda::_1 << +               "from __future__ import with_statement\n" << +               import_llsd << +               "DATA = [\n" +               "    17,\n" +               "    3.14,\n" +               "    '''\\\n" +               "This string\n" +               "has several\n" +               "lines.''',\n" +               "]\n" +               // Don't forget raw-string syntax for Windows pathnames. +               // N.B. Using 'print' implicitly adds newlines. +               "with open(r'" << file.getName() << "', 'w') as f:\n" +               "    for item in DATA:\n" +               "        print >>f, llsd.format_notation(item)\n"); + +        std::ifstream inf(file.getName().c_str()); +        LLSD item; +        // Notice that we're not doing anything special to parse out the +        // newlines: LLSDSerialize::fromNotation ignores them. While it would +        // seem they're not strictly necessary, going in this direction, we +        // want to ensure that notation-separated-by-newlines works in both +        // directions -- since in practice, a given file might be read by +        // either language. 
+        ensure_equals("Failed to read LLSD::Integer from Python", +                      LLSDSerialize::fromNotation(item, inf, LLSDSerialize::SIZE_UNLIMITED), +                      1); +        ensure_equals(item.asInteger(), 17); +        ensure_equals("Failed to read LLSD::Real from Python", +                      LLSDSerialize::fromNotation(item, inf, LLSDSerialize::SIZE_UNLIMITED), +                      1); +        ensure_approximately_equals("Bad LLSD::Real value from Python", +                                    item.asReal(), 3.14, 7); // 7 bits ~= 0.01 +        ensure_equals("Failed to read LLSD::String from Python", +                      LLSDSerialize::fromNotation(item, inf, LLSDSerialize::SIZE_UNLIMITED), +                      1); +        ensure_equals(item.asString(),  +                      "This string\n" +                      "has several\n" +                      "lines."); +    } +} diff --git a/indra/llcommon/tests/llsingleton_test.cpp b/indra/llcommon/tests/llsingleton_test.cpp new file mode 100644 index 0000000000..385289aefe --- /dev/null +++ b/indra/llcommon/tests/llsingleton_test.cpp @@ -0,0 +1,76 @@ +/**  + * @file llsingleton_test.cpp + * @date 2011-08-11 + * @brief Unit test for the LLSingleton class + * + * $LicenseInfo:firstyear=2011&license=viewerlgpl$ + * Second Life Viewer Source Code + * Copyright (C) 2011, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA + * + * Linden Research, Inc., 945 Battery Street, San Francisco, CA  94111  USA + * $/LicenseInfo$ + */ + +#include "linden_common.h" + +#include "llsingleton.h" +#include "../test/lltut.h" + +namespace tut +{ +	struct singleton +	{ +		// We need a class created with the LLSingleton template to test with. +		class LLSingletonTest: public LLSingleton<LLSingletonTest> +		{ + +		}; +	}; + +	typedef test_group<singleton> singleton_t; +	typedef singleton_t::object singleton_object_t; +	tut::singleton_t tut_singleton("LLSingleton"); + +	template<> template<> +	void singleton_object_t::test<1>() +	{ + +	} +	template<> template<> +	void singleton_object_t::test<2>() +	{ +		LLSingletonTest* singleton_test = LLSingletonTest::getInstance(); +		ensure(singleton_test); +	} +	template<> template<> +	void singleton_object_t::test<3>() +	{ +		//Construct the instance +		LLSingletonTest::getInstance(); +		ensure(LLSingletonTest::instanceExists()); + +		//Delete the instance +		LLSingletonTest::deleteSingleton(); +		ensure(LLSingletonTest::destroyed()); +		ensure(!LLSingletonTest::instanceExists()); + +		//Construct it again. 
+		LLSingletonTest* singleton_test = LLSingletonTest::getInstance(); +		ensure(singleton_test); +		ensure(LLSingletonTest::instanceExists()); +	} +} diff --git a/indra/llcommon/tests/llstring_test.cpp b/indra/llcommon/tests/llstring_test.cpp index 304e91ed92..6a1cbf652a 100644 --- a/indra/llcommon/tests/llstring_test.cpp +++ b/indra/llcommon/tests/llstring_test.cpp @@ -624,6 +624,14 @@ namespace tut  		subcount = LLStringUtil::format(s, fmt_map);  		ensure_equals("LLStringUtil::format: Assorted Test2 result", s, "?Am I not a long string?short[A]bbbaaaba[A]");  		ensure_equals("LLStringUtil::format: Assorted Test2 result count", 9, subcount); +		 +		// Test on nested brackets +		std::string srcs6 = "[[TRICK1]][[A]][[B]][[AAA]][[BBB]][[TRICK2]][[KEYLONGER]][[KEYSHORTER]]?[[DELETE]]"; +		s = srcs6; +		subcount = LLStringUtil::format(s, fmt_map); +		ensure_equals("LLStringUtil::format: Assorted Test2 result", s, "[[A]][a][b][aaa][bbb][[A]][short][Am I not a long string?]?[]"); +		ensure_equals("LLStringUtil::format: Assorted Test2 result count", 9, subcount); +  		// Test an assorted substitution  		std::string srcs8 = "foo[DELETE]bar?"; diff --git a/indra/llcommon/tests/setpython.py b/indra/llcommon/tests/setpython.py new file mode 100644 index 0000000000..df7b90428e --- /dev/null +++ b/indra/llcommon/tests/setpython.py @@ -0,0 +1,19 @@ +#!/usr/bin/python +"""\ +@file   setpython.py +@author Nat Goodspeed +@date   2011-07-13 +@brief  Set PYTHON environment variable for tests that care. + +$LicenseInfo:firstyear=2011&license=viewerlgpl$ +Copyright (c) 2011, Linden Research, Inc. +$/LicenseInfo$ +""" + +import os +import sys +import subprocess + +if __name__ == "__main__": +    os.environ["PYTHON"] = sys.executable +    sys.exit(subprocess.call(sys.argv[1:])) | 
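
With the APR pool parameter gone from LLThreadSafeQueue, call sites construct a queue from a capacity alone. A minimal sketch of such a call site; the header name and element type are assumptions, not taken from this patch:

    #include "linden_common.h"
    #include "llthreadsafequeue.h"   // header name assumed from the class name

    void makeQueues()
    {
        // The constructor is now LLThreadSafeQueue(unsigned int capacity = 1024);
        // no apr_pool_t* has to be threaded through any more.
        LLThreadSafeQueue<S32> defaultQueue;        // capacity 1024
        LLThreadSafeQueue<S32> largeQueue(4096);    // explicit capacity
    }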
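The same pool cleanup shows up in the mutex changes: LLMutex now default-constructs (llworkerthread.cpp drops the NULL pool argument), and llworkerthread.h switches LLWorkerClass::mMutex to LLMutexRootPool for objects that are created and destroyed on different threads. A sketch under the assumption that both classes are declared in llthread.h; the class below is invented for illustration:

    #include "linden_common.h"
    #include "llthread.h"            // assumed location of LLMutex / LLMutexRootPool

    class CrossThreadWorkerData      // hypothetical class, not part of the patch
    {
    public:
        CrossThreadWorkerData() : mQueueMutex(new LLMutex) {}   // was: new LLMutex(NULL)
        ~CrossThreadWorkerData() { delete mQueueMutex; }

    private:
        LLMutex* mQueueMutex;
        // Per the new comment in llworkerthread.h: prefer LLMutexRootPool when the
        // owning object may be constructed on one thread and destructed on another.
        LLMutexRootPool mMemberMutex;
    };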
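The llinstancetracker_test changes replace the LLInstanceTrackerScopedGuard dance with plain static iterators, and the three new tests pin down that deleting a tracked object while an iterator is still alive is now a fatal error. Reusing the test's Unkeyed type, iteration looks like the sketch below (illustrative only, not code from the patch):

    // Count live Unkeyed instances with the static iterator API; no scoped guard needed.
    S32 countUnkeyed()
    {
        S32 count = 0;
        for (Unkeyed::instance_iter ii(Unkeyed::beginInstances()), iend(Unkeyed::endInstances());
             ii != iend; ++ii)
        {
            Unkeyed& ref = *ii;     // dereferencing yields a reference to the tracked instance
            (void)ref;
            ++count;
        }
        return count;
        // Deleting an Unkeyed while 'ii' is still in scope would now hit LL_ERRS,
        // which is exactly what tests 5-7 exercise via WrapLL_ERRS.
    }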
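NamedTempFile is the workhorse of the new Python-compatibility tests: it creates a uniquely named file in the platform temp directory, hands back the path via getName(), and removes the file in its destructor. A minimal usage sketch; the helper is local to llsdserialize_test.cpp, so the surrounding function is illustrative:

    #include <fstream>
    #include <iostream>
    #include <string>

    void demoNamedTempFile()
    {
        // Literal-content form: the destructor deletes the file when 'scratch' goes out of scope.
        NamedTempFile scratch(".txt", "first line\nsecond line\n");
        std::ifstream in(scratch.getName().c_str());
        std::string line;
        while (std::getline(in, line))
            std::cout << line << '\n';

        // Streamer form: compose the content with Boost.Lambda, as the LLSD tests do.
        NamedTempFile generated(".txt", lambda::_1 << "the value is " << 17 << '\n');
    }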
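The new llsingleton_test.cpp exercises the full lifecycle: lazy construction through getInstance(), interrogation with instanceExists(), explicit teardown with deleteSingleton(), and reconstruction afterwards. The same pattern applied to a hypothetical singleton (the class name below is made up; only the LLSingleton calls come from the test):

    #include "linden_common.h"
    #include "llsingleton.h"

    class DemoService : public LLSingleton<DemoService>   // hypothetical singleton
    {
    };

    void demoSingletonLifecycle()
    {
        DemoService* service = DemoService::getInstance();     // constructs lazily
        llassert_always(service && DemoService::instanceExists());

        DemoService::deleteSingleton();                         // explicit teardown
        llassert_always(!DemoService::instanceExists());

        service = DemoService::getInstance();                   // can be rebuilt afterwards
        llassert_always(service && DemoService::instanceExists());
    }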
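Finally, the llstring_test addition checks how LLStringUtil::format() behaves when a token is wrapped in an extra pair of brackets: reading the expected string, the inner [KEY] is substituted and the outer brackets survive as literal text, so "[[A]]" becomes "[a]". A reduced sketch of that reading; the key, value, and format_map_t type are assumptions rather than taken from the patch, which reuses the file's existing fmt_map:

    #include "linden_common.h"
    #include "llstring.h"

    void demoNestedBrackets()
    {
        LLStringUtil::format_map_t fmt_map;   // assumed: the usual string-to-string format map
        fmt_map["WHO"] = "world";

        std::string s("hello [[WHO]]");
        S32 count = LLStringUtil::format(s, fmt_map);
        // By analogy with the new test case: s should now be "hello [world]" and count 1,
        // i.e. the inner token is replaced and the outer brackets are left alone.
        (void)count;
    }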
