Diffstat (limited to 'indra/llcommon')
-rw-r--r--   indra/llcommon/llapp.cpp          |  3
-rw-r--r--   indra/llcommon/llmemory.cpp       | 31
-rw-r--r--   indra/llcommon/llmemory.h         | 35
-rw-r--r--   indra/llcommon/llqueuedthread.cpp |  4
-rw-r--r--   indra/llcommon/llthread.cpp       | 35
-rw-r--r--   indra/llcommon/llthread.h         | 31
6 files changed, 85 insertions, 54 deletions
diff --git a/indra/llcommon/llapp.cpp b/indra/llcommon/llapp.cpp
index ed192a9975..ca258900c7 100644
--- a/indra/llcommon/llapp.cpp
+++ b/indra/llcommon/llapp.cpp
@@ -289,6 +289,7 @@ void LLApp::setupErrorHandling()
 	// occasionally checks to see if the app is in an error state, and sees if it needs to be run.
 
 #if LL_WINDOWS
+#if LL_SEND_CRASH_REPORTS
 	// This sets a callback to handle w32 signals to the console window.
 	// The viewer shouldn't be affected, sicne its a windowed app.
 	SetConsoleCtrlHandler( (PHANDLER_ROUTINE) ConsoleCtrlHandler, TRUE);
@@ -300,7 +301,7 @@ void LLApp::setupErrorHandling()
 		mExceptionHandler = new google_breakpad::ExceptionHandler(
 			L"C:\\Temp\\", 0, windows_post_minidump_callback, 0, google_breakpad::ExceptionHandler::HANDLER_ALL);
 	}
-
+#endif
 #else
 	//
 	// Start up signal handling.
diff --git a/indra/llcommon/llmemory.cpp b/indra/llcommon/llmemory.cpp
index afaf366668..70ad10ad55 100644
--- a/indra/llcommon/llmemory.cpp
+++ b/indra/llcommon/llmemory.cpp
@@ -252,21 +252,6 @@ U32 LLMemory::getAllocatedMemKB()
 	return sAllocatedMemInKB ;
 }
 
-void* ll_allocate (size_t size)
-{
-	if (size == 0)
-	{
-		llwarns << "Null allocation" << llendl;
-	}
-	void *p = malloc(size);
-	if (p == NULL)
-	{
-		LLMemory::freeReserve();
-		llerrs << "Out of memory Error" << llendl;
-	}
-	return p;
-}
-
 //----------------------------------------------------------------------------
 
 #if defined(LL_WINDOWS)
@@ -1365,7 +1350,7 @@ char* LLPrivateMemoryPool::allocate(U32 size)
 	//if the asked size larger than MAX_BLOCK_SIZE, fetch from heap directly, the pool does not manage it
 	if(size >= CHUNK_SIZE)
 	{
-		return (char*)malloc(size) ;
+		return (char*)ll_aligned_malloc_16(size) ;
 	}
 
 	char* p = NULL ;
@@ -1422,7 +1407,7 @@ char* LLPrivateMemoryPool::allocate(U32 size)
 			to_log = false ;
 		}
 
-		return (char*)malloc(size) ;
+		return (char*)ll_aligned_malloc_16(size) ;
 	}
 
 	return p ;
@@ -1441,7 +1426,7 @@ void LLPrivateMemoryPool::freeMem(void* addr)
 
 	if(!chunk)
 	{
-		free(addr) ; //release from heap
+		ll_aligned_free_16(addr) ; //release from heap
 	}
 	else
 	{
@@ -1565,7 +1550,7 @@ LLPrivateMemoryPool::LLMemoryChunk* LLPrivateMemoryPool::addChunk(S32 chunk_inde
 
 	mReservedPoolSize += preferred_size + overhead ;
 
-	char* buffer = (char*)malloc(preferred_size + overhead) ;
+	char* buffer = (char*)ll_aligned_malloc_16(preferred_size + overhead) ;
 	if(!buffer)
 	{
 		return NULL ;
@@ -1633,7 +1618,7 @@ void LLPrivateMemoryPool::removeChunk(LLMemoryChunk* chunk)
 	mReservedPoolSize -= chunk->getBufferSize() ;
 
 	//release memory
-	free(chunk->getBuffer()) ;
+	ll_aligned_free_16(chunk->getBuffer()) ;
 }
 
 U16 LLPrivateMemoryPool::findHashKey(const char* addr)
@@ -1977,7 +1962,7 @@ char* LLPrivateMemoryPoolManager::allocate(LLPrivateMemoryPool* poolp, U32 size,
 
 	if(!poolp)
 	{
-		p = (char*)malloc(size) ;
+		p = (char*)ll_aligned_malloc_16(size) ;
 	}
 	else
 	{
@@ -2006,7 +1991,7 @@ char* LLPrivateMemoryPoolManager::allocate(LLPrivateMemoryPool* poolp, U32 size)
 	}
 	else
 	{
-		return (char*)malloc(size) ;
+		return (char*)ll_aligned_malloc_16(size) ;
 	}
 }
 #endif
@@ -2031,7 +2016,7 @@ void LLPrivateMemoryPoolManager::freeMem(LLPrivateMemoryPool* poolp, void* addr
 {
 	if(!sPrivatePoolEnabled)
 	{
-		free(addr) ; //private pool is disabled.
+		ll_aligned_free_16(addr) ; //private pool is disabled.
 	}
 	else if(!sInstance) //the private memory manager is destroyed, try the dangling list
 	{
diff --git a/indra/llcommon/llmemory.h b/indra/llcommon/llmemory.h
index 08e2a2caa6..40cde485cf 100644
--- a/indra/llcommon/llmemory.h
+++ b/indra/llcommon/llmemory.h
@@ -27,6 +27,13 @@
 #define LLMEMORY_H
 
 #include "llmemtype.h"
+
+#if LL_WINDOWS && LL_DEBUG
+#define LL_CHECK_MEMORY llassert(_CrtCheckMemory());
+#else
+#define LL_CHECK_MEMORY
+#endif
+
 inline void* ll_aligned_malloc( size_t size, int align )
 {
 	void* mem = malloc( size + (align - 1) + sizeof(void*) );
@@ -58,33 +65,39 @@ inline void* ll_aligned_malloc_16(size_t size) // returned hunk MUST be freed wi
 #endif
 }
 
-inline void* ll_aligned_realloc_16(void* ptr, size_t size) // returned hunk MUST be freed with ll_aligned_free_16().
+inline void ll_aligned_free_16(void *p)
 {
 #if defined(LL_WINDOWS)
-	return _aligned_realloc(ptr, size, 16);
+	_aligned_free(p);
 #elif defined(LL_DARWIN)
-	return realloc(ptr,size); // default osx malloc is 16 byte aligned.
+	return free(p);
 #else
-	// The realloc alignment test is skipped on Linux because the ll_aligned_realloc_16()
-	// function is not implemented to ensure alignment (see alignment_test.cpp)
-	return realloc(ptr,size); // FIXME not guaranteed to be aligned.
+	free(p); // posix_memalign() is compatible with heap deallocator
 #endif
 }
 
-inline void ll_aligned_free_16(void *p)
+inline void* ll_aligned_realloc_16(void* ptr, size_t size, size_t old_size) // returned hunk MUST be freed with ll_aligned_free_16().
 {
 #if defined(LL_WINDOWS)
-	_aligned_free(p);
+	return _aligned_realloc(ptr, size, 16);
 #elif defined(LL_DARWIN)
-	return free(p);
+	return realloc(ptr,size); // default osx malloc is 16 byte aligned.
 #else
-	free(p); // posix_memalign() is compatible with heap deallocator
+	//FIXME: memcpy is SLOW
+	void* ret = ll_aligned_malloc_16(size);
+	if (ptr)
+	{
+		memcpy(ret, ptr, old_size);
+		ll_aligned_free_16(ptr);
+	}
+	return ret;
 #endif
 }
+
 #else // USE_TCMALLOC
 // ll_aligned_foo_16 are not needed with tcmalloc
 #define ll_aligned_malloc_16 malloc
-#define ll_aligned_realloc_16 realloc
+#define ll_aligned_realloc_16(a,b,c) realloc(a,b)
 #define ll_aligned_free_16 free
 #endif // USE_TCMALLOC
diff --git a/indra/llcommon/llqueuedthread.cpp b/indra/llcommon/llqueuedthread.cpp
index 1738c16dea..abf47a0f57 100644
--- a/indra/llcommon/llqueuedthread.cpp
+++ b/indra/llcommon/llqueuedthread.cpp
@@ -134,8 +134,8 @@ S32 LLQueuedThread::updateQueue(F32 max_time_ms)
 		pending = getPending();
 		if(pending > 0)
 		{
-		unpause();
-	}
+			unpause();
+		}
 	}
 	else
 	{
diff --git a/indra/llcommon/llthread.cpp b/indra/llcommon/llthread.cpp
index a6ad6b125c..c2fbb544a8 100644
--- a/indra/llcommon/llthread.cpp
+++ b/indra/llcommon/llthread.cpp
@@ -114,7 +114,7 @@ LLThread::LLThread(const std::string& name, apr_pool_t *poolp) :
 		apr_pool_create(&mAPRPoolp, NULL); // Create a subpool for this thread
 	}
 	mRunCondition = new LLCondition(mAPRPoolp);
-
+	mDataLock = new LLMutex(mAPRPoolp);
 	mLocalAPRFilePoolp = NULL ;
 }
@@ -173,7 +173,10 @@ void LLThread::shutdown()
 	}
 
 	delete mRunCondition;
-	mRunCondition = 0;
+	mRunCondition = NULL;
+
+	delete mDataLock;
+	mDataLock = NULL;
 
 	if (mIsLocalPool && mAPRPoolp)
 	{
@@ -242,28 +245,30 @@ bool LLThread::runCondition(void)
 // Stop thread execution if requested until unpaused.
 void LLThread::checkPause()
 {
-	mRunCondition->lock();
+	mDataLock->lock();
 
 	// This is in a while loop because the pthread API allows for spurious wakeups.
 	while(shouldSleep())
 	{
+		mDataLock->unlock();
 		mRunCondition->wait(); // unlocks mRunCondition
+		mDataLock->lock();
 		// mRunCondition is locked when the thread wakes up
 	}
 
-	mRunCondition->unlock();
+	mDataLock->unlock();
 }
 
 //============================================================================
 
 void LLThread::setQuitting()
 {
-	mRunCondition->lock();
+	mDataLock->lock();
 	if (mStatus == RUNNING)
 	{
 		mStatus = QUITTING;
 	}
-	mRunCondition->unlock();
+	mDataLock->unlock();
 	wake();
 }
@@ -285,12 +290,12 @@ void LLThread::yield()
 
 void LLThread::wake()
 {
-	mRunCondition->lock();
+	mDataLock->lock();
 	if(!shouldSleep())
 	{
 		mRunCondition->signal();
 	}
-	mRunCondition->unlock();
+	mDataLock->unlock();
 }
 
 void LLThread::wakeLocked()
@@ -481,6 +486,19 @@ LLThreadSafeRefCount::LLThreadSafeRefCount() :
 {
 }
 
+LLThreadSafeRefCount::LLThreadSafeRefCount(const LLThreadSafeRefCount& src)
+{
+	if (sMutex)
+	{
+		sMutex->lock();
+	}
+	mRef = 0;
+	if (sMutex)
+	{
+		sMutex->unlock();
+	}
+}
+
 LLThreadSafeRefCount::~LLThreadSafeRefCount()
 {
 	if (mRef != 0)
@@ -489,6 +507,7 @@ LLThreadSafeRefCount::~LLThreadSafeRefCount()
 	}
 }
 
+
 //============================================================================
 
 LLResponder::~LLResponder()
diff --git a/indra/llcommon/llthread.h b/indra/llcommon/llthread.h
index cf39696b4f..c7984d6dbc 100644
--- a/indra/llcommon/llthread.h
+++ b/indra/llcommon/llthread.h
@@ -98,6 +98,7 @@ private:
 protected:
 	std::string		mName;
 	LLCondition*	mRunCondition;
+	LLMutex*		mDataLock;
 
 	apr_thread_t	*mAPRThreadp;
 	apr_pool_t		*mAPRPoolp;
@@ -123,15 +124,15 @@ protected:
 	inline void unlockData();
 
 	// This is the predicate that decides whether the thread should sleep.
-	// It should only be called with mRunCondition locked, since the virtual runCondition() function may need to access
+	// It should only be called with mDataLock locked, since the virtual runCondition() function may need to access
 	// data structures that are thread-unsafe.
 	bool shouldSleep(void) { return (mStatus == RUNNING) && (isPaused() || (!runCondition())); }
 
 	// To avoid spurious signals (and the associated context switches) when the condition may or may not have changed, you can do the following:
-	// mRunCondition->lock();
+	// mDataLock->lock();
 	// if(!shouldSleep())
 	//     mRunCondition->signal();
-	// mRunCondition->unlock();
+	// mDataLock->unlock();
 };
@@ -206,12 +207,12 @@ private:
 
 void LLThread::lockData()
 {
-	mRunCondition->lock();
+	mDataLock->lock();
 }
 
 void LLThread::unlockData()
 {
-	mRunCondition->unlock();
+	mDataLock->unlock();
 }
 
@@ -228,15 +229,27 @@ public:
 
 private:
 	static LLMutex*	sMutex;
 
-private:
-	LLThreadSafeRefCount(const LLThreadSafeRefCount&); // not implemented
-	LLThreadSafeRefCount&operator=(const LLThreadSafeRefCount&); // not implemented
-
 protected:
 	virtual ~LLThreadSafeRefCount(); // use unref()
 
 public:
 	LLThreadSafeRefCount();
+	LLThreadSafeRefCount(const LLThreadSafeRefCount&);
+	LLThreadSafeRefCount& operator=(const LLThreadSafeRefCount& ref)
+	{
+		if (sMutex)
+		{
+			sMutex->lock();
+		}
+		mRef = 0;
+		if (sMutex)
+		{
+			sMutex->unlock();
+		}
+		return *this;
+	}
+
 	void ref()
 	{
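
The llmemory.h hunk above changes ll_aligned_realloc_16() to take the previous allocation size, because the generic (non-Windows, non-Darwin) branch can no longer lean on realloc(): it allocates a fresh 16-byte-aligned block and must know how many bytes to copy out of the old one. Below is a minimal, self-contained sketch of that contract showing only the POSIX branch; the helper names (aligned_malloc_16 and friends) and the shrink guard on the copy are illustration-only stand-ins, not the viewer's actual header.

// Sketch of the "caller supplies old_size" reallocation contract.
// Illustrative only; the real inlines live in indra/llcommon/llmemory.h.
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

static void* aligned_malloc_16(size_t size)
{
    void* p = NULL;
    if (posix_memalign(&p, 16, size) != 0) // 16-byte aligned heap block
    {
        return NULL;
    }
    return p;
}

static void aligned_free_16(void* p)
{
    free(p); // posix_memalign() memory is released with plain free()
}

// Mirrors the POSIX branch of ll_aligned_realloc_16(ptr, size, old_size):
// allocate a new aligned block, copy the old payload, release the old block.
// (The patch copies old_size directly; this sketch bounds the copy when shrinking.)
static void* aligned_realloc_16(void* ptr, size_t size, size_t old_size)
{
    void* ret = aligned_malloc_16(size);
    if (ret && ptr)
    {
        memcpy(ret, ptr, old_size < size ? old_size : size);
        aligned_free_16(ptr);
    }
    return ret;
}

int main()
{
    char* buf = (char*)aligned_malloc_16(32);
    strcpy(buf, "sixteen-byte-aligned");
    // The caller tracks the previous size (32) and passes it explicitly.
    buf = (char*)aligned_realloc_16(buf, 64, 32);
    printf("%s at %p\n", buf, (void*)buf);
    aligned_free_16(buf);
    return 0;
}

The tcmalloc macro at the end of the hunk follows the same rule: ll_aligned_realloc_16(a,b,c) simply drops the third argument and forwards to realloc(a,b), so every call site passes old_size regardless of which branch is compiled in.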
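
The llthread.cpp and llthread.h hunks separate the lock that guards the thread's state (the new mDataLock) from the condition used to wake it (mRunCondition): checkPause() re-tests shouldSleep() in a loop because POSIX condition waits can wake spuriously, and wake() signals only when the predicate says the thread is runnable, avoiding needless context switches. The sketch below shows the same pause/wake handshake expressed with the C++11 standard library rather than the viewer's APR wrappers (LLMutex/LLCondition); here the condition variable itself releases and reacquires the data lock during the wait. Names and structure are illustrative, not the viewer's implementation.

// Pause/wake handshake: a data mutex guarding the predicate plus a
// condition variable, with a spurious-wakeup loop. Illustrative sketch.
#include <condition_variable>
#include <mutex>
#include <thread>
#include <chrono>
#include <iostream>

struct PausableWorker
{
    std::mutex dataLock;               // plays the role of mDataLock
    std::condition_variable runCond;   // plays the role of mRunCondition
    bool paused = true;                // stand-in for shouldSleep()

    void checkPause()
    {
        std::unique_lock<std::mutex> lock(dataLock);
        while (paused)                 // loop: the pthread API allows spurious wakeups
        {
            runCond.wait(lock);        // releases dataLock while sleeping
        }                              // dataLock is held again on wake-up
    }

    void wake()
    {
        std::lock_guard<std::mutex> lock(dataLock);
        if (!paused)                   // only signal when the worker may run,
        {                              // avoiding pointless context switches
            runCond.notify_one();
        }
    }

    void unpause()
    {
        {
            std::lock_guard<std::mutex> lock(dataLock);
            paused = false;            // change the predicate under the data lock
        }
        wake();                        // then signal now that it has changed
    }
};

int main()
{
    PausableWorker worker;
    std::thread t([&] { worker.checkPause(); std::cout << "running\n"; });
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    worker.unpause();
    t.join();
    return 0;
}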