From 858cde02f7edd4eea9162fe1e824e84ca3097621 Mon Sep 17 00:00:00 2001
From: Andrey Kleshchev
Date: Fri, 11 Aug 2023 22:03:32 +0300
Subject: SL-19744 Small cleanup

---
 indra/llcommon/llapp.cpp | 8 +-------
 indra/llcommon/llapp.h   | 1 -
 2 files changed, 1 insertion(+), 8 deletions(-)

(limited to 'indra/llcommon')

diff --git a/indra/llcommon/llapp.cpp b/indra/llcommon/llapp.cpp
index b99166991f..90d0c28eb1 100644
--- a/indra/llcommon/llapp.cpp
+++ b/indra/llcommon/llapp.cpp
@@ -104,7 +104,6 @@ BOOL LLApp::sLogInSignal = FALSE;
 // Keeps track of application status
 LLScalarCond<LLApp::EAppStatus> LLApp::sStatus{LLApp::APP_STATUS_STOPPED};
 LLAppErrorHandler LLApp::sErrorHandler = NULL;
-BOOL LLApp::sErrorThreadRunning = FALSE;
 
 
 LLApp::LLApp()
@@ -787,13 +786,8 @@ void default_unix_signal_handler(int signum, siginfo_t *info, void *)
         return;
     }
 
-    // Flag status to ERROR, so thread_error does its work.
+    // Flag status to ERROR
     LLApp::setError();
-    // Block in the signal handler until somebody says that we're done.
-    while (LLApp::sErrorThreadRunning && !LLApp::isStopped())
-    {
-        ms_sleep(10);
-    }
 
     if (LLApp::sLogInSignal)
     {
diff --git a/indra/llcommon/llapp.h b/indra/llcommon/llapp.h
index c832c8b142..a892bfeb1e 100644
--- a/indra/llcommon/llapp.h
+++ b/indra/llcommon/llapp.h
@@ -291,7 +291,6 @@ protected:
     static void setStatus(EAppStatus status); // Use this to change the application status.
     static LLScalarCond<EAppStatus> sStatus;  // Reflects current application status
-    static BOOL sErrorThreadRunning;          // Set while the error thread is running
     static BOOL sDisableCrashlogger;          // Let the OS handle crashes for us.
 
     std::wstring mCrashReportPipeStr;         //Name of pipe to use for crash reporting.
-- cgit v1.2.3
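The busy-wait removed above existed only so the handler could park until a separate error thread acknowledged the failure; with that thread gone, the handler just records the error and returns. A self-contained sketch of that async-signal-safe pattern (hypothetical names, plain POSIX, not LLApp's API):

    #include <csignal>
    #include <unistd.h>

    // A signal handler must not lock, allocate, or sleep; it should only set
    // state that the normal control flow inspects later.
    static volatile sig_atomic_t g_fatal_signal = 0;

    static void on_fatal_signal(int signum)
    {
        g_fatal_signal = signum; // record and return immediately
    }

    int main()
    {
        struct sigaction act = {};
        act.sa_handler = on_fatal_signal;
        sigemptyset(&act.sa_mask);
        sigaction(SIGTERM, &act, nullptr);

        while (g_fatal_signal == 0)
        {
            sleep(1); // stand-in for the application's main loop
        }
        // React outside the handler: run error handlers, flush logs, exit.
        return g_fatal_signal;
    }
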
From edf0874e0656c6f512df50ee52236209531ca329 Mon Sep 17 00:00:00 2001
From: Andrey Kleshchev
Date: Fri, 11 Aug 2023 23:36:37 +0300
Subject: SL-18721 Viewer shutdown order changes

Same thing as commit cf692c40b0b9f8d0d04cd10a02a84e3f697a2e99, which was
removed due to shutdown freezes. The error thread is no longer there, so it
doesn't cause any race conditions; was not able to repro any issues, so will
ask QA to test shutdown.
---
 indra/llcommon/threadpool.cpp | 12 ++++++++++--
 indra/llcommon/threadpool.h   |  3 ++-
 2 files changed, 12 insertions(+), 3 deletions(-)

(limited to 'indra/llcommon')

diff --git a/indra/llcommon/threadpool.cpp b/indra/llcommon/threadpool.cpp
index d5adf11264..22bbff4478 100644
--- a/indra/llcommon/threadpool.cpp
+++ b/indra/llcommon/threadpool.cpp
@@ -21,11 +21,12 @@
 #include "llevents.h"
 #include "stringize.h"
 
-LL::ThreadPool::ThreadPool(const std::string& name, size_t threads, size_t capacity):
+LL::ThreadPool::ThreadPool(const std::string& name, size_t threads, size_t capacity, bool auto_shutdown):
     super(name),
     mQueue(name, capacity),
     mName("ThreadPool:" + name),
-    mThreadCount(threads)
+    mThreadCount(threads),
+    mAutomaticShutdown(auto_shutdown)
 {}
 
 void LL::ThreadPool::start()
@@ -39,6 +40,13 @@ void LL::ThreadPool::start()
             run(tname);
         });
     }
+
+    // Some threads might need to run longer than LLEventPumps
+    if (!mAutomaticShutdown)
+    {
+        return;
+    }
+
     // Listen on "LLApp", and when the app is shutting down, close the queue
     // and join the workers.
     LLEventPumps::instance().obtain("LLApp").listen(
diff --git a/indra/llcommon/threadpool.h b/indra/llcommon/threadpool.h
index f8eec3b457..22c875edb9 100644
--- a/indra/llcommon/threadpool.h
+++ b/indra/llcommon/threadpool.h
@@ -31,7 +31,7 @@ namespace LL
          * Pass ThreadPool a string name. This can be used to look up the
          * relevant WorkQueue.
          */
-        ThreadPool(const std::string& name, size_t threads=1, size_t capacity=1024);
+        ThreadPool(const std::string& name, size_t threads=1, size_t capacity=1024, bool auto_shutdown = true);
         virtual ~ThreadPool();
 
         /**
@@ -66,6 +66,7 @@ namespace LL
         std::string mName;
         size_t mThreadCount;
         std::vector<std::pair<std::string, std::thread>> mThreads;
+        bool mAutomaticShutdown;
    };
 
 } // namespace LL
-- cgit v1.2.3
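A usage sketch for the new flag (a hypothetical call site; it assumes the pool's existing close() path performs the queue-close/join work that the "LLApp" listener would otherwise trigger):

    #include "threadpool.h"

    void spawn_late_shutdown_pool()
    {
        // auto_shutdown = false: this pool is exempt from the "LLApp" listener,
        // so it can keep servicing work after LLEventPumps starts shutting down.
        LL::ThreadPool pool("LatePool", /*threads=*/2, /*capacity=*/1024,
                            /*auto_shutdown=*/false);
        pool.start();

        // ... post work through the pool's WorkQueue ...

        // The owner is now responsible for stopping the workers explicitly,
        // mirroring what the "LLApp" listener does for auto_shutdown pools.
        pool.close(); // assumption: close() closes the queue and joins workers
    }
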
From d3ed94885bcce5b7216ee23dc729de5409b73e53 Mon Sep 17 00:00:00 2001
From: Alexander Gavriliuk
Date: Thu, 17 Aug 2023 21:57:54 +0200
Subject: SL-20140 Optimization LLDictionary::addEntry() - avoid double search

---
 indra/llcommon/lldictionary.h | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'indra/llcommon')

diff --git a/indra/llcommon/lldictionary.h b/indra/llcommon/lldictionary.h
index 5800ec5e5d..3e86767d7e 100644
--- a/indra/llcommon/lldictionary.h
+++ b/indra/llcommon/lldictionary.h
@@ -87,11 +87,10 @@ protected:
     }
     void addEntry(Index index, Entry *entry)
     {
-        if (lookup(index))
+        if (!insert(value_type(index, entry)).second)
         {
             LL_ERRS() << "Dictionary entry already added (attempted to add duplicate entry)" << LL_ENDL;
         }
-        (*this)[index] = entry;
     }
 };
-- cgit v1.2.3
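The change relies on the {iterator, bool} return of std::map-style insert(), so the membership check and the insertion share a single tree walk. A minimal standalone sketch of the before/after pattern (hypothetical names, std::map standing in for the dictionary's map base):

    #include <map>
    #include <stdexcept>
    #include <string>

    // Before: find() walks the tree, then operator[] walks it again.
    void add_twice(std::map<int, std::string>& m, int key, std::string val)
    {
        if (m.find(key) != m.end())
            throw std::runtime_error("duplicate entry");
        m[key] = std::move(val); // second search
    }

    // After: insert() returns {iterator, bool}; .second is false on duplicate,
    // so one call both detects the duplicate and stores the value.
    void add_once(std::map<int, std::string>& m, int key, std::string val)
    {
        if (!m.insert({key, std::move(val)}).second)
            throw std::runtime_error("duplicate entry");
    }
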
From 85efb85acfa098998c0f1249320f7e08288efdfc Mon Sep 17 00:00:00 2001
From: Alexander Gavriliuk
Date: Wed, 23 Aug 2023 11:06:53 +0200
Subject: SL-19299 Viewer crashes after change 'Pick a physics model:' dropdown

---
 indra/llcommon/llpointer.h    | 31 ++++++++++++++++---------------
 indra/llcommon/llrefcount.cpp |  2 +-
 indra/llcommon/llrefcount.h   | 12 ++++--------
 3 files changed, 21 insertions(+), 24 deletions(-)

(limited to 'indra/llcommon')

diff --git a/indra/llcommon/llpointer.h b/indra/llcommon/llpointer.h
index 9a6453ea48..2401951465 100644
--- a/indra/llcommon/llpointer.h
+++ b/indra/llcommon/llpointer.h
@@ -129,16 +129,6 @@ protected:
     void ref();
     void unref();
 #else
-
-    void assign(const LLPointer& ptr)
-    {
-        if( mPointer != ptr.mPointer )
-        {
-            unref();
-            mPointer = ptr.mPointer;
-            ref();
-        }
-    }
     void ref()
     {
         if (mPointer)
@@ -161,7 +151,18 @@ protected:
             }
         }
     }
-#endif
+#endif // LL_LIBRARY_INCLUDE
+
+    void assign(const LLPointer& ptr)
+    {
+        if (mPointer != ptr.mPointer)
+        {
+            unref();
+            mPointer = ptr.mPointer;
+            ref();
+        }
+    }
+
 protected:
     Type* mPointer;
 };
@@ -264,7 +265,7 @@ protected:
 #ifdef LL_LIBRARY_INCLUDE
     void ref();
     void unref();
-#else
+#else // LL_LIBRARY_INCLUDE
     void ref()
     {
         if (mPointer)
@@ -277,9 +278,9 @@ protected:
     {
         if (mPointer)
         {
-            const Type *tempp = mPointer;
+            const Type *temp = mPointer;
             mPointer = NULL;
-            tempp->unref();
+            temp->unref();
             if (mPointer != NULL)
             {
                 LL_WARNS() << "Unreference did assignment to non-NULL because of destructor" << LL_ENDL;
             }
         }
     }
-#endif
+#endif // LL_LIBRARY_INCLUDE
 
 protected:
     const Type* mPointer;
 };
diff --git a/indra/llcommon/llrefcount.cpp b/indra/llcommon/llrefcount.cpp
index 6852b5536a..3eae252ed5 100644
--- a/indra/llcommon/llrefcount.cpp
+++ b/indra/llcommon/llrefcount.cpp
@@ -30,7 +30,7 @@
 #include "llerror.h"
 
 // maximum reference count before sounding memory leak alarm
-const S32 gMaxRefCount = S32_MAX;
+const S32 gMaxRefCount = LL_REFCOUNT_FREE;
 
 LLRefCount::LLRefCount(const LLRefCount& other)
 :   mRef(0)
diff --git a/indra/llcommon/llrefcount.h b/indra/llcommon/llrefcount.h
index 2080da1565..2281bf87da 100644
--- a/indra/llcommon/llrefcount.h
+++ b/indra/llcommon/llrefcount.h
@@ -51,21 +51,17 @@ protected:
 public:
     LLRefCount();
 
-    inline void validateRefCount() const
-    {
-        llassert(mRef > 0); // ref count below 0, likely corrupted
-        llassert(mRef < gMaxRefCount); // ref count excessive, likely memory leak
-    }
-
     inline void ref() const
     {
+        llassert(mRef != LL_REFCOUNT_FREE); // object is deleted
         mRef++;
-        validateRefCount();
+        llassert(mRef < gMaxRefCount); // ref count excessive, likely memory leak
     }
 
     inline S32 unref() const
     {
-        validateRefCount();
+        llassert(mRef != LL_REFCOUNT_FREE); // object is deleted
+        llassert(mRef > 0); // ref count below 1, likely corrupted
         if (0 == --mRef)
         {
             mRef = LL_REFCOUNT_FREE; // set to nonsense yet recognizable value to aid in debugging
             delete this;
             return 0;
         }
-- cgit v1.2.3
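The fix replaces an after-the-fact range check with a freed-object sentinel: once the final unref() deletes the object, the count holds LL_REFCOUNT_FREE, so any later ref()/unref() through a stale pointer trips an assert instead of silently touching freed memory. A self-contained sketch of the idea (simplified and hypothetical, not the actual LLRefCount):

    #include <cassert>

    class RefCounted
    {
    public:
        void ref() const
        {
            assert(mRef != FREED); // use-after-free
            ++mRef;
        }
        int unref() const
        {
            assert(mRef != FREED); // use-after-free
            assert(mRef > 0);      // corrupted count
            if (--mRef == 0)
            {
                mRef = FREED;      // recognizable nonsense value aids debugging
                delete this;
                return 0;
            }
            return mRef;
        }
    protected:
        virtual ~RefCounted() = default; // force heap allocation + unref()
    private:
        static const int FREED = -(1 << 30);
        mutable int mRef = 0;
    };
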
From beb6181863bbce18ff7f408014e02a1086bc9711 Mon Sep 17 00:00:00 2001
From: Alexander Gavriliuk
Date: Wed, 23 Aug 2023 07:13:43 +0200
Subject: SL-19299 Code formatting in modified files

---
 indra/llcommon/llpointer.h    | 114 +++++++++++++++++++++---------------------
 indra/llcommon/llrefcount.cpp |   2 +-
 indra/llcommon/llrefcount.h   |  34 ++++++-------
 3 files changed, 75 insertions(+), 75 deletions(-)

(limited to 'indra/llcommon')

diff --git a/indra/llcommon/llpointer.h b/indra/llcommon/llpointer.h
index 2401951465..96ccfb481e 100644
--- a/indra/llcommon/llpointer.h
+++ b/indra/llcommon/llpointer.h
@@ -46,33 +46,32 @@ template <class Type> class LLPointer
 {
 public:
-
-    LLPointer() :
+    LLPointer() :
         mPointer(NULL)
     {
     }
 
-    LLPointer(Type* ptr) :
+    LLPointer(Type* ptr) :
         mPointer(ptr)
     {
         ref();
     }
 
-    LLPointer(const LLPointer& ptr) :
+    LLPointer(const LLPointer& ptr) :
         mPointer(ptr.mPointer)
     {
         ref();
     }
 
-    // support conversion up the type hierarchy. See Item 45 in Effective C++, 3rd Ed.
+    // Support conversion up the type hierarchy. See Item 45 in Effective C++, 3rd Ed.
     template<typename Subclass>
-    LLPointer(const LLPointer<Subclass>& ptr) :
+    LLPointer(const LLPointer<Subclass>& ptr) :
         mPointer(ptr.get())
     {
         ref();
     }
 
-    ~LLPointer()
+    ~LLPointer()
     {
         unref();
     }
@@ -83,39 +82,39 @@ public:
     const Type& operator*() const { return *mPointer; }
     Type& operator*() { return *mPointer; }
 
-    operator BOOL() const { return (mPointer != NULL); }
-    operator bool() const { return (mPointer != NULL); }
+    operator BOOL() const { return (mPointer != NULL); }
+    operator bool() const { return (mPointer != NULL); }
     bool operator!() const { return (mPointer == NULL); }
     bool isNull() const { return (mPointer == NULL); }
     bool notNull() const { return (mPointer != NULL); }
 
-    operator Type*() const { return mPointer; }
-    bool operator !=(Type* ptr) const { return (mPointer != ptr); }
-    bool operator ==(Type* ptr) const { return (mPointer == ptr); }
-    bool operator ==(const LLPointer& ptr) const { return (mPointer == ptr.mPointer); }
-    bool operator < (const LLPointer& ptr) const { return (mPointer < ptr.mPointer); }
-    bool operator > (const LLPointer& ptr) const { return (mPointer > ptr.mPointer); }
+    operator Type*() const { return mPointer; }
+    bool operator !=(Type* ptr) const { return (mPointer != ptr); }
+    bool operator ==(Type* ptr) const { return (mPointer == ptr); }
+    bool operator ==(const LLPointer& ptr) const { return (mPointer == ptr.mPointer); }
+    bool operator < (const LLPointer& ptr) const { return (mPointer < ptr.mPointer); }
+    bool operator > (const LLPointer& ptr) const { return (mPointer > ptr.mPointer); }
 
-    LLPointer& operator =(Type* ptr)
-    {
+    LLPointer& operator =(Type* ptr)
+    {
         assign(ptr);
-        return *this;
+        return *this;
     }
 
-    LLPointer& operator =(const LLPointer& ptr)
-    {
+    LLPointer& operator =(const LLPointer& ptr)
+    {
         assign(ptr);
-        return *this;
+        return *this;
     }
 
     // support assignment up the type hierarchy. See Item 45 in Effective C++, 3rd Ed.
     template<typename Subclass>
-    LLPointer& operator =(const LLPointer<Subclass>& ptr)
-    {
+    LLPointer& operator =(const LLPointer<Subclass>& ptr)
+    {
         assign(ptr.get());
-        return *this;
+        return *this;
     }
-
+
     // Just exchange the pointers, which will not change the reference counts.
     static void swap(LLPointer& a, LLPointer& b)
     {
@@ -170,18 +169,18 @@ template <class Type> class LLConstPointer
 {
 public:
-    LLConstPointer() :
+    LLConstPointer() :
         mPointer(NULL)
     {
     }
 
-    LLConstPointer(const Type* ptr) :
+    LLConstPointer(const Type* ptr) :
         mPointer(ptr)
     {
         ref();
     }
 
-    LLConstPointer(const LLConstPointer& ptr) :
+    LLConstPointer(const LLConstPointer& ptr) :
         mPointer(ptr.mPointer)
     {
         ref();
     }
 
     // support conversion up the type hierarchy. See Item 45 in Effective C++, 3rd Ed.
     template<typename Subclass>
-    LLConstPointer(const LLConstPointer<Subclass>& ptr) :
+    LLConstPointer(const LLConstPointer<Subclass>& ptr) :
         mPointer(ptr.get())
     {
         ref();
     }
@@ -204,55 +203,55 @@ public:
     const Type* operator->() const { return mPointer; }
     const Type& operator*() const { return *mPointer; }
 
-    operator BOOL() const { return (mPointer != NULL); }
-    operator bool() const { return (mPointer != NULL); }
+    operator BOOL() const { return (mPointer != NULL); }
+    operator bool() const { return (mPointer != NULL); }
     bool operator!() const { return (mPointer == NULL); }
     bool isNull() const { return (mPointer == NULL); }
     bool notNull() const { return (mPointer != NULL); }
 
-    operator const Type*() const { return mPointer; }
-    bool operator !=(const Type* ptr) const { return (mPointer != ptr); }
-    bool operator ==(const Type* ptr) const { return (mPointer == ptr); }
-    bool operator ==(const LLConstPointer& ptr) const { return (mPointer == ptr.mPointer); }
-    bool operator < (const LLConstPointer& ptr) const { return (mPointer < ptr.mPointer); }
-    bool operator > (const LLConstPointer& ptr) const { return (mPointer > ptr.mPointer); }
+    operator const Type*() const { return mPointer; }
+    bool operator !=(const Type* ptr) const { return (mPointer != ptr); }
+    bool operator ==(const Type* ptr) const { return (mPointer == ptr); }
+    bool operator ==(const LLConstPointer& ptr) const { return (mPointer == ptr.mPointer); }
+    bool operator < (const LLConstPointer& ptr) const { return (mPointer < ptr.mPointer); }
+    bool operator > (const LLConstPointer& ptr) const { return (mPointer > ptr.mPointer); }
 
-    LLConstPointer& operator =(const Type* ptr)
+    LLConstPointer& operator =(const Type* ptr)
     {
         if( mPointer != ptr )
         {
-            unref();
-            mPointer = ptr;
+            unref();
+            mPointer = ptr;
             ref();
         }
 
-        return *this;
+        return *this;
     }
 
-    LLConstPointer& operator =(const LLConstPointer& ptr)
-    {
+    LLConstPointer& operator =(const LLConstPointer& ptr)
+    {
         if( mPointer != ptr.mPointer )
         {
-            unref();
+            unref();
             mPointer = ptr.mPointer;
             ref();
         }
 
-        return *this;
+        return *this;
     }
 
     // support assignment up the type hierarchy. See Item 45 in Effective C++, 3rd Ed.
     template<typename Subclass>
-    LLConstPointer& operator =(const LLConstPointer<Subclass>& ptr)
-    {
+    LLConstPointer& operator =(const LLConstPointer<Subclass>& ptr)
+    {
         if( mPointer != ptr.get() )
         {
-            unref();
+            unref();
             mPointer = ptr.get();
             ref();
         }
 
-        return *this;
+        return *this;
     }
-
+
     // Just exchange the pointers, which will not change the reference counts.
     static void swap(LLConstPointer& a, LLConstPointer& b)
     {
@@ -263,11 +262,11 @@ public:
 protected:
 #ifdef LL_LIBRARY_INCLUDE
-    void ref();
+    void ref();
     void unref();
 #else // LL_LIBRARY_INCLUDE
-    void ref()
-    {
+    void ref()
+    {
         if (mPointer)
         {
             mPointer->ref();
         }
@@ -289,6 +288,7 @@ protected:
             }
         }
     }
 #endif // LL_LIBRARY_INCLUDE
+
 protected:
     const Type* mPointer;
 };
@@ -298,13 +298,13 @@ class LLCopyOnWritePointer : public LLPointer
 {
 public:
     typedef LLCopyOnWritePointer self_t;
-    typedef LLPointer pointer_t;
-
-    LLCopyOnWritePointer()
+    typedef LLPointer pointer_t;
+
+    LLCopyOnWritePointer()
     :   mStayUnique(false)
     {}
 
-    LLCopyOnWritePointer(Type* ptr)
+    LLCopyOnWritePointer(Type* ptr)
     :   LLPointer(ptr),
         mStayUnique(false)
     {}
diff --git a/indra/llcommon/llrefcount.cpp b/indra/llcommon/llrefcount.cpp
index 3eae252ed5..3da94e7a8d 100644
--- a/indra/llcommon/llrefcount.cpp
+++ b/indra/llcommon/llrefcount.cpp
@@ -49,7 +49,7 @@ LLRefCount::LLRefCount() :
 }
 
 LLRefCount::~LLRefCount()
-{
+{
     if (mRef != LL_REFCOUNT_FREE && mRef != 0)
     {
         LL_ERRS() << "deleting non-zero reference" << LL_ENDL;
diff --git a/indra/llcommon/llrefcount.h b/indra/llcommon/llrefcount.h
index 2281bf87da..15e7175fc8 100644
--- a/indra/llcommon/llrefcount.h
+++ b/indra/llcommon/llrefcount.h
@@ -52,11 +52,11 @@ public:
     LLRefCount();
 
     inline void ref() const
-    {
+    {
         llassert(mRef != LL_REFCOUNT_FREE); // object is deleted
-        mRef++;
+        mRef++;
         llassert(mRef < gMaxRefCount); // ref count excessive, likely memory leak
-    }
+    }
 
     inline S32 unref() const
     {
@@ -64,7 +64,7 @@ public:
         llassert(mRef > 0); // ref count below 1, likely corrupted
         if (0 == --mRef)
         {
-            mRef = LL_REFCOUNT_FREE; // set to nonsense yet recognizable value to aid in debugging
+            mRef = LL_REFCOUNT_FREE; // set to nonsense yet recognizable value to aid in debugging
             delete this;
             return 0;
         }
@@ -78,8 +78,8 @@ public:
         return mRef;
     }
 
-private:
-    mutable S32 mRef;
+private:
+    mutable S32 mRef;
 };
 
@@ -102,7 +102,7 @@ protected:
 public:
     LLThreadSafeRefCount();
     LLThreadSafeRefCount(const LLThreadSafeRefCount&);
-    LLThreadSafeRefCount& operator=(const LLThreadSafeRefCount& ref)
+    LLThreadSafeRefCount& operator=(const LLThreadSafeRefCount& ref)
     {
         mRef = 0;
         return *this;
     }
 
     void ref()
     {
-        mRef++;
-    }
+        mRef++;
+    }
 
     void unref()
     {
@@ -132,36 +132,36 @@ public:
         return currentVal;
     }
 
-private:
-    LLAtomicS32 mRef;
+private:
+    LLAtomicS32 mRef;
 };
 
 /**
  * intrusive pointer support for LLThreadSafeRefCount
  * this allows you to use boost::intrusive_ptr with any LLThreadSafeRefCount-derived type
  */
-inline void intrusive_ptr_add_ref(LLThreadSafeRefCount* p)
+inline void intrusive_ptr_add_ref(LLThreadSafeRefCount* p)
 {
     p->ref();
 }
 
-inline void intrusive_ptr_release(LLThreadSafeRefCount* p)
+inline void intrusive_ptr_release(LLThreadSafeRefCount* p)
 {
-    p->unref();
+    p->unref();
 }
 
 /**
  * intrusive pointer support
  * this allows you to use boost::intrusive_ptr with any LLRefCount-derived type
  */
-inline void intrusive_ptr_add_ref(LLRefCount* p)
+inline void intrusive_ptr_add_ref(LLRefCount* p)
 {
     p->ref();
 }
 
-inline void intrusive_ptr_release(LLRefCount* p)
+inline void intrusive_ptr_release(LLRefCount* p)
 {
-    p->unref();
+    p->unref();
 }
 
 #endif
-- cgit v1.2.3
From 57d784f80728f9ecd75f8dbc989cd75cf328c353 Mon Sep 17 00:00:00 2001
From: Alexander Gavriliuk
Date: Thu, 24 Aug 2023 16:02:08 +0200
Subject: SL-18620 Statistics->Advanced->Memory Usage no longer updating

---
 indra/llcommon/lltrace.cpp               |   2 -
 indra/llcommon/lltrace.h                 |  82 ------
 indra/llcommon/lltraceaccumulators.cpp   |  69 ++---
 indra/llcommon/lltraceaccumulators.h     | 120 ++-------
 indra/llcommon/lltracerecording.cpp      | 439 +++++++------------------------
 indra/llcommon/lltracerecording.h        | 109 +++-----
 indra/llcommon/lltracethreadrecorder.cpp |  14 +-
 7 files changed, 191 insertions(+), 644 deletions(-)

(limited to 'indra/llcommon')

diff --git a/indra/llcommon/lltrace.cpp b/indra/llcommon/lltrace.cpp
index ff671a8370..bce186054f 100644
--- a/indra/llcommon/lltrace.cpp
+++ b/indra/llcommon/lltrace.cpp
@@ -33,8 +33,6 @@
 namespace LLTrace
 {
 
-MemStatHandle gTraceMemStat("LLTrace");
-
 StatBase::StatBase( const char* name, const char* description )
 :   mName(name),
     mDescription(description ? description : "")
diff --git a/indra/llcommon/lltrace.h b/indra/llcommon/lltrace.h
index 580cf0a5fd..21a5803a76 100644
--- a/indra/llcommon/lltrace.h
+++ b/indra/llcommon/lltrace.h
@@ -193,61 +193,6 @@ void add(CountStatHandle<T>& count, VALUE_T value)
 #endif
 }
 
-template<>
-class StatType<MemAccumulator::AllocationFacet>
-:   public StatType<MemAccumulator>
-{
-public:
-
-    StatType(const char* name, const char* description = "")
-    :   StatType<MemAccumulator>(name, description)
-    {}
-};
-
-template<>
-class StatType<MemAccumulator::DeallocationFacet>
-:   public StatType<MemAccumulator>
-{
-public:
-
-    StatType(const char* name, const char* description = "")
-    :   StatType<MemAccumulator>(name, description)
-    {}
-};
-
-class MemStatHandle : public StatType<MemAccumulator>
-{
-public:
-    typedef StatType<MemAccumulator> stat_t;
-    MemStatHandle(const char* name, const char* description = "")
-    :   stat_t(name, description)
-    {
-        mName = name;
-    }
-
-    void setName(const char* name)
-    {
-        LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-        mName = name;
-        setKey(name);
-    }
-
-    /*virtual*/ const char* getUnitLabel() const { return "KB"; }
-
-    StatType<MemAccumulator::AllocationFacet>& allocations()
-    {
-        LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-        return static_cast<StatType<MemAccumulator::AllocationFacet>&>(*(StatType<MemAccumulator>*)this);
-    }
-
-    StatType<MemAccumulator::DeallocationFacet>& deallocations()
-    {
-        LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-        return static_cast<StatType<MemAccumulator::DeallocationFacet>&>(*(StatType<MemAccumulator>*)this);
-    }
-};
-
-
 // measures effective memory footprint of specified type
 // specialize to cover different types
 template<typename T, typename IS_MEM_TRACKABLE = void, typename IS_BYTES = void>
 struct MeasureMem
@@ -334,33 +279,6 @@ struct MeasureMem<std::basic_string<T>, IS_MEM_TRACKABLE, IS_BYTES>
     }
 };
 
-
-template<typename T>
-inline void claim_alloc(MemStatHandle& measurement, const T& value)
-{
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-#if LL_TRACE_ENABLED
-    auto size = MeasureMem<T>::measureFootprint(value);
-    if(size == 0) return;
-    MemAccumulator& accumulator = measurement.getCurrentAccumulator();
-    accumulator.mSize.sample(accumulator.mSize.hasValue() ? accumulator.mSize.getLastValue() + (F64)size : (F64)size);
-    accumulator.mAllocations.record(size);
-#endif
-}
-
-template<typename T>
-inline void disclaim_alloc(MemStatHandle& measurement, const T& value)
-{
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-#if LL_TRACE_ENABLED
-    auto size = MeasureMem<T>::measureFootprint(value);
-    if(size == 0) return;
-    MemAccumulator& accumulator = measurement.getCurrentAccumulator();
-    accumulator.mSize.sample(accumulator.mSize.hasValue() ? accumulator.mSize.getLastValue() - (F64)size : -(F64)size);
-    accumulator.mDeallocations.add(size);
-#endif
-}
-
 }
 
 #endif // LL_LLTRACE_H
diff --git a/indra/llcommon/lltraceaccumulators.cpp b/indra/llcommon/lltraceaccumulators.cpp
index 6bd886ae98..5fafb53832 100644
--- a/indra/llcommon/lltraceaccumulators.cpp
+++ b/indra/llcommon/lltraceaccumulators.cpp
@@ -1,24 +1,24 @@
-/** 
+/**
  * @file lltracesampler.cpp
  *
  * $LicenseInfo:firstyear=2001&license=viewerlgpl$
  * Second Life Viewer Source Code
  * Copyright (C) 2012, Linden Research, Inc.
- * 
+ *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation;
  * version 2.1 of the License only.
- * 
+ *
  * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
- * 
+ *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- * 
+ *
 * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
 * $/LicenseInfo$
 */
@@ -32,66 +32,45 @@
 namespace LLTrace
 {
 
-extern MemStatHandle gTraceMemStat;
-
-
 ///////////////////////////////////////////////////////////////////////
 // AccumulatorBufferGroup
 ///////////////////////////////////////////////////////////////////////
 
-AccumulatorBufferGroup::AccumulatorBufferGroup()
+AccumulatorBufferGroup::AccumulatorBufferGroup()
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-    claim_alloc(gTraceMemStat, mCounts.capacity() * sizeof(CountAccumulator));
-    claim_alloc(gTraceMemStat, mSamples.capacity() * sizeof(SampleAccumulator));
-    claim_alloc(gTraceMemStat, mEvents.capacity() * sizeof(EventAccumulator));
-    claim_alloc(gTraceMemStat, mStackTimers.capacity() * sizeof(TimeBlockAccumulator));
-    claim_alloc(gTraceMemStat, mMemStats.capacity() * sizeof(MemAccumulator));
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
 }
 
 AccumulatorBufferGroup::AccumulatorBufferGroup(const AccumulatorBufferGroup& other)
 :   mCounts(other.mCounts),
     mSamples(other.mSamples),
     mEvents(other.mEvents),
-    mStackTimers(other.mStackTimers),
-    mMemStats(other.mMemStats)
+    mStackTimers(other.mStackTimers)
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-    claim_alloc(gTraceMemStat, mCounts.capacity() * sizeof(CountAccumulator));
-    claim_alloc(gTraceMemStat, mSamples.capacity() * sizeof(SampleAccumulator));
-    claim_alloc(gTraceMemStat, mEvents.capacity() * sizeof(EventAccumulator));
-    claim_alloc(gTraceMemStat, mStackTimers.capacity() * sizeof(TimeBlockAccumulator));
-    claim_alloc(gTraceMemStat, mMemStats.capacity() * sizeof(MemAccumulator));
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
 }
 
 AccumulatorBufferGroup::~AccumulatorBufferGroup()
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-    disclaim_alloc(gTraceMemStat, mCounts.capacity() * sizeof(CountAccumulator));
-    disclaim_alloc(gTraceMemStat, mSamples.capacity() * sizeof(SampleAccumulator));
-    disclaim_alloc(gTraceMemStat, mEvents.capacity() * sizeof(EventAccumulator));
-    disclaim_alloc(gTraceMemStat, mStackTimers.capacity() * sizeof(TimeBlockAccumulator));
-    disclaim_alloc(gTraceMemStat, mMemStats.capacity() * sizeof(MemAccumulator));
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
 }
 
 void AccumulatorBufferGroup::handOffTo(AccumulatorBufferGroup& other)
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     other.mCounts.reset(&mCounts);
     other.mSamples.reset(&mSamples);
     other.mEvents.reset(&mEvents);
     other.mStackTimers.reset(&mStackTimers);
-    other.mMemStats.reset(&mMemStats);
 }
 
 void AccumulatorBufferGroup::makeCurrent()
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     mCounts.makeCurrent();
     mSamples.makeCurrent();
     mEvents.makeCurrent();
     mStackTimers.makeCurrent();
-    mMemStats.makeCurrent();
 
     ThreadRecorder* thread_recorder = get_thread_recorder();
     AccumulatorBuffer<TimeBlockAccumulator>& timer_accumulator_buffer = mStackTimers;
@@ -109,12 +88,11 @@ void AccumulatorBufferGroup::makeCurrent()
 //static
 void AccumulatorBufferGroup::clearCurrent()
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-    AccumulatorBuffer<CountAccumulator>::clearCurrent();
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    AccumulatorBuffer<CountAccumulator>::clearCurrent();
     AccumulatorBuffer<SampleAccumulator>::clearCurrent();
     AccumulatorBuffer<EventAccumulator>::clearCurrent();
     AccumulatorBuffer<TimeBlockAccumulator>::clearCurrent();
-    AccumulatorBuffer<MemAccumulator>::clearCurrent();
 }
 
 bool AccumulatorBufferGroup::isCurrent() const
@@ -124,44 +102,39 @@ bool AccumulatorBufferGroup::isCurrent() const
 
 void AccumulatorBufferGroup::append( const AccumulatorBufferGroup& other )
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     mCounts.addSamples(other.mCounts, SEQUENTIAL);
     mSamples.addSamples(other.mSamples, SEQUENTIAL);
     mEvents.addSamples(other.mEvents, SEQUENTIAL);
-    mMemStats.addSamples(other.mMemStats, SEQUENTIAL);
     mStackTimers.addSamples(other.mStackTimers, SEQUENTIAL);
 }
 
 void AccumulatorBufferGroup::merge( const AccumulatorBufferGroup& other)
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     mCounts.addSamples(other.mCounts, NON_SEQUENTIAL);
     mSamples.addSamples(other.mSamples, NON_SEQUENTIAL);
     mEvents.addSamples(other.mEvents, NON_SEQUENTIAL);
-    mMemStats.addSamples(other.mMemStats, NON_SEQUENTIAL);
     // for now, hold out timers from merge, need to be displayed per thread
     //mStackTimers.addSamples(other.mStackTimers, NON_SEQUENTIAL);
 }
 
 void AccumulatorBufferGroup::reset(AccumulatorBufferGroup* other)
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     mCounts.reset(other ? &other->mCounts : NULL);
     mSamples.reset(other ? &other->mSamples : NULL);
     mEvents.reset(other ? &other->mEvents : NULL);
     mStackTimers.reset(other ? &other->mStackTimers : NULL);
-    mMemStats.reset(other ? &other->mMemStats : NULL);
 }
 
 void AccumulatorBufferGroup::sync()
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     if (isCurrent())
     {
         F64SecondsImplicit time_stamp = LLTimer::getTotalSeconds();
-
         mSamples.sync(time_stamp);
-        mMemStats.sync(time_stamp);
     }
 }
 
@@ -197,10 +170,9 @@ F64 SampleAccumulator::mergeSumsOfSquares(const SampleAccumulator& a, const Samp
     return a.getSumOfSquares();
 }
 
-
 void SampleAccumulator::addSamples( const SampleAccumulator& other, EBufferAppendType append_type )
 {
-    if (append_type == NON_SEQUENTIAL)
+    if (append_type == NON_SEQUENTIAL)
     {
         return;
     }
@@ -299,7 +271,7 @@ void EventAccumulator::addSamples( const EventAccumulator& other, EBufferAppendT
 
 void EventAccumulator::reset( const EventAccumulator* other )
 {
-    mNumSamples = 0;
+    mNumSamples = 0;
     mSum = 0;
     mMin = F32(NaN);
     mMax = F32(NaN);
@@ -308,5 +280,4 @@ void EventAccumulator::reset( const EventAccumulator* other )
     mMean = other ? other->mMean : NaN;
     mLastValue = other ? other->mLastValue : NaN;
 }
-
 }
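// The merge logic above (mergeSumsOfSquares, and the lerp-weighted means in
// lltracerecording.cpp below) is the standard parallel-statistics combine:
// blend two running means, weighting each side by its share of the total
// sample count. A self-contained sketch of that combine (hypothetical names,
// plain types standing in for F64/S32):

    #include <cstdio>

    struct RunningMean { long long count; double mean; };

    static RunningMean merge(RunningMean a, RunningMean b)
    {
        long long total = a.count + b.count;
        if (total == 0) return RunningMean{0, 0.0};
        double t = (double)b.count / (double)total;                 // b's weight in [0, 1]
        return RunningMean{total, a.mean + (b.mean - a.mean) * t};  // lerp(a.mean, b.mean, t)
    }

    int main()
    {
        RunningMean a{10, 2.0}, b{30, 4.0};
        RunningMean m = merge(a, b);
        std::printf("count=%lld mean=%g\n", m.count, m.mean); // mean = 3.5
        return 0;
    }
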
diff --git a/indra/llcommon/lltraceaccumulators.h b/indra/llcommon/lltraceaccumulators.h
index 7267a44300..b9d577be9e 100644
--- a/indra/llcommon/lltraceaccumulators.h
+++ b/indra/llcommon/lltraceaccumulators.h
@@ -1,26 +1,26 @@
-/** 
+/**
 * @file lltraceaccumulators.h
 * @brief Storage for accumulating statistics
 *
 * $LicenseInfo:firstyear=2001&license=viewerlgpl$
 * Second Life Viewer Source Code
 * Copyright (C) 2012, Linden Research, Inc.
- * 
+ *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License only.
- * 
+ *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
- * 
+ *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- * 
+ *
 * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
 * $/LicenseInfo$
 */
@@ -28,7 +28,6 @@
 #ifndef LL_LLTRACEACCUMULATORS_H
 #define LL_LLTRACEACCUMULATORS_H
 
-
 #include "stdtypes.h"
 #include "llpreprocessor.h"
 #include "llunits.h"
@@ -66,7 +65,7 @@ namespace LLTrace
         :   mStorageSize(0),
             mStorage(NULL)
         {
-            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             const AccumulatorBuffer& other = *getDefaultBuffer();
             resize(sNextStorageSlot);
             for (S32 i = 0; i < sNextStorageSlot; i++)
@@ -77,7 +76,7 @@ namespace LLTrace
         ~AccumulatorBuffer()
         {
-            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             if (isCurrent())
             {
                 LLThreadLocalSingletonPointer<ACCUMULATOR>::setInstance(NULL);
             }
             delete[] mStorage;
         }
 
-        LL_FORCE_INLINE ACCUMULATOR& operator[](size_t index)
-        {
-            return mStorage[index];
+        LL_FORCE_INLINE ACCUMULATOR& operator[](size_t index)
+        {
+            return mStorage[index];
         }
 
         LL_FORCE_INLINE const ACCUMULATOR& operator[](size_t index) const
-        {
-            return mStorage[index];
+        {
+            return mStorage[index];
         }
 
@@ -100,7 +99,7 @@ namespace LLTrace
         :   mStorageSize(0),
             mStorage(NULL)
         {
-            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             resize(sNextStorageSlot);
             for (S32 i = 0; i < sNextStorageSlot; i++)
             {
@@ -110,7 +109,7 @@ namespace LLTrace
         void addSamples(const AccumulatorBuffer& other, EBufferAppendType append_type)
         {
-            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             llassert(mStorageSize >= sNextStorageSlot && other.mStorageSize >= sNextStorageSlot);
             for (size_t i = 0; i < sNextStorageSlot; i++)
             {
@@ -120,7 +119,7 @@ namespace LLTrace
         void copyFrom(const AccumulatorBuffer& other)
         {
-            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             llassert(mStorageSize >= sNextStorageSlot && other.mStorageSize >= sNextStorageSlot);
             for (size_t i = 0; i < sNextStorageSlot; i++)
             {
@@ -130,7 +129,7 @@ namespace LLTrace
         void reset(const AccumulatorBuffer* other = NULL)
         {
-            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             llassert(mStorageSize >= sNextStorageSlot);
             for (size_t i = 0; i < sNextStorageSlot; i++)
             {
@@ -140,7 +139,7 @@ namespace LLTrace
         void sync(F64SecondsImplicit time_stamp)
         {
-            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             llassert(mStorageSize >= sNextStorageSlot);
             for (size_t i = 0; i < sNextStorageSlot; i++)
             {
@@ -160,13 +159,13 @@ namespace LLTrace
         static void clearCurrent()
         {
-            LLThreadLocalSingletonPointer<ACCUMULATOR>::setInstance(NULL);
+            LLThreadLocalSingletonPointer<ACCUMULATOR>::setInstance(NULL);
         }
 
         // NOTE: this is not thread-safe.  We assume that slots are reserved in the main thread before any child threads are spawned
         size_t reserveSlot()
         {
-            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             size_t next_slot = sNextStorageSlot++;
             if (next_slot >= mStorageSize)
             {
@@ -180,7 +179,7 @@ namespace LLTrace
         void resize(size_t new_size)
         {
-            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             if (new_size <= mStorageSize) return;
 
             ACCUMULATOR* old_storage = mStorage;
@@ -214,14 +213,14 @@ namespace LLTrace
             return mStorageSize;
         }
 
-        static size_t getNumIndices()
+        static size_t getNumIndices()
         {
             return sNextStorageSlot;
         }
 
         static self_t* getDefaultBuffer()
         {
-            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             static bool sInitialized = false;
             if (!sInitialized)
             {
@@ -336,7 +335,7 @@ namespace LLTrace
         void sample(F64 value)
         {
-            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             F64SecondsImplicit time_stamp = LLTimer::getTotalSeconds();
 
             // store effect of last value
@@ -399,7 +398,7 @@ namespace LLTrace
         F64 mMean,
             mSumOfSquares;
 
-        F64SecondsImplicit
+        F64SecondsImplicit
             mLastSampleTimeStamp,
             mTotalSamplingTime;
 
@@ -409,7 +408,7 @@ namespace LLTrace
         S32 mNumSamples;
         // distinct from mNumSamples, since we might have inherited a last value from
         // a previous sampling period
-        bool mHasValue;
+        bool mHasValue;
    };
 
    class CountAccumulator
@@ -457,14 +456,14 @@ namespace LLTrace
    class alignas(32) TimeBlockAccumulator
    {
-    public:
+    public:
        typedef F64Seconds value_t;
        static F64Seconds getDefaultValue() { return F64Seconds(0); }
 
        typedef TimeBlockAccumulator self_t;
 
        // fake classes that allows us to view different facets of underlying statistic
-        struct CallCountFacet
+        struct CallCountFacet
        {
            typedef S32 value_t;
        };
@@ -515,12 +514,12 @@ namespace LLTrace
        BlockTimerStatHandle* getParent() { return mParent; }
 
        BlockTimerStatHandle* mBlock;
-        BlockTimerStatHandle* mParent;
+        BlockTimerStatHandle* mParent;
        std::vector<BlockTimerStatHandle*> mChildren;
        bool mCollapsed;
        bool mNeedsSorting;
    };
- 
+
    struct BlockTimerStackRecord
    {
        class BlockTimer* mActiveTimer;
@@ -528,65 +527,6 @@ namespace LLTrace
        U64 mChildTime;
    };
 
-    struct MemAccumulator
-    {
-        typedef F64Bytes value_t;
-        static F64Bytes getDefaultValue() { return F64Bytes(0); }
-
-        typedef MemAccumulator self_t;
-
-        // fake classes that allows us to view different facets of underlying statistic
-        struct AllocationFacet
-        {
-            typedef F64Bytes value_t;
-            static F64Bytes getDefaultValue() { return F64Bytes(0); }
-        };
-
-        struct DeallocationFacet
-        {
-            typedef F64Bytes value_t;
-            static F64Bytes getDefaultValue() { return F64Bytes(0); }
-        };
-
-        void addSamples(const MemAccumulator& other, EBufferAppendType append_type)
-        {
-            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-            mAllocations.addSamples(other.mAllocations, append_type);
-            mDeallocations.addSamples(other.mDeallocations, append_type);
-
-            if (append_type == SEQUENTIAL)
-            {
-                mSize.addSamples(other.mSize, SEQUENTIAL);
-            }
-            else
-            {
-                F64 allocation_delta(other.mAllocations.getSum() - other.mDeallocations.getSum());
-                mSize.sample(mSize.hasValue()
-                    ? mSize.getLastValue() + allocation_delta
-                    : allocation_delta);
-            }
-        }
-
-        void reset(const MemAccumulator* other)
-        {
-            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-            mSize.reset(other ? &other->mSize : NULL);
-            mAllocations.reset(other ? &other->mAllocations : NULL);
-            mDeallocations.reset(other ? &other->mDeallocations : NULL);
-        }
-
-        void sync(F64SecondsImplicit time_stamp)
-        {
-            mSize.sync(time_stamp);
-        }
-
-        bool hasValue() const { return mSize.hasValue(); }
-
-        SampleAccumulator mSize;
-        EventAccumulator mAllocations;
-        CountAccumulator mDeallocations;
-    };
-
    struct AccumulatorBufferGroup : public LLRefCount
    {
        AccumulatorBufferGroup();
@@ -607,9 +547,7 @@ namespace LLTrace
        AccumulatorBuffer<SampleAccumulator> mSamples;
        AccumulatorBuffer<EventAccumulator> mEvents;
        AccumulatorBuffer<TimeBlockAccumulator> mStackTimers;
-        AccumulatorBuffer<MemAccumulator> mMemStats;
    };
 }
 
 #endif // LL_LLTRACEACCUMULATORS_H
-
diff --git a/indra/llcommon/lltracerecording.cpp b/indra/llcommon/lltracerecording.cpp
index bb3d667a42..075e7c1d28 100644
--- a/indra/llcommon/lltracerecording.cpp
+++ b/indra/llcommon/lltracerecording.cpp
@@ -1,24 +1,24 @@
-/** 
+/**
 * @file lltracesampler.cpp
 *
 * $LicenseInfo:firstyear=2001&license=viewerlgpl$
 * Second Life Viewer Source Code
 * Copyright (C) 2012, Linden Research, Inc.
- * 
+ *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License only.
- * 
+ *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
- * 
+ *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- * 
+ *
 * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
 * $/LicenseInfo$
 */
@@ -32,7 +32,7 @@
 #include "lltracethreadrecorder.h"
 #include "llthread.h"
 
-inline F64 lerp(F64 a, F64 b, F64 u)
+inline F64 lerp(F64 a, F64 b, F64 u)
 {
     return a + ((b - a) * u);
 }
@@ -40,34 +40,29 @@
 namespace LLTrace
 {
 
-extern MemStatHandle gTraceMemStat;
-
 ///////////////////////////////////////////////////////////////////////
 // Recording
 ///////////////////////////////////////////////////////////////////////
 
-Recording::Recording(EPlayState state)
+Recording::Recording(EPlayState state)
 :   mElapsedSeconds(0),
     mActiveBuffers(NULL)
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-    claim_alloc(gTraceMemStat, this);
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     mBuffers = new AccumulatorBufferGroup();
-    claim_alloc(gTraceMemStat, mBuffers);
     setPlayState(state);
 }
 
 Recording::Recording( const Recording& other )
 :   mActiveBuffers(NULL)
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-    claim_alloc(gTraceMemStat, this);
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     *this = other;
 }
 
 Recording& Recording::operator = (const Recording& other)
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     // this will allow us to seamlessly start without affecting any data we've acquired from other
     setPlayState(PAUSED);
 
@@ -85,14 +80,11 @@ Recording& Recording::operator = (const Recording& other)
     return *this;
 }
 
-
 Recording::~Recording()
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-    disclaim_alloc(gTraceMemStat, this);
-    disclaim_alloc(gTraceMemStat, mBuffers);
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
 
-    // allow recording destruction without thread recorder running, 
+    // allow recording destruction without thread recorder running,
     // otherwise thread shutdown could crash if a recording outlives the thread recorder
     // besides, recording construction and destruction is fine without a recorder...just don't attempt to start one
     if (isStarted() && LLTrace::get_thread_recorder() != NULL)
@@ -107,14 +99,14 @@ void Recording::update()
 #if LL_TRACE_ENABLED
     if (isStarted())
     {
-        LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+        LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
         mElapsedSeconds += mSamplingTimer.getElapsedTimeF64();
 
-        // must have 
-        llassert(mActiveBuffers != NULL 
+        // must have
+        llassert(mActiveBuffers != NULL
            && LLTrace::get_thread_recorder() != NULL);
 
-        if(!mActiveBuffers->isCurrent() && LLTrace::get_thread_recorder() != NULL)
+        if (!mActiveBuffers->isCurrent() && LLTrace::get_thread_recorder() != NULL)
        {
            AccumulatorBufferGroup* buffers = mBuffers.write();
            LLTrace::get_thread_recorder()->deactivate(buffers);
@@ -128,7 +120,7 @@ void Recording::update()
 
 void Recording::handleReset()
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
 #if LL_TRACE_ENABLED
     mBuffers.write()->reset();
 
@@ -139,7 +131,7 @@ void Recording::handleReset()
 
 void Recording::handleStart()
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
 #if LL_TRACE_ENABLED
     mSamplingTimer.reset();
     mBuffers.setStayUnique(true);
@@ -151,7 +143,7 @@ void Recording::handleStart()
 
 void Recording::handleStop()
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
 #if LL_TRACE_ENABLED
     mElapsedSeconds += mSamplingTimer.getElapsedTimeF64();
     // must have thread recorder running on this thread
@@ -204,7 +196,6 @@ F64Seconds Recording::getSum(const StatType<TimeBlockAccumulator>& stat)
     return F64Seconds(((F64)(accumulator.mSelfTimeCounter) + (F64)(active_accumulator ? active_accumulator->mSelfTimeCounter : 0)) / (F64)LLTrace::BlockTimer::countsPerSecond());
 }
 
-
 S32 Recording::getSum(const StatType<TimeBlockAccumulator::CallCountFacet>& stat)
 {
     update();
@@ -219,7 +210,7 @@ F64Seconds Recording::getPerSec(const StatType<TimeBlockAccumulator>& stat)
     const TimeBlockAccumulator& accumulator = mBuffers->mStackTimers[stat.getIndex()];
     const TimeBlockAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mStackTimers[stat.getIndex()] : NULL;
 
-    return F64Seconds((F64)(accumulator.mTotalTimeCounter + (active_accumulator ? active_accumulator->mTotalTimeCounter : 0)) 
+    return F64Seconds((F64)(accumulator.mTotalTimeCounter + (active_accumulator ? active_accumulator->mTotalTimeCounter : 0))
        / ((F64)LLTrace::BlockTimer::countsPerSecond() * mElapsedSeconds.value()));
 }
 
@@ -241,144 +232,9 @@ F32 Recording::getPerSec(const StatType<TimeBlockAccumulator::CallCountFacet>& s
     return (F32)(accumulator.mCalls + (active_accumulator ? active_accumulator->mCalls : 0)) / mElapsedSeconds.value();
 }
 
-bool Recording::hasValue(const StatType<MemAccumulator>& stat)
-{
-    update();
-    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
-    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
-    return accumulator.mSize.hasValue() || (active_accumulator && active_accumulator->mSize.hasValue() ? active_accumulator->mSize.hasValue() : false);
-}
-
-F64Kilobytes Recording::getMin(const StatType<MemAccumulator>& stat)
-{
-    update();
-    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
-    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
-    return F64Bytes(llmin(accumulator.mSize.getMin(), (active_accumulator && active_accumulator->mSize.hasValue() ? active_accumulator->mSize.getMin() : F32_MAX)));
-}
-
-F64Kilobytes Recording::getMean(const StatType<MemAccumulator>& stat)
-{
-    update();
-    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
-    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
-
-    if (active_accumulator && active_accumulator->mSize.hasValue())
-    {
-        F32 t = 0.0f;
-        S32 div = accumulator.mSize.getSampleCount() + active_accumulator->mSize.getSampleCount();
-        if (div > 0)
-        {
-            t = active_accumulator->mSize.getSampleCount() / div;
-        }
-        return F64Bytes(lerp(accumulator.mSize.getMean(), active_accumulator->mSize.getMean(), t));
-    }
-    else
-    {
-        return F64Bytes(accumulator.mSize.getMean());
-    }
-}
-
-F64Kilobytes Recording::getMax(const StatType<MemAccumulator>& stat)
-{
-    update();
-    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
-    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
-    return F64Bytes(llmax(accumulator.mSize.getMax(), active_accumulator && active_accumulator->mSize.hasValue() ? active_accumulator->mSize.getMax() : F32_MIN));
-}
-
-F64Kilobytes Recording::getStandardDeviation(const StatType<MemAccumulator>& stat)
-{
-    update();
-    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
-    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
-    if (active_accumulator && active_accumulator->hasValue())
-    {
-        F64 sum_of_squares = SampleAccumulator::mergeSumsOfSquares(accumulator.mSize, active_accumulator->mSize);
-        return F64Bytes(sqrtf(sum_of_squares / (accumulator.mSize.getSamplingTime().value() + active_accumulator->mSize.getSamplingTime().value())));
-    }
-    else
-    {
-        return F64Bytes(accumulator.mSize.getStandardDeviation());
-    }
-}
-
-F64Kilobytes Recording::getLastValue(const StatType<MemAccumulator>& stat)
-{
-    update();
-    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
-    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
-    return F64Bytes(active_accumulator ? active_accumulator->mSize.getLastValue() : accumulator.mSize.getLastValue());
-}
-
-bool Recording::hasValue(const StatType<MemAccumulator::AllocationFacet>& stat)
-{
-    update();
-    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
-    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
-    return accumulator.mAllocations.hasValue() || (active_accumulator ? active_accumulator->mAllocations.hasValue() : false);
-}
-
-F64Kilobytes Recording::getSum(const StatType<MemAccumulator::AllocationFacet>& stat)
-{
-    update();
-    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
-    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
-    return F64Bytes(accumulator.mAllocations.getSum() + (active_accumulator ? active_accumulator->mAllocations.getSum() : 0));
-}
-
-F64Kilobytes Recording::getPerSec(const StatType<MemAccumulator::AllocationFacet>& stat)
-{
-    update();
-    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
-    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
-    return F64Bytes((accumulator.mAllocations.getSum() + (active_accumulator ? active_accumulator->mAllocations.getSum() : 0)) / mElapsedSeconds.value());
-}
-
-S32 Recording::getSampleCount(const StatType<MemAccumulator::AllocationFacet>& stat)
-{
-    update();
-    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
-    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
-    return accumulator.mAllocations.getSampleCount() + (active_accumulator ? active_accumulator->mAllocations.getSampleCount() : 0);
-}
-
-bool Recording::hasValue(const StatType<MemAccumulator::DeallocationFacet>& stat)
-{
-    update();
-    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
-    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
-    return accumulator.mDeallocations.hasValue() || (active_accumulator ? active_accumulator->mDeallocations.hasValue() : false);
-}
-
-
-F64Kilobytes Recording::getSum(const StatType<MemAccumulator::DeallocationFacet>& stat)
-{
-    update();
-    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
-    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
-    return F64Bytes(accumulator.mDeallocations.getSum() + (active_accumulator ? active_accumulator->mDeallocations.getSum() : 0));
-}
-
-F64Kilobytes Recording::getPerSec(const StatType<MemAccumulator::DeallocationFacet>& stat)
-{
-    update();
-    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
-    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
-    return F64Bytes((accumulator.mDeallocations.getSum() + (active_accumulator ? active_accumulator->mDeallocations.getSum() : 0)) / mElapsedSeconds.value());
-}
-
-S32 Recording::getSampleCount(const StatType<MemAccumulator::DeallocationFacet>& stat)
-{
-    update();
-    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
-    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
-    return accumulator.mDeallocations.getSampleCount() + (active_accumulator ? active_accumulator->mDeallocations.getSampleCount() : 0);
-}
-
 bool Recording::hasValue(const StatType<CountAccumulator>& stat)
 {
-    update();
+    update();
     const CountAccumulator& accumulator = mBuffers->mCounts[stat.getIndex()];
     const CountAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mCounts[stat.getIndex()] : NULL;
     return accumulator.hasValue() || (active_accumulator ? active_accumulator->hasValue() : false);
@@ -386,7 +242,7 @@ bool Recording::hasValue(const StatType<CountAccumulator>& stat)
 
 F64 Recording::getSum(const StatType<CountAccumulator>& stat)
 {
-    update();
+    update();
     const CountAccumulator& accumulator = mBuffers->mCounts[stat.getIndex()];
     const CountAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mCounts[stat.getIndex()] : NULL;
     return accumulator.getSum() + (active_accumulator ? active_accumulator->getSum() : 0);
@@ -394,7 +250,7 @@ F64 Recording::getSum(const StatType<CountAccumulator>& stat)
 
 F64 Recording::getPerSec( const StatType<CountAccumulator>& stat )
 {
-    update();
+    update();
     const CountAccumulator& accumulator = mBuffers->mCounts[stat.getIndex()];
     const CountAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mCounts[stat.getIndex()] : NULL;
     F64 sum = accumulator.getSum() + (active_accumulator ? active_accumulator->getSum() : 0);
@@ -403,7 +259,7 @@ F64 Recording::getPerSec( const StatType<CountAccumulator>& stat )
 
 S32 Recording::getSampleCount( const StatType<CountAccumulator>& stat )
 {
-    update();
+    update();
     const CountAccumulator& accumulator = mBuffers->mCounts[stat.getIndex()];
     const CountAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mCounts[stat.getIndex()] : NULL;
     return accumulator.getSampleCount() + (active_accumulator ? active_accumulator->getSampleCount() : 0);
@@ -411,7 +267,7 @@ S32 Recording::getSampleCount( const StatType<CountAccumulator>& stat )
 
 bool Recording::hasValue(const StatType<SampleAccumulator>& stat)
 {
-    update();
+    update();
     const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];
     const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;
     return accumulator.hasValue() || (active_accumulator && active_accumulator->hasValue());
@@ -419,7 +275,7 @@ bool Recording::hasValue(const StatType<SampleAccumulator>& stat)
 
 F64 Recording::getMin( const StatType<SampleAccumulator>& stat )
 {
-    update();
+    update();
     const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];
     const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;
     return llmin(accumulator.getMin(), active_accumulator && active_accumulator->hasValue() ? active_accumulator->getMin() : F32_MAX);
@@ -427,7 +283,7 @@ F64 Recording::getMin( const StatType<SampleAccumulator>& stat )
 
 F64 Recording::getMax( const StatType<SampleAccumulator>& stat )
 {
-    update();
+    update();
     const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];
     const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;
     return llmax(accumulator.getMax(), active_accumulator && active_accumulator->hasValue() ? active_accumulator->getMax() : F32_MIN);
@@ -435,17 +291,17 @@ F64 Recording::getMax( const StatType<SampleAccumulator>& stat )
 
 F64 Recording::getMean( const StatType<SampleAccumulator>& stat )
 {
-    update();
+    update();
     const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];
     const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;
     if (active_accumulator && active_accumulator->hasValue())
     {
-        F32 t = 0.0f;
-        S32 div = accumulator.getSampleCount() + active_accumulator->getSampleCount();
-        if (div > 0)
-        {
-            t = active_accumulator->getSampleCount() / div;
-        }
+        F32 t = 0.0f;
+        S32 div = accumulator.getSampleCount() + active_accumulator->getSampleCount();
+        if (div > 0)
+        {
+            t = active_accumulator->getSampleCount() / div;
+        }
         return lerp(accumulator.getMean(), active_accumulator->getMean(), t);
     }
     else
@@ -456,7 +312,7 @@ F64 Recording::getMean( const StatType<SampleAccumulator>& stat )
 
 F64 Recording::getStandardDeviation( const StatType<SampleAccumulator>& stat )
 {
-    update();
+    update();
     const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];
     const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;
 
@@ -473,7 +329,7 @@ F64 Recording::getStandardDeviation( const StatType<SampleAccumulator>& stat )
 
 F64 Recording::getLastValue( const StatType<SampleAccumulator>& stat )
 {
-    update();
+    update();
     const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];
     const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;
     return (active_accumulator && active_accumulator->hasValue() ? active_accumulator->getLastValue() : accumulator.getLastValue());
@@ -481,7 +337,7 @@ F64 Recording::getLastValue( const StatType<SampleAccumulator>& stat )
 
 S32 Recording::getSampleCount( const StatType<SampleAccumulator>& stat )
 {
-    update();
+    update();
     const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];
     const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;
     return accumulator.getSampleCount() + (active_accumulator && active_accumulator->hasValue() ? active_accumulator->getSampleCount() : 0);
@@ -489,7 +345,7 @@ S32 Recording::getSampleCount( const StatType<SampleAccumulator>& stat )
 
 bool Recording::hasValue(const StatType<EventAccumulator>& stat)
 {
-    update();
+    update();
     const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
     const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;
     return accumulator.hasValue() || (active_accumulator && active_accumulator->hasValue());
@@ -497,7 +353,7 @@ bool Recording::hasValue(const StatType<EventAccumulator>& stat)
 
 F64 Recording::getSum( const StatType<EventAccumulator>& stat)
 {
-    update();
+    update();
     const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
     const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;
     return (F64)(accumulator.getSum() + (active_accumulator && active_accumulator->hasValue() ? active_accumulator->getSum() : 0));
@@ -505,7 +361,7 @@ F64 Recording::getSum( const StatType<EventAccumulator>& stat)
 
 F64 Recording::getMin( const StatType<EventAccumulator>& stat )
 {
-    update();
+    update();
     const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
     const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;
     return llmin(accumulator.getMin(), active_accumulator && active_accumulator->hasValue() ? active_accumulator->getMin() : F32_MAX);
@@ -513,7 +369,7 @@ F64 Recording::getMin( const StatType<EventAccumulator>& stat )
 
 F64 Recording::getMax( const StatType<EventAccumulator>& stat )
 {
-    update();
+    update();
     const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
     const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;
     return llmax(accumulator.getMax(), active_accumulator && active_accumulator->hasValue() ? active_accumulator->getMax() : F32_MIN);
@@ -521,17 +377,17 @@ F64 Recording::getMax( const StatType<EventAccumulator>& stat )
 
 F64 Recording::getMean( const StatType<EventAccumulator>& stat )
 {
-    update();
+    update();
     const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
     const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;
     if (active_accumulator && active_accumulator->hasValue())
     {
         F32 t = 0.0f;
-        S32 div = accumulator.getSampleCount() + active_accumulator->getSampleCount();
-        if (div > 0)
-        {
-            t = active_accumulator->getSampleCount() / div;
-        }
+        S32 div = accumulator.getSampleCount() + active_accumulator->getSampleCount();
+        if (div > 0)
+        {
+            t = active_accumulator->getSampleCount() / div;
+        }
         return lerp(accumulator.getMean(), active_accumulator->getMean(), t);
     }
     else
@@ -542,7 +398,7 @@ F64 Recording::getMean( const StatType<EventAccumulator>& stat )
 
 F64 Recording::getStandardDeviation( const StatType<EventAccumulator>& stat )
 {
-    update();
+    update();
     const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
     const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;
 
@@ -559,7 +415,7 @@ F64 Recording::getStandardDeviation( const StatType<EventAccumulator>& stat )
 
 F64 Recording::getLastValue( const StatType<EventAccumulator>& stat )
 {
-    update();
+    update();
     const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
     const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;
     return active_accumulator ? active_accumulator->getLastValue() : accumulator.getLastValue();
@@ -567,7 +423,7 @@ F64 Recording::getLastValue( const StatType<EventAccumulator>& stat )
 
 S32 Recording::getSampleCount( const StatType<EventAccumulator>& stat )
 {
-    update();
+    update();
     const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
     const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;
     return accumulator.getSampleCount() + (active_accumulator ? active_accumulator->getSampleCount() : 0);
@@ -577,7 +433,7 @@ S32 Recording::getSampleCount( const StatType<EventAccumulator>& stat )
 // PeriodicRecording
 ///////////////////////////////////////////////////////////////////////
 
-PeriodicRecording::PeriodicRecording( size_t num_periods, EPlayState state)
+PeriodicRecording::PeriodicRecording( size_t num_periods, EPlayState state)
 :   mAutoResize(num_periods == 0),
     mCurPeriod(0),
     mNumRecordedPeriods(0),
@@ -585,15 +441,13 @@ PeriodicRecording::PeriodicRecording( size_t num_periods, EPlayState state)
     // code in several methods.
     mRecordingPeriods(num_periods ? num_periods : 1)
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     setPlayState(state);
-    claim_alloc(gTraceMemStat, this);
 }
 
 PeriodicRecording::~PeriodicRecording()
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-    disclaim_alloc(gTraceMemStat, this);
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
 }
 
 void PeriodicRecording::nextPeriod()
@@ -615,12 +469,11 @@ void PeriodicRecording::nextPeriod()
 
 void PeriodicRecording::appendRecording(Recording& recording)
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     getCurRecording().appendRecording(recording);
     nextPeriod();
 }
 
-
 void PeriodicRecording::appendPeriodicRecording( PeriodicRecording& other )
 {
     LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
@@ -693,16 +546,14 @@ F64Seconds PeriodicRecording::getDuration() const
     return duration;
 }
 
-
 LLTrace::Recording PeriodicRecording::snapshotCurRecording() const
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     Recording recording_copy(getCurRecording());
     recording_copy.stop();
     return recording_copy;
 }
 
-
 Recording& PeriodicRecording::getLastRecording()
 {
     return getPrevRecording(1);
@@ -737,19 +588,19 @@ const Recording& PeriodicRecording::getPrevRecording( size_t offset ) const
 
 void PeriodicRecording::handleStart()
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     getCurRecording().start();
 }
 
 void PeriodicRecording::handleStop()
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     getCurRecording().pause();
 }
 
 void PeriodicRecording::handleReset()
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     getCurRecording().stop();
 
     if (mAutoResize)
@@ -771,13 +622,13 @@ void PeriodicRecording::handleReset()
 
 void PeriodicRecording::handleSplitTo(PeriodicRecording& other)
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     getCurRecording().splitTo(other.getCurRecording());
 }
 
 F64 PeriodicRecording::getPeriodMin( const StatType<EventAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     num_periods = llmin(num_periods, getNumRecordedPeriods());
 
     bool has_value = false;
@@ -792,14 +643,14 @@ F64 PeriodicRecording::getPeriodMin( const StatType<EventAccumulator>& stat, siz
         }
     }
 
-    return has_value 
-        ? min_val 
+    return has_value
+        ? min_val
         : NaN;
 }
 
 F64 PeriodicRecording::getPeriodMax( const StatType<EventAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     num_periods = llmin(num_periods, getNumRecordedPeriods());
 
     bool has_value = false;
@@ -814,15 +665,15 @@ F64 PeriodicRecording::getPeriodMax( const StatType<EventAccumulator>& stat, siz
         }
     }
 
-    return has_value 
-        ? max_val 
+    return has_value
+        ? max_val
         : NaN;
 }
 
 // calculates means using aggregates per period
 F64 PeriodicRecording::getPeriodMean( const StatType<EventAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     num_periods = llmin(num_periods, getNumRecordedPeriods());
 
     F64 mean = 0;
@@ -838,14 +689,14 @@ F64 PeriodicRecording::getPeriodMean( const StatType<EventAccumulator>& stat, si
         }
     }
 
-    return valid_period_count 
+    return valid_period_count
         ? mean / (F64)valid_period_count
         : NaN;
 }
 
 F64 PeriodicRecording::getPeriodStandardDeviation( const StatType<EventAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     num_periods = llmin(num_periods, getNumRecordedPeriods());
 
     F64 period_mean = getPeriodMean(stat, num_periods);
@@ -870,7 +721,7 @@ F64 PeriodicRecording::getPeriodStandardDeviation( const StatType<EventAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
 
 F64 PeriodicRecording::getPeriodMin( const StatType<SampleAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     num_periods = llmin(num_periods, getNumRecordedPeriods());
 
     bool has_value = false;
@@ -885,14 +736,14 @@ F64 PeriodicRecording::getPeriodMin( const StatType<SampleAccumulator>& stat, si
         }
     }
 
-    return has_value 
-        ? min_val 
+    return has_value
+        ? min_val
        : NaN;
 }
 
 F64 PeriodicRecording::getPeriodMax(const StatType<SampleAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/)
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     num_periods = llmin(num_periods, getNumRecordedPeriods());
 
     bool has_value = false;
@@ -907,15 +758,15 @@ F64 PeriodicRecording::getPeriodMax(const StatType<SampleAccumulator>& stat, siz
         }
     }
 
-    return has_value 
-        ? max_val 
+    return has_value
+        ? max_val
         : NaN;
 }
 
 F64 PeriodicRecording::getPeriodMean( const StatType<SampleAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     num_periods = llmin(num_periods, getNumRecordedPeriods());
 
     S32 valid_period_count = 0;
@@ -938,7 +789,7 @@ F64 PeriodicRecording::getPeriodMean( const StatType<SampleAccumulator>& stat, s
 
 F64 PeriodicRecording::getPeriodMedian( const StatType<SampleAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     num_periods = llmin(num_periods, getNumRecordedPeriods());
 
     std::vector<F64> buf;
@@ -964,7 +815,7 @@ F64 PeriodicRecording::getPeriodMedian( const StatType<SampleAccumulator>& stat,
 
 F64 PeriodicRecording::getPeriodStandardDeviation( const StatType<SampleAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
 {
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
     num_periods = llmin(num_periods, getNumRecordedPeriods());
 
     F64 period_mean = getPeriodMean(stat, num_periods);
@@ -987,105 +838,13 @@ F64 PeriodicRecording::getPeriodStandardDeviation( const StatType<SampleAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
 
-F64Kilobytes PeriodicRecording::getPeriodMin( const StatType<MemAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
-{
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-    num_periods = llmin(num_periods, getNumRecordedPeriods());
-
-    F64Kilobytes min_val(std::numeric_limits<F64>::max());
-    for (size_t i = 1; i <= num_periods; i++)
-    {
-        Recording& recording = getPrevRecording(i);
-        min_val = llmin(min_val, recording.getMin(stat));
-    }
-
-    return min_val;
-}
-
-F64Kilobytes PeriodicRecording::getPeriodMin(const MemStatHandle& stat, size_t num_periods)
-{
-    return getPeriodMin(static_cast<const StatType<MemAccumulator>&>(stat), num_periods);
-}
-
-F64Kilobytes PeriodicRecording::getPeriodMax(const StatType<MemAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/)
-{
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-    num_periods = llmin(num_periods, getNumRecordedPeriods());
-
-    F64Kilobytes max_val(0.0);
-    for (size_t i = 1; i <= num_periods; i++)
-    {
-        Recording& recording = getPrevRecording(i);
-        max_val = llmax(max_val, recording.getMax(stat));
-    }
-
-    return max_val;
-}
-
-F64Kilobytes PeriodicRecording::getPeriodMax(const MemStatHandle& stat, size_t num_periods)
-{
-    return getPeriodMax(static_cast<const StatType<MemAccumulator>&>(stat), num_periods);
-}
-
-F64Kilobytes PeriodicRecording::getPeriodMean( const StatType<MemAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
-{
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-    num_periods = llmin(num_periods, getNumRecordedPeriods());
-
-    F64Kilobytes mean(0);
-
-    for (size_t i = 1; i <= num_periods; i++)
-    {
-        Recording& recording = getPrevRecording(i);
-        mean += recording.getMean(stat);
-    }
-
-    return mean / F64(num_periods);
-}
-
-F64Kilobytes PeriodicRecording::getPeriodMean(const MemStatHandle& stat, size_t num_periods)
-{
-    return getPeriodMean(static_cast<const StatType<MemAccumulator>&>(stat), num_periods);
-}
-
-F64Kilobytes PeriodicRecording::getPeriodStandardDeviation( const StatType<MemAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
-{
-    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-    num_periods = llmin(num_periods, getNumRecordedPeriods());
-
-    F64Kilobytes period_mean = getPeriodMean(stat, num_periods);
-    S32 valid_period_count = 0;
-    F64 sum_of_squares = 0;
-
-    for (size_t i = 1; i <= num_periods; i++)
-    {
-        Recording& recording = getPrevRecording(i);
-        if (recording.hasValue(stat))
-        {
-            F64Kilobytes delta = recording.getMean(stat) - period_mean;
-            sum_of_squares += delta.value() * delta.value();
-            valid_period_count++;
-        }
-    }
-
-    return
F64Kilobytes(valid_period_count - ? sqrt(sum_of_squares / (F64)valid_period_count) - : NaN); -} - -F64Kilobytes PeriodicRecording::getPeriodStandardDeviation(const MemStatHandle& stat, size_t num_periods) -{ - return getPeriodStandardDeviation(static_cast&>(stat), num_periods); -} - /////////////////////////////////////////////////////////////////////// // ExtendableRecording /////////////////////////////////////////////////////////////////////// void ExtendableRecording::extend() { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; // push the data back to accepted recording mAcceptedRecording.appendRecording(mPotentialRecording); // flush data, so we can start from scratch @@ -1094,76 +853,72 @@ void ExtendableRecording::extend() void ExtendableRecording::handleStart() { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; mPotentialRecording.start(); } void ExtendableRecording::handleStop() { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; mPotentialRecording.pause(); } void ExtendableRecording::handleReset() { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; mAcceptedRecording.reset(); mPotentialRecording.reset(); } void ExtendableRecording::handleSplitTo(ExtendableRecording& other) { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; mPotentialRecording.splitTo(other.mPotentialRecording); } - /////////////////////////////////////////////////////////////////////// // ExtendablePeriodicRecording /////////////////////////////////////////////////////////////////////// - -ExtendablePeriodicRecording::ExtendablePeriodicRecording() -: mAcceptedRecording(0), +ExtendablePeriodicRecording::ExtendablePeriodicRecording() +: mAcceptedRecording(0), mPotentialRecording(0) {} void ExtendablePeriodicRecording::extend() { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; // push the data back to accepted recording mAcceptedRecording.appendPeriodicRecording(mPotentialRecording); // flush data, so we can start from scratch mPotentialRecording.reset(); } - void ExtendablePeriodicRecording::handleStart() { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; mPotentialRecording.start(); } void ExtendablePeriodicRecording::handleStop() { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; mPotentialRecording.pause(); } void ExtendablePeriodicRecording::handleReset() { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; mAcceptedRecording.reset(); mPotentialRecording.reset(); } void ExtendablePeriodicRecording::handleSplitTo(ExtendablePeriodicRecording& other) { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; mPotentialRecording.splitTo(other.mPotentialRecording); } - PeriodicRecording& get_frame_recording() { static thread_local PeriodicRecording sRecording(200, PeriodicRecording::STARTED); @@ -1174,7 +929,7 @@ PeriodicRecording& get_frame_recording() void LLStopWatchControlsMixinCommon::start() { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; switch (mPlayState) { case STOPPED: @@ -1196,7 +951,7 @@ void LLStopWatchControlsMixinCommon::start() void LLStopWatchControlsMixinCommon::stop() { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; switch (mPlayState) { case STOPPED: @@ -1216,7 +971,7 @@ void 
LLStopWatchControlsMixinCommon::stop() void LLStopWatchControlsMixinCommon::pause() { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; switch (mPlayState) { case STOPPED: @@ -1236,7 +991,7 @@ void LLStopWatchControlsMixinCommon::pause() void LLStopWatchControlsMixinCommon::unpause() { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; switch (mPlayState) { case STOPPED: @@ -1256,7 +1011,7 @@ void LLStopWatchControlsMixinCommon::unpause() void LLStopWatchControlsMixinCommon::resume() { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; switch (mPlayState) { case STOPPED: @@ -1277,7 +1032,7 @@ void LLStopWatchControlsMixinCommon::resume() void LLStopWatchControlsMixinCommon::restart() { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; switch (mPlayState) { case STOPPED: @@ -1301,13 +1056,13 @@ void LLStopWatchControlsMixinCommon::restart() void LLStopWatchControlsMixinCommon::reset() { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; handleReset(); } void LLStopWatchControlsMixinCommon::setPlayState( EPlayState state ) { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; switch(state) { case STOPPED: diff --git a/indra/llcommon/lltracerecording.h b/indra/llcommon/lltracerecording.h index a6b1a67d02..61b9096ae2 100644 --- a/indra/llcommon/lltracerecording.h +++ b/indra/llcommon/lltracerecording.h @@ -1,25 +1,25 @@ -/** +/** * @file lltracerecording.h * @brief Sampling object for collecting runtime statistics originating from lltrace. * * $LicenseInfo:firstyear=2001&license=viewerlgpl$ * Second Life Viewer Source Code * Copyright (C) 2012, Linden Research, Inc. - * + * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; * version 2.1 of the License only. - * + * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
- * + * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - * + * * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ @@ -112,7 +112,6 @@ private: // atomically stop this object while starting the other // no data can be missed in between stop and start virtual void handleSplitTo(DERIVED& other) {}; - }; namespace LLTrace @@ -129,8 +128,6 @@ namespace LLTrace template class EventStatHandle; - class MemStatHandle; - template struct RelatedTypes { @@ -152,7 +149,7 @@ namespace LLTrace typedef S32 sum_t; }; - class Recording + class Recording : public LLStopWatchControlsMixin { public: @@ -182,24 +179,6 @@ namespace LLTrace F64Seconds getPerSec(const StatType& stat); F32 getPerSec(const StatType& stat); - // Memory accessors - bool hasValue(const StatType& stat); - F64Kilobytes getMin(const StatType& stat); - F64Kilobytes getMean(const StatType& stat); - F64Kilobytes getMax(const StatType& stat); - F64Kilobytes getStandardDeviation(const StatType& stat); - F64Kilobytes getLastValue(const StatType& stat); - - bool hasValue(const StatType& stat); - F64Kilobytes getSum(const StatType& stat); - F64Kilobytes getPerSec(const StatType& stat); - S32 getSampleCount(const StatType& stat); - - bool hasValue(const StatType& stat); - F64Kilobytes getSum(const StatType& stat); - F64Kilobytes getPerSec(const StatType& stat); - S32 getSampleCount(const StatType& stat); - // CountStatHandle accessors bool hasValue(const StatType& stat); F64 getSum(const StatType& stat); @@ -318,7 +297,7 @@ namespace LLTrace /*virtual*/ void handleSplitTo(Recording& other); // returns data for current thread - class ThreadRecorder* getThreadRecorder(); + class ThreadRecorder* getThreadRecorder(); LLTimer mSamplingTimer; F64Seconds mElapsedSeconds; @@ -335,10 +314,10 @@ namespace LLTrace ~PeriodicRecording(); void nextPeriod(); - auto getNumRecordedPeriods() - { + auto getNumRecordedPeriods() + { // current period counts if not active - return mNumRecordedPeriods + (isStarted() ? 0 : 1); + return mNumRecordedPeriods + (isStarted() ? 0 : 1); } F64Seconds getDuration() const; @@ -367,7 +346,7 @@ namespace LLTrace } return num_samples; } - + // // PERIODIC MIN // @@ -376,7 +355,7 @@ namespace LLTrace template typename T::value_t getPeriodMin(const StatType& stat, size_t num_periods = std::numeric_limits::max()) { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; num_periods = llmin(num_periods, getNumRecordedPeriods()); bool has_value = false; @@ -391,15 +370,15 @@ namespace LLTrace } } - return has_value - ? min_val + return has_value + ? 
min_val : T::getDefaultValue(); } template T getPeriodMin(const CountStatHandle& stat, size_t num_periods = std::numeric_limits::max()) { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; return T(getPeriodMin(static_cast&>(stat), num_periods)); } @@ -407,7 +386,7 @@ namespace LLTrace template T getPeriodMin(const SampleStatHandle& stat, size_t num_periods = std::numeric_limits::max()) { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; return T(getPeriodMin(static_cast&>(stat), num_periods)); } @@ -415,17 +394,14 @@ namespace LLTrace template T getPeriodMin(const EventStatHandle& stat, size_t num_periods = std::numeric_limits::max()) { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; return T(getPeriodMin(static_cast&>(stat), num_periods)); } - F64Kilobytes getPeriodMin(const StatType& stat, size_t num_periods = std::numeric_limits::max()); - F64Kilobytes getPeriodMin(const MemStatHandle& stat, size_t num_periods = std::numeric_limits::max()); - template typename RelatedTypes::fractional_t getPeriodMinPerSec(const StatType& stat, size_t num_periods = std::numeric_limits::max()) { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; num_periods = llmin(num_periods, getNumRecordedPeriods()); typename RelatedTypes::fractional_t min_val(std::numeric_limits::max()); @@ -440,7 +416,7 @@ namespace LLTrace template typename RelatedTypes::fractional_t getPeriodMinPerSec(const CountStatHandle& stat, size_t num_periods = std::numeric_limits::max()) { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; return typename RelatedTypes::fractional_t(getPeriodMinPerSec(static_cast&>(stat), num_periods)); } @@ -452,7 +428,7 @@ namespace LLTrace template typename T::value_t getPeriodMax(const StatType& stat, size_t num_periods = std::numeric_limits::max()) { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; num_periods = llmin(num_periods, getNumRecordedPeriods()); bool has_value = false; @@ -467,15 +443,15 @@ namespace LLTrace } } - return has_value - ? max_val + return has_value + ? 
max_val : T::getDefaultValue(); } template T getPeriodMax(const CountStatHandle& stat, size_t num_periods = std::numeric_limits::max()) { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; return T(getPeriodMax(static_cast&>(stat), num_periods)); } @@ -483,7 +459,7 @@ namespace LLTrace template T getPeriodMax(const SampleStatHandle& stat, size_t num_periods = std::numeric_limits::max()) { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; return T(getPeriodMax(static_cast&>(stat), num_periods)); } @@ -491,17 +467,14 @@ namespace LLTrace template T getPeriodMax(const EventStatHandle& stat, size_t num_periods = std::numeric_limits::max()) { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; return T(getPeriodMax(static_cast&>(stat), num_periods)); } - F64Kilobytes getPeriodMax(const StatType& stat, size_t num_periods = std::numeric_limits::max()); - F64Kilobytes getPeriodMax(const MemStatHandle& stat, size_t num_periods = std::numeric_limits::max()); - template typename RelatedTypes::fractional_t getPeriodMaxPerSec(const StatType& stat, size_t num_periods = std::numeric_limits::max()) { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; num_periods = llmin(num_periods, getNumRecordedPeriods()); F64 max_val = std::numeric_limits::min(); @@ -516,7 +489,7 @@ namespace LLTrace template typename RelatedTypes::fractional_t getPeriodMaxPerSec(const CountStatHandle& stat, size_t num_periods = std::numeric_limits::max()) { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; return typename RelatedTypes::fractional_t(getPeriodMaxPerSec(static_cast&>(stat), num_periods)); } @@ -528,7 +501,7 @@ namespace LLTrace template typename RelatedTypes::fractional_t getPeriodMean(const StatType& stat, size_t num_periods = std::numeric_limits::max()) { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; num_periods = llmin(num_periods, getNumRecordedPeriods()); typename RelatedTypes::fractional_t mean(0); @@ -549,14 +522,14 @@ namespace LLTrace template typename RelatedTypes::fractional_t getPeriodMean(const CountStatHandle& stat, size_t num_periods = std::numeric_limits::max()) { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; return typename RelatedTypes::fractional_t(getPeriodMean(static_cast&>(stat), num_periods)); } F64 getPeriodMean(const StatType& stat, size_t num_periods = std::numeric_limits::max()); - template + template typename RelatedTypes::fractional_t getPeriodMean(const SampleStatHandle& stat, size_t num_periods = std::numeric_limits::max()) { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; return typename RelatedTypes::fractional_t(getPeriodMean(static_cast&>(stat), num_periods)); } @@ -564,17 +537,14 @@ namespace LLTrace template typename RelatedTypes::fractional_t getPeriodMean(const EventStatHandle& stat, size_t num_periods = std::numeric_limits::max()) { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; return typename RelatedTypes::fractional_t(getPeriodMean(static_cast&>(stat), num_periods)); } - F64Kilobytes getPeriodMean(const StatType& stat, size_t num_periods = std::numeric_limits::max()); - F64Kilobytes getPeriodMean(const MemStatHandle& stat, size_t num_periods = std::numeric_limits::max()); - template typename RelatedTypes::fractional_t getPeriodMeanPerSec(const StatType& stat, size_t num_periods = 
std::numeric_limits::max()) { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; num_periods = llmin(num_periods, getNumRecordedPeriods()); typename RelatedTypes::fractional_t mean = 0; @@ -596,7 +566,7 @@ namespace LLTrace template typename RelatedTypes::fractional_t getPeriodMeanPerSec(const CountStatHandle& stat, size_t num_periods = std::numeric_limits::max()) { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; return typename RelatedTypes::fractional_t(getPeriodMeanPerSec(static_cast&>(stat), num_periods)); } @@ -635,10 +605,10 @@ namespace LLTrace F64 getPeriodStandardDeviation(const StatType& stat, size_t num_periods = std::numeric_limits::max()); - template + template typename RelatedTypes::fractional_t getPeriodStandardDeviation(const SampleStatHandle& stat, size_t num_periods = std::numeric_limits::max()) { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; return typename RelatedTypes::fractional_t(getPeriodStandardDeviation(static_cast&>(stat), num_periods)); } @@ -646,13 +616,10 @@ namespace LLTrace template typename RelatedTypes::fractional_t getPeriodStandardDeviation(const EventStatHandle& stat, size_t num_periods = std::numeric_limits::max()) { - LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; + LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; return typename RelatedTypes::fractional_t(getPeriodStandardDeviation(static_cast&>(stat), num_periods)); } - F64Kilobytes getPeriodStandardDeviation(const StatType& stat, size_t num_periods = std::numeric_limits::max()); - F64Kilobytes getPeriodStandardDeviation(const MemStatHandle& stat, size_t num_periods = std::numeric_limits::max()); - private: // implementation for LLStopWatchControlsMixin /*virtual*/ void handleStart(); @@ -731,7 +698,7 @@ namespace LLTrace PeriodicRecording& getResults() { return mAcceptedRecording; } const PeriodicRecording& getResults() const {return mAcceptedRecording;} - + void nextPeriod() { mPotentialRecording.nextPeriod(); } private: diff --git a/indra/llcommon/lltracethreadrecorder.cpp b/indra/llcommon/lltracethreadrecorder.cpp index 282c454a2a..914bfb55dc 100644 --- a/indra/llcommon/lltracethreadrecorder.cpp +++ b/indra/llcommon/lltracethreadrecorder.cpp @@ -32,7 +32,7 @@ namespace LLTrace { -extern MemStatHandle gTraceMemStat; +//extern MemStatHandle gTraceMemStat; static ThreadRecorder* sMasterThreadRecorder = NULL; @@ -81,9 +81,9 @@ void ThreadRecorder::init() BlockTimer::getRootTimeBlock().getCurrentAccumulator().mActiveCount = 1; - claim_alloc(gTraceMemStat, this); - claim_alloc(gTraceMemStat, mRootTimer); - claim_alloc(gTraceMemStat, sizeof(TimeBlockTreeNode) * mNumTimeBlockTreeNodes); + //claim_alloc(gTraceMemStat, this); + //claim_alloc(gTraceMemStat, mRootTimer); + //claim_alloc(gTraceMemStat, sizeof(TimeBlockTreeNode) * mNumTimeBlockTreeNodes); #endif } @@ -101,9 +101,9 @@ ThreadRecorder::~ThreadRecorder() #if LL_TRACE_ENABLED LLThreadLocalSingletonPointer::setInstance(NULL); - disclaim_alloc(gTraceMemStat, this); - disclaim_alloc(gTraceMemStat, sizeof(BlockTimer)); - disclaim_alloc(gTraceMemStat, sizeof(TimeBlockTreeNode) * mNumTimeBlockTreeNodes); + //disclaim_alloc(gTraceMemStat, this); + //disclaim_alloc(gTraceMemStat, sizeof(BlockTimer)); + //disclaim_alloc(gTraceMemStat, sizeof(TimeBlockTreeNode) * mNumTimeBlockTreeNodes); deactivate(&mThreadRecordingBuffers); -- cgit v1.2.3 From 488b51b863c7902ed9f58179f664a1779ad148fb Mon Sep 17 00:00:00 2001 From: Andrey Kleshchev Date: Fri, 1 Sep 2023 00:04:20 +0300 
Subject: MacOS build fix

---
 indra/llcommon/lldictionary.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'indra/llcommon')

diff --git a/indra/llcommon/lldictionary.h b/indra/llcommon/lldictionary.h
index 3e86767d7e..18664e340e 100644
--- a/indra/llcommon/lldictionary.h
+++ b/indra/llcommon/lldictionary.h
@@ -87,7 +87,7 @@ protected:
 	}
 	void addEntry(Index index, Entry *entry)
 	{
-		if (!insert(value_type(index, entry)).second)
+		if (!this->emplace(index, entry).second)
 		{
 			LL_ERRS() << "Dictionary entry already added (attempted to add duplicate entry)" << LL_ENDL;
 		}
-- cgit v1.2.3
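[Editor's note: the fix above works because LLDictionary is a class template deriving from std::map, so unqualified members of the dependent base (insert, value_type, emplace) are not found by two-phase name lookup; MSVC has historically accepted the unqualified spelling, while clang on macOS rejects it. A stand-alone sketch of the same rule -- the Dict/add names are invented for illustration and are not part of the viewer source:

    #include <map>
    #include <string>

    template <typename K, typename V>
    struct Dict : public std::map<K, V>
    {
        bool add(const K& key, const V& value)
        {
            // A bare 'emplace(key, value)' is not found here: std::map<K, V>
            // is a dependent base, so its members must be reached through
            // 'this->' (or a using-declaration).
            return this->emplace(key, value).second;
        }
    };

    int main()
    {
        Dict<int, std::string> d;
        d.add(1, "one");
        return d.add(1, "uno") ? 1 : 0; // duplicate key: add() returns false
    }
]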
From 37246b99f698f53194c3d60b471e190af79a45fe Mon Sep 17 00:00:00 2001
From: Andrey Kleshchev
Date: Fri, 15 Sep 2023 01:22:27 +0300
Subject: SL-17135 APR process creation crash

Looks like the pool regularly gets corrupted; try using a separate pool.

---
 indra/llcommon/llapr.cpp     |  8 +++++++-
 indra/llcommon/llprocess.cpp | 19 ++++++++++++++++---
 indra/llcommon/llprocess.h   |  1 +
 3 files changed, 24 insertions(+), 4 deletions(-)

(limited to 'indra/llcommon')

diff --git a/indra/llcommon/llapr.cpp b/indra/llcommon/llapr.cpp
index 435531f86f..69466df2d1 100644
--- a/indra/llcommon/llapr.cpp
+++ b/indra/llcommon/llapr.cpp
@@ -38,6 +38,12 @@ const S32 FULL_VOLATILE_APR_POOL = 1024 ; //number of references to LLVolatileAP
 
 bool gAPRInitialized = false;
 
+int abortfunc(int retcode)
+{
+    LL_WARNS("APR") << "Allocation failure in apr pool with code " << (S32)retcode << LL_ENDL;
+    return 0;
+}
+
 void ll_init_apr()
 {
     // Initialize APR and create the global pool
     if (!gAPRPoolp)
     {
-        apr_pool_create(&gAPRPoolp, NULL);
+        apr_pool_create_ex(&gAPRPoolp, NULL, abortfunc, NULL);
     }
 
     if(!LLAPRFile::sAPRFilePoolp)
diff --git a/indra/llcommon/llprocess.cpp b/indra/llcommon/llprocess.cpp
index 97a38ea992..0d65762284 100644
--- a/indra/llcommon/llprocess.cpp
+++ b/indra/llcommon/llprocess.cpp
@@ -529,6 +529,7 @@ LLProcess::LLProcess(const LLSDOrParams& params):
     // preserve existing semantics, we promise that mAttached defaults to the
     // same setting as mAutokill.
     mAttached(params.attached.isProvided()? params.attached : params.autokill),
+    mPool(NULL),
     mPipes(NSLOTS)
 {
     // Hmm, when you construct a ptr_vector with a size, it merely reserves
@@ -549,8 +550,14 @@ LLProcess::LLProcess(const LLSDOrParams& params):
 
     mPostend = params.postend;
 
+    apr_pool_create(&mPool, gAPRPoolp);
+    if (!mPool)
+    {
+        LLTHROW(LLProcessError(STRINGIZE("failed to create apr pool")));
+    }
+
     apr_procattr_t *procattr = NULL;
-    chkapr(apr_procattr_create(&procattr, gAPRPoolp));
+    chkapr(apr_procattr_create(&procattr, mPool));
 
     // IQA-490, CHOP-900: On Windows, ask APR to jump through hoops to
     // constrain the set of handles passed to the child process. Before we
@@ -689,14 +696,14 @@ LLProcess::LLProcess(const LLSDOrParams& params):
     // one. Hand-expand chkapr() macro so we can fill in the actual command
     // string instead of the variable names.
     if (ll_apr_warn_status(apr_proc_create(&mProcess, argv[0], &argv[0], NULL, procattr,
-                                           gAPRPoolp)))
+                                           mPool)))
     {
         LLTHROW(LLProcessError(STRINGIZE(params << " failed")));
     }
 
     // arrange to call status_callback()
     apr_proc_other_child_register(&mProcess, &LLProcess::status_callback, this, mProcess.in,
-                                  gAPRPoolp);
+                                  mPool);
 
     // and make sure we poll it once per "mainloop" tick
     sProcessListener.addPoll(*this);
 
     mStatus.mState = RUNNING;
@@ -815,6 +822,12 @@ LLProcess::~LLProcess()
     {
         kill("destructor");
     }
+
+    if (mPool)
+    {
+        apr_pool_destroy(mPool);
+        mPool = NULL;
+    }
 }
 
 bool LLProcess::kill(const std::string& who)
diff --git a/indra/llcommon/llprocess.h b/indra/llcommon/llprocess.h
index e3386ad88e..0842f2eb07 100644
--- a/indra/llcommon/llprocess.h
+++ b/indra/llcommon/llprocess.h
@@ -568,6 +568,7 @@ private:
     // explicitly want this ptr_vector to be able to store NULLs
     typedef boost::ptr_vector< boost::nullable<BasePipe> > PipeVector;
     PipeVector mPipes;
+    apr_pool_t* mPool;
 };
 
 /// for logging
-- cgit v1.2.3
From dc2fc3488d8d1ebb2e90520dd17325f08b7c538b Mon Sep 17 00:00:00 2001
From: Andrey Kleshchev
Date: Tue, 10 Oct 2023 22:32:11 +0300
Subject: Revert "SL-18721 Viewer shutdown order changes"

This reverts commit edf0874e0656c6f512df50ee52236209531ca329.

Reverted since it causes a significant uptick in shutdown freezes.
Can't repro those freezes, will seek an alternate solution.

---
 indra/llcommon/threadpool.cpp | 12 ++----------
 indra/llcommon/threadpool.h   |  3 +--
 2 files changed, 3 insertions(+), 12 deletions(-)

(limited to 'indra/llcommon')

diff --git a/indra/llcommon/threadpool.cpp b/indra/llcommon/threadpool.cpp
index 22bbff4478..d5adf11264 100644
--- a/indra/llcommon/threadpool.cpp
+++ b/indra/llcommon/threadpool.cpp
@@ -21,12 +21,11 @@
 #include "llevents.h"
 #include "stringize.h"
 
-LL::ThreadPool::ThreadPool(const std::string& name, size_t threads, size_t capacity, bool auto_shutdown):
+LL::ThreadPool::ThreadPool(const std::string& name, size_t threads, size_t capacity):
     super(name),
     mQueue(name, capacity),
     mName("ThreadPool:" + name),
-    mThreadCount(threads),
-    mAutomaticShutdown(auto_shutdown)
+    mThreadCount(threads)
 {}
 
 void LL::ThreadPool::start()
@@ -40,13 +39,6 @@ void LL::ThreadPool::start()
             run(tname);
         });
     }
-
-    // Some threads might need to run longer than LLEventPumps
-    if (!mAutomaticShutdown)
-    {
-        return;
-    }
-
     // Listen on "LLApp", and when the app is shutting down, close the queue
     // and join the workers.
     LLEventPumps::instance().obtain("LLApp").listen(
diff --git a/indra/llcommon/threadpool.h b/indra/llcommon/threadpool.h
index 22c875edb9..f8eec3b457 100644
--- a/indra/llcommon/threadpool.h
+++ b/indra/llcommon/threadpool.h
@@ -31,7 +31,7 @@ namespace LL
          * Pass ThreadPool a string name. This can be used to look up the
          * relevant WorkQueue.
         */
-        ThreadPool(const std::string& name, size_t threads=1, size_t capacity=1024, bool auto_shutdown = true);
+        ThreadPool(const std::string& name, size_t threads=1, size_t capacity=1024);
         virtual ~ThreadPool();
 
         /**
@@ -66,7 +66,6 @@ namespace LL
         std::string mName;
         size_t mThreadCount;
         std::vector<std::pair<std::string, std::thread>> mThreads;
-        bool mAutomaticShutdown;
     };
 
 } // namespace LL
-- cgit v1.2.3
From 366d4439996cdb6cb5b3f116fedcb4c5ee8b4425 Mon Sep 17 00:00:00 2001
From: Alexander Gavriliuk
Date: Wed, 11 Oct 2023 15:08:38 +0200
Subject: SL-20370 Change PDT to SLT on menu bar

---
 indra/llcommon/llstring.cpp | 8 ++++++++
 1 file changed, 8 insertions(+)

(limited to 'indra/llcommon')

diff --git a/indra/llcommon/llstring.cpp b/indra/llcommon/llstring.cpp
index f6629803ee..f40e7ad45f 100644
--- a/indra/llcommon/llstring.cpp
+++ b/indra/llcommon/llstring.cpp
@@ -1235,9 +1235,17 @@ bool LLStringUtil::formatDatetime(std::string& replacement, std::string token,
     }
     else
     {
+#if 0
+        // EXT-1565 : Zai Lynch, James Linden : 15/Oct/09
+        // [BSI] Feedback: Viewer clock mentions SLT, but would prefer it to show PST/PDT
         // "slt" = Second Life Time, which is deprecated.
        // If not utc or user local time, fallback to Pacific time
         replacement = LLStringOps::getPacificDaylightTime() ? "PDT" : "PST";
+#else
+        // SL-20370 : Steeltoe Linden : 29/Sep/23
+        // Change "PDT" to "SLT" on menu bar
+        replacement = "SLT";
+#endif
     }
     return true;
 }
-- cgit v1.2.3
From 6fb9a4640bad7bc88a52014ce23f7f8ad4a39c23 Mon Sep 17 00:00:00 2001
From: Nat Goodspeed
Date: Tue, 31 Oct 2023 12:03:03 -0400
Subject: DRTVWR-588: Try to make threadsafequeue timing more robust.

The test was coded to push (what's intended to be) the third entry with
timestamp (now + 200ms), then (what's intended to be) the second entry with
timestamp (now + 100ms). The trouble is that it was re-querying "now" each
time. On a slow CI host, the clock might have advanced by more than 100ms
between the first push and the second -- meaning that the second push would
actually have a _later_ timestamp, and thus, even with the queue sorting
properly, fail the test's order validation.

Capture the timestamp once, then add both time deltas to the same time point
to get the relative order right regardless of elapsed real time.

---
 indra/llcommon/tests/threadsafeschedule_test.cpp | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'indra/llcommon')

diff --git a/indra/llcommon/tests/threadsafeschedule_test.cpp b/indra/llcommon/tests/threadsafeschedule_test.cpp
index c421cc7b1c..8851590189 100644
--- a/indra/llcommon/tests/threadsafeschedule_test.cpp
+++ b/indra/llcommon/tests/threadsafeschedule_test.cpp
@@ -46,11 +46,12 @@ namespace tut
         // the real time required for each push() call. Explicitly increment
         // the timestamp for each one -- but since we're passing explicit
         // timestamps, make the queue reorder them.
-        queue.push(Queue::TimeTuple(Queue::Clock::now() + 200ms, "ghi"));
+        auto now{ Queue::Clock::now() };
+        queue.push(Queue::TimeTuple(now + 200ms, "ghi"));
         // Given the various push() overloads, you have to match the type
         // exactly: conversions are ambiguous.
         queue.push("abc"s);
-        queue.push(Queue::Clock::now() + 100ms, "def");
+        queue.push(now + 100ms, "def");
         queue.close();
         auto entry = queue.pop();
         ensure_equals("failed to pop first", std::get<0>(entry), "abc"s);
-- cgit v1.2.3
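[Editor's note: the fix above generalizes to any timing-sensitive test: query the clock once and derive every deadline from that single time point, so the intended ordering survives arbitrary scheduling delays between statements. A stand-alone sketch in plain std::chrono, independent of the viewer's Queue types:

    #include <chrono>
    #include <utility>

    using namespace std::chrono_literals;

    // Returns the two deadlines used by the test above, derived from a single
    // clock query so their relative order cannot be perturbed by a slow host.
    auto make_deadlines()
    {
        const auto now = std::chrono::steady_clock::now();
        // The first deadline is always 100ms before the second, however long
        // the surrounding code takes to execute between the two push() calls.
        return std::make_pair(now + 100ms, now + 200ms);
    }
]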
From 5f1008ab6b5dd59a80123be7580b7a1f1da4c259 Mon Sep 17 00:00:00 2001
From: Nat Goodspeed
Date: Tue, 31 Oct 2023 12:05:07 -0400
Subject: DRTVWR-588: Enlarge default coroutine stack size.

On a Windows CI host, we got the dreaded rc 3221225725 aka c00000fd aka
stack overflow.

---
 indra/llcommon/llcoros.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'indra/llcommon')

diff --git a/indra/llcommon/llcoros.cpp b/indra/llcommon/llcoros.cpp
index cfaf3415e7..191c1a9037 100644
--- a/indra/llcommon/llcoros.cpp
+++ b/indra/llcommon/llcoros.cpp
@@ -123,7 +123,7 @@ LLCoros::LLCoros():
     // Previously we used
     // boost::context::guarded_stack_allocator::default_stacksize();
     // empirically this is insufficient.
-    mStackSize(768*1024),
+    mStackSize(900*1024),
     // mCurrent does NOT own the current CoroData instance -- it simply
     // points to it. So initialize it with a no-op deleter.
     mCurrent{ [](CoroData*){} }
-- cgit v1.2.3
From d255c3dda852731b6709ac4e9c9821b3be84ec86 Mon Sep 17 00:00:00 2001
From: Nat Goodspeed
Date: Tue, 14 Nov 2023 20:29:51 -0500
Subject: DRTVWR-588: Try to fix sporadic llrand test failures.

With GitHub viewer builds, every few weeks we've seen test failures when
ll_frand() returns exactly 1.0. This is a problem for a function that's
supposed to return [0.0 .. 1.0). Monty suggests that the problem is likely
to be conversion of F32 to F64 to pass to fmod(), and then truncation of
fmod()'s F64 result back to F32.

Moved the clamping code to each size-specific ll_internal_random
specialization.

Monty also noted that a stateful static random number engine isn't
thread-safe. Added a mutex lock.

---
 indra/llcommon/llrand.cpp | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

(limited to 'indra/llcommon')

diff --git a/indra/llcommon/llrand.cpp b/indra/llcommon/llrand.cpp
index 33afc50cf7..20e25177f0 100644
--- a/indra/llcommon/llrand.cpp
+++ b/indra/llcommon/llrand.cpp
@@ -28,6 +28,7 @@
 #include "llrand.h"
 #include "lluuid.h"
+#include "mutex.h"
 
 /**
  * Through analysis, we have decided that we want to take values which
@@ -58,8 +59,17 @@
  * to restore uniform distribution.
  */
 
+static std::mutex gRandomGeneratorMutex;
 static LLRandLagFib2281 gRandomGenerator(LLUUID::getRandomSeed());
 
+inline F64 ll_internal_random_unclamped()
+{
+    // gRandomGenerator is a stateful static object, which is therefore not
+    // inherently thread-safe. Lock it before use.
+    std::unique_lock<std::mutex> lk(gRandomGeneratorMutex);
+    return gRandomGenerator();
+}
+
 // no default implementation, only specific F64 and F32 specializations
 template <typename REAL>
 inline REAL ll_internal_random();
@@ -71,7 +81,7 @@ inline F64 ll_internal_random()
     // CPUs (or at least multi-threaded processes) seem to
     // occasionally give an obviously incorrect random number -- like
     // 5^15 or something. Sooooo, clamp it as described above.
-    F64 rv = gRandomGenerator();
+    F64 rv{ ll_internal_random_unclamped() };
     if(!((rv >= 0.0) && (rv < 1.0))) return fmod(rv, 1.0);
     return rv;
 }
@@ -79,7 +89,13 @@ inline F64 ll_internal_random()
 template <>
 inline F32 ll_internal_random()
 {
-    return F32(ll_internal_random<F64>());
+    // *HACK: clamp the result as described above.
+    // Per Monty, it's important to clamp using the correct fmodf() rather
+    // than expanding to F64 for fmod() and then truncating back to F32. Prior
+    // to this change, we were getting sporadic ll_frand() == 1.0 results.
+ F32 rv{ ll_internal_random_unclamped() }; + if(!((rv >= 0.0) && (rv < 1.0))) return fmodf(rv, 1.0f); + return rv; } /*------------------------------ F64 aliases -------------------------------*/ -- cgit v1.2.3 From 7670f190827b7d1e1c2a424ec6aa3379cb42ed52 Mon Sep 17 00:00:00 2001 From: Nat Goodspeed Date: Wed, 15 Nov 2023 10:11:30 -0500 Subject: SL-20546: Rely on CTAD for 'narrow' class. Now that we're building with C++17, we can use Class Template Argument Deduction to infer the type passed to the constructor of the 'narrow' class. We no longer require a narrow_holder class with a narrow() factory function. --- indra/llcommon/stdtypes.h | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) (limited to 'indra/llcommon') diff --git a/indra/llcommon/stdtypes.h b/indra/llcommon/stdtypes.h index 0b43d7ad4b..3aba9dda00 100644 --- a/indra/llcommon/stdtypes.h +++ b/indra/llcommon/stdtypes.h @@ -156,18 +156,15 @@ typedef int intptr_t; * type. */ // narrow_holder is a struct that accepts the passed value as its original -// type and provides templated conversion functions to other types. Once we're -// building with compilers that support Class Template Argument Deduction, we -// can rename this class template 'narrow' and eliminate the narrow() factory -// function below. +// type and provides templated conversion functions to other types. template -class narrow_holder +class narrow { private: FROM mValue; public: - narrow_holder(FROM value): mValue(value) {} + narrow(FROM value): mValue(value) {} /*---------------------- Narrowing unsigned to signed ----------------------*/ template (), which can be -/// implicitly converted to the target type. -template -inline -narrow_holder narrow(FROM value) -{ - return { value }; -} - #endif -- cgit v1.2.3 From d427d5dbfa09f0bdec743e75a41e8ea0ee4abeea Mon Sep 17 00:00:00 2001 From: Nat Goodspeed Date: Wed, 15 Nov 2023 10:12:12 -0500 Subject: SL-20546: Use narrow() explicit conversion from F64 to F32. --- indra/llcommon/llrand.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'indra/llcommon') diff --git a/indra/llcommon/llrand.cpp b/indra/llcommon/llrand.cpp index 20e25177f0..702d6b34c9 100644 --- a/indra/llcommon/llrand.cpp +++ b/indra/llcommon/llrand.cpp @@ -93,7 +93,7 @@ inline F32 ll_internal_random() // Per Monty, it's important to clamp using the correct fmodf() rather // than expanding to F64 for fmod() and then truncating back to F32. Prior // to this change, we were getting sporadic ll_frand() == 1.0 results. - F32 rv{ ll_internal_random_unclamped() }; + F32 rv{ narrow(ll_internal_random_unclamped()) }; if(!((rv >= 0.0) && (rv < 1.0))) return fmodf(rv, 1.0f); return rv; } -- cgit v1.2.3 From 1c71a8e78e37d8605e009d623a5281ab4b509350 Mon Sep 17 00:00:00 2001 From: Nat Goodspeed Date: Wed, 15 Nov 2023 11:10:42 -0500 Subject: SL-20546: Even with C++17 CTAD, makeClassicCallback() still useful. --- indra/llcommon/classic_callback.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'indra/llcommon') diff --git a/indra/llcommon/classic_callback.h b/indra/llcommon/classic_callback.h index 1ad6dbc58f..009c25d67c 100644 --- a/indra/llcommon/classic_callback.h +++ b/indra/llcommon/classic_callback.h @@ -119,11 +119,11 @@ public: * ClassicCallback must not itself be copied or moved! Once you've passed * get_userdata() to some API, this object MUST remain at that address. 
*/ - // However, we can't yet count on C++17 Class Template Argument Deduction, - // which means makeClassicCallback() is still useful, which means we MUST - // be able to return one to construct into caller's instance (move ctor). - // Possible defense: bool 'referenced' data member set by get_userdata(), - // with an llassert_always(! referenced) check in the move constructor. + // However, makeClassicCallback() is useful for deducing the CALLABLE + // type, which means we MUST be able to return one to construct into + // caller's instance (move ctor). Possible defense: bool 'referenced' data + // member set by get_userdata(), with an llassert_always(! referenced) + // check in the move constructor. ClassicCallback(ClassicCallback const&) = delete; ClassicCallback(ClassicCallback&&) = default; // delete; ClassicCallback& operator=(ClassicCallback const&) = delete; -- cgit v1.2.3 From e7ae20c96fccdad06e39a3f8e5fe61a812029242 Mon Sep 17 00:00:00 2001 From: Nat Goodspeed Date: Fri, 17 Nov 2023 10:24:14 -0500 Subject: SL-20546: Avoid promoting F32 to double just to compare bounds. --- indra/llcommon/llrand.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'indra/llcommon') diff --git a/indra/llcommon/llrand.cpp b/indra/llcommon/llrand.cpp index 702d6b34c9..8206bf8e0c 100644 --- a/indra/llcommon/llrand.cpp +++ b/indra/llcommon/llrand.cpp @@ -94,7 +94,7 @@ inline F32 ll_internal_random() // than expanding to F64 for fmod() and then truncating back to F32. Prior // to this change, we were getting sporadic ll_frand() == 1.0 results. F32 rv{ narrow(ll_internal_random_unclamped()) }; - if(!((rv >= 0.0) && (rv < 1.0))) return fmodf(rv, 1.0f); + if(!((rv >= 0.0f) && (rv < 1.0f))) return fmodf(rv, 1.0f); return rv; } -- cgit v1.2.3 From 5fa7f69101a889009194eeddb927599d7536613f Mon Sep 17 00:00:00 2001 From: Nat Goodspeed Date: Fri, 17 Nov 2023 14:31:21 -0500 Subject: SL-20546: Defend llrand's random generator against concurrent access by making it thread_local. --- indra/llcommon/llrand.cpp | 18 +++++------------- indra/llcommon/llthread.cpp | 13 +++++++------ indra/llcommon/llthread.h | 2 +- 3 files changed, 13 insertions(+), 20 deletions(-) (limited to 'indra/llcommon') diff --git a/indra/llcommon/llrand.cpp b/indra/llcommon/llrand.cpp index 8206bf8e0c..e4065e23bf 100644 --- a/indra/llcommon/llrand.cpp +++ b/indra/llcommon/llrand.cpp @@ -28,7 +28,6 @@ #include "llrand.h" #include "lluuid.h" -#include "mutex.h" /** * Through analysis, we have decided that we want to take values which @@ -59,16 +58,9 @@ * to restore uniform distribution. */ -static std::mutex gRandomGeneratorMutex; -static LLRandLagFib2281 gRandomGenerator(LLUUID::getRandomSeed()); - -inline F64 ll_internal_random_unclamped() -{ - // gRandomGenerator is a stateful static object, which is therefore not - // inherently thread-safe. Lock it before use. - std::unique_lock lk(gRandomGeneratorMutex); - return gRandomGenerator(); -} +// gRandomGenerator is a stateful static object, which is therefore not +// inherently thread-safe. +static thread_local LLRandLagFib2281 gRandomGenerator(LLUUID::getRandomSeed()); // no default implementation, only specific F64 and F32 specializations template @@ -81,7 +73,7 @@ inline F64 ll_internal_random() // CPUs (or at least multi-threaded processes) seem to // occasionally give an obviously incorrect random number -- like // 5^15 or something. Sooooo, clamp it as described above. 
- F64 rv{ ll_internal_random_unclamped() }; + F64 rv{ gRandomGenerator() }; if(!((rv >= 0.0) && (rv < 1.0))) return fmod(rv, 1.0); return rv; } @@ -93,7 +85,7 @@ inline F32 ll_internal_random() // Per Monty, it's important to clamp using the correct fmodf() rather // than expanding to F64 for fmod() and then truncating back to F32. Prior // to this change, we were getting sporadic ll_frand() == 1.0 results. - F32 rv{ narrow(ll_internal_random_unclamped()) }; + F32 rv{ narrow(gRandomGenerator()) }; if(!((rv >= 0.0f) && (rv < 1.0f))) return fmodf(rv, 1.0f); return rv; } diff --git a/indra/llcommon/llthread.cpp b/indra/llcommon/llthread.cpp index a807acc56e..a051c7f575 100644 --- a/indra/llcommon/llthread.cpp +++ b/indra/llcommon/llthread.cpp @@ -112,15 +112,16 @@ LL_COMMON_API bool on_main_thread() return (LLThread::currentID() == main_thread()); } -LL_COMMON_API void assert_main_thread() +LL_COMMON_API bool assert_main_thread() { auto curr = LLThread::currentID(); auto main = main_thread(); - if (curr != main) - { - LL_WARNS() << "Illegal execution from thread id " << curr - << " outside main thread " << main << LL_ENDL; - } + if (curr == main) + return true; + + LL_WARNS() << "Illegal execution from thread id " << curr + << " outside main thread " << main << LL_ENDL; + return false; } // this function has become moot diff --git a/indra/llcommon/llthread.h b/indra/llcommon/llthread.h index 50202631e7..9f1c589fcd 100644 --- a/indra/llcommon/llthread.h +++ b/indra/llcommon/llthread.h @@ -152,7 +152,7 @@ public: //============================================================================ -extern LL_COMMON_API void assert_main_thread(); +extern LL_COMMON_API bool assert_main_thread(); extern LL_COMMON_API bool on_main_thread(); #endif // LL_LLTHREAD_H -- cgit v1.2.3 From 4a34a1196627c7e9998edde725d5e839f3ef61b9 Mon Sep 17 00:00:00 2001 From: Andrey Kleshchev Date: Sat, 20 Jan 2024 02:26:51 +0200 Subject: SL-18721 Shutdown fixes 1. After window closes viewer still takes some time to shut down, so added splash screen to not confuse users (and to see if something gets stuck) 2. Having two identical mWindowHandle caused confusion for me, so I split them. It looks like there might have been issues with thread being stuck because thread's handle wasn't cleaned up. 3. 
Made region clean mCacheMap immediately instead of spending time making copies on shutdown --- indra/llcommon/threadpool.cpp | 17 ++++++++++++++--- indra/llcommon/threadpool.h | 10 +++++++--- 2 files changed, 21 insertions(+), 6 deletions(-) (limited to 'indra/llcommon') diff --git a/indra/llcommon/threadpool.cpp b/indra/llcommon/threadpool.cpp index 3a9a5a2062..a063a01b82 100644 --- a/indra/llcommon/threadpool.cpp +++ b/indra/llcommon/threadpool.cpp @@ -60,12 +60,15 @@ struct sleepy_robin: public boost::fibers::algo::round_robin /***************************************************************************** * ThreadPoolBase *****************************************************************************/ -LL::ThreadPoolBase::ThreadPoolBase(const std::string& name, size_t threads, - WorkQueueBase* queue): +LL::ThreadPoolBase::ThreadPoolBase(const std::string& name, + size_t threads, + WorkQueueBase* queue, + bool auto_shutdown): super(name), mName("ThreadPool:" + name), mThreadCount(getConfiguredWidth(name, threads)), - mQueue(queue) + mQueue(queue), + mAutomaticShutdown(auto_shutdown) {} void LL::ThreadPoolBase::start() @@ -79,6 +82,14 @@ void LL::ThreadPoolBase::start() run(tname); }); } + + if (!mAutomaticShutdown) + { + // Some threads, like main window's might need to run a bit longer + // to wait for a proper shutdown message + return; + } + // Listen on "LLApp", and when the app is shutting down, close the queue // and join the workers. LLEventPumps::instance().obtain("LLApp").listen( diff --git a/indra/llcommon/threadpool.h b/indra/llcommon/threadpool.h index 60f4a0ce1b..fa16c6fe71 100644 --- a/indra/llcommon/threadpool.h +++ b/indra/llcommon/threadpool.h @@ -40,7 +40,7 @@ namespace LL * overrides this parameter. */ ThreadPoolBase(const std::string& name, size_t threads, - WorkQueueBase* queue); + WorkQueueBase* queue, bool auto_shutdown = true); virtual ~ThreadPoolBase(); /** @@ -87,6 +87,7 @@ namespace LL protected: std::unique_ptr mQueue; + bool mAutomaticShutdown; private: void run(const std::string& name); @@ -117,8 +118,11 @@ namespace LL * Constraining the queue can cause a submitter to block. Do not * constrain any ThreadPool accepting work from the main thread. 
*/ - ThreadPoolUsing(const std::string& name, size_t threads=1, size_t capacity=1024*1024): - ThreadPoolBase(name, threads, new queue_t(name, capacity)) + ThreadPoolUsing(const std::string& name, + size_t threads=1, + size_t capacity=1024*1024, + bool auto_shutdown = true): + ThreadPoolBase(name, threads, new queue_t(name, capacity), auto_shutdown) {} ~ThreadPoolUsing() override {} -- cgit v1.2.3 From 2e5b105dffc41695d0a64c5b55eef7c28da49246 Mon Sep 17 00:00:00 2001 From: Andrey Kleshchev Date: Wed, 7 Feb 2024 22:50:28 +0200 Subject: SL-18721 Shutdown fixes #4 --- indra/llcommon/threadpool.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'indra/llcommon') diff --git a/indra/llcommon/threadpool.h b/indra/llcommon/threadpool.h index fa16c6fe71..b8be7bb81a 100644 --- a/indra/llcommon/threadpool.h +++ b/indra/llcommon/threadpool.h @@ -87,6 +87,7 @@ namespace LL protected: std::unique_ptr mQueue; + std::vector> mThreads; bool mAutomaticShutdown; private: @@ -94,7 +95,6 @@ namespace LL std::string mName; size_t mThreadCount; - std::vector> mThreads; }; /** -- cgit v1.2.3 From a2552a555669490dc2ca173a48989d1b30e62c56 Mon Sep 17 00:00:00 2001 From: Alexander Gavriliuk Date: Thu, 8 Feb 2024 21:03:59 +0100 Subject: Build fix for Visual Studio patch --- indra/llcommon/llbase64.cpp | 4 ++-- indra/llcommon/llrand.cpp | 2 +- indra/llcommon/llsd.h | 4 ++-- indra/llcommon/llsdserialize.cpp | 2 +- indra/llcommon/llsdserialize_xml.cpp | 4 ++-- indra/llcommon/llsys.cpp | 2 +- indra/llcommon/lltrace.cpp | 2 +- indra/llcommon/lltraceaccumulators.cpp | 2 +- 8 files changed, 11 insertions(+), 11 deletions(-) (limited to 'indra/llcommon') diff --git a/indra/llcommon/llbase64.cpp b/indra/llcommon/llbase64.cpp index bb85fe32a3..433b54f6f8 100644 --- a/indra/llcommon/llbase64.cpp +++ b/indra/llcommon/llbase64.cpp @@ -42,7 +42,7 @@ std::string LLBase64::encode(const U8* input, size_t input_size) && input_size > 0) { // Yes, it returns int. - int b64_buffer_length = apr_base64_encode_len(narrow(input_size)); + int b64_buffer_length = apr_base64_encode_len(narrow(input_size)); char* b64_buffer = new char[b64_buffer_length]; // This is faster than apr_base64_encode() if you know @@ -52,7 +52,7 @@ std::string LLBase64::encode(const U8* input, size_t input_size) b64_buffer_length = apr_base64_encode_binary( b64_buffer, input, - narrow(input_size)); + narrow(input_size)); output.assign(b64_buffer); delete[] b64_buffer; } diff --git a/indra/llcommon/llrand.cpp b/indra/llcommon/llrand.cpp index e4065e23bf..0192111574 100644 --- a/indra/llcommon/llrand.cpp +++ b/indra/llcommon/llrand.cpp @@ -85,7 +85,7 @@ inline F32 ll_internal_random() // Per Monty, it's important to clamp using the correct fmodf() rather // than expanding to F64 for fmod() and then truncating back to F32. Prior // to this change, we were getting sporadic ll_frand() == 1.0 results. - F32 rv{ narrow(gRandomGenerator()) }; + F32 rv{ narrow(gRandomGenerator()) }; if(!((rv >= 0.0f) && (rv < 1.0f))) return fmodf(rv, 1.0f); return rv; } diff --git a/indra/llcommon/llsd.h b/indra/llcommon/llsd.h index cdb9a7ed8a..8ed254919c 100644 --- a/indra/llcommon/llsd.h +++ b/indra/llcommon/llsd.h @@ -197,12 +197,12 @@ public: typename std::enable_if::value && ! std::is_same::value, bool>::type = true> - LLSD(VALUE v): LLSD(Integer(narrow(v))) {} + LLSD(VALUE v): LLSD(Integer(narrow(v))) {} // support construction from F32 et al. 
template ::value, bool>::type = true> - LLSD(VALUE v): LLSD(Real(narrow(v))) {} + LLSD(VALUE v): LLSD(Real(narrow(v))) {} //@} /** @name Scalar Assignment */ diff --git a/indra/llcommon/llsdserialize.cpp b/indra/llcommon/llsdserialize.cpp index a475be6293..76171f2dfd 100644 --- a/indra/llcommon/llsdserialize.cpp +++ b/indra/llcommon/llsdserialize.cpp @@ -2174,7 +2174,7 @@ std::string zip_llsd(LLSD& data) U8 out[CHUNK]; - strm.avail_in = narrow(source.size()); + strm.avail_in = narrow(source.size()); strm.next_in = (U8*) source.data(); U8* output = NULL; diff --git a/indra/llcommon/llsdserialize_xml.cpp b/indra/llcommon/llsdserialize_xml.cpp index 38b11eb32b..1511983596 100644 --- a/indra/llcommon/llsdserialize_xml.cpp +++ b/indra/llcommon/llsdserialize_xml.cpp @@ -196,12 +196,12 @@ S32 LLSDXMLFormatter::format_impl(const LLSD& data, std::ostream& ostr, // *FIX: memory inefficient. // *TODO: convert to use LLBase64 ostr << pre << ""; - int b64_buffer_length = apr_base64_encode_len(narrow(buffer.size())); + int b64_buffer_length = apr_base64_encode_len(narrow(buffer.size())); char* b64_buffer = new char[b64_buffer_length]; b64_buffer_length = apr_base64_encode_binary( b64_buffer, &buffer[0], - narrow(buffer.size())); + narrow(buffer.size())); ostr.write(b64_buffer, b64_buffer_length - 1); delete[] b64_buffer; ostr << "" << post; diff --git a/indra/llcommon/llsys.cpp b/indra/llcommon/llsys.cpp index 938685bae6..2bf12fb0eb 100644 --- a/indra/llcommon/llsys.cpp +++ b/indra/llcommon/llsys.cpp @@ -917,7 +917,7 @@ void LLMemoryInfo::stream(std::ostream& s) const // Now stream stats BOOST_FOREACH(const MapEntry& pair, inMap(mStatsMap)) { - s << pfx << std::setw(narrow(key_width+1)) << (pair.first + ':') << ' '; + s << pfx << std::setw(narrow(key_width+1)) << (pair.first + ':') << ' '; LLSD value(pair.second); if (value.isInteger()) s << std::setw(12) << value.asInteger(); diff --git a/indra/llcommon/lltrace.cpp b/indra/llcommon/lltrace.cpp index bce186054f..87457ad907 100644 --- a/indra/llcommon/lltrace.cpp +++ b/indra/llcommon/lltrace.cpp @@ -63,7 +63,7 @@ void TimeBlockTreeNode::setParent( BlockTimerStatHandle* parent ) llassert_always(parent != mBlock); llassert_always(parent != NULL); - TimeBlockTreeNode* parent_tree_node = get_thread_recorder()->getTimeBlockTreeNode(narrow(parent->getIndex())); + TimeBlockTreeNode* parent_tree_node = get_thread_recorder()->getTimeBlockTreeNode(narrow(parent->getIndex())); if (!parent_tree_node) return; if (mParent) diff --git a/indra/llcommon/lltraceaccumulators.cpp b/indra/llcommon/lltraceaccumulators.cpp index 5fafb53832..b5b32cba38 100644 --- a/indra/llcommon/lltraceaccumulators.cpp +++ b/indra/llcommon/lltraceaccumulators.cpp @@ -77,7 +77,7 @@ void AccumulatorBufferGroup::makeCurrent() // update stacktimer parent pointers for (size_t i = 0, end_i = mStackTimers.size(); i < end_i; i++) { - TimeBlockTreeNode* tree_node = thread_recorder->getTimeBlockTreeNode(narrow(i)); + TimeBlockTreeNode* tree_node = thread_recorder->getTimeBlockTreeNode(narrow(i)); if (tree_node) { timer_accumulator_buffer[i].mParent = tree_node->mParent; -- cgit v1.2.3 From da0f5ea0b4366ccc2b065103a7bc37552b1fe8de Mon Sep 17 00:00:00 2001 From: Andrey Kleshchev Date: Thu, 22 Feb 2024 01:09:23 +0200 Subject: Viewer#863 Crash reading xml --- indra/llcommon/llsdserialize_xml.cpp | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'indra/llcommon') diff --git a/indra/llcommon/llsdserialize_xml.cpp b/indra/llcommon/llsdserialize_xml.cpp index 
1511983596..db61f4ae41 100644
--- a/indra/llcommon/llsdserialize_xml.cpp
+++ b/indra/llcommon/llsdserialize_xml.cpp
@@ -404,11 +404,18 @@ S32 LLSDXMLParser::Impl::parse(std::istream& input, LLSD& data)
         if (buffer)
         {
             ((char*) buffer)[count ? count - 1 : 0] = '\0';
+            if (mEmitErrors)
+            {
+                LL_INFOS() << "LLSDXMLParser::Impl::parse: XML_STATUS_ERROR parsing:" << (char*)buffer << LL_ENDL;
+            }
         }
-        if (mEmitErrors)
-        {
-            LL_INFOS() << "LLSDXMLParser::Impl::parse: XML_STATUS_ERROR parsing:" << (char*) buffer << LL_ENDL;
-        }
+        else
+        {
+            if (mEmitErrors)
+            {
+                LL_INFOS() << "LLSDXMLParser::Impl::parse: XML_STATUS_ERROR, null buffer" << LL_ENDL;
+            }
+        }
         data = LLSD();
         return LLSDParser::PARSE_FAILURE;
     }
-- cgit v1.2.3
From ae7b318e7f21d0d372d48a635ff1e2ea59c4acf6 Mon Sep 17 00:00:00 2001
From: Andrey Kleshchev
Date: Thu, 22 Feb 2024 21:48:46 +0200
Subject: viewer#875 Crash at uri normalization

Note that the crash happened when setting a message via
LLProgressView::setMessage().

---
 indra/llcommon/lluriparser.cpp | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

(limited to 'indra/llcommon')

diff --git a/indra/llcommon/lluriparser.cpp b/indra/llcommon/lluriparser.cpp
index e4f229dd16..f79a98a56d 100644
--- a/indra/llcommon/lluriparser.cpp
+++ b/indra/llcommon/lluriparser.cpp
@@ -164,8 +164,10 @@ void LLUriParser::extractParts()
 #if LL_DARWIN
 typedef void(*sighandler_t)(int);
 jmp_buf return_to_normalize;
+static int sLastSignal = 0;
 void uri_signal_handler(int signal)
 {
+    sLastSignal = signal;
     // Apparently signal handler throwing an exception doesn't work.
     // This is ugly and unsafe due to not unwinding content of uriparser library,
     // but unless we have a way to catch this as NSexception, jump appears to be the only option.
@@ -179,8 +181,10 @@ S32 LLUriParser::normalize()
     if (!mRes)
     {
 #if LL_DARWIN
-        sighandler_t last_handler;
-        last_handler = signal(SIGILL, &uri_signal_handler); // illegal instruction
+        sighandler_t last_sigill_handler, last_sigbus_handler;
+        last_sigill_handler = signal(SIGILL, &uri_signal_handler); // illegal instruction
+        last_sigbus_handler = signal(SIGBUS, &uri_signal_handler);
+
         if (setjmp(return_to_normalize))
         {
             // Issue: external library crashed via signal
@@ -194,8 +198,9 @@ S32 LLUriParser::normalize()
             // if this can be handled by NSexception, it needs to be remade
             llassert(0);
-            LL_WARNS() << "Uriparser crashed with SIGILL, while processing: " << mNormalizedUri << LL_ENDL;
-            signal(SIGILL, last_handler);
+            LL_WARNS() << "Uriparser crashed with " << sLastSignal << " , while processing: " << mNormalizedUri << LL_ENDL;
+            signal(SIGILL, last_sigill_handler);
+            signal(SIGBUS, last_sigbus_handler);
             return 1;
         }
 #endif
         mRes = uriNormalizeSyntaxExA(&mUri, URI_NORMALIZE_SCHEME | URI_NORMALIZE_HOST);
 
 #if LL_DARWIN
-        signal(SIGILL, last_handler);
+        signal(SIGILL, last_sigill_handler);
+        signal(SIGBUS, last_sigbus_handler);
 #endif
 
         if (!mRes)
         {
@@ -226,7 +232,7 @@ S32 LLUriParser::normalize()
         }
     }
 
-    if(mTmpScheme)
+    if(mTmpScheme && mNormalizedUri.size() > 7)
     {
         mNormalizedUri = mNormalizedUri.substr(7);
         mTmpScheme = false;
-- cgit v1.2.3
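[Editor's note: normalize() above now saves and restores two signal handlers by hand, which gets harder to keep correct as more signals or early returns are added. A small RAII guard would restore the previous handler on every exit path automatically. This is a sketch only, not part of the patch; ScopedSignalHandler is an invented name:

    #include <csignal>

    class ScopedSignalHandler
    {
    public:
        using handler_t = void (*)(int);

        // Install 'handler' for 'sig' now; remember whatever was there before.
        ScopedSignalHandler(int sig, handler_t handler)
        :   mSignal(sig),
            mPrevious(std::signal(sig, handler))
        {}

        // Restore the previous handler on any exit path, including the
        // early 'return 1' taken after a longjmp back from the handler.
        ~ScopedSignalHandler() { std::signal(mSignal, mPrevious); }

        ScopedSignalHandler(const ScopedSignalHandler&) = delete;
        ScopedSignalHandler& operator=(const ScopedSignalHandler&) = delete;

    private:
        int mSignal;
        handler_t mPrevious;
    };

    // Usage sketch mirroring the LL_DARWIN block in LLUriParser::normalize():
    //     ScopedSignalHandler sigill(SIGILL, &uri_signal_handler);
    //     ScopedSignalHandler sigbus(SIGBUS, &uri_signal_handler);
]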
From 18ec799992e0e2571ed3d3a61454be682a81aa16 Mon Sep 17 00:00:00 2001
From: Andrey Kleshchev
Date: Tue, 27 Feb 2024 23:02:00 +0200
Subject: SL-18721 Shutdown fixes #5

---
 indra/llcommon/threadpool.cpp | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

(limited to 'indra/llcommon')

diff --git a/indra/llcommon/threadpool.cpp b/indra/llcommon/threadpool.cpp
index a063a01b82..c48989358e 100644
--- a/indra/llcommon/threadpool.cpp
+++ b/indra/llcommon/threadpool.cpp
@@ -120,8 +120,11 @@ void LL::ThreadPoolBase::close()
         mQueue->close();
         for (auto& pair: mThreads)
         {
-            LL_DEBUGS("ThreadPool") << mName << " waiting on thread " << pair.first << LL_ENDL;
-            pair.second.join();
+            if (pair.second.joinable())
+            {
+                LL_DEBUGS("ThreadPool") << mName << " waiting on thread " << pair.first << LL_ENDL;
+                pair.second.join();
+            }
         }
         LL_DEBUGS("ThreadPool") << mName << " shutdown complete" << LL_ENDL;
     }
-- cgit v1.2.3
From 1161262029f9619fb02d81575382b64d82d9cd09 Mon Sep 17 00:00:00 2001
From: Andrey Kleshchev
Date: Fri, 15 Mar 2024 00:04:19 +0200
Subject: SL-18721 Restore release behavior

Closing the window correctly caused a significant number of logout freezes
with no known repros. Temporarily returning to the old behavior, where the
thread was killed without closing the window; will re-enable the new path in
later maintenance releases to hopefully get a repro scenario, or at least
more data on what is causing the freeze.

---
 indra/llcommon/threadpool.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'indra/llcommon')

diff --git a/indra/llcommon/threadpool.h b/indra/llcommon/threadpool.h
index b8be7bb81a..74056aea17 100644
--- a/indra/llcommon/threadpool.h
+++ b/indra/llcommon/threadpool.h
@@ -55,7 +55,7 @@ namespace LL
          * ThreadPool listens for application shutdown messages on the "LLApp"
          * LLEventPump. Call close() to shut down this ThreadPool early.
          */
-        void close();
+        virtual void close();
 
         std::string getName() const { return mName; }
         size_t getWidth() const { return mThreads.size(); }
-- cgit v1.2.3
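[Editor's note: as this log ends, close() is virtual, join() is guarded by joinable(), and the January 2024 commit's auto_shutdown constructor argument lets a pool opt out of the "LLApp" shutdown listener. A usage sketch under those assumptions -- the pool name and the posted task are illustrative only, and getQueue()/post() are assumed to be the usual WorkQueue accessors from llcommon rather than confirmed by this patch series:

    // A pool that must outlive the "LLApp" shutdown broadcast: it opts out of
    // automatic shutdown, so its owner must close it explicitly.
    LL::ThreadPool pool("MainWindow", /*threads=*/1,
                        /*capacity=*/1024 * 1024, /*auto_shutdown=*/false);
    pool.start();
    pool.getQueue().post([]{ /* work that may arrive during late shutdown */ });
    // ... later, once the final shutdown message has been handled:
    pool.close(); // closes the queue and joins any still-joinable workers
]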