Diffstat (limited to 'indra/llcommon')
29 files changed, 389 insertions, 816 deletions
| diff --git a/indra/llcommon/classic_callback.h b/indra/llcommon/classic_callback.h index 1ad6dbc58f..009c25d67c 100644 --- a/indra/llcommon/classic_callback.h +++ b/indra/llcommon/classic_callback.h @@ -119,11 +119,11 @@ public:       * ClassicCallback must not itself be copied or moved! Once you've passed       * get_userdata() to some API, this object MUST remain at that address.       */ -    // However, we can't yet count on C++17 Class Template Argument Deduction, -    // which means makeClassicCallback() is still useful, which means we MUST -    // be able to return one to construct into caller's instance (move ctor). -    // Possible defense: bool 'referenced' data member set by get_userdata(), -    // with an llassert_always(! referenced) check in the move constructor. +    // However, makeClassicCallback() is useful for deducing the CALLABLE +    // type, which means we MUST be able to return one to construct into +    // caller's instance (move ctor). Possible defense: bool 'referenced' data +    // member set by get_userdata(), with an llassert_always(! referenced) +    // check in the move constructor.      ClassicCallback(ClassicCallback const&) = delete;      ClassicCallback(ClassicCallback&&) = default; // delete;      ClassicCallback& operator=(ClassicCallback const&) = delete; diff --git a/indra/llcommon/llapp.cpp b/indra/llcommon/llapp.cpp index b99166991f..90d0c28eb1 100644 --- a/indra/llcommon/llapp.cpp +++ b/indra/llcommon/llapp.cpp @@ -104,7 +104,6 @@ BOOL LLApp::sLogInSignal = FALSE;  // Keeps track of application status  LLScalarCond<LLApp::EAppStatus> LLApp::sStatus{LLApp::APP_STATUS_STOPPED};  LLAppErrorHandler LLApp::sErrorHandler = NULL; -BOOL LLApp::sErrorThreadRunning = FALSE;  LLApp::LLApp() @@ -787,13 +786,8 @@ void default_unix_signal_handler(int signum, siginfo_t *info, void *)  				return;  			}		 -			// Flag status to ERROR, so thread_error does its work. +			// Flag status to ERROR  			LLApp::setError(); -			// Block in the signal handler until somebody says that we're done. -			while (LLApp::sErrorThreadRunning && !LLApp::isStopped()) -			{ -				ms_sleep(10); -			}  			if (LLApp::sLogInSignal)  			{ diff --git a/indra/llcommon/llapp.h b/indra/llcommon/llapp.h index c832c8b142..a892bfeb1e 100644 --- a/indra/llcommon/llapp.h +++ b/indra/llcommon/llapp.h @@ -291,7 +291,6 @@ protected:  	static void setStatus(EAppStatus status);		// Use this to change the application status.  	static LLScalarCond<EAppStatus> sStatus; // Reflects current application status -	static BOOL sErrorThreadRunning; // Set while the error thread is running  	static BOOL sDisableCrashlogger; // Let the OS handle crashes for us.  	std::wstring mCrashReportPipeStr;  //Name of pipe to use for crash reporting. diff --git a/indra/llcommon/llbase64.cpp b/indra/llcommon/llbase64.cpp index bb85fe32a3..433b54f6f8 100644 --- a/indra/llcommon/llbase64.cpp +++ b/indra/llcommon/llbase64.cpp @@ -42,7 +42,7 @@ std::string LLBase64::encode(const U8* input, size_t input_size)  		&& input_size > 0)  	{  		// Yes, it returns int. 
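(On the classic_callback.h comment above: a minimal sketch of the proposed "referenced" defense, with Example as a hypothetical stand-in for ClassicCallback -- only get_userdata() and llassert_always() come from the source.)

    class Example
    {
    public:
        // Once this address escapes to a C API, the object must not move.
        void* get_userdata() { mReferenced = true; return this; }

        Example(Example&& other) : mUserdata(other.mUserdata)
        {
            // Moving after get_userdata() escaped would leave the C API
            // holding a dangling pointer, so trip the assert.
            llassert_always(! other.mReferenced);
        }

    private:
        bool  mReferenced{false};
        void* mUserdata{nullptr};
    };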
-		int b64_buffer_length = apr_base64_encode_len(narrow(input_size)); +		int b64_buffer_length = apr_base64_encode_len(narrow<size_t>(input_size));  		char* b64_buffer = new char[b64_buffer_length];  		// This is faster than apr_base64_encode() if you know @@ -52,7 +52,7 @@ std::string LLBase64::encode(const U8* input, size_t input_size)  		b64_buffer_length = apr_base64_encode_binary(  			b64_buffer,  			input, -			narrow(input_size)); +			narrow<size_t>(input_size));  		output.assign(b64_buffer);  		delete[] b64_buffer;  	} diff --git a/indra/llcommon/llcoros.cpp b/indra/llcommon/llcoros.cpp index 1d383f174d..c13900f74a 100644 --- a/indra/llcommon/llcoros.cpp +++ b/indra/llcommon/llcoros.cpp @@ -123,7 +123,7 @@ LLCoros::LLCoros():      // Previously we used      // boost::context::guarded_stack_allocator::default_stacksize();      // empirically this is insufficient. -    mStackSize(768*1024), +    mStackSize(900*1024),      // mCurrent does NOT own the current CoroData instance -- it simply      // points to it. So initialize it with a no-op deleter.      mCurrent{ [](CoroData*){} } diff --git a/indra/llcommon/lldictionary.h b/indra/llcommon/lldictionary.h index 5800ec5e5d..18664e340e 100644 --- a/indra/llcommon/lldictionary.h +++ b/indra/llcommon/lldictionary.h @@ -87,11 +87,10 @@ protected:  	}  	void addEntry(Index index, Entry *entry)  	{ -		if (lookup(index)) +		if (!this->emplace(index, entry).second)   		{  			LL_ERRS() << "Dictionary entry already added (attempted to add duplicate entry)" << LL_ENDL;  		} -		(*this)[index] = entry;  	}  }; diff --git a/indra/llcommon/llpointer.h b/indra/llcommon/llpointer.h index f9de0c7929..64aceddf32 100644 --- a/indra/llcommon/llpointer.h +++ b/indra/llcommon/llpointer.h @@ -46,33 +46,32 @@  template <class Type> class LLPointer  {  public: - -	LLPointer() :  +	LLPointer() :  		mPointer(NULL)  	{  	} -	LLPointer(Type* ptr) :  +	LLPointer(Type* ptr) :  		mPointer(ptr)  	{  		ref();  	} -	LLPointer(const LLPointer<Type>& ptr) :  +	LLPointer(const LLPointer<Type>& ptr) :  		mPointer(ptr.mPointer)  	{  		ref();  	} -	// support conversion up the type hierarchy.  See Item 45 in Effective C++, 3rd Ed. +	// Support conversion up the type hierarchy. See Item 45 in Effective C++, 3rd Ed.  	
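(On the lldictionary.h change above: addEntry() now folds the duplicate check into the insertion itself. The idiom in isolation, using std::map as a stand-in for the dictionary's base container:)

    #include <map>

    // emplace() returns {iterator, bool}; the bool is false when the key is
    // already present, so lookup and insert become a single operation.
    bool addUnique(std::map<int, const char*>& dict, int key, const char* entry)
    {
        return dict.emplace(key, entry).second;
    }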
template<typename Subclass> -	LLPointer(const LLPointer<Subclass>& ptr) :  +	LLPointer(const LLPointer<Subclass>& ptr) :  		mPointer(ptr.get())  	{  		ref();  	} -	~LLPointer()								 +	~LLPointer()  	{  		unref();  	} @@ -83,39 +82,39 @@ public:  	const Type&	operator*() const				{ return *mPointer; }  	Type&	operator*()							{ return *mPointer; } -	operator BOOL()  const						{ return (mPointer != NULL); } -	operator bool()  const						{ return (mPointer != NULL); } +	operator BOOL() const						{ return (mPointer != NULL); } +	operator bool() const						{ return (mPointer != NULL); }  	bool operator!() const						{ return (mPointer == NULL); }  	bool isNull() const							{ return (mPointer == NULL); }  	bool notNull() const						{ return (mPointer != NULL); } -	operator Type*()       const				{ return mPointer; } -	bool operator !=(Type* ptr) const           { return (mPointer != ptr); 	} -	bool operator ==(Type* ptr) const           { return (mPointer == ptr); 	} -	bool operator ==(const LLPointer<Type>& ptr) const           { return (mPointer == ptr.mPointer); 	} -	bool operator < (const LLPointer<Type>& ptr) const           { return (mPointer < ptr.mPointer); 	} -	bool operator > (const LLPointer<Type>& ptr) const           { return (mPointer > ptr.mPointer); 	} +	operator Type*() const						{ return mPointer; } +	bool operator !=(Type* ptr) const			{ return (mPointer != ptr); } +	bool operator ==(Type* ptr) const			{ return (mPointer == ptr); } +	bool operator ==(const LLPointer<Type>& ptr) const { return (mPointer == ptr.mPointer); } +	bool operator < (const LLPointer<Type>& ptr) const { return (mPointer < ptr.mPointer); } +	bool operator > (const LLPointer<Type>& ptr) const { return (mPointer > ptr.mPointer); } -	LLPointer<Type>& operator =(Type* ptr)                    -	{  +	LLPointer<Type>& operator =(Type* ptr) +	{  		assign(ptr); -		return *this;  +		return *this;  	} -	LLPointer<Type>& operator =(const LLPointer<Type>& ptr)   -	{  +	LLPointer<Type>& operator =(const LLPointer<Type>& ptr) +	{  		assign(ptr); -		return *this;  +		return *this;  	}  	// support assignment up the type hierarchy. See Item 45 in Effective C++, 3rd Ed.  	template<typename Subclass> -	LLPointer<Type>& operator =(const LLPointer<Subclass>& ptr)   -	{  +	LLPointer<Type>& operator =(const LLPointer<Subclass>& ptr) +	{  		assign(ptr.get()); -		return *this;  +		return *this;  	} -	 +  	// Just exchange the pointers, which will not change the reference counts.  	
static void swap(LLPointer<Type>& a, LLPointer<Type>& b)  	{ @@ -129,16 +128,6 @@ protected:  	void ref();                               	void unref();  #else - -	void assign(const LLPointer<Type>& ptr) -	{ -		if( mPointer != ptr.mPointer ) -		{ -			unref();  -			mPointer = ptr.mPointer; -			ref(); -		} -	}  	void ref()                               	{   		if (mPointer) @@ -161,7 +150,18 @@ protected:  			}  		}  	} -#endif +#endif // LL_LIBRARY_INCLUDE + +	void assign(const LLPointer<Type>& ptr) +	{ +		if (mPointer != ptr.mPointer) +		{ +			unref(); +			mPointer = ptr.mPointer; +			ref(); +		} +	} +  protected:  	Type*	mPointer;  }; @@ -169,18 +169,18 @@ protected:  template <class Type> class LLConstPointer  {  public: -	LLConstPointer() :  +	LLConstPointer() :  		mPointer(NULL)  	{  	} -	LLConstPointer(const Type* ptr) :  +	LLConstPointer(const Type* ptr) :  		mPointer(ptr)  	{  		ref();  	} -	LLConstPointer(const LLConstPointer<Type>& ptr) :  +	LLConstPointer(const LLConstPointer<Type>& ptr) :  		mPointer(ptr.mPointer)  	{  		ref(); @@ -188,7 +188,7 @@ public:  	// support conversion up the type hierarchy.  See Item 45 in Effective C++, 3rd Ed.  	template<typename Subclass> -	LLConstPointer(const LLConstPointer<Subclass>& ptr) :  +	LLConstPointer(const LLConstPointer<Subclass>& ptr) :  		mPointer(ptr.get())  	{  		ref(); @@ -203,55 +203,55 @@ public:  	const Type*	operator->() const				{ return mPointer; }  	const Type&	operator*() const				{ return *mPointer; } -	operator BOOL()  const						{ return (mPointer != NULL); } -	operator bool()  const						{ return (mPointer != NULL); } +	operator BOOL() const						{ return (mPointer != NULL); } +	operator bool() const						{ return (mPointer != NULL); }  	bool operator!() const						{ return (mPointer == NULL); }  	bool isNull() const							{ return (mPointer == NULL); }  	bool notNull() const						{ return (mPointer != NULL); } -	operator const Type*()       const			{ return mPointer; } -	bool operator !=(const Type* ptr) const     { return (mPointer != ptr); 	} -	bool operator ==(const Type* ptr) const     { return (mPointer == ptr); 	} -	bool operator ==(const LLConstPointer<Type>& ptr) const           { return (mPointer == ptr.mPointer); 	} -	bool operator < (const LLConstPointer<Type>& ptr) const           { return (mPointer < ptr.mPointer); 	} -	bool operator > (const LLConstPointer<Type>& ptr) const           { return (mPointer > ptr.mPointer); 	} +	operator const Type*() const				{ return mPointer; } +	bool operator !=(const Type* ptr) const		{ return (mPointer != ptr); } +	bool operator ==(const Type* ptr) const		{ return (mPointer == ptr); } +	bool operator ==(const LLConstPointer<Type>& ptr) const { return (mPointer == ptr.mPointer); } +	bool operator < (const LLConstPointer<Type>& ptr) const { return (mPointer < ptr.mPointer); } +	bool operator > (const LLConstPointer<Type>& ptr) const { return (mPointer > ptr.mPointer); } -	LLConstPointer<Type>& operator =(const Type* ptr)                    +	LLConstPointer<Type>& operator =(const Type* ptr)  	{  		if( mPointer != ptr )  		{ -			unref();  -			mPointer = ptr;  +			unref(); +			mPointer = ptr;  			ref();  		} -		return *this;  +		return *this;  	} -	LLConstPointer<Type>& operator =(const LLConstPointer<Type>& ptr)   -	{  +	LLConstPointer<Type>& operator =(const LLConstPointer<Type>& ptr) +	{  		if( mPointer != ptr.mPointer )  		{ -			unref();  +			unref();  			mPointer = ptr.mPointer;  			ref();  		} -		return *this;  +		return *this;  	}  	// support assignment up the type 
hierarchy. See Item 45 in Effective C++, 3rd Ed.  	template<typename Subclass> -	LLConstPointer<Type>& operator =(const LLConstPointer<Subclass>& ptr)   -	{  +	LLConstPointer<Type>& operator =(const LLConstPointer<Subclass>& ptr) +	{  		if( mPointer != ptr.get() )  		{ -			unref();  +			unref();  			mPointer = ptr.get();  			ref();  		} -		return *this;  +		return *this;  	} -	 +  	// Just exchange the pointers, which will not change the reference counts.  	static void swap(LLConstPointer<Type>& a, LLConstPointer<Type>& b)  	{ @@ -262,11 +262,11 @@ public:  protected:  #ifdef LL_LIBRARY_INCLUDE -	void ref();                              +	void ref();  	void unref(); -#else -	void ref()                              -	{  +#else // LL_LIBRARY_INCLUDE +	void ref() +	{  		if (mPointer)  		{  			mPointer->ref(); @@ -277,9 +277,9 @@ protected:  	{  		if (mPointer)  		{ -			const Type *tempp = mPointer; +			const Type *temp = mPointer;  			mPointer = NULL; -			tempp->unref(); +			temp->unref();  			if (mPointer != NULL)  			{  				LL_WARNS() << "Unreference did assignment to non-NULL because of destructor" << LL_ENDL; @@ -287,7 +287,8 @@ protected:  			}  		}  	} -#endif +#endif // LL_LIBRARY_INCLUDE +  protected:  	const Type*	mPointer;  }; @@ -297,13 +298,13 @@ class LLCopyOnWritePointer : public LLPointer<Type>  {  public:  	typedef LLCopyOnWritePointer<Type> self_t; -    typedef LLPointer<Type> pointer_t; -     -	LLCopyOnWritePointer()  +	typedef LLPointer<Type> pointer_t; + +	LLCopyOnWritePointer()  	:	mStayUnique(false)  	{} -	LLCopyOnWritePointer(Type* ptr)  +	LLCopyOnWritePointer(Type* ptr)  	:	LLPointer<Type>(ptr),  		mStayUnique(false)  	{} diff --git a/indra/llcommon/llrand.cpp b/indra/llcommon/llrand.cpp index 33afc50cf7..0192111574 100644 --- a/indra/llcommon/llrand.cpp +++ b/indra/llcommon/llrand.cpp @@ -58,7 +58,9 @@   * to restore uniform distribution.   */ -static LLRandLagFib2281 gRandomGenerator(LLUUID::getRandomSeed()); +// gRandomGenerator is a stateful static object, which is therefore not +// inherently thread-safe. +static thread_local LLRandLagFib2281 gRandomGenerator(LLUUID::getRandomSeed());  // no default implementation, only specific F64 and F32 specializations  template <typename REAL> @@ -71,7 +73,7 @@ inline F64 ll_internal_random<F64>()  	// CPUs (or at least multi-threaded processes) seem to  	// occasionally give an obviously incorrect random number -- like  	// 5^15 or something. Sooooo, clamp it as described above. -	F64 rv = gRandomGenerator(); +	F64 rv{ gRandomGenerator() };  	if(!((rv >= 0.0) && (rv < 1.0))) return fmod(rv, 1.0);  	return rv;  } @@ -79,7 +81,13 @@ inline F64 ll_internal_random<F64>()  template <>  inline F32 ll_internal_random<F32>()  { -    return F32(ll_internal_random<F64>()); +	// *HACK: clamp the result as described above. +	// Per Monty, it's important to clamp using the correct fmodf() rather +	// than expanding to F64 for fmod() and then truncating back to F32. Prior +	// to this change, we were getting sporadic ll_frand() == 1.0 results. 
+	F32 rv{ narrow<F32>(gRandomGenerator()) }; +	if(!((rv >= 0.0f) && (rv < 1.0f))) return fmodf(rv, 1.0f); +	return rv;  }  /*------------------------------ F64 aliases -------------------------------*/ diff --git a/indra/llcommon/llrefcount.cpp b/indra/llcommon/llrefcount.cpp index 6852b5536a..3da94e7a8d 100644 --- a/indra/llcommon/llrefcount.cpp +++ b/indra/llcommon/llrefcount.cpp @@ -30,7 +30,7 @@  #include "llerror.h"  // maximum reference count before sounding memory leak alarm -const S32 gMaxRefCount = S32_MAX; +const S32 gMaxRefCount = LL_REFCOUNT_FREE;  LLRefCount::LLRefCount(const LLRefCount& other)  :	mRef(0) @@ -49,7 +49,7 @@ LLRefCount::LLRefCount() :  }  LLRefCount::~LLRefCount() -{  +{  	if (mRef != LL_REFCOUNT_FREE && mRef != 0)  	{  		LL_ERRS() << "deleting non-zero reference" << LL_ENDL; diff --git a/indra/llcommon/llrefcount.h b/indra/llcommon/llrefcount.h index 2080da1565..15e7175fc8 100644 --- a/indra/llcommon/llrefcount.h +++ b/indra/llcommon/llrefcount.h @@ -51,24 +51,20 @@ protected:  public:  	LLRefCount(); -    inline void validateRefCount() const -    { -        llassert(mRef > 0); // ref count below 0, likely corrupted -        llassert(mRef < gMaxRefCount); // ref count excessive, likely memory leak -    } -  	inline void ref() const -	{  -		mRef++;  -        validateRefCount(); -	}  +	{ +		llassert(mRef != LL_REFCOUNT_FREE); // object is deleted +		mRef++; +		llassert(mRef < gMaxRefCount); // ref count excessive, likely memory leak +	}  	inline S32 unref() const  	{ -        validateRefCount(); +		llassert(mRef != LL_REFCOUNT_FREE); // object is deleted +		llassert(mRef > 0); // ref count below 1, likely corrupted  		if (0 == --mRef)  		{ -            mRef = LL_REFCOUNT_FREE; // set to nonsense yet recognizable value to aid in debugging +			mRef = LL_REFCOUNT_FREE; // set to nonsense yet recognizable value to aid in debugging  			delete this;  			return 0;  		} @@ -82,8 +78,8 @@ public:  		return mRef;  	} -private:  -	mutable S32	mRef;  +private: +	mutable S32	mRef;  }; @@ -106,7 +102,7 @@ protected:  public:  	LLThreadSafeRefCount();  	LLThreadSafeRefCount(const LLThreadSafeRefCount&); -	LLThreadSafeRefCount& operator=(const LLThreadSafeRefCount& ref)  +	LLThreadSafeRefCount& operator=(const LLThreadSafeRefCount& ref)  	{  		mRef = 0;  		return *this; @@ -114,8 +110,8 @@ public:  	void ref()  	{ -		mRef++;  -	}  +		mRef++; +	}  	void unref()  	{ @@ -136,36 +132,36 @@ public:  		return currentVal;  	} -private:  -	LLAtomicS32 mRef;  +private: +	LLAtomicS32 mRef;  };  /**   * intrusive pointer support for LLThreadSafeRefCount   * this allows you to use boost::intrusive_ptr with any LLThreadSafeRefCount-derived type   */ -inline void intrusive_ptr_add_ref(LLThreadSafeRefCount* p)  +inline void intrusive_ptr_add_ref(LLThreadSafeRefCount* p)  {  	p->ref();  } -inline void intrusive_ptr_release(LLThreadSafeRefCount* p)  +inline void intrusive_ptr_release(LLThreadSafeRefCount* p)  { -	p->unref();  +	p->unref();  }  /**   * intrusive pointer support   * this allows you to use boost::intrusive_ptr with any LLRefCount-derived type   */ -inline void intrusive_ptr_add_ref(LLRefCount* p)  +inline void intrusive_ptr_add_ref(LLRefCount* p)  {  	p->ref();  } -inline void intrusive_ptr_release(LLRefCount* p)  +inline void intrusive_ptr_release(LLRefCount* p)  { -	p->unref();  +	p->unref();  }  #endif diff --git a/indra/llcommon/llsd.h b/indra/llcommon/llsd.h index cdb9a7ed8a..8ed254919c 100644 --- a/indra/llcommon/llsd.h +++ b/indra/llcommon/llsd.h @@ -197,12 +197,12 @@ 
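(On the llrand.cpp change above: two pieces worth isolating are the per-thread generator and the type-correct clamp. A sketch with a standard-library generator standing in for LLRandLagFib2281:)

    #include <cmath>
    #include <random>

    // One generator instance per thread: the stateful object is never
    // shared, so no locking is needed.
    static thread_local std::mt19937_64 gen{ std::random_device{}() };

    inline float example_frand()
    {
        double raw = std::generate_canonical<double, 53>(gen);
        // Truncating F64 to F32 can round up to exactly 1.0f, so clamp with
        // fmodf() after the conversion rather than fmod() before it.
        float rv = static_cast<float>(raw);
        if (!(rv >= 0.0f && rv < 1.0f)) return fmodf(rv, 1.0f);
        return rv;
    }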
public:  				  typename std::enable_if<std::is_integral<VALUE>::value &&  										  ! std::is_same<VALUE, Boolean>::value,  										  bool>::type = true> -		LLSD(VALUE v): LLSD(Integer(narrow(v))) {} +		LLSD(VALUE v): LLSD(Integer(narrow<VALUE>(v))) {}  		// support construction from F32 et al.  		template <typename VALUE,  				  typename std::enable_if<std::is_floating_point<VALUE>::value,  										  bool>::type = true> -		LLSD(VALUE v): LLSD(Real(narrow(v))) {} +		LLSD(VALUE v): LLSD(Real(narrow<VALUE>(v))) {}  	//@}  	/** @name Scalar Assignment */ diff --git a/indra/llcommon/llsdserialize.cpp b/indra/llcommon/llsdserialize.cpp index a475be6293..76171f2dfd 100644 --- a/indra/llcommon/llsdserialize.cpp +++ b/indra/llcommon/llsdserialize.cpp @@ -2174,7 +2174,7 @@ std::string zip_llsd(LLSD& data)  	U8 out[CHUNK]; -	strm.avail_in = narrow(source.size()); +	strm.avail_in = narrow<size_t>(source.size());  	strm.next_in = (U8*) source.data();  	U8* output = NULL; diff --git a/indra/llcommon/llsdserialize_xml.cpp b/indra/llcommon/llsdserialize_xml.cpp index 38b11eb32b..db61f4ae41 100644 --- a/indra/llcommon/llsdserialize_xml.cpp +++ b/indra/llcommon/llsdserialize_xml.cpp @@ -196,12 +196,12 @@ S32 LLSDXMLFormatter::format_impl(const LLSD& data, std::ostream& ostr,  			// *FIX: memory inefficient.  			// *TODO: convert to use LLBase64  			ostr << pre << "<binary encoding=\"base64\">"; -			int b64_buffer_length = apr_base64_encode_len(narrow(buffer.size())); +			int b64_buffer_length = apr_base64_encode_len(narrow<size_t>(buffer.size()));  			char* b64_buffer = new char[b64_buffer_length];  			b64_buffer_length = apr_base64_encode_binary(  				b64_buffer,  				&buffer[0], -				narrow(buffer.size())); +				narrow<size_t>(buffer.size()));  			ostr.write(b64_buffer, b64_buffer_length - 1);  			delete[] b64_buffer;  			ostr << "</binary>" << post; @@ -404,11 +404,18 @@ S32 LLSDXMLParser::Impl::parse(std::istream& input, LLSD& data)  		if (buffer)  		{  			((char*) buffer)[count ? count - 1 : 0] = '\0'; +            if (mEmitErrors) +            { +                LL_INFOS() << "LLSDXMLParser::Impl::parse: XML_STATUS_ERROR parsing:" << (char*)buffer << LL_ENDL; +            }  		} -		if (mEmitErrors) -		{ -		LL_INFOS() << "LLSDXMLParser::Impl::parse: XML_STATUS_ERROR parsing:" << (char*) buffer << LL_ENDL; -		} +        else +        { +            if (mEmitErrors) +            { +                LL_INFOS() << "LLSDXMLParser::Impl::parse: XML_STATUS_ERROR, null buffer" << LL_ENDL; +            } +        }  		data = LLSD();  		return LLSDParser::PARSE_FAILURE;  	} diff --git a/indra/llcommon/llstring.cpp b/indra/llcommon/llstring.cpp index 82dc7c9f80..a746cc11ec 100644 --- a/indra/llcommon/llstring.cpp +++ b/indra/llcommon/llstring.cpp @@ -1406,9 +1406,17 @@ bool LLStringUtil::formatDatetime(std::string& replacement, std::string token,  		}  		else  		{ +#if 0 +			// EXT-1565 : Zai Lynch, James Linden : 15/Oct/09 +			// [BSI] Feedback: Viewer clock mentions SLT, but would prefer it to show PST/PDT  			// "slt" = Second Life Time, which is deprecated.  			// If not utc or user local time, fallback to Pacific time  			replacement = LLStringOps::getPacificDaylightTime() ? 
"PDT" : "PST"; +#else +			// SL-20370 : Steeltoe Linden : 29/Sep/23 +			// Change "PDT" to "SLT" on menu bar +			replacement = "SLT"; +#endif  		}  		return true;  	} diff --git a/indra/llcommon/llsys.cpp b/indra/llcommon/llsys.cpp index 988c74229c..7473de988c 100644 --- a/indra/llcommon/llsys.cpp +++ b/indra/llcommon/llsys.cpp @@ -906,7 +906,7 @@ void LLMemoryInfo::stream(std::ostream& s) const  	// Now stream stats  	BOOST_FOREACH(const MapEntry& pair, inMap(mStatsMap))  	{ -		s << pfx << std::setw(narrow(key_width+1)) << (pair.first + ':') << ' '; +		s << pfx << std::setw(narrow<size_t>(key_width+1)) << (pair.first + ':') << ' ';  		LLSD value(pair.second);  		if (value.isInteger())  			s << std::setw(12) << value.asInteger(); diff --git a/indra/llcommon/llthread.cpp b/indra/llcommon/llthread.cpp index 4eaa05c335..cd4975d9d3 100644 --- a/indra/llcommon/llthread.cpp +++ b/indra/llcommon/llthread.cpp @@ -113,15 +113,16 @@ LL_COMMON_API bool on_main_thread()      return (LLThread::currentID() == main_thread());  } -LL_COMMON_API void assert_main_thread() +LL_COMMON_API bool assert_main_thread()  {      auto curr = LLThread::currentID();      auto main = main_thread(); -    if (curr != main) -    { -        LL_WARNS() << "Illegal execution from thread id " << curr -            << " outside main thread " << main << LL_ENDL; -    } +    if (curr == main) +        return true; + +    LL_WARNS() << "Illegal execution from thread id " << curr +               << " outside main thread " << main << LL_ENDL; +    return false;  }  // this function has become moot diff --git a/indra/llcommon/llthread.h b/indra/llcommon/llthread.h index 50202631e7..9f1c589fcd 100644 --- a/indra/llcommon/llthread.h +++ b/indra/llcommon/llthread.h @@ -152,7 +152,7 @@ public:  //============================================================================ -extern LL_COMMON_API void assert_main_thread(); +extern LL_COMMON_API bool assert_main_thread();  extern LL_COMMON_API bool on_main_thread();  #endif // LL_LLTHREAD_H diff --git a/indra/llcommon/lltrace.cpp b/indra/llcommon/lltrace.cpp index ff671a8370..87457ad907 100644 --- a/indra/llcommon/lltrace.cpp +++ b/indra/llcommon/lltrace.cpp @@ -33,8 +33,6 @@  namespace LLTrace  { -MemStatHandle gTraceMemStat("LLTrace"); -  StatBase::StatBase( const char* name, const char* description )   :	mName(name),  	mDescription(description ? 
description : "") @@ -65,7 +63,7 @@ void TimeBlockTreeNode::setParent( BlockTimerStatHandle* parent )  	llassert_always(parent != mBlock);  	llassert_always(parent != NULL); -	TimeBlockTreeNode* parent_tree_node = get_thread_recorder()->getTimeBlockTreeNode(narrow(parent->getIndex())); +	TimeBlockTreeNode* parent_tree_node = get_thread_recorder()->getTimeBlockTreeNode(narrow<size_t>(parent->getIndex()));  	if (!parent_tree_node) return;  	if (mParent) diff --git a/indra/llcommon/lltrace.h b/indra/llcommon/lltrace.h index 580cf0a5fd..21a5803a76 100644 --- a/indra/llcommon/lltrace.h +++ b/indra/llcommon/lltrace.h @@ -193,61 +193,6 @@ void add(CountStatHandle<T>& count, VALUE_T value)  #endif  } -template<> -class StatType<MemAccumulator::AllocationFacet> -:	public StatType<MemAccumulator> -{ -public: - -	StatType(const char* name, const char* description = "") -	:	StatType<MemAccumulator>(name, description) -	{} -}; - -template<> -class StatType<MemAccumulator::DeallocationFacet> -:	public StatType<MemAccumulator> -{ -public: - -	StatType(const char* name, const char* description = "") -	:	StatType<MemAccumulator>(name, description) -	{} -}; - -class MemStatHandle : public StatType<MemAccumulator> -{ -public: -	typedef StatType<MemAccumulator> stat_t; -	MemStatHandle(const char* name, const char* description = "") -	:	stat_t(name, description) -	{ -		mName = name; -	} - -	void setName(const char* name) -	{ -        LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; -		mName = name; -		setKey(name); -	} - -	/*virtual*/ const char* getUnitLabel() const { return "KB"; } - -	StatType<MemAccumulator::AllocationFacet>& allocations()  -	{ -        LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; -		return static_cast<StatType<MemAccumulator::AllocationFacet>&>(*(StatType<MemAccumulator>*)this); -	} - -	StatType<MemAccumulator::DeallocationFacet>& deallocations()  -	{ -        LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; -		return static_cast<StatType<MemAccumulator::DeallocationFacet>&>(*(StatType<MemAccumulator>*)this); -	} -}; - -  // measures effective memory footprint of specified type  // specialize to cover different types  template<typename T, typename IS_MEM_TRACKABLE = void, typename IS_UNITS = void> @@ -334,33 +279,6 @@ struct MeasureMem<std::basic_string<T>, IS_MEM_TRACKABLE, IS_BYTES>  	}  }; - -template<typename T> -inline void claim_alloc(MemStatHandle& measurement, const T& value) -{ -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; -#if LL_TRACE_ENABLED -	auto size = MeasureMem<T>::measureFootprint(value); -	if(size == 0) return; -	MemAccumulator& accumulator = measurement.getCurrentAccumulator(); -	accumulator.mSize.sample(accumulator.mSize.hasValue() ? accumulator.mSize.getLastValue() + (F64)size : (F64)size); -	accumulator.mAllocations.record(size); -#endif -} - -template<typename T> -inline void disclaim_alloc(MemStatHandle& measurement, const T& value) -{ -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; -#if LL_TRACE_ENABLED -	auto size = MeasureMem<T>::measureFootprint(value); -	if(size == 0) return; -	MemAccumulator& accumulator = measurement.getCurrentAccumulator(); -	accumulator.mSize.sample(accumulator.mSize.hasValue() ? 
accumulator.mSize.getLastValue() - (F64)size : -(F64)size); -	accumulator.mDeallocations.add(size); -#endif -} -  }  #endif // LL_LLTRACE_H diff --git a/indra/llcommon/lltraceaccumulators.cpp b/indra/llcommon/lltraceaccumulators.cpp index 6bd886ae98..b5b32cba38 100644 --- a/indra/llcommon/lltraceaccumulators.cpp +++ b/indra/llcommon/lltraceaccumulators.cpp @@ -1,24 +1,24 @@ -/**  +/**   * @file lltracesampler.cpp   *   * $LicenseInfo:firstyear=2001&license=viewerlgpl$   * Second Life Viewer Source Code   * Copyright (C) 2012, Linden Research, Inc. - *  + *   * This library is free software; you can redistribute it and/or   * modify it under the terms of the GNU Lesser General Public   * License as published by the Free Software Foundation;   * version 2.1 of the License only. - *  + *   * This library is distributed in the hope that it will be useful,   * but WITHOUT ANY WARRANTY; without even the implied warranty of   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU   * Lesser General Public License for more details. - *  + *   * You should have received a copy of the GNU Lesser General Public   * License along with this library; if not, write to the Free Software   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA - *  + *   * Linden Research, Inc., 945 Battery Street, San Francisco, CA  94111  USA   * $/LicenseInfo$   */ @@ -32,73 +32,52 @@  namespace LLTrace  { -extern MemStatHandle gTraceMemStat; - -  ///////////////////////////////////////////////////////////////////////  // AccumulatorBufferGroup  /////////////////////////////////////////////////////////////////////// -AccumulatorBufferGroup::AccumulatorBufferGroup()  +AccumulatorBufferGroup::AccumulatorBufferGroup()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; -	claim_alloc(gTraceMemStat, mCounts.capacity() * sizeof(CountAccumulator)); -	claim_alloc(gTraceMemStat, mSamples.capacity() * sizeof(SampleAccumulator)); -	claim_alloc(gTraceMemStat, mEvents.capacity() * sizeof(EventAccumulator)); -	claim_alloc(gTraceMemStat, mStackTimers.capacity() * sizeof(TimeBlockAccumulator)); -	claim_alloc(gTraceMemStat, mMemStats.capacity() * sizeof(MemAccumulator)); +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  }  AccumulatorBufferGroup::AccumulatorBufferGroup(const AccumulatorBufferGroup& other)  :	mCounts(other.mCounts),  	mSamples(other.mSamples),  	mEvents(other.mEvents), -	mStackTimers(other.mStackTimers), -	mMemStats(other.mMemStats) +	mStackTimers(other.mStackTimers)  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; -	claim_alloc(gTraceMemStat, mCounts.capacity() * sizeof(CountAccumulator)); -	claim_alloc(gTraceMemStat, mSamples.capacity() * sizeof(SampleAccumulator)); -	claim_alloc(gTraceMemStat, mEvents.capacity() * sizeof(EventAccumulator)); -	claim_alloc(gTraceMemStat, mStackTimers.capacity() * sizeof(TimeBlockAccumulator)); -	claim_alloc(gTraceMemStat, mMemStats.capacity() * sizeof(MemAccumulator)); +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  }  AccumulatorBufferGroup::~AccumulatorBufferGroup()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; -	disclaim_alloc(gTraceMemStat, mCounts.capacity() * sizeof(CountAccumulator)); -	disclaim_alloc(gTraceMemStat, mSamples.capacity() * sizeof(SampleAccumulator)); -	disclaim_alloc(gTraceMemStat, mEvents.capacity() * sizeof(EventAccumulator)); -	disclaim_alloc(gTraceMemStat, mStackTimers.capacity() * sizeof(TimeBlockAccumulator)); -	disclaim_alloc(gTraceMemStat, mMemStats.capacity() * sizeof(MemAccumulator)); +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  }  void 
AccumulatorBufferGroup::handOffTo(AccumulatorBufferGroup& other)  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	other.mCounts.reset(&mCounts);  	other.mSamples.reset(&mSamples);  	other.mEvents.reset(&mEvents);  	other.mStackTimers.reset(&mStackTimers); -	other.mMemStats.reset(&mMemStats);  }  void AccumulatorBufferGroup::makeCurrent()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	mCounts.makeCurrent();  	mSamples.makeCurrent();  	mEvents.makeCurrent();  	mStackTimers.makeCurrent(); -	mMemStats.makeCurrent();  	ThreadRecorder* thread_recorder = get_thread_recorder();  	AccumulatorBuffer<TimeBlockAccumulator>& timer_accumulator_buffer = mStackTimers;  	// update stacktimer parent pointers  	for (size_t i = 0, end_i = mStackTimers.size(); i < end_i; i++)  	{ -		TimeBlockTreeNode* tree_node = thread_recorder->getTimeBlockTreeNode(narrow(i)); +		TimeBlockTreeNode* tree_node = thread_recorder->getTimeBlockTreeNode(narrow<size_t>(i));  		if (tree_node)  		{  			timer_accumulator_buffer[i].mParent = tree_node->mParent; @@ -109,12 +88,11 @@ void AccumulatorBufferGroup::makeCurrent()  //static  void AccumulatorBufferGroup::clearCurrent()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; -	AccumulatorBuffer<CountAccumulator>::clearCurrent();	 +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	AccumulatorBuffer<CountAccumulator>::clearCurrent();  	AccumulatorBuffer<SampleAccumulator>::clearCurrent();  	AccumulatorBuffer<EventAccumulator>::clearCurrent();  	AccumulatorBuffer<TimeBlockAccumulator>::clearCurrent(); -	AccumulatorBuffer<MemAccumulator>::clearCurrent();  }  bool AccumulatorBufferGroup::isCurrent() const @@ -124,44 +102,39 @@ bool AccumulatorBufferGroup::isCurrent() const  void AccumulatorBufferGroup::append( const AccumulatorBufferGroup& other )  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	mCounts.addSamples(other.mCounts, SEQUENTIAL);  	mSamples.addSamples(other.mSamples, SEQUENTIAL);  	mEvents.addSamples(other.mEvents, SEQUENTIAL); -	mMemStats.addSamples(other.mMemStats, SEQUENTIAL);  	mStackTimers.addSamples(other.mStackTimers, SEQUENTIAL);  }  void AccumulatorBufferGroup::merge( const AccumulatorBufferGroup& other)  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	mCounts.addSamples(other.mCounts, NON_SEQUENTIAL);  	mSamples.addSamples(other.mSamples, NON_SEQUENTIAL);  	mEvents.addSamples(other.mEvents, NON_SEQUENTIAL); -	mMemStats.addSamples(other.mMemStats, NON_SEQUENTIAL);  	// for now, hold out timers from merge, need to be displayed per thread  	//mStackTimers.addSamples(other.mStackTimers, NON_SEQUENTIAL);  }  void AccumulatorBufferGroup::reset(AccumulatorBufferGroup* other)  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	mCounts.reset(other ? &other->mCounts : NULL);  	mSamples.reset(other ? &other->mSamples : NULL);  	mEvents.reset(other ? &other->mEvents : NULL);  	mStackTimers.reset(other ? &other->mStackTimers : NULL); -	mMemStats.reset(other ? 
&other->mMemStats : NULL);  }  void AccumulatorBufferGroup::sync()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	if (isCurrent())  	{  		F64SecondsImplicit time_stamp = LLTimer::getTotalSeconds(); -  		mSamples.sync(time_stamp); -		mMemStats.sync(time_stamp);  	}  } @@ -197,10 +170,9 @@ F64 SampleAccumulator::mergeSumsOfSquares(const SampleAccumulator& a, const Samp  	return a.getSumOfSquares();  } -  void SampleAccumulator::addSamples( const SampleAccumulator& other, EBufferAppendType append_type )  { -    if (append_type == NON_SEQUENTIAL) +	if (append_type == NON_SEQUENTIAL)  	{  		return;  	} @@ -299,7 +271,7 @@ void EventAccumulator::addSamples( const EventAccumulator& other, EBufferAppendT  void EventAccumulator::reset( const EventAccumulator* other )  { -    mNumSamples = 0; +	mNumSamples = 0;  	mSum = 0;  	mMin = F32(NaN);  	mMax = F32(NaN); @@ -308,5 +280,4 @@ void EventAccumulator::reset( const EventAccumulator* other )  	mLastValue = other ? other->mLastValue : NaN;  } -  } diff --git a/indra/llcommon/lltraceaccumulators.h b/indra/llcommon/lltraceaccumulators.h index 7267a44300..b9d577be9e 100644 --- a/indra/llcommon/lltraceaccumulators.h +++ b/indra/llcommon/lltraceaccumulators.h @@ -1,26 +1,26 @@ -/**  +/**   * @file lltraceaccumulators.h   * @brief Storage for accumulating statistics   *   * $LicenseInfo:firstyear=2001&license=viewerlgpl$   * Second Life Viewer Source Code   * Copyright (C) 2012, Linden Research, Inc. - *  + *   * This library is free software; you can redistribute it and/or   * modify it under the terms of the GNU Lesser General Public   * License as published by the Free Software Foundation;   * version 2.1 of the License only. - *  + *   * This library is distributed in the hope that it will be useful,   * but WITHOUT ANY WARRANTY; without even the implied warranty of   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU   * Lesser General Public License for more details. 
- *  + *   * You should have received a copy of the GNU Lesser General Public   * License along with this library; if not, write to the Free Software   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA - *  + *   * Linden Research, Inc., 945 Battery Street, San Francisco, CA  94111  USA   * $/LicenseInfo$   */ @@ -28,7 +28,6 @@  #ifndef LL_LLTRACEACCUMULATORS_H  #define LL_LLTRACEACCUMULATORS_H -  #include "stdtypes.h"  #include "llpreprocessor.h"  #include "llunits.h" @@ -66,7 +65,7 @@ namespace LLTrace  			: mStorageSize(0),  			mStorage(NULL)  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			const AccumulatorBuffer& other = *getDefaultBuffer();  			resize(sNextStorageSlot);  			for (S32 i = 0; i < sNextStorageSlot; i++) @@ -77,7 +76,7 @@ namespace LLTrace  		~AccumulatorBuffer()  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			if (isCurrent())  			{  				LLThreadLocalSingletonPointer<ACCUMULATOR>::setInstance(NULL); @@ -85,14 +84,14 @@ namespace LLTrace  			delete[] mStorage;  		} -		LL_FORCE_INLINE ACCUMULATOR& operator[](size_t index)  -		{  -			return mStorage[index];  +		LL_FORCE_INLINE ACCUMULATOR& operator[](size_t index) +		{ +			return mStorage[index];  		}  		LL_FORCE_INLINE const ACCUMULATOR& operator[](size_t index) const -		{  -			return mStorage[index];  +		{ +			return mStorage[index];  		} @@ -100,7 +99,7 @@ namespace LLTrace  			: mStorageSize(0),  			mStorage(NULL)  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			resize(sNextStorageSlot);  			for (S32 i = 0; i < sNextStorageSlot; i++)  			{ @@ -110,7 +109,7 @@ namespace LLTrace  		void addSamples(const AccumulatorBuffer<ACCUMULATOR>& other, EBufferAppendType append_type)  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			llassert(mStorageSize >= sNextStorageSlot && other.mStorageSize >= sNextStorageSlot);  			for (size_t i = 0; i < sNextStorageSlot; i++)  			{ @@ -120,7 +119,7 @@ namespace LLTrace  		void copyFrom(const AccumulatorBuffer<ACCUMULATOR>& other)  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			llassert(mStorageSize >= sNextStorageSlot && other.mStorageSize >= sNextStorageSlot);  			for (size_t i = 0; i < sNextStorageSlot; i++)  			{ @@ -130,7 +129,7 @@ namespace LLTrace  		void reset(const AccumulatorBuffer<ACCUMULATOR>* other = NULL)  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			llassert(mStorageSize >= sNextStorageSlot);  			for (size_t i = 0; i < sNextStorageSlot; i++)  			{ @@ -140,7 +139,7 @@ namespace LLTrace  		void sync(F64SecondsImplicit time_stamp)  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			llassert(mStorageSize >= sNextStorageSlot);  			for (size_t i = 0; i < sNextStorageSlot; i++)  			{ @@ -160,13 +159,13 @@ namespace LLTrace  		static void clearCurrent()  		{ -            LLThreadLocalSingletonPointer<ACCUMULATOR>::setInstance(NULL); +			LLThreadLocalSingletonPointer<ACCUMULATOR>::setInstance(NULL);  		}  		// NOTE: this is not thread-safe.  
We assume that slots are reserved in the main thread before any child threads are spawned  		size_t reserveSlot()  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			size_t next_slot = sNextStorageSlot++;  			if (next_slot >= mStorageSize)  			{ @@ -180,7 +179,7 @@ namespace LLTrace  		void resize(size_t new_size)  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			if (new_size <= mStorageSize) return;  			ACCUMULATOR* old_storage = mStorage; @@ -214,14 +213,14 @@ namespace LLTrace  			return mStorageSize;  		} -		static size_t getNumIndices()  +		static size_t getNumIndices()  		{  			return sNextStorageSlot;  		}  		static self_t* getDefaultBuffer()  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			static bool sInitialized = false;  			if (!sInitialized)  			{ @@ -336,7 +335,7 @@ namespace LLTrace  		void sample(F64 value)  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			F64SecondsImplicit time_stamp = LLTimer::getTotalSeconds();  			// store effect of last value @@ -399,7 +398,7 @@ namespace LLTrace  		F64		mMean,  				mSumOfSquares; -		F64SecondsImplicit	 +		F64SecondsImplicit  				mLastSampleTimeStamp,  				mTotalSamplingTime; @@ -409,7 +408,7 @@ namespace LLTrace  		S32		mNumSamples;  		// distinct from mNumSamples, since we might have inherited a last value from  		// a previous sampling period -		bool	mHasValue;		 +		bool	mHasValue;  	};  	class CountAccumulator @@ -457,14 +456,14 @@ namespace LLTrace  	class alignas(32) TimeBlockAccumulator  	{ -    public: +	public:  		typedef F64Seconds value_t;  		static F64Seconds getDefaultValue() { return F64Seconds(0); }  		typedef TimeBlockAccumulator self_t;  		// fake classes that allows us to view different facets of underlying statistic -		struct CallCountFacet  +		struct CallCountFacet  		{  			typedef S32 value_t;  		}; @@ -515,12 +514,12 @@ namespace LLTrace  		BlockTimerStatHandle* getParent() { return mParent; }  		BlockTimerStatHandle*					mBlock; -		BlockTimerStatHandle*					mParent;	 +		BlockTimerStatHandle*					mParent;  		std::vector<BlockTimerStatHandle*>		mChildren;  		bool						mCollapsed;  		bool						mNeedsSorting;  	}; -	 +  	struct BlockTimerStackRecord  	{  		class BlockTimer*	mActiveTimer; @@ -528,65 +527,6 @@ namespace LLTrace  		U64					mChildTime;  	}; -	struct MemAccumulator -	{ -		typedef F64Bytes value_t; -		static F64Bytes getDefaultValue() { return F64Bytes(0); } - -		typedef MemAccumulator self_t; - -		// fake classes that allows us to view different facets of underlying statistic -		struct AllocationFacet  -		{ -			typedef F64Bytes value_t; -			static F64Bytes getDefaultValue() { return F64Bytes(0); } -		}; - -		struct DeallocationFacet  -		{ -			typedef F64Bytes value_t; -			static F64Bytes getDefaultValue() { return F64Bytes(0); } -		}; - -		void addSamples(const MemAccumulator& other, EBufferAppendType append_type) -		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; -			mAllocations.addSamples(other.mAllocations, append_type); -			mDeallocations.addSamples(other.mDeallocations, append_type); - -			if (append_type == SEQUENTIAL) -			{ -				mSize.addSamples(other.mSize, SEQUENTIAL); -			} -			else -			{ -				F64 allocation_delta(other.mAllocations.getSum() - other.mDeallocations.getSum()); -				mSize.sample(mSize.hasValue()  -					? 
mSize.getLastValue() + allocation_delta  -					: allocation_delta); -			} -		} - -		void reset(const MemAccumulator* other) -		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; -			mSize.reset(other ? &other->mSize : NULL); -			mAllocations.reset(other ? &other->mAllocations : NULL); -			mDeallocations.reset(other ? &other->mDeallocations : NULL); -		} - -		void sync(F64SecondsImplicit time_stamp)  -		{ -			mSize.sync(time_stamp); -		} - -		bool hasValue() const			 { return mSize.hasValue(); } - -		SampleAccumulator	mSize; -		EventAccumulator	mAllocations; -		CountAccumulator	mDeallocations; -	}; -  	struct AccumulatorBufferGroup : public LLRefCount  	{  		AccumulatorBufferGroup(); @@ -607,9 +547,7 @@ namespace LLTrace  		AccumulatorBuffer<SampleAccumulator>	mSamples;  		AccumulatorBuffer<EventAccumulator>		mEvents;  		AccumulatorBuffer<TimeBlockAccumulator> mStackTimers; -		AccumulatorBuffer<MemAccumulator> 	mMemStats;  	};  }  #endif // LL_LLTRACEACCUMULATORS_H - diff --git a/indra/llcommon/lltracerecording.cpp b/indra/llcommon/lltracerecording.cpp index bb3d667a42..075e7c1d28 100644 --- a/indra/llcommon/lltracerecording.cpp +++ b/indra/llcommon/lltracerecording.cpp @@ -1,24 +1,24 @@ -/**  +/**   * @file lltracesampler.cpp   *   * $LicenseInfo:firstyear=2001&license=viewerlgpl$   * Second Life Viewer Source Code   * Copyright (C) 2012, Linden Research, Inc. - *  + *   * This library is free software; you can redistribute it and/or   * modify it under the terms of the GNU Lesser General Public   * License as published by the Free Software Foundation;   * version 2.1 of the License only. - *  + *   * This library is distributed in the hope that it will be useful,   * but WITHOUT ANY WARRANTY; without even the implied warranty of   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU   * Lesser General Public License for more details. 
- *  + *   * You should have received a copy of the GNU Lesser General Public   * License along with this library; if not, write to the Free Software   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA - *  + *   * Linden Research, Inc., 945 Battery Street, San Francisco, CA  94111  USA   * $/LicenseInfo$   */ @@ -32,7 +32,7 @@  #include "lltracethreadrecorder.h"  #include "llthread.h" -inline F64 lerp(F64 a, F64 b, F64 u)  +inline F64 lerp(F64 a, F64 b, F64 u)  {  	return a + ((b - a) * u);  } @@ -40,34 +40,29 @@ inline F64 lerp(F64 a, F64 b, F64 u)  namespace LLTrace  { -extern MemStatHandle gTraceMemStat; -  ///////////////////////////////////////////////////////////////////////  // Recording  /////////////////////////////////////////////////////////////////////// -Recording::Recording(EPlayState state)  +Recording::Recording(EPlayState state)  :	mElapsedSeconds(0),  	mActiveBuffers(NULL)  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; -	claim_alloc(gTraceMemStat, this); +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	mBuffers = new AccumulatorBufferGroup(); -	claim_alloc(gTraceMemStat, mBuffers);  	setPlayState(state);  }  Recording::Recording( const Recording& other )  :	mActiveBuffers(NULL)  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; -	claim_alloc(gTraceMemStat, this); +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	*this = other;  }  Recording& Recording::operator = (const Recording& other)  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	// this will allow us to seamlessly start without affecting any data we've acquired from other  	setPlayState(PAUSED); @@ -85,14 +80,11 @@ Recording& Recording::operator = (const Recording& other)  	return *this;  } -  Recording::~Recording()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; -	disclaim_alloc(gTraceMemStat, this); -	disclaim_alloc(gTraceMemStat, mBuffers); +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; -	// allow recording destruction without thread recorder running,  +	// allow recording destruction without thread recorder running,  	// otherwise thread shutdown could crash if a recording outlives the thread recorder  	// besides, recording construction and destruction is fine without a recorder...just don't attempt to start one  	if (isStarted() && LLTrace::get_thread_recorder() != NULL) @@ -107,14 +99,14 @@ void Recording::update()  #if LL_TRACE_ENABLED  	if (isStarted())  	{ -        LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +		LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  		mElapsedSeconds += mSamplingTimer.getElapsedTimeF64(); -		// must have  -		llassert(mActiveBuffers != NULL  +		// must have +		llassert(mActiveBuffers != NULL  				&& LLTrace::get_thread_recorder() != NULL); -		if(!mActiveBuffers->isCurrent() && LLTrace::get_thread_recorder() != NULL) +		if (!mActiveBuffers->isCurrent() && LLTrace::get_thread_recorder() != NULL)  		{  			AccumulatorBufferGroup* buffers = mBuffers.write();  			LLTrace::get_thread_recorder()->deactivate(buffers); @@ -128,7 +120,7 @@ void Recording::update()  void Recording::handleReset()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  #if LL_TRACE_ENABLED  	mBuffers.write()->reset(); @@ -139,7 +131,7 @@ void Recording::handleReset()  void Recording::handleStart()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  #if LL_TRACE_ENABLED  	mSamplingTimer.reset();  	mBuffers.setStayUnique(true); @@ -151,7 +143,7 @@ void Recording::handleStart()  void Recording::handleStop()  
{ -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  #if LL_TRACE_ENABLED  	mElapsedSeconds += mSamplingTimer.getElapsedTimeF64();  	// must have thread recorder running on this thread @@ -204,7 +196,6 @@ F64Seconds Recording::getSum(const StatType<TimeBlockAccumulator::SelfTimeFacet>  	return F64Seconds(((F64)(accumulator.mSelfTimeCounter) + (F64)(active_accumulator ? active_accumulator->mSelfTimeCounter : 0)) / (F64)LLTrace::BlockTimer::countsPerSecond());  } -  S32 Recording::getSum(const StatType<TimeBlockAccumulator::CallCountFacet>& stat)  {  	update(); @@ -219,7 +210,7 @@ F64Seconds Recording::getPerSec(const StatType<TimeBlockAccumulator>& stat)  	const TimeBlockAccumulator& accumulator = mBuffers->mStackTimers[stat.getIndex()];  	const TimeBlockAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mStackTimers[stat.getIndex()] : NULL; -	return F64Seconds((F64)(accumulator.mTotalTimeCounter + (active_accumulator ? active_accumulator->mTotalTimeCounter : 0))  +	return F64Seconds((F64)(accumulator.mTotalTimeCounter + (active_accumulator ? active_accumulator->mTotalTimeCounter : 0))  				/ ((F64)LLTrace::BlockTimer::countsPerSecond() * mElapsedSeconds.value()));  } @@ -241,144 +232,9 @@ F32 Recording::getPerSec(const StatType<TimeBlockAccumulator::CallCountFacet>& s  	return (F32)(accumulator.mCalls + (active_accumulator ? active_accumulator->mCalls : 0)) / mElapsedSeconds.value();  } -bool Recording::hasValue(const StatType<MemAccumulator>& stat) -{ -	update(); -	const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()]; -	const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL; -	return accumulator.mSize.hasValue() || (active_accumulator && active_accumulator->mSize.hasValue() ? active_accumulator->mSize.hasValue() : false); -} - -F64Kilobytes Recording::getMin(const StatType<MemAccumulator>& stat) -{ -	update(); -	const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()]; -	const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL; -	return F64Bytes(llmin(accumulator.mSize.getMin(), (active_accumulator && active_accumulator->mSize.hasValue() ? active_accumulator->mSize.getMin() : F32_MAX))); -} - -F64Kilobytes Recording::getMean(const StatType<MemAccumulator>& stat) -{ -	update(); -	const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()]; -	const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL; -	 -	if (active_accumulator && active_accumulator->mSize.hasValue()) -	{ -        F32 t = 0.0f; -        S32 div = accumulator.mSize.getSampleCount() + active_accumulator->mSize.getSampleCount(); -        if (div > 0) -        { -            t = active_accumulator->mSize.getSampleCount() / div; -        } -		return F64Bytes(lerp(accumulator.mSize.getMean(), active_accumulator->mSize.getMean(), t)); -	} -	else -	{ -		return F64Bytes(accumulator.mSize.getMean()); -	} -} - -F64Kilobytes Recording::getMax(const StatType<MemAccumulator>& stat) -{ -    update(); -	const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()]; -	const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL; -	return F64Bytes(llmax(accumulator.mSize.getMax(), active_accumulator && active_accumulator->mSize.hasValue() ? 
active_accumulator->mSize.getMax() : F32_MIN)); -} - -F64Kilobytes Recording::getStandardDeviation(const StatType<MemAccumulator>& stat) -{ -    update(); -	const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()]; -	const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL; -	if (active_accumulator && active_accumulator->hasValue()) -	{ -		F64 sum_of_squares = SampleAccumulator::mergeSumsOfSquares(accumulator.mSize, active_accumulator->mSize); -		return F64Bytes(sqrtf(sum_of_squares / (accumulator.mSize.getSamplingTime().value() + active_accumulator->mSize.getSamplingTime().value()))); -	} -	else -	{ -		return F64Bytes(accumulator.mSize.getStandardDeviation()); -	} -} - -F64Kilobytes Recording::getLastValue(const StatType<MemAccumulator>& stat) -{ -    update(); -	const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()]; -	const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL; -	return F64Bytes(active_accumulator ? active_accumulator->mSize.getLastValue() : accumulator.mSize.getLastValue()); -} - -bool Recording::hasValue(const StatType<MemAccumulator::AllocationFacet>& stat) -{ -    update(); -	const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()]; -	const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL; -	return accumulator.mAllocations.hasValue() || (active_accumulator ? active_accumulator->mAllocations.hasValue() : false); -} - -F64Kilobytes Recording::getSum(const StatType<MemAccumulator::AllocationFacet>& stat) -{ -    update(); -	const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()]; -	const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL; -	return F64Bytes(accumulator.mAllocations.getSum() + (active_accumulator ? active_accumulator->mAllocations.getSum() : 0)); -} - -F64Kilobytes Recording::getPerSec(const StatType<MemAccumulator::AllocationFacet>& stat) -{ -    update(); -	const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()]; -	const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL; -	return F64Bytes((accumulator.mAllocations.getSum() + (active_accumulator ? active_accumulator->mAllocations.getSum() : 0)) / mElapsedSeconds.value()); -} - -S32 Recording::getSampleCount(const StatType<MemAccumulator::AllocationFacet>& stat) -{ -    update(); -	const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()]; -	const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL; -	return accumulator.mAllocations.getSampleCount() + (active_accumulator ? active_accumulator->mAllocations.getSampleCount() : 0); -} - -bool Recording::hasValue(const StatType<MemAccumulator::DeallocationFacet>& stat) -{ -    update(); -	const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()]; -	const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL; -	return accumulator.mDeallocations.hasValue() || (active_accumulator ? active_accumulator->mDeallocations.hasValue() : false); -} - - -F64Kilobytes Recording::getSum(const StatType<MemAccumulator::DeallocationFacet>& stat) -{ -    update(); -	const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()]; -	const MemAccumulator* active_accumulator = mActiveBuffers ? 
&mActiveBuffers->mMemStats[stat.getIndex()] : NULL; -	return F64Bytes(accumulator.mDeallocations.getSum() + (active_accumulator ? active_accumulator->mDeallocations.getSum() : 0)); -} - -F64Kilobytes Recording::getPerSec(const StatType<MemAccumulator::DeallocationFacet>& stat) -{ -    update(); -	const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()]; -	const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL; -	return F64Bytes((accumulator.mDeallocations.getSum() + (active_accumulator ? active_accumulator->mDeallocations.getSum() : 0)) / mElapsedSeconds.value()); -} - -S32 Recording::getSampleCount(const StatType<MemAccumulator::DeallocationFacet>& stat) -{ -    update(); -	const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()]; -	const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL; -	return accumulator.mDeallocations.getSampleCount() + (active_accumulator ? active_accumulator->mDeallocations.getSampleCount() : 0); -} -  bool Recording::hasValue(const StatType<CountAccumulator>& stat)  { -    update(); +	update();  	const CountAccumulator& accumulator = mBuffers->mCounts[stat.getIndex()];  	const CountAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mCounts[stat.getIndex()] : NULL;  	return accumulator.hasValue() || (active_accumulator ? active_accumulator->hasValue() : false); @@ -386,7 +242,7 @@ bool Recording::hasValue(const StatType<CountAccumulator>& stat)  F64 Recording::getSum(const StatType<CountAccumulator>& stat)  { -    update(); +	update();  	const CountAccumulator& accumulator = mBuffers->mCounts[stat.getIndex()];  	const CountAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mCounts[stat.getIndex()] : NULL;  	return accumulator.getSum() + (active_accumulator ? active_accumulator->getSum() : 0); @@ -394,7 +250,7 @@ F64 Recording::getSum(const StatType<CountAccumulator>& stat)  F64 Recording::getPerSec( const StatType<CountAccumulator>& stat )  { -    update(); +	update();  	const CountAccumulator& accumulator = mBuffers->mCounts[stat.getIndex()];  	const CountAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mCounts[stat.getIndex()] : NULL;  	F64 sum = accumulator.getSum() + (active_accumulator ? active_accumulator->getSum() : 0); @@ -403,7 +259,7 @@ F64 Recording::getPerSec( const StatType<CountAccumulator>& stat )  S32 Recording::getSampleCount( const StatType<CountAccumulator>& stat )  { -    update(); +	update();  	const CountAccumulator& accumulator = mBuffers->mCounts[stat.getIndex()];  	const CountAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mCounts[stat.getIndex()] : NULL;  	return accumulator.getSampleCount() + (active_accumulator ? active_accumulator->getSampleCount() : 0); @@ -411,7 +267,7 @@ S32 Recording::getSampleCount( const StatType<CountAccumulator>& stat )  bool Recording::hasValue(const StatType<SampleAccumulator>& stat)  { -    update(); +	update();  	const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];  	const SampleAccumulator* active_accumulator = mActiveBuffers ? 
&mActiveBuffers->mSamples[stat.getIndex()] : NULL;  	return accumulator.hasValue() || (active_accumulator && active_accumulator->hasValue()); @@ -419,7 +275,7 @@ bool Recording::hasValue(const StatType<SampleAccumulator>& stat)  F64 Recording::getMin( const StatType<SampleAccumulator>& stat )  { -    update(); +	update();  	const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];  	const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;  	return llmin(accumulator.getMin(), active_accumulator && active_accumulator->hasValue() ? active_accumulator->getMin() : F32_MAX); @@ -427,7 +283,7 @@ F64 Recording::getMin( const StatType<SampleAccumulator>& stat )  F64 Recording::getMax( const StatType<SampleAccumulator>& stat )  { -    update(); +	update();  	const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];  	const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;  	return llmax(accumulator.getMax(), active_accumulator && active_accumulator->hasValue() ? active_accumulator->getMax() : F32_MIN); @@ -435,17 +291,17 @@ F64 Recording::getMax( const StatType<SampleAccumulator>& stat )  F64 Recording::getMean( const StatType<SampleAccumulator>& stat )  { -    update(); +	update();  	const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];  	const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;  	if (active_accumulator && active_accumulator->hasValue())  	{ -        F32 t = 0.0f; -        S32 div = accumulator.getSampleCount() + active_accumulator->getSampleCount(); -        if (div > 0) -        { -            t = active_accumulator->getSampleCount() / div; -        } +		F32 t = 0.0f; +		S32 div = accumulator.getSampleCount() + active_accumulator->getSampleCount(); +		if (div > 0) +		{ +			t = (F32)active_accumulator->getSampleCount() / div; +		}  		return lerp(accumulator.getMean(), active_accumulator->getMean(), t);  	}  	else @@ -456,7 +312,7 @@ F64 Recording::getMean( const StatType<SampleAccumulator>& stat )  F64 Recording::getStandardDeviation( const StatType<SampleAccumulator>& stat )  { -    update(); +	update();  	const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];  	const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL; @@ -473,7 +329,7 @@ F64 Recording::getStandardDeviation( const StatType<SampleAccumulator>& stat )  F64 Recording::getLastValue( const StatType<SampleAccumulator>& stat )  { -    update(); +	update();  	const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];  	const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;  	return (active_accumulator && active_accumulator->hasValue() ? active_accumulator->getLastValue() : accumulator.getLastValue()); @@ -481,7 +337,7 @@ F64 Recording::getLastValue( const StatType<SampleAccumulator>& stat )  S32 Recording::getSampleCount( const StatType<SampleAccumulator>& stat )  { -    update(); +	update();  	const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];  	const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;  	return accumulator.getSampleCount() + (active_accumulator && active_accumulator->hasValue() ? 
active_accumulator->getSampleCount() : 0); @@ -489,7 +345,7 @@ S32 Recording::getSampleCount( const StatType<SampleAccumulator>& stat )  bool Recording::hasValue(const StatType<EventAccumulator>& stat)  { -    update(); +	update();  	const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];  	const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;  	return accumulator.hasValue() || (active_accumulator && active_accumulator->hasValue()); @@ -497,7 +353,7 @@ bool Recording::hasValue(const StatType<EventAccumulator>& stat)  F64 Recording::getSum( const StatType<EventAccumulator>& stat)  { -    update(); +	update();  	const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];  	const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;  	return (F64)(accumulator.getSum() + (active_accumulator && active_accumulator->hasValue() ? active_accumulator->getSum() : 0)); @@ -505,7 +361,7 @@ F64 Recording::getSum( const StatType<EventAccumulator>& stat)  F64 Recording::getMin( const StatType<EventAccumulator>& stat )  { -    update(); +	update();  	const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];  	const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;  	return llmin(accumulator.getMin(), active_accumulator && active_accumulator->hasValue() ? active_accumulator->getMin() : F32_MAX); @@ -513,7 +369,7 @@ F64 Recording::getMin( const StatType<EventAccumulator>& stat )  F64 Recording::getMax( const StatType<EventAccumulator>& stat )  { -    update(); +	update();  	const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];  	const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;  	return llmax(accumulator.getMax(), active_accumulator && active_accumulator->hasValue() ? active_accumulator->getMax() : F32_MIN); @@ -521,17 +377,17 @@ F64 Recording::getMax( const StatType<EventAccumulator>& stat )  F64 Recording::getMean( const StatType<EventAccumulator>& stat )  { -    update(); +	update();  	const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];  	const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;  	if (active_accumulator && active_accumulator->hasValue())  	{  		F32 t = 0.0f; -        S32 div = accumulator.getSampleCount() + active_accumulator->getSampleCount(); -        if (div > 0) -        { -            t = active_accumulator->getSampleCount() / div; -        } +		S32 div = accumulator.getSampleCount() + active_accumulator->getSampleCount(); +		if (div > 0) +		{ +			t = (F32)active_accumulator->getSampleCount() / div; +		}  		return lerp(accumulator.getMean(), active_accumulator->getMean(), t);  	}  	else @@ -542,7 +398,7 @@ F64 Recording::getMean( const StatType<EventAccumulator>& stat )  F64 Recording::getStandardDeviation( const StatType<EventAccumulator>& stat )  { -    update(); +	update();  	const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];  	const EventAccumulator* active_accumulator = mActiveBuffers ? 
&mActiveBuffers->mEvents[stat.getIndex()] : NULL; @@ -559,7 +415,7 @@ F64 Recording::getStandardDeviation( const StatType<EventAccumulator>& stat )  F64 Recording::getLastValue( const StatType<EventAccumulator>& stat )  { -    update(); +	update();  	const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];  	const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;  	return active_accumulator ? active_accumulator->getLastValue() : accumulator.getLastValue(); @@ -567,7 +423,7 @@ F64 Recording::getLastValue( const StatType<EventAccumulator>& stat )  S32 Recording::getSampleCount( const StatType<EventAccumulator>& stat )  { -    update(); +	update();  	const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];  	const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;  	return accumulator.getSampleCount() + (active_accumulator ? active_accumulator->getSampleCount() : 0); @@ -577,7 +433,7 @@ S32 Recording::getSampleCount( const StatType<EventAccumulator>& stat )  // PeriodicRecording  /////////////////////////////////////////////////////////////////////// -PeriodicRecording::PeriodicRecording( size_t num_periods, EPlayState state)  +PeriodicRecording::PeriodicRecording( size_t num_periods, EPlayState state)  :	mAutoResize(num_periods == 0),  	mCurPeriod(0),  	mNumRecordedPeriods(0), @@ -585,15 +441,13 @@ PeriodicRecording::PeriodicRecording( size_t num_periods, EPlayState state)  	// code in several methods.  	mRecordingPeriods(num_periods ? num_periods : 1)  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	setPlayState(state); -	claim_alloc(gTraceMemStat, this);  }  PeriodicRecording::~PeriodicRecording()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; -	disclaim_alloc(gTraceMemStat, this); +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  }  void PeriodicRecording::nextPeriod() @@ -615,12 +469,11 @@ void PeriodicRecording::nextPeriod()  void PeriodicRecording::appendRecording(Recording& recording)  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	getCurRecording().appendRecording(recording);  	nextPeriod();  } -  void PeriodicRecording::appendPeriodicRecording( PeriodicRecording& other )  {  	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; @@ -693,16 +546,14 @@ F64Seconds PeriodicRecording::getDuration() const  	return duration;  } -  LLTrace::Recording PeriodicRecording::snapshotCurRecording() const  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	Recording recording_copy(getCurRecording());  	recording_copy.stop();  	return recording_copy;  } -  Recording& PeriodicRecording::getLastRecording()  {  	return getPrevRecording(1); @@ -737,19 +588,19 @@ const Recording& PeriodicRecording::getPrevRecording( size_t offset ) const  void PeriodicRecording::handleStart()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	getCurRecording().start();  }  void PeriodicRecording::handleStop()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	getCurRecording().pause();  }  void PeriodicRecording::handleReset()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	getCurRecording().stop();  	if (mAutoResize) @@ -771,13 +622,13 @@ void PeriodicRecording::handleReset()  void PeriodicRecording::handleSplitTo(PeriodicRecording& other)  { -    
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	getCurRecording().splitTo(other.getCurRecording());  }  F64 PeriodicRecording::getPeriodMin( const StatType<EventAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	num_periods = llmin(num_periods, getNumRecordedPeriods());  	bool has_value = false; @@ -792,14 +643,14 @@ F64 PeriodicRecording::getPeriodMin( const StatType<EventAccumulator>& stat, siz  		}  	} -	return has_value  -			? min_val  +	return has_value +			? min_val  			: NaN;  }  F64 PeriodicRecording::getPeriodMax( const StatType<EventAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	num_periods = llmin(num_periods, getNumRecordedPeriods());  	bool has_value = false; @@ -814,15 +665,15 @@ F64 PeriodicRecording::getPeriodMax( const StatType<EventAccumulator>& stat, siz  		}  	} -	return has_value  -			? max_val  +	return has_value +			? max_val  			: NaN;  }  // calculates means using aggregates per period  F64 PeriodicRecording::getPeriodMean( const StatType<EventAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	num_periods = llmin(num_periods, getNumRecordedPeriods());  	F64 mean = 0; @@ -838,14 +689,14 @@ F64 PeriodicRecording::getPeriodMean( const StatType<EventAccumulator>& stat, si  		}  	} -	return valid_period_count  +	return valid_period_count  			? mean / (F64)valid_period_count  			: NaN;  }  F64 PeriodicRecording::getPeriodStandardDeviation( const StatType<EventAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	num_periods = llmin(num_periods, getNumRecordedPeriods());  	F64 period_mean = getPeriodMean(stat, num_periods); @@ -870,7 +721,7 @@ F64 PeriodicRecording::getPeriodStandardDeviation( const StatType<EventAccumulat  F64 PeriodicRecording::getPeriodMin( const StatType<SampleAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	num_periods = llmin(num_periods, getNumRecordedPeriods());  	bool has_value = false; @@ -885,14 +736,14 @@ F64 PeriodicRecording::getPeriodMin( const StatType<SampleAccumulator>& stat, si  		}  	} -	return has_value  -			? min_val  +	return has_value +			? min_val  			: NaN;  }  F64 PeriodicRecording::getPeriodMax(const StatType<SampleAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/)  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	num_periods = llmin(num_periods, getNumRecordedPeriods());  	bool has_value = false; @@ -907,15 +758,15 @@ F64 PeriodicRecording::getPeriodMax(const StatType<SampleAccumulator>& stat, siz  		}  	} -	return has_value  -			? max_val  +	return has_value +			? 
max_val  			: NaN;  }  F64 PeriodicRecording::getPeriodMean( const StatType<SampleAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	num_periods = llmin(num_periods, getNumRecordedPeriods());  	S32 valid_period_count = 0; @@ -938,7 +789,7 @@ F64 PeriodicRecording::getPeriodMean( const StatType<SampleAccumulator>& stat, s  F64 PeriodicRecording::getPeriodMedian( const StatType<SampleAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	num_periods = llmin(num_periods, getNumRecordedPeriods());  	std::vector<F64> buf; @@ -964,7 +815,7 @@ F64 PeriodicRecording::getPeriodMedian( const StatType<SampleAccumulator>& stat,  F64 PeriodicRecording::getPeriodStandardDeviation( const StatType<SampleAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	num_periods = llmin(num_periods, getNumRecordedPeriods());  	F64 period_mean = getPeriodMean(stat, num_periods); @@ -987,105 +838,13 @@ F64 PeriodicRecording::getPeriodStandardDeviation( const StatType<SampleAccumula  			: NaN;  } - -F64Kilobytes PeriodicRecording::getPeriodMin( const StatType<MemAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ ) -{ -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; -	num_periods = llmin(num_periods, getNumRecordedPeriods()); - -	F64Kilobytes min_val(std::numeric_limits<F64>::max()); -	for (size_t i = 1; i <= num_periods; i++) -	{ -		Recording& recording = getPrevRecording(i); -		min_val = llmin(min_val, recording.getMin(stat)); -	} - -	return min_val; -} - -F64Kilobytes PeriodicRecording::getPeriodMin(const MemStatHandle& stat, size_t num_periods) -{ -	return getPeriodMin(static_cast<const StatType<MemAccumulator>&>(stat), num_periods); -} - -F64Kilobytes PeriodicRecording::getPeriodMax(const StatType<MemAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/) -{ -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; -	num_periods = llmin(num_periods, getNumRecordedPeriods()); - -	F64Kilobytes max_val(0.0); -	for (size_t i = 1; i <= num_periods; i++) -	{ -		Recording& recording = getPrevRecording(i); -		max_val = llmax(max_val, recording.getMax(stat)); -	} - -	return max_val; -} - -F64Kilobytes PeriodicRecording::getPeriodMax(const MemStatHandle& stat, size_t num_periods) -{ -	return getPeriodMax(static_cast<const StatType<MemAccumulator>&>(stat), num_periods); -} - -F64Kilobytes PeriodicRecording::getPeriodMean( const StatType<MemAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ ) -{ -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; -	num_periods = llmin(num_periods, getNumRecordedPeriods()); - -	F64Kilobytes mean(0); - -	for (size_t i = 1; i <= num_periods; i++) -	{ -		Recording& recording = getPrevRecording(i); -		mean += recording.getMean(stat); -	} - -	return mean / F64(num_periods); -} - -F64Kilobytes PeriodicRecording::getPeriodMean(const MemStatHandle& stat, size_t num_periods) -{ -	return getPeriodMean(static_cast<const StatType<MemAccumulator>&>(stat), num_periods); -} - -F64Kilobytes PeriodicRecording::getPeriodStandardDeviation( const StatType<MemAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ ) -{ -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; -	num_periods = 
llmin(num_periods, getNumRecordedPeriods()); - -	F64Kilobytes period_mean = getPeriodMean(stat, num_periods); -	S32 valid_period_count = 0; -	F64 sum_of_squares = 0; - -	for (size_t i = 1; i <= num_periods; i++) -	{ -		Recording& recording = getPrevRecording(i); -		if (recording.hasValue(stat)) -		{ -			F64Kilobytes delta = recording.getMean(stat) - period_mean; -			sum_of_squares += delta.value() * delta.value(); -			valid_period_count++; -		} -	} - -	return F64Kilobytes(valid_period_count -			? sqrt(sum_of_squares / (F64)valid_period_count) -			: NaN); -} - -F64Kilobytes PeriodicRecording::getPeriodStandardDeviation(const MemStatHandle& stat, size_t num_periods) -{ -	return getPeriodStandardDeviation(static_cast<const StatType<MemAccumulator>&>(stat), num_periods); -} -  ///////////////////////////////////////////////////////////////////////  // ExtendableRecording  ///////////////////////////////////////////////////////////////////////  void ExtendableRecording::extend()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	// push the data back to accepted recording  	mAcceptedRecording.appendRecording(mPotentialRecording);  	// flush data, so we can start from scratch @@ -1094,76 +853,72 @@ void ExtendableRecording::extend()  void ExtendableRecording::handleStart()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	mPotentialRecording.start();  }  void ExtendableRecording::handleStop()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	mPotentialRecording.pause();  }  void ExtendableRecording::handleReset()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	mAcceptedRecording.reset();  	mPotentialRecording.reset();  }  void ExtendableRecording::handleSplitTo(ExtendableRecording& other)  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	mPotentialRecording.splitTo(other.mPotentialRecording);  } -  ///////////////////////////////////////////////////////////////////////  // ExtendablePeriodicRecording  /////////////////////////////////////////////////////////////////////// - -ExtendablePeriodicRecording::ExtendablePeriodicRecording()  -:	mAcceptedRecording(0),  +ExtendablePeriodicRecording::ExtendablePeriodicRecording() +:	mAcceptedRecording(0),  	mPotentialRecording(0)  {}  void ExtendablePeriodicRecording::extend()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	// push the data back to accepted recording  	mAcceptedRecording.appendPeriodicRecording(mPotentialRecording);  	// flush data, so we can start from scratch  	mPotentialRecording.reset();  } -  void ExtendablePeriodicRecording::handleStart()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	mPotentialRecording.start();  }  void ExtendablePeriodicRecording::handleStop()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	mPotentialRecording.pause();  }  void ExtendablePeriodicRecording::handleReset()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	mAcceptedRecording.reset();  	mPotentialRecording.reset();  }  void ExtendablePeriodicRecording::handleSplitTo(ExtendablePeriodicRecording& other)  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	mPotentialRecording.splitTo(other.mPotentialRecording);  } -  PeriodicRecording& get_frame_recording()  
{  	static thread_local PeriodicRecording sRecording(200, PeriodicRecording::STARTED); @@ -1174,7 +929,7 @@ PeriodicRecording& get_frame_recording()  void LLStopWatchControlsMixinCommon::start()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	switch (mPlayState)  	{  	case STOPPED: @@ -1196,7 +951,7 @@ void LLStopWatchControlsMixinCommon::start()  void LLStopWatchControlsMixinCommon::stop()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	switch (mPlayState)  	{  	case STOPPED: @@ -1216,7 +971,7 @@ void LLStopWatchControlsMixinCommon::stop()  void LLStopWatchControlsMixinCommon::pause()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	switch (mPlayState)  	{  	case STOPPED: @@ -1236,7 +991,7 @@ void LLStopWatchControlsMixinCommon::pause()  void LLStopWatchControlsMixinCommon::unpause()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	switch (mPlayState)  	{  	case STOPPED: @@ -1256,7 +1011,7 @@ void LLStopWatchControlsMixinCommon::unpause()  void LLStopWatchControlsMixinCommon::resume()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	switch (mPlayState)  	{  	case STOPPED: @@ -1277,7 +1032,7 @@ void LLStopWatchControlsMixinCommon::resume()  void LLStopWatchControlsMixinCommon::restart()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	switch (mPlayState)  	{  	case STOPPED: @@ -1301,13 +1056,13 @@ void LLStopWatchControlsMixinCommon::restart()  void LLStopWatchControlsMixinCommon::reset()  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	handleReset();  }  void LLStopWatchControlsMixinCommon::setPlayState( EPlayState state )  { -    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +	LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  	switch(state)  	{  	case STOPPED: diff --git a/indra/llcommon/lltracerecording.h b/indra/llcommon/lltracerecording.h index a6b1a67d02..61b9096ae2 100644 --- a/indra/llcommon/lltracerecording.h +++ b/indra/llcommon/lltracerecording.h @@ -1,25 +1,25 @@ -/**  +/**   * @file lltracerecording.h   * @brief Sampling object for collecting runtime statistics originating from lltrace.   *   * $LicenseInfo:firstyear=2001&license=viewerlgpl$   * Second Life Viewer Source Code   * Copyright (C) 2012, Linden Research, Inc. - *  + *   * This library is free software; you can redistribute it and/or   * modify it under the terms of the GNU Lesser General Public   * License as published by the Free Software Foundation;   * version 2.1 of the License only. - *  + *   * This library is distributed in the hope that it will be useful,   * but WITHOUT ANY WARRANTY; without even the implied warranty of   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU   * Lesser General Public License for more details. 
- *  + *   * You should have received a copy of the GNU Lesser General Public   * License along with this library; if not, write to the Free Software   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA - *  + *   * Linden Research, Inc., 945 Battery Street, San Francisco, CA  94111  USA   * $/LicenseInfo$   */ @@ -112,7 +112,6 @@ private:  	// atomically stop this object while starting the other  	// no data can be missed in between stop and start  	virtual void handleSplitTo(DERIVED& other) {}; -  };  namespace LLTrace @@ -129,8 +128,6 @@ namespace LLTrace  	template<typename T>  	class EventStatHandle; -	class MemStatHandle; -  	template<typename T>  	struct RelatedTypes  	{ @@ -152,7 +149,7 @@ namespace LLTrace  		typedef S32 sum_t;  	}; -	class Recording  +	class Recording  	:	public LLStopWatchControlsMixin<Recording>  	{  	public: @@ -182,24 +179,6 @@ namespace LLTrace  		F64Seconds getPerSec(const StatType<TimeBlockAccumulator::SelfTimeFacet>& stat);  		F32 getPerSec(const StatType<TimeBlockAccumulator::CallCountFacet>& stat); -		// Memory accessors -		bool hasValue(const StatType<MemAccumulator>& stat); -		F64Kilobytes getMin(const StatType<MemAccumulator>& stat); -		F64Kilobytes getMean(const StatType<MemAccumulator>& stat); -		F64Kilobytes getMax(const StatType<MemAccumulator>& stat); -		F64Kilobytes getStandardDeviation(const StatType<MemAccumulator>& stat); -		F64Kilobytes getLastValue(const StatType<MemAccumulator>& stat); - -		bool hasValue(const StatType<MemAccumulator::AllocationFacet>& stat); -		F64Kilobytes getSum(const StatType<MemAccumulator::AllocationFacet>& stat); -		F64Kilobytes getPerSec(const StatType<MemAccumulator::AllocationFacet>& stat); -		S32 getSampleCount(const StatType<MemAccumulator::AllocationFacet>& stat); - -		bool hasValue(const StatType<MemAccumulator::DeallocationFacet>& stat); -		F64Kilobytes getSum(const StatType<MemAccumulator::DeallocationFacet>& stat); -		F64Kilobytes getPerSec(const StatType<MemAccumulator::DeallocationFacet>& stat); -		S32 getSampleCount(const StatType<MemAccumulator::DeallocationFacet>& stat); -  		// CountStatHandle accessors  		bool hasValue(const StatType<CountAccumulator>& stat);  		F64 getSum(const StatType<CountAccumulator>& stat); @@ -318,7 +297,7 @@ namespace LLTrace  		/*virtual*/ void handleSplitTo(Recording& other);  		// returns data for current thread -		class ThreadRecorder* getThreadRecorder();  +		class ThreadRecorder* getThreadRecorder();  		LLTimer											mSamplingTimer;  		F64Seconds										mElapsedSeconds; @@ -335,10 +314,10 @@ namespace LLTrace  		~PeriodicRecording();  		void nextPeriod(); -		auto getNumRecordedPeriods()  -		{  +		auto getNumRecordedPeriods() +		{  			// current period counts if not active -			return mNumRecordedPeriods + (isStarted() ? 0 : 1);  +			return mNumRecordedPeriods + (isStarted() ? 0 : 1);  		}  		F64Seconds getDuration() const; @@ -367,7 +346,7 @@ namespace LLTrace  			}  			return num_samples;  		} -         +  		//  		// PERIODIC MIN  		// @@ -376,7 +355,7 @@ namespace LLTrace  		template <typename T>  		typename T::value_t getPeriodMin(const StatType<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			num_periods = llmin(num_periods, getNumRecordedPeriods());  			bool has_value = false; @@ -391,15 +370,15 @@ namespace LLTrace  				}  			} -			return has_value  -				? min_val  +			return has_value +				? 
min_val  				: T::getDefaultValue();  		}  		template<typename T>  		T getPeriodMin(const CountStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			return T(getPeriodMin(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));  		} @@ -407,7 +386,7 @@ namespace LLTrace  		template<typename T>  		T getPeriodMin(const SampleStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			return T(getPeriodMin(static_cast<const StatType<SampleAccumulator>&>(stat), num_periods));  		} @@ -415,17 +394,14 @@ namespace LLTrace  		template<typename T>  		T getPeriodMin(const EventStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			return T(getPeriodMin(static_cast<const StatType<EventAccumulator>&>(stat), num_periods));  		} -		F64Kilobytes getPeriodMin(const StatType<MemAccumulator>& stat, size_t num_periods = std::numeric_limits<size_t>::max()); -		F64Kilobytes getPeriodMin(const MemStatHandle& stat, size_t num_periods = std::numeric_limits<size_t>::max()); -  		template <typename T>  		typename RelatedTypes<typename T::value_t>::fractional_t getPeriodMinPerSec(const StatType<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			num_periods = llmin(num_periods, getNumRecordedPeriods());  			typename RelatedTypes<typename T::value_t>::fractional_t min_val(std::numeric_limits<F64>::max()); @@ -440,7 +416,7 @@ namespace LLTrace  		template<typename T>  		typename RelatedTypes<T>::fractional_t getPeriodMinPerSec(const CountStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			return typename RelatedTypes<T>::fractional_t(getPeriodMinPerSec(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));  		} @@ -452,7 +428,7 @@ namespace LLTrace  		template <typename T>  		typename T::value_t getPeriodMax(const StatType<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			num_periods = llmin(num_periods, getNumRecordedPeriods());  			bool has_value = false; @@ -467,15 +443,15 @@ namespace LLTrace  				}  			} -			return has_value  -				? max_val  +			return has_value +				? 
max_val  				: T::getDefaultValue();  		}  		template<typename T>  		T getPeriodMax(const CountStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			return T(getPeriodMax(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));  		} @@ -483,7 +459,7 @@ namespace LLTrace  		template<typename T>  		T getPeriodMax(const SampleStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			return T(getPeriodMax(static_cast<const StatType<SampleAccumulator>&>(stat), num_periods));  		} @@ -491,17 +467,14 @@ namespace LLTrace  		template<typename T>  		T getPeriodMax(const EventStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			return T(getPeriodMax(static_cast<const StatType<EventAccumulator>&>(stat), num_periods));  		} -		F64Kilobytes getPeriodMax(const StatType<MemAccumulator>& stat, size_t num_periods = std::numeric_limits<size_t>::max()); -		F64Kilobytes getPeriodMax(const MemStatHandle& stat, size_t num_periods = std::numeric_limits<size_t>::max()); -  		template <typename T>  		typename RelatedTypes<typename T::value_t>::fractional_t getPeriodMaxPerSec(const StatType<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			num_periods = llmin(num_periods, getNumRecordedPeriods());  			F64 max_val = std::numeric_limits<F64>::min(); @@ -516,7 +489,7 @@ namespace LLTrace  		template<typename T>  		typename RelatedTypes<T>::fractional_t getPeriodMaxPerSec(const CountStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			return typename RelatedTypes<T>::fractional_t(getPeriodMaxPerSec(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));  		} @@ -528,7 +501,7 @@ namespace LLTrace  		template <typename T>  		typename RelatedTypes<typename T::value_t>::fractional_t getPeriodMean(const StatType<T >& stat, size_t num_periods = std::numeric_limits<size_t>::max())  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			num_periods = llmin(num_periods, getNumRecordedPeriods());  			typename RelatedTypes<typename T::value_t>::fractional_t mean(0); @@ -549,14 +522,14 @@ namespace LLTrace  		template<typename T>  		typename RelatedTypes<T>::fractional_t getPeriodMean(const CountStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			return typename RelatedTypes<T>::fractional_t(getPeriodMean(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));  		}  		F64 getPeriodMean(const StatType<SampleAccumulator>& stat, size_t num_periods = std::numeric_limits<size_t>::max()); -		template<typename T>  +		template<typename T>  		typename RelatedTypes<T>::fractional_t getPeriodMean(const SampleStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			return typename 
RelatedTypes<T>::fractional_t(getPeriodMean(static_cast<const StatType<SampleAccumulator>&>(stat), num_periods));  		} @@ -564,17 +537,14 @@ namespace LLTrace  		template<typename T>  		typename RelatedTypes<T>::fractional_t getPeriodMean(const EventStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			return typename RelatedTypes<T>::fractional_t(getPeriodMean(static_cast<const StatType<EventAccumulator>&>(stat), num_periods));  		} -		F64Kilobytes getPeriodMean(const StatType<MemAccumulator>& stat, size_t num_periods = std::numeric_limits<size_t>::max()); -		F64Kilobytes getPeriodMean(const MemStatHandle& stat, size_t num_periods = std::numeric_limits<size_t>::max()); -		  		template <typename T>  		typename RelatedTypes<typename T::value_t>::fractional_t getPeriodMeanPerSec(const StatType<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			num_periods = llmin(num_periods, getNumRecordedPeriods());  			typename RelatedTypes<typename T::value_t>::fractional_t mean = 0; @@ -596,7 +566,7 @@ namespace LLTrace  		template<typename T>  		typename RelatedTypes<T>::fractional_t getPeriodMeanPerSec(const CountStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			return typename RelatedTypes<T>::fractional_t(getPeriodMeanPerSec(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));  		} @@ -635,10 +605,10 @@ namespace LLTrace  		F64 getPeriodStandardDeviation(const StatType<SampleAccumulator>& stat, size_t num_periods = std::numeric_limits<size_t>::max()); -		template<typename T>  +		template<typename T>  		typename RelatedTypes<T>::fractional_t getPeriodStandardDeviation(const SampleStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			return typename RelatedTypes<T>::fractional_t(getPeriodStandardDeviation(static_cast<const StatType<SampleAccumulator>&>(stat), num_periods));  		} @@ -646,13 +616,10 @@ namespace LLTrace  		template<typename T>  		typename RelatedTypes<T>::fractional_t getPeriodStandardDeviation(const EventStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())  		{ -            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; +			LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;  			return typename RelatedTypes<T>::fractional_t(getPeriodStandardDeviation(static_cast<const StatType<EventAccumulator>&>(stat), num_periods));  		} -		F64Kilobytes getPeriodStandardDeviation(const StatType<MemAccumulator>& stat, size_t num_periods = std::numeric_limits<size_t>::max()); -		F64Kilobytes getPeriodStandardDeviation(const MemStatHandle& stat, size_t num_periods = std::numeric_limits<size_t>::max()); -  	private:  		// implementation for LLStopWatchControlsMixin  		/*virtual*/ void handleStart(); @@ -731,7 +698,7 @@ namespace LLTrace  		PeriodicRecording& getResults()				{ return mAcceptedRecording; }  		const PeriodicRecording& getResults() const	{return mAcceptedRecording;} -		 +  		void nextPeriod() { mPotentialRecording.nextPeriod(); }  	private: diff --git a/indra/llcommon/lltracethreadrecorder.cpp b/indra/llcommon/lltracethreadrecorder.cpp index 282c454a2a..914bfb55dc 100644 
--- a/indra/llcommon/lltracethreadrecorder.cpp +++ b/indra/llcommon/lltracethreadrecorder.cpp @@ -32,7 +32,7 @@  namespace LLTrace  { -extern MemStatHandle gTraceMemStat; +//extern MemStatHandle gTraceMemStat;  static ThreadRecorder* sMasterThreadRecorder = NULL; @@ -81,9 +81,9 @@ void ThreadRecorder::init()  	BlockTimer::getRootTimeBlock().getCurrentAccumulator().mActiveCount = 1; -	claim_alloc(gTraceMemStat, this); -	claim_alloc(gTraceMemStat, mRootTimer); -	claim_alloc(gTraceMemStat, sizeof(TimeBlockTreeNode) * mNumTimeBlockTreeNodes); +	//claim_alloc(gTraceMemStat, this); +	//claim_alloc(gTraceMemStat, mRootTimer); +	//claim_alloc(gTraceMemStat, sizeof(TimeBlockTreeNode) * mNumTimeBlockTreeNodes);  #endif  } @@ -101,9 +101,9 @@ ThreadRecorder::~ThreadRecorder()  #if LL_TRACE_ENABLED  	LLThreadLocalSingletonPointer<BlockTimerStackRecord>::setInstance(NULL); -	disclaim_alloc(gTraceMemStat, this); -	disclaim_alloc(gTraceMemStat, sizeof(BlockTimer)); -	disclaim_alloc(gTraceMemStat, sizeof(TimeBlockTreeNode) * mNumTimeBlockTreeNodes); +	//disclaim_alloc(gTraceMemStat, this); +	//disclaim_alloc(gTraceMemStat, sizeof(BlockTimer)); +	//disclaim_alloc(gTraceMemStat, sizeof(TimeBlockTreeNode) * mNumTimeBlockTreeNodes);  	deactivate(&mThreadRecordingBuffers); diff --git a/indra/llcommon/lluriparser.cpp b/indra/llcommon/lluriparser.cpp index e4f229dd16..f79a98a56d 100644 --- a/indra/llcommon/lluriparser.cpp +++ b/indra/llcommon/lluriparser.cpp @@ -164,8 +164,10 @@ void LLUriParser::extractParts()  #if LL_DARWIN  typedef void(*sighandler_t)(int);  jmp_buf return_to_normalize; +static int sLastSignal = 0;  void uri_signal_handler(int signal)  { +    sLastSignal = signal;      // Apparently signal handler throwing an exception doesn't work.      // This is ugly and unsafe due to not unwinding content of uriparser library,      // but unless we have a way to catch this as NSexception, jump appears to be the only option. 
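The handler above follows the classic signal-trap idiom: record which signal fired in a sig_atomic_t, then longjmp back to a setjmp point wrapped around the risky library call. Below is a minimal standalone sketch of that idiom, not viewer code; risky_parse is a hypothetical stand-in for the uriparser call, SIGBUS is POSIX-only, and (as the comment above concedes) longjmp out of a signal handler does not unwind the interrupted library's state, so this is strictly a last resort.

    #include <csetjmp>
    #include <csignal>
    #include <cstdio>

    static std::jmp_buf recovery_point;
    static volatile std::sig_atomic_t trapped_signal = 0;

    extern "C" void trap_handler(int sig)
    {
        trapped_signal = sig;              // remember which signal fired
        std::longjmp(recovery_point, 1);   // jump back to the guarded call site
    }

    // Returns false if risky_parse() was interrupted by SIGILL or SIGBUS.
    bool guarded_call(void (*risky_parse)())
    {
        auto old_ill = std::signal(SIGILL, trap_handler);
        auto old_bus = std::signal(SIGBUS, trap_handler);
        bool ok = true;
        if (setjmp(recovery_point) == 0)
        {
            risky_parse();                 // may fault inside the library
        }
        else
        {
            ok = false;
            std::fprintf(stderr, "trapped signal %d\n", (int)trapped_signal);
        }
        std::signal(SIGILL, old_ill);      // always restore the old handlers
        std::signal(SIGBUS, old_bus);
        return ok;
    }
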
@@ -179,8 +181,10 @@ S32 LLUriParser::normalize()  	if (!mRes)  	{  #if LL_DARWIN -        sighandler_t last_handler; -        last_handler = signal(SIGILL, &uri_signal_handler);		// illegal instruction +        sighandler_t last_sigill_handler, last_sigbus_handler; +        last_sigill_handler = signal(SIGILL, &uri_signal_handler);		// illegal instruction +        last_sigbus_handler = signal(SIGBUS, &uri_signal_handler); +                  if (setjmp(return_to_normalize))          {              // Issue: external library crashed via signal @@ -194,8 +198,9 @@ S32 LLUriParser::normalize()              // if this can be handled by NSexception, it needs to be remade              llassert(0); -            LL_WARNS() << "Uriparser crashed with SIGILL, while processing: " << mNormalizedUri << LL_ENDL; -            signal(SIGILL, last_handler); +            LL_WARNS() << "Uriparser crashed with signal " << sLastSignal << " while processing: " << mNormalizedUri << LL_ENDL; +            signal(SIGILL, last_sigill_handler); +            signal(SIGBUS, last_sigbus_handler);              return 1;          }  #endif @@ -203,7 +208,8 @@ S32 LLUriParser::normalize()          mRes = uriNormalizeSyntaxExA(&mUri, URI_NORMALIZE_SCHEME | URI_NORMALIZE_HOST);  #if LL_DARWIN -        signal(SIGILL, last_handler); +        signal(SIGILL, last_sigill_handler); +        signal(SIGBUS, last_sigbus_handler);  #endif          if (!mRes) @@ -226,7 +232,7 @@ S32 LLUriParser::normalize()          }  	} -	if(mTmpScheme) +	if(mTmpScheme && mNormalizedUri.size() > 7)  	{  		mNormalizedUri = mNormalizedUri.substr(7);  		mTmpScheme = false; diff --git a/indra/llcommon/stdtypes.h b/indra/llcommon/stdtypes.h index 0b43d7ad4b..3aba9dda00 100644 --- a/indra/llcommon/stdtypes.h +++ b/indra/llcommon/stdtypes.h @@ -156,18 +156,15 @@ typedef int intptr_t;   * type.   */  // narrow_holder is a struct that accepts the passed value as its original -// type and provides templated conversion functions to other types. Once we're -// building with compilers that support Class Template Argument Deduction, we -// can rename this class template 'narrow' and eliminate the narrow() factory -// function below. +// type and provides templated conversion functions to other types.  template <typename FROM> -class narrow_holder +class narrow  {  private:      FROM mValue;  public: -    narrow_holder(FROM value): mValue(value) {} +    narrow(FROM value): mValue(value) {}      /*---------------------- Narrowing unsigned to signed ----------------------*/      template <typename TO, @@ -207,13 +204,4 @@ public:      }  }; -/// narrow() factory function returns a narrow_holder<FROM>(), which can be -/// implicitly converted to the target type. -template <typename FROM> -inline -narrow_holder<FROM> narrow(FROM value) -{ -    return { value }; -} -  #endif diff --git a/indra/llcommon/tests/threadsafeschedule_test.cpp b/indra/llcommon/tests/threadsafeschedule_test.cpp index c421cc7b1c..8851590189 100644 --- a/indra/llcommon/tests/threadsafeschedule_test.cpp +++ b/indra/llcommon/tests/threadsafeschedule_test.cpp @@ -46,11 +46,12 @@ namespace tut          // the real time required for each push() call. Explicitly increment          // the timestamp for each one -- but since we're passing explicit          // timestamps, make the queue reorder them. 
-        queue.push(Queue::TimeTuple(Queue::Clock::now() + 200ms, "ghi")); +        auto now{ Queue::Clock::now() }; +        queue.push(Queue::TimeTuple(now + 200ms, "ghi"));          // Given the various push() overloads, you have to match the type          // exactly: conversions are ambiguous.          queue.push("abc"s); -        queue.push(Queue::Clock::now() + 100ms, "def"); +        queue.push(now + 100ms, "def");          queue.close();          auto entry = queue.pop();          ensure_equals("failed to pop first", std::get<0>(entry), "abc"s); diff --git a/indra/llcommon/threadpool.cpp b/indra/llcommon/threadpool.cpp index 3a9a5a2062..c48989358e 100644 --- a/indra/llcommon/threadpool.cpp +++ b/indra/llcommon/threadpool.cpp @@ -60,12 +60,15 @@ struct sleepy_robin: public boost::fibers::algo::round_robin  /*****************************************************************************  *   ThreadPoolBase  *****************************************************************************/ -LL::ThreadPoolBase::ThreadPoolBase(const std::string& name, size_t threads, -                                   WorkQueueBase* queue): +LL::ThreadPoolBase::ThreadPoolBase(const std::string& name, +                                   size_t threads, +                                   WorkQueueBase* queue, +                                   bool auto_shutdown):      super(name),      mName("ThreadPool:" + name),      mThreadCount(getConfiguredWidth(name, threads)), -    mQueue(queue) +    mQueue(queue), +    mAutomaticShutdown(auto_shutdown)  {}  void LL::ThreadPoolBase::start() @@ -79,6 +82,14 @@ void LL::ThreadPoolBase::start()                  run(tname);              });      } + +    if (!mAutomaticShutdown) +    { +        // Some threads, like the main window's, might need to run a bit +        // longer to wait for a proper shutdown message. +        return; +    } +      // Listen on "LLApp", and when the app is shutting down, close the queue      // and join the workers.      LLEventPumps::instance().obtain("LLApp").listen( @@ -109,8 +120,11 @@ void LL::ThreadPoolBase::close()          mQueue->close();          for (auto& pair: mThreads)          { -            LL_DEBUGS("ThreadPool") << mName << " waiting on thread " << pair.first << LL_ENDL; -            pair.second.join(); +            if (pair.second.joinable()) +            { +                LL_DEBUGS("ThreadPool") << mName << " waiting on thread " << pair.first << LL_ENDL; +                pair.second.join(); +            }          }          LL_DEBUGS("ThreadPool") << mName << " shutdown complete" << LL_ENDL;      } diff --git a/indra/llcommon/threadpool.h b/indra/llcommon/threadpool.h index 60f4a0ce1b..74056aea17 100644 --- a/indra/llcommon/threadpool.h +++ b/indra/llcommon/threadpool.h @@ -40,7 +40,7 @@ namespace LL           * overrides this parameter.           */          ThreadPoolBase(const std::string& name, size_t threads, -                       WorkQueueBase* queue); +                       WorkQueueBase* queue, bool auto_shutdown = true);          virtual ~ThreadPoolBase();          /** @@ -55,7 +55,7 @@ namespace LL           * ThreadPool listens for application shutdown messages on the "LLApp"           * LLEventPump. Call close() to shut down this ThreadPool early.          
*/ -        void close(); +        virtual void close();          std::string getName() const { return mName; }          size_t getWidth() const { return mThreads.size(); } @@ -87,13 +87,14 @@ namespace LL      protected:          std::unique_ptr<WorkQueueBase> mQueue; +        std::vector<std::pair<std::string, std::thread>> mThreads; +        bool mAutomaticShutdown;      private:          void run(const std::string& name);          std::string mName;          size_t mThreadCount; -        std::vector<std::pair<std::string, std::thread>> mThreads;      };      /** @@ -117,8 +118,11 @@ namespace LL           * Constraining the queue can cause a submitter to block. Do not           * constrain any ThreadPool accepting work from the main thread.           */ -        ThreadPoolUsing(const std::string& name, size_t threads=1, size_t capacity=1024*1024): -            ThreadPoolBase(name, threads, new queue_t(name, capacity)) +        ThreadPoolUsing(const std::string& name, +                        size_t threads=1, +                        size_t capacity=1024*1024, +                        bool auto_shutdown = true): +            ThreadPoolBase(name, threads, new queue_t(name, capacity), auto_shutdown)          {}          ~ThreadPoolUsing() override {} | 
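For reference, the stdtypes.h change earlier in this patch leans on C++17 class template argument deduction: once narrow_holder is renamed to narrow, writing narrow(value) constructs the class template directly and the old narrow() factory becomes redundant, while explicit spellings like narrow<size_t>(value) simply name the source type. A simplified sketch of the idea follows; it is an assumed reimplementation, not the real stdtypes.h (the real class splits its conversions into several SFINAE-constrained operators with llassert checks).

    #include <cassert>
    #include <cstddef>

    template <typename FROM>
    class narrow
    {
        FROM mValue;
    public:
        narrow(FROM value): mValue(value) {}

        // implicit conversion to a narrower type, with a debug-only
        // round-trip check that no bits were lost
        template <typename TO>
        operator TO() const
        {
            assert(static_cast<FROM>(static_cast<TO>(mValue)) == mValue);
            return static_cast<TO>(mValue);
        }
    };

    int shrink(std::size_t big)
    {
        // C++17 CTAD deduces narrow<std::size_t> -- no factory needed
        int a = narrow(big);
        // naming the source type explicitly also works, as in the llbase64 calls
        int b = narrow<std::size_t>(big);
        return a + b;
    }
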
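Taken together, the threadpool.cpp/.h changes make shutdown opt-out: with auto_shutdown false, start() skips registering the "LLApp" listener, so the owner must call the now-virtual close() itself, and the joinable() guard makes a redundant close() harmless. A hypothetical usage sketch; the pool names are made up, and it assumes the ThreadPoolUsing-based LL::ThreadPool alias from the rest of the header.

    #include "threadpool.h"

    void configure_pools()
    {
        // Default behaviour: this pool listens on "LLApp" and shuts itself
        // down when the application broadcasts that it is stopping.
        LL::ThreadPool background("Background", /*threads=*/4);
        background.start();

        // auto_shutdown = false: no "LLApp" listener is installed, so this
        // pool keeps running through the shutdown broadcast -- e.g. a window
        // thread waiting for one final message -- until its owner closes it.
        LL::ThreadPool window("MainWindow", 1, 1024*1024, /*auto_shutdown=*/false);
        window.start();

        // ... later, when the owner is finished with it:
        window.close();   // joins workers; join() runs only if joinable()
        window.close();   // a second call is harmless
    }
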
