Diffstat (limited to 'indra/llcommon')
-rw-r--r--  indra/llcommon/lldate.cpp                  6
-rw-r--r--  indra/llcommon/llfasttimer.cpp            49
-rw-r--r--  indra/llcommon/llmemory.h                 77
-rw-r--r--  indra/llcommon/llprofiler.h                8
-rw-r--r--  indra/llcommon/llsdparam.cpp               2
-rw-r--r--  indra/llcommon/llsdparam.h                 3
-rw-r--r--  indra/llcommon/llsingleton.h              26
-rw-r--r--  indra/llcommon/llstring.cpp                7
-rw-r--r--  indra/llcommon/llsys.cpp                   4
-rw-r--r--  indra/llcommon/lltrace.cpp                 1
-rw-r--r--  indra/llcommon/lltrace.h                 148
-rw-r--r--  indra/llcommon/lltraceaccumulators.cpp    14
-rw-r--r--  indra/llcommon/lltraceaccumulators.h      19
-rw-r--r--  indra/llcommon/lltracerecording.cpp      109
-rw-r--r--  indra/llcommon/lltracerecording.h         21
-rw-r--r--  indra/llcommon/lltracethreadrecorder.cpp   4
16 files changed, 260 insertions(+), 238 deletions(-)
diff --git a/indra/llcommon/lldate.cpp b/indra/llcommon/lldate.cpp
index 7a2a0869f4..2ddcf40895 100644
--- a/indra/llcommon/lldate.cpp
+++ b/indra/llcommon/lldate.cpp
@@ -86,11 +86,9 @@ std::string LLDate::asRFC1123() const
return toHTTPDateString (std::string ("%A, %d %b %Y %H:%M:%S GMT"));
}
-LLTrace::BlockTimerStatHandle FT_DATE_FORMAT("Date Format");
-
std::string LLDate::toHTTPDateString (std::string fmt) const
{
- LL_RECORD_BLOCK_TIME(FT_DATE_FORMAT);
+ LL_PROFILE_ZONE_SCOPED;
time_t locSeconds = (time_t) mSecondsSinceEpoch;
struct tm * gmt = gmtime (&locSeconds);
@@ -99,7 +97,7 @@ std::string LLDate::toHTTPDateString (std::string fmt) const
std::string LLDate::toHTTPDateString (tm * gmt, std::string fmt)
{
- LL_RECORD_BLOCK_TIME(FT_DATE_FORMAT);
+ LL_PROFILE_ZONE_SCOPED;
// avoid calling setlocale() unnecessarily - it's expensive.
static std::string prev_locale = "";
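A note on the pattern above (editor's sketch, not part of the patch): the change replaces per-site LLTrace::BlockTimerStatHandle declarations and LL_RECORD_BLOCK_TIME with LL_PROFILE_ZONE_SCOPED, an RAII profiler zone named after the enclosing function automatically. In the Tracy configurations it expands to ZoneScoped (see the llprofiler.h hunk below); in other configurations it compiles away.

```cpp
// Sketch of the pattern applied throughout this patch.
void someExpensiveFunction()
{
    LL_PROFILE_ZONE_SCOPED;   // zone opens here, closes when the scope exits
    // ... work measured by the profiler ...
}
```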
diff --git a/indra/llcommon/llfasttimer.cpp b/indra/llcommon/llfasttimer.cpp
index 5b6a7b82f8..d38946004f 100644
--- a/indra/llcommon/llfasttimer.cpp
+++ b/indra/llcommon/llfasttimer.cpp
@@ -191,29 +191,30 @@ TimeBlockTreeNode& BlockTimerStatHandle::getTreeNode() const
}
+
void BlockTimer::bootstrapTimerTree()
{
- for (auto& base : BlockTimerStatHandle::instance_snapshot())
- {
- // because of indirect derivation from LLInstanceTracker, have to downcast
- BlockTimerStatHandle& timer = static_cast<BlockTimerStatHandle&>(base);
- if (&timer == &BlockTimer::getRootTimeBlock()) continue;
-
- // bootstrap tree construction by attaching to last timer to be on stack
- // when this timer was called
- if (timer.getParent() == &BlockTimer::getRootTimeBlock())
- {
- TimeBlockAccumulator& accumulator = timer.getCurrentAccumulator();
-
- if (accumulator.mLastCaller)
- {
- timer.setParent(accumulator.mLastCaller);
- accumulator.mParent = accumulator.mLastCaller;
- }
- // no need to push up tree on first use, flag can be set spuriously
- accumulator.mMoveUpTree = false;
- }
- }
+ for (auto& base : BlockTimerStatHandle::instance_snapshot())
+ {
+ // because of indirect derivation from LLInstanceTracker, have to downcast
+ BlockTimerStatHandle& timer = static_cast<BlockTimerStatHandle&>(base);
+ if (&timer == &BlockTimer::getRootTimeBlock()) continue;
+
+ // bootstrap tree construction by attaching to last timer to be on stack
+ // when this timer was called
+ if (timer.getParent() == &BlockTimer::getRootTimeBlock())
+ {
+ TimeBlockAccumulator& accumulator = timer.getCurrentAccumulator();
+
+ if (accumulator.mLastCaller)
+ {
+ timer.setParent(accumulator.mLastCaller);
+ accumulator.mParent = accumulator.mLastCaller;
+ }
+ // no need to push up tree on first use, flag can be set spuriously
+ accumulator.mMoveUpTree = false;
+ }
+ }
}
// bump timers up tree if they have been flagged as being in the wrong place
@@ -221,6 +222,7 @@ void BlockTimer::bootstrapTimerTree()
// this preserves partial order derived from current frame's observations
void BlockTimer::incrementalUpdateTimerTree()
{
+ LL_PROFILE_ZONE_SCOPED;
for(block_timer_tree_df_post_iterator_t it = begin_block_timer_tree_df_post(BlockTimer::getRootTimeBlock());
it != end_block_timer_tree_df_post();
++it)
@@ -260,7 +262,8 @@ void BlockTimer::incrementalUpdateTimerTree()
void BlockTimer::updateTimes()
- {
+{
+ LL_PROFILE_ZONE_SCOPED;
// walk up stack of active timers and accumulate current time while leaving timing structures active
BlockTimerStackRecord* stack_record = LLThreadLocalSingletonPointer<BlockTimerStackRecord>::getInstance();
if (!stack_record) return;
@@ -271,7 +274,7 @@ void BlockTimer::updateTimes()
while(cur_timer
&& cur_timer->mParentTimerData.mActiveTimer != cur_timer) // root defined by parent pointing to self
- {
+ {
U64 cumulative_time_delta = cur_time - cur_timer->mStartTime;
cur_timer->mStartTime = cur_time;
diff --git a/indra/llcommon/llmemory.h b/indra/llcommon/llmemory.h
index 2704a495e0..41023b4ba4 100644
--- a/indra/llcommon/llmemory.h
+++ b/indra/llcommon/llmemory.h
@@ -111,6 +111,16 @@ public: \
void operator delete(void* ptr) \
{ \
ll_aligned_free_16(ptr); \
+ } \
+ \
+ void* operator new[](size_t size) \
+ { \
+ return ll_aligned_malloc_16(size); \
+ } \
+ \
+ void operator delete[](void* ptr) \
+ { \
+ ll_aligned_free_16(ptr); \
}
@@ -126,8 +136,9 @@ public: \
#else
inline void* ll_aligned_malloc_fallback( size_t size, int align )
{
+ LL_PROFILE_ZONE_SCOPED;
#if defined(LL_WINDOWS)
- return _aligned_malloc(size, align);
+ void* ret = _aligned_malloc(size, align);
#else
char* aligned = NULL;
void* mem = malloc( size + (align - 1) + sizeof(void*) );
@@ -138,12 +149,16 @@ public: \
((void**)aligned)[-1] = mem;
}
- return aligned;
+ void* ret = aligned;
#endif
+ LL_PROFILE_ALLOC(ret, size);
+ return ret;
}
inline void ll_aligned_free_fallback( void* ptr )
{
+ LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_FREE(ptr);
#if defined(LL_WINDOWS)
_aligned_free(ptr);
#else
@@ -159,21 +174,24 @@ public: \
inline void* ll_aligned_malloc_16(size_t size) // returned hunk MUST be freed with ll_aligned_free_16().
{
+ LL_PROFILE_ZONE_SCOPED;
#if defined(LL_WINDOWS)
- return _aligned_malloc(size, 16);
+ void* ret = _aligned_malloc(size, 16);
#elif defined(LL_DARWIN)
- return malloc(size); // default osx malloc is 16 byte aligned.
+ void* ret = malloc(size); // default osx malloc is 16 byte aligned.
#else
- void *rtn;
- if (LL_LIKELY(0 == posix_memalign(&rtn, 16, size)))
- return rtn;
- else // bad alignment requested, or out of memory
- return NULL;
+ void *ret;
+ if (0 != posix_memalign(&ret, 16, size))
+ return nullptr;
#endif
+ LL_PROFILE_ALLOC(ret, size);
+ return ret;
}
inline void ll_aligned_free_16(void *p)
{
+ LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_FREE(p);
#if defined(LL_WINDOWS)
_aligned_free(p);
#elif defined(LL_DARWIN)
@@ -185,10 +203,12 @@ inline void ll_aligned_free_16(void *p)
inline void* ll_aligned_realloc_16(void* ptr, size_t size, size_t old_size) // returned hunk MUST be freed with ll_aligned_free_16().
{
+ LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_FREE(ptr);
#if defined(LL_WINDOWS)
- return _aligned_realloc(ptr, size, 16);
+ void* ret = _aligned_realloc(ptr, size, 16);
#elif defined(LL_DARWIN)
- return realloc(ptr,size); // default osx malloc is 16 byte aligned.
+ void* ret = realloc(ptr,size); // default osx malloc is 16 byte aligned.
#else
//FIXME: memcpy is SLOW
void* ret = ll_aligned_malloc_16(size);
@@ -201,27 +221,31 @@ inline void* ll_aligned_realloc_16(void* ptr, size_t size, size_t old_size) // r
}
ll_aligned_free_16(ptr);
}
- return ret;
#endif
+ LL_PROFILE_ALLOC(ret, size);
+ return ret;
}
inline void* ll_aligned_malloc_32(size_t size) // returned hunk MUST be freed with ll_aligned_free_32().
{
+ LL_PROFILE_ZONE_SCOPED;
#if defined(LL_WINDOWS)
- return _aligned_malloc(size, 32);
+ void* ret = _aligned_malloc(size, 32);
#elif defined(LL_DARWIN)
- return ll_aligned_malloc_fallback( size, 32 );
+ void* ret = ll_aligned_malloc_fallback( size, 32 );
#else
- void *rtn;
- if (LL_LIKELY(0 == posix_memalign(&rtn, 32, size)))
- return rtn;
- else // bad alignment requested, or out of memory
- return NULL;
+ void *ret;
+ if (0 != posix_memalign(&ret, 32, size))
+ return nullptr;
#endif
+ LL_PROFILE_ALLOC(ret, size);
+ return ret;
}
inline void ll_aligned_free_32(void *p)
{
+ LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_FREE(p);
#if defined(LL_WINDOWS)
_aligned_free(p);
#elif defined(LL_DARWIN)
@@ -235,29 +259,35 @@ inline void ll_aligned_free_32(void *p)
template<size_t ALIGNMENT>
LL_FORCE_INLINE void* ll_aligned_malloc(size_t size)
{
+ LL_PROFILE_ZONE_SCOPED;
+ void* ret;
if (LL_DEFAULT_HEAP_ALIGN % ALIGNMENT == 0)
{
- return malloc(size);
+ ret = malloc(size);
+ LL_PROFILE_ALLOC(ret, size);
}
else if (ALIGNMENT == 16)
{
- return ll_aligned_malloc_16(size);
+ ret = ll_aligned_malloc_16(size);
}
else if (ALIGNMENT == 32)
{
- return ll_aligned_malloc_32(size);
+ ret = ll_aligned_malloc_32(size);
}
else
{
- return ll_aligned_malloc_fallback(size, ALIGNMENT);
+ ret = ll_aligned_malloc_fallback(size, ALIGNMENT);
}
+ return ret;
}
template<size_t ALIGNMENT>
LL_FORCE_INLINE void ll_aligned_free(void* ptr)
{
+ LL_PROFILE_ZONE_SCOPED;
if (ALIGNMENT == LL_DEFAULT_HEAP_ALIGN)
{
+ LL_PROFILE_FREE(ptr);
free(ptr);
}
else if (ALIGNMENT == 16)
@@ -279,6 +309,7 @@ LL_FORCE_INLINE void ll_aligned_free(void* ptr)
//
inline void ll_memcpy_nonaliased_aligned_16(char* __restrict dst, const char* __restrict src, size_t bytes)
{
+ LL_PROFILE_ZONE_SCOPED;
assert(src != NULL);
assert(dst != NULL);
assert(bytes > 0);
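For reference, a minimal usage sketch of the aligned helpers (illustrative only; Vec4 is a hypothetical client type): blocks returned by ll_aligned_malloc_16() must be released with ll_aligned_free_16(), which is exactly the pairing the operator new[]/delete[] overloads added above enforce for arrays.

```cpp
#include <cstddef>

struct Vec4 // 16-byte-aligned payload, e.g. for SSE loads
{
    float v[4];

    void* operator new(size_t size)   { return ll_aligned_malloc_16(size); }
    void  operator delete(void* p)    { ll_aligned_free_16(p); }
    void* operator new[](size_t size) { return ll_aligned_malloc_16(size); } // mirrors the macro addition above
    void  operator delete[](void* p)  { ll_aligned_free_16(p); }
};

void example()
{
    Vec4* one  = new Vec4;    // routed through ll_aligned_malloc_16
    Vec4* many = new Vec4[8]; // routed through the new[] overload
    delete one;               // never mix with plain free()
    delete[] many;
}
```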
diff --git a/indra/llcommon/llprofiler.h b/indra/llcommon/llprofiler.h
index e36f693dd3..ca60d23248 100644
--- a/indra/llcommon/llprofiler.h
+++ b/indra/llcommon/llprofiler.h
@@ -66,6 +66,8 @@ extern thread_local bool gProfilerEnabled;
#define LL_PROFILE_ZONE_ERR(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0XFF0000 ) // RGB red
#define LL_PROFILE_ZONE_INFO(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0X00FFFF ) // RGB cyan
#define LL_PROFILE_ZONE_WARN(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0xFFFF00 ) // RGB yellow
+ #define LL_PROFILE_ALLOC(ptr, size) TracyAlloc(ptr, size)
+ #define LL_PROFILE_FREE(ptr) TracyFree(ptr)
#endif
#if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_FAST_TIMER
#define LL_PROFILER_FRAME_END
@@ -81,11 +83,13 @@ extern thread_local bool gProfilerEnabled;
#define LL_PROFILE_ZONE_ERR(name) (void)(name); // Not supported
#define LL_PROFILE_ZONE_INFO(name) (void)(name); // Not supported
#define LL_PROFILE_ZONE_WARN(name) (void)(name); // Not supported
+ #define LL_PROFILE_ALLOC(ptr, size) (void)(ptr); (void)(size);
+ #define LL_PROFILE_FREE(ptr) (void)(ptr);
#endif
#if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY_FAST_TIMER
#define LL_PROFILER_FRAME_END FrameMark
#define LL_PROFILER_SET_THREAD_NAME( name ) tracy::SetThreadName( name ); gProfilerEnabled = true;
- #define LL_RECORD_BLOCK_TIME(name) ZoneScoped const LLTrace::BlockTimer& LL_GLUE_TOKENS(block_time_recorder, __LINE__)(LLTrace::timeThisBlock(name)); (void)LL_GLUE_TOKENS(block_time_recorder, __LINE__);
+ #define LL_RECORD_BLOCK_TIME(name) ZoneNamedN(___tracy_scoped_zone, #name, true); const LLTrace::BlockTimer& LL_GLUE_TOKENS(block_time_recorder, __LINE__)(LLTrace::timeThisBlock(name)); (void)LL_GLUE_TOKENS(block_time_recorder, __LINE__);
#define LL_PROFILE_ZONE_NAMED(name) ZoneNamedN( ___tracy_scoped_zone, #name, true );
#define LL_PROFILE_ZONE_NAMED_COLOR(name,color) ZoneNamedNC( ___tracy_scoped_zone, name, color, true ) // RGB
#define LL_PROFILE_ZONE_SCOPED ZoneScoped
@@ -96,6 +100,8 @@ extern thread_local bool gProfilerEnabled;
#define LL_PROFILE_ZONE_ERR(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0XFF0000 ) // RGB red
#define LL_PROFILE_ZONE_INFO(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0X00FFFF ) // RGB cyan
#define LL_PROFILE_ZONE_WARN(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0xFFFF00 ) // RGB yellow
+ #define LL_PROFILE_ALLOC(ptr, size) TracyAlloc(ptr, size)
+ #define LL_PROFILE_FREE(ptr) TracyFree(ptr)
#endif
#else
#define LL_PROFILER_FRAME_END
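How the new macros are intended to be used (editor's sketch; profiled_malloc/profiled_free are hypothetical names, not part of the patch): an allocator wrapper brackets the underlying calls with LL_PROFILE_ALLOC / LL_PROFILE_FREE, which expand to TracyAlloc/TracyFree in the Tracy configurations and to no-ops otherwise, so call sites need no conditional compilation.

```cpp
#include <cstdlib>

inline void* profiled_malloc(size_t size)
{
    void* ptr = malloc(size);
    LL_PROFILE_ALLOC(ptr, size); // records the allocation; no-op without Tracy
    return ptr;
}

inline void profiled_free(void* ptr)
{
    LL_PROFILE_FREE(ptr);        // record the free before releasing the memory
    free(ptr);
}
```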
diff --git a/indra/llcommon/llsdparam.cpp b/indra/llcommon/llsdparam.cpp
index 2e7b46f885..af4ccf25fd 100644
--- a/indra/llcommon/llsdparam.cpp
+++ b/indra/llcommon/llsdparam.cpp
@@ -37,8 +37,6 @@ static LLInitParam::Parser::parser_write_func_map_t sWriteFuncs;
static LLInitParam::Parser::parser_inspect_func_map_t sInspectFuncs;
static const LLSD NO_VALUE_MARKER;
-LLTrace::BlockTimerStatHandle FTM_SD_PARAM_ADAPTOR("LLSD to LLInitParam conversion");
-
//
// LLParamSDParser
//
diff --git a/indra/llcommon/llsdparam.h b/indra/llcommon/llsdparam.h
index 93910b70ae..82a623a8a0 100644
--- a/indra/llcommon/llsdparam.h
+++ b/indra/llcommon/llsdparam.h
@@ -110,7 +110,6 @@ private:
};
-extern LL_COMMON_API LLTrace::BlockTimerStatHandle FTM_SD_PARAM_ADAPTOR;
template<typename T>
class LLSDParamAdapter : public T
{
@@ -118,7 +117,7 @@ public:
LLSDParamAdapter() {}
LLSDParamAdapter(const LLSD& sd)
{
- LL_RECORD_BLOCK_TIME(FTM_SD_PARAM_ADAPTOR);
+ LL_PROFILE_ZONE_SCOPED;
LLParamSDParser parser;
// don't spam for implicit parsing of LLSD, as we want to allow arbitrary freeform data and ignore most of it
bool parse_silently = true;
diff --git a/indra/llcommon/llsingleton.h b/indra/llcommon/llsingleton.h
index 2e43a3cbed..10a8ecfedb 100644
--- a/indra/llcommon/llsingleton.h
+++ b/indra/llcommon/llsingleton.h
@@ -839,4 +839,30 @@ private: \
/* LLSINGLETON() is carefully implemented to permit exactly this */ \
LLSINGLETON_C11(DERIVED_CLASS) {}
+// Relatively unsafe singleton implementation that is much faster
+// and simpler than LLSingleton, but has no dependency tracking
+// or inherent thread safety and requires manual invocation of
+// createInstance before first use.
+template<class T>
+class LLSimpleton
+{
+public:
+ static T* sInstance;
+
+ static void createInstance()
+ {
+ llassert(sInstance == nullptr);
+ sInstance = new T();
+ }
+
+ static inline T* getInstance() { return sInstance; }
+ static inline T& instance() { return *getInstance(); }
+ static inline bool instanceExists() { return sInstance != nullptr; }
+
+ static void deleteSingleton() {
+ delete sInstance;
+ sInstance = nullptr;
+ }
+};
+
#endif
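A usage sketch for LLSimpleton (illustrative only; MyCache is a hypothetical client, and since the template above declares but does not define sInstance, the client supplies that definition): the instance must be created explicitly, typically during single-threaded startup, before any call to instance().

```cpp
class MyCache : public LLSimpleton<MyCache>
{
public:
    int lookup(int key) const { return key * 2; }
};

// Out-of-line definition of the static instance pointer.
template<> MyCache* LLSimpleton<MyCache>::sInstance = nullptr;

void startup()  { MyCache::createInstance(); }                       // before first use
void tick()     { int v = MyCache::instance().lookup(21); (void)v; }
void shutdown() { MyCache::deleteSingleton(); }
```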
diff --git a/indra/llcommon/llstring.cpp b/indra/llcommon/llstring.cpp
index 0290eea143..f6f9f97809 100644
--- a/indra/llcommon/llstring.cpp
+++ b/indra/llcommon/llstring.cpp
@@ -37,9 +37,6 @@
#include <winnls.h> // for WideCharToMultiByte
#endif
-LLTrace::BlockTimerStatHandle FT_STRING_FORMAT("String Format");
-
-
std::string ll_safe_string(const char* in)
{
if(in) return std::string(in);
@@ -1356,7 +1353,7 @@ bool LLStringUtil::formatDatetime(std::string& replacement, std::string token,
template<>
S32 LLStringUtil::format(std::string& s, const format_map_t& substitutions)
{
- LL_RECORD_BLOCK_TIME(FT_STRING_FORMAT);
+ LL_PROFILE_ZONE_SCOPED;
S32 res = 0;
std::string output;
@@ -1429,7 +1426,7 @@ S32 LLStringUtil::format(std::string& s, const format_map_t& substitutions)
template<>
S32 LLStringUtil::format(std::string& s, const LLSD& substitutions)
{
- LL_RECORD_BLOCK_TIME(FT_STRING_FORMAT);
+ LL_PROFILE_ZONE_SCOPED;
S32 res = 0;
if (!substitutions.isMap())
diff --git a/indra/llcommon/llsys.cpp b/indra/llcommon/llsys.cpp
index 6d5d043e8d..306ef05b6d 100644
--- a/indra/llcommon/llsys.cpp
+++ b/indra/llcommon/llsys.cpp
@@ -871,11 +871,9 @@ LLMemoryInfo& LLMemoryInfo::refresh()
return *this;
}
-static LLTrace::BlockTimerStatHandle FTM_MEMINFO_LOAD_STATS("MemInfo Load Stats");
-
LLSD LLMemoryInfo::loadStatsMap()
{
- LL_RECORD_BLOCK_TIME(FTM_MEMINFO_LOAD_STATS);
+ LL_PROFILE_ZONE_SCOPED;
// This implementation is derived from stream() code (as of 2011-06-29).
Stats stats;
diff --git a/indra/llcommon/lltrace.cpp b/indra/llcommon/lltrace.cpp
index 54079a4689..f59b207ded 100644
--- a/indra/llcommon/lltrace.cpp
+++ b/indra/llcommon/lltrace.cpp
@@ -61,6 +61,7 @@ TimeBlockTreeNode::TimeBlockTreeNode()
void TimeBlockTreeNode::setParent( BlockTimerStatHandle* parent )
{
+ LL_PROFILE_ZONE_SCOPED;
llassert_always(parent != mBlock);
llassert_always(parent != NULL);
diff --git a/indra/llcommon/lltrace.h b/indra/llcommon/lltrace.h
index 0d0cd6f581..4051c558a4 100644
--- a/indra/llcommon/lltrace.h
+++ b/indra/llcommon/lltrace.h
@@ -227,6 +227,7 @@ public:
void setName(const char* name)
{
+ LL_PROFILE_ZONE_SCOPED;
mName = name;
setKey(name);
}
@@ -234,12 +235,14 @@ public:
/*virtual*/ const char* getUnitLabel() const { return "KB"; }
StatType<MemAccumulator::AllocationFacet>& allocations()
- {
+ {
+ LL_PROFILE_ZONE_SCOPED;
return static_cast<StatType<MemAccumulator::AllocationFacet>&>(*(StatType<MemAccumulator>*)this);
}
StatType<MemAccumulator::DeallocationFacet>& deallocations()
- {
+ {
+ LL_PROFILE_ZONE_SCOPED;
return static_cast<StatType<MemAccumulator::DeallocationFacet>&>(*(StatType<MemAccumulator>*)this);
}
};
@@ -261,6 +264,7 @@ struct MeasureMem<T, typename T::mem_trackable_tag_t, IS_BYTES>
{
static size_t measureFootprint(const T& value)
{
+ LL_PROFILE_ZONE_SCOPED;
return sizeof(T) + value.getMemFootprint();
}
};
@@ -270,6 +274,7 @@ struct MeasureMem<T, IS_MEM_TRACKABLE, typename T::is_unit_t>
{
static size_t measureFootprint(const T& value)
{
+ LL_PROFILE_ZONE_SCOPED;
return U32Bytes(value).value();
}
};
@@ -279,6 +284,7 @@ struct MeasureMem<T*, IS_MEM_TRACKABLE, IS_BYTES>
{
static size_t measureFootprint(const T* value)
{
+ LL_PROFILE_ZONE_SCOPED;
if (!value)
{
return 0;
@@ -323,6 +329,7 @@ struct MeasureMem<std::basic_string<T>, IS_MEM_TRACKABLE, IS_BYTES>
{
static size_t measureFootprint(const std::basic_string<T>& value)
{
+ LL_PROFILE_ZONE_SCOPED;
return value.capacity() * sizeof(T);
}
};
@@ -331,6 +338,7 @@ struct MeasureMem<std::basic_string<T>, IS_MEM_TRACKABLE, IS_BYTES>
template<typename T>
inline void claim_alloc(MemStatHandle& measurement, const T& value)
{
+ LL_PROFILE_ZONE_SCOPED;
#if LL_TRACE_ENABLED
S32 size = MeasureMem<T>::measureFootprint(value);
if(size == 0) return;
@@ -343,6 +351,7 @@ inline void claim_alloc(MemStatHandle& measurement, const T& value)
template<typename T>
inline void disclaim_alloc(MemStatHandle& measurement, const T& value)
{
+ LL_PROFILE_ZONE_SCOPED;
#if LL_TRACE_ENABLED
S32 size = MeasureMem<T>::measureFootprint(value);
if(size == 0) return;
@@ -352,141 +361,6 @@ inline void disclaim_alloc(MemStatHandle& measurement, const T& value)
#endif
}
-template<typename DERIVED, size_t ALIGNMENT = LL_DEFAULT_HEAP_ALIGN>
-class MemTrackableNonVirtual
-{
-public:
- typedef void mem_trackable_tag_t;
-
- MemTrackableNonVirtual(const char* name)
-#if LL_TRACE_ENABLED
- : mMemFootprint(0)
-#endif
- {
-#if LL_TRACE_ENABLED
- static bool name_initialized = false;
- if (!name_initialized)
- {
- name_initialized = true;
- sMemStat.setName(name);
- }
-#endif
- }
-
-#if LL_TRACE_ENABLED
- ~MemTrackableNonVirtual()
- {
- disclaimMem(mMemFootprint);
- }
-
- static MemStatHandle& getMemStatHandle()
- {
- return sMemStat;
- }
-
- S32 getMemFootprint() const { return mMemFootprint; }
-#endif
-
- void* operator new(size_t size)
- {
-#if LL_TRACE_ENABLED
- claim_alloc(sMemStat, size);
-#endif
- return ll_aligned_malloc<ALIGNMENT>(size);
- }
-
- template<int CUSTOM_ALIGNMENT>
- static void* aligned_new(size_t size)
- {
-#if LL_TRACE_ENABLED
- claim_alloc(sMemStat, size);
-#endif
- return ll_aligned_malloc<CUSTOM_ALIGNMENT>(size);
- }
-
- void operator delete(void* ptr, size_t size)
- {
-#if LL_TRACE_ENABLED
- disclaim_alloc(sMemStat, size);
-#endif
- ll_aligned_free<ALIGNMENT>(ptr);
- }
-
- template<int CUSTOM_ALIGNMENT>
- static void aligned_delete(void* ptr, size_t size)
- {
-#if LL_TRACE_ENABLED
- disclaim_alloc(sMemStat, size);
-#endif
- ll_aligned_free<CUSTOM_ALIGNMENT>(ptr);
- }
-
- void* operator new [](size_t size)
- {
-#if LL_TRACE_ENABLED
- claim_alloc(sMemStat, size);
-#endif
- return ll_aligned_malloc<ALIGNMENT>(size);
- }
-
- void operator delete[](void* ptr, size_t size)
- {
-#if LL_TRACE_ENABLED
- disclaim_alloc(sMemStat, size);
-#endif
- ll_aligned_free<ALIGNMENT>(ptr);
- }
-
- // claim memory associated with other objects/data as our own, adding to our calculated footprint
- template<typename CLAIM_T>
- void claimMem(const CLAIM_T& value) const
- {
-#if LL_TRACE_ENABLED
- S32 size = MeasureMem<CLAIM_T>::measureFootprint(value);
- claim_alloc(sMemStat, size);
- mMemFootprint += size;
-#endif
- }
-
- // remove memory we had claimed from our calculated footprint
- template<typename CLAIM_T>
- void disclaimMem(const CLAIM_T& value) const
- {
-#if LL_TRACE_ENABLED
- S32 size = MeasureMem<CLAIM_T>::measureFootprint(value);
- disclaim_alloc(sMemStat, size);
- mMemFootprint -= size;
-#endif
- }
-
-private:
-#if LL_TRACE_ENABLED
- // use signed values so that we can temporarily go negative
- // and reconcile in destructor
- // NB: this assumes that no single class is responsible for > 2GB of allocations
- mutable S32 mMemFootprint;
-
- static MemStatHandle sMemStat;
-#endif
-
-};
-
-#if LL_TRACE_ENABLED
-template<typename DERIVED, size_t ALIGNMENT>
-MemStatHandle MemTrackableNonVirtual<DERIVED, ALIGNMENT>::sMemStat(typeid(MemTrackableNonVirtual<DERIVED, ALIGNMENT>).name());
-#endif
-
-template<typename DERIVED, size_t ALIGNMENT = LL_DEFAULT_HEAP_ALIGN>
-class MemTrackable : public MemTrackableNonVirtual<DERIVED, ALIGNMENT>
-{
-public:
- MemTrackable(const char* name)
- : MemTrackableNonVirtual<DERIVED, ALIGNMENT>(name)
- {}
-
- virtual ~MemTrackable()
- {}
-};
}
#endif // LL_LLTRACE_H
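With the MemTrackable base classes removed above, per-type memory stats can still be kept by calling claim_alloc/disclaim_alloc directly against a MemStatHandle, as the remaining lltrace.h code supports. A sketch under that assumption (Widget and sWidgetMemStat are hypothetical names, not part of the patch):

```cpp
static LLTrace::MemStatHandle sWidgetMemStat("Widget memory");

class Widget
{
public:
    Widget()  { LLTrace::claim_alloc(sWidgetMemStat, *this); }    // adds this object's measured footprint
    ~Widget() { LLTrace::disclaim_alloc(sWidgetMemStat, *this); } // subtracts it again
private:
    char mPayload[256];
};
```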
diff --git a/indra/llcommon/lltraceaccumulators.cpp b/indra/llcommon/lltraceaccumulators.cpp
index b1c23c6fb7..8e9aaee0e6 100644
--- a/indra/llcommon/lltraceaccumulators.cpp
+++ b/indra/llcommon/lltraceaccumulators.cpp
@@ -41,6 +41,7 @@ extern MemStatHandle gTraceMemStat;
AccumulatorBufferGroup::AccumulatorBufferGroup()
{
+ LL_PROFILE_ZONE_SCOPED;
claim_alloc(gTraceMemStat, mCounts.capacity() * sizeof(CountAccumulator));
claim_alloc(gTraceMemStat, mSamples.capacity() * sizeof(SampleAccumulator));
claim_alloc(gTraceMemStat, mEvents.capacity() * sizeof(EventAccumulator));
@@ -55,6 +56,7 @@ AccumulatorBufferGroup::AccumulatorBufferGroup(const AccumulatorBufferGroup& oth
mStackTimers(other.mStackTimers),
mMemStats(other.mMemStats)
{
+ LL_PROFILE_ZONE_SCOPED;
claim_alloc(gTraceMemStat, mCounts.capacity() * sizeof(CountAccumulator));
claim_alloc(gTraceMemStat, mSamples.capacity() * sizeof(SampleAccumulator));
claim_alloc(gTraceMemStat, mEvents.capacity() * sizeof(EventAccumulator));
@@ -64,6 +66,7 @@ AccumulatorBufferGroup::AccumulatorBufferGroup(const AccumulatorBufferGroup& oth
AccumulatorBufferGroup::~AccumulatorBufferGroup()
{
+ LL_PROFILE_ZONE_SCOPED;
disclaim_alloc(gTraceMemStat, mCounts.capacity() * sizeof(CountAccumulator));
disclaim_alloc(gTraceMemStat, mSamples.capacity() * sizeof(SampleAccumulator));
disclaim_alloc(gTraceMemStat, mEvents.capacity() * sizeof(EventAccumulator));
@@ -73,6 +76,7 @@ AccumulatorBufferGroup::~AccumulatorBufferGroup()
void AccumulatorBufferGroup::handOffTo(AccumulatorBufferGroup& other)
{
+ LL_PROFILE_ZONE_SCOPED;
other.mCounts.reset(&mCounts);
other.mSamples.reset(&mSamples);
other.mEvents.reset(&mEvents);
@@ -82,6 +86,7 @@ void AccumulatorBufferGroup::handOffTo(AccumulatorBufferGroup& other)
void AccumulatorBufferGroup::makeCurrent()
{
+ LL_PROFILE_ZONE_SCOPED;
mCounts.makeCurrent();
mSamples.makeCurrent();
mEvents.makeCurrent();
@@ -104,6 +109,7 @@ void AccumulatorBufferGroup::makeCurrent()
//static
void AccumulatorBufferGroup::clearCurrent()
{
+ LL_PROFILE_ZONE_SCOPED;
AccumulatorBuffer<CountAccumulator>::clearCurrent();
AccumulatorBuffer<SampleAccumulator>::clearCurrent();
AccumulatorBuffer<EventAccumulator>::clearCurrent();
@@ -118,6 +124,7 @@ bool AccumulatorBufferGroup::isCurrent() const
void AccumulatorBufferGroup::append( const AccumulatorBufferGroup& other )
{
+ LL_PROFILE_ZONE_SCOPED;
mCounts.addSamples(other.mCounts, SEQUENTIAL);
mSamples.addSamples(other.mSamples, SEQUENTIAL);
mEvents.addSamples(other.mEvents, SEQUENTIAL);
@@ -127,6 +134,7 @@ void AccumulatorBufferGroup::append( const AccumulatorBufferGroup& other )
void AccumulatorBufferGroup::merge( const AccumulatorBufferGroup& other)
{
+ LL_PROFILE_ZONE_SCOPED;
mCounts.addSamples(other.mCounts, NON_SEQUENTIAL);
mSamples.addSamples(other.mSamples, NON_SEQUENTIAL);
mEvents.addSamples(other.mEvents, NON_SEQUENTIAL);
@@ -137,6 +145,7 @@ void AccumulatorBufferGroup::merge( const AccumulatorBufferGroup& other)
void AccumulatorBufferGroup::reset(AccumulatorBufferGroup* other)
{
+ LL_PROFILE_ZONE_SCOPED;
mCounts.reset(other ? &other->mCounts : NULL);
mSamples.reset(other ? &other->mSamples : NULL);
mEvents.reset(other ? &other->mEvents : NULL);
@@ -146,6 +155,7 @@ void AccumulatorBufferGroup::reset(AccumulatorBufferGroup* other)
void AccumulatorBufferGroup::sync()
{
+ LL_PROFILE_ZONE_SCOPED;
if (isCurrent())
{
F64SecondsImplicit time_stamp = LLTimer::getTotalSeconds();
@@ -190,7 +200,7 @@ F64 SampleAccumulator::mergeSumsOfSquares(const SampleAccumulator& a, const Samp
void SampleAccumulator::addSamples( const SampleAccumulator& other, EBufferAppendType append_type )
{
- if (append_type == NON_SEQUENTIAL)
+ if (append_type == NON_SEQUENTIAL)
{
return;
}
@@ -289,7 +299,7 @@ void EventAccumulator::addSamples( const EventAccumulator& other, EBufferAppendT
void EventAccumulator::reset( const EventAccumulator* other )
{
- mNumSamples = 0;
+ mNumSamples = 0;
mSum = 0;
mMin = F32(NaN);
mMax = F32(NaN);
diff --git a/indra/llcommon/lltraceaccumulators.h b/indra/llcommon/lltraceaccumulators.h
index 8eb5338a2a..b183fcd14a 100644
--- a/indra/llcommon/lltraceaccumulators.h
+++ b/indra/llcommon/lltraceaccumulators.h
@@ -66,6 +66,7 @@ namespace LLTrace
: mStorageSize(0),
mStorage(NULL)
{
+ LL_PROFILE_ZONE_SCOPED;
const AccumulatorBuffer& other = *getDefaultBuffer();
resize(sNextStorageSlot);
for (S32 i = 0; i < sNextStorageSlot; i++)
@@ -76,6 +77,7 @@ namespace LLTrace
~AccumulatorBuffer()
{
+ LL_PROFILE_ZONE_SCOPED;
if (isCurrent())
{
LLThreadLocalSingletonPointer<ACCUMULATOR>::setInstance(NULL);
@@ -98,6 +100,7 @@ namespace LLTrace
: mStorageSize(0),
mStorage(NULL)
{
+ LL_PROFILE_ZONE_SCOPED;
resize(sNextStorageSlot);
for (S32 i = 0; i < sNextStorageSlot; i++)
{
@@ -107,6 +110,7 @@ namespace LLTrace
void addSamples(const AccumulatorBuffer<ACCUMULATOR>& other, EBufferAppendType append_type)
{
+ LL_PROFILE_ZONE_SCOPED;
llassert(mStorageSize >= sNextStorageSlot && other.mStorageSize >= sNextStorageSlot);
for (size_t i = 0; i < sNextStorageSlot; i++)
{
@@ -116,6 +120,7 @@ namespace LLTrace
void copyFrom(const AccumulatorBuffer<ACCUMULATOR>& other)
{
+ LL_PROFILE_ZONE_SCOPED;
llassert(mStorageSize >= sNextStorageSlot && other.mStorageSize >= sNextStorageSlot);
for (size_t i = 0; i < sNextStorageSlot; i++)
{
@@ -125,6 +130,7 @@ namespace LLTrace
void reset(const AccumulatorBuffer<ACCUMULATOR>* other = NULL)
{
+ LL_PROFILE_ZONE_SCOPED;
llassert(mStorageSize >= sNextStorageSlot);
for (size_t i = 0; i < sNextStorageSlot; i++)
{
@@ -134,6 +140,7 @@ namespace LLTrace
void sync(F64SecondsImplicit time_stamp)
{
+ LL_PROFILE_ZONE_SCOPED;
llassert(mStorageSize >= sNextStorageSlot);
for (size_t i = 0; i < sNextStorageSlot; i++)
{
@@ -153,12 +160,13 @@ namespace LLTrace
static void clearCurrent()
{
- LLThreadLocalSingletonPointer<ACCUMULATOR>::setInstance(NULL);
+ LLThreadLocalSingletonPointer<ACCUMULATOR>::setInstance(NULL);
}
// NOTE: this is not thread-safe. We assume that slots are reserved in the main thread before any child threads are spawned
size_t reserveSlot()
{
+ LL_PROFILE_ZONE_SCOPED;
size_t next_slot = sNextStorageSlot++;
if (next_slot >= mStorageSize)
{
@@ -172,6 +180,7 @@ namespace LLTrace
void resize(size_t new_size)
{
+ LL_PROFILE_ZONE_SCOPED;
if (new_size <= mStorageSize) return;
ACCUMULATOR* old_storage = mStorage;
@@ -212,6 +221,7 @@ namespace LLTrace
static self_t* getDefaultBuffer()
{
+ LL_PROFILE_ZONE_SCOPED;
static bool sInitialized = false;
if (!sInitialized)
{
@@ -326,6 +336,7 @@ namespace LLTrace
void sample(F64 value)
{
+ LL_PROFILE_ZONE_SCOPED;
F64SecondsImplicit time_stamp = LLTimer::getTotalSeconds();
// store effect of last value
@@ -444,9 +455,9 @@ namespace LLTrace
S32 mNumSamples;
};
- class TimeBlockAccumulator
+ class alignas(32) TimeBlockAccumulator
{
- public:
+ public:
typedef F64Seconds value_t;
static F64Seconds getDefaultValue() { return F64Seconds(0); }
@@ -539,6 +550,7 @@ namespace LLTrace
void addSamples(const MemAccumulator& other, EBufferAppendType append_type)
{
+ LL_PROFILE_ZONE_SCOPED;
mAllocations.addSamples(other.mAllocations, append_type);
mDeallocations.addSamples(other.mDeallocations, append_type);
@@ -557,6 +569,7 @@ namespace LLTrace
void reset(const MemAccumulator* other)
{
+ LL_PROFILE_ZONE_SCOPED;
mSize.reset(other ? &other->mSize : NULL);
mAllocations.reset(other ? &other->mAllocations : NULL);
mDeallocations.reset(other ? &other->mDeallocations : NULL);
diff --git a/indra/llcommon/lltracerecording.cpp b/indra/llcommon/lltracerecording.cpp
index 3094b627a2..c72a64d086 100644
--- a/indra/llcommon/lltracerecording.cpp
+++ b/indra/llcommon/lltracerecording.cpp
@@ -50,6 +50,7 @@ Recording::Recording(EPlayState state)
: mElapsedSeconds(0),
mActiveBuffers(NULL)
{
+ LL_PROFILE_ZONE_SCOPED;
claim_alloc(gTraceMemStat, this);
mBuffers = new AccumulatorBufferGroup();
claim_alloc(gTraceMemStat, mBuffers);
@@ -59,12 +60,14 @@ Recording::Recording(EPlayState state)
Recording::Recording( const Recording& other )
: mActiveBuffers(NULL)
{
+ LL_PROFILE_ZONE_SCOPED;
claim_alloc(gTraceMemStat, this);
*this = other;
}
Recording& Recording::operator = (const Recording& other)
{
+ LL_PROFILE_ZONE_SCOPED;
// this will allow us to seamlessly start without affecting any data we've acquired from other
setPlayState(PAUSED);
@@ -85,6 +88,7 @@ Recording& Recording::operator = (const Recording& other)
Recording::~Recording()
{
+ LL_PROFILE_ZONE_SCOPED;
disclaim_alloc(gTraceMemStat, this);
disclaim_alloc(gTraceMemStat, mBuffers);
@@ -103,6 +107,7 @@ void Recording::update()
#if LL_TRACE_ENABLED
if (isStarted())
{
+ LL_PROFILE_ZONE_SCOPED;
mElapsedSeconds += mSamplingTimer.getElapsedTimeF64();
// must have thread recorder running on this thread
@@ -123,6 +128,7 @@ void Recording::update()
void Recording::handleReset()
{
+ LL_PROFILE_ZONE_SCOPED;
#if LL_TRACE_ENABLED
mBuffers.write()->reset();
@@ -133,6 +139,7 @@ void Recording::handleReset()
void Recording::handleStart()
{
+ LL_PROFILE_ZONE_SCOPED;
#if LL_TRACE_ENABLED
mSamplingTimer.reset();
mBuffers.setStayUnique(true);
@@ -144,6 +151,7 @@ void Recording::handleStart()
void Recording::handleStop()
{
+ LL_PROFILE_ZONE_SCOPED;
#if LL_TRACE_ENABLED
mElapsedSeconds += mSamplingTimer.getElapsedTimeF64();
// must have thread recorder running on this thread
@@ -273,7 +281,7 @@ F64Kilobytes Recording::getMean(const StatType<MemAccumulator>& stat)
F64Kilobytes Recording::getMax(const StatType<MemAccumulator>& stat)
{
- update();
+ update();
const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
return F64Bytes(llmax(accumulator.mSize.getMax(), active_accumulator && active_accumulator->mSize.hasValue() ? active_accumulator->mSize.getMax() : F32_MIN));
@@ -281,7 +289,7 @@ F64Kilobytes Recording::getMax(const StatType<MemAccumulator>& stat)
F64Kilobytes Recording::getStandardDeviation(const StatType<MemAccumulator>& stat)
{
- update();
+ update();
const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
if (active_accumulator && active_accumulator->hasValue())
@@ -297,7 +305,7 @@ F64Kilobytes Recording::getStandardDeviation(const StatType<MemAccumulator>& sta
F64Kilobytes Recording::getLastValue(const StatType<MemAccumulator>& stat)
{
- update();
+ update();
const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
return F64Bytes(active_accumulator ? active_accumulator->mSize.getLastValue() : accumulator.mSize.getLastValue());
@@ -305,7 +313,7 @@ F64Kilobytes Recording::getLastValue(const StatType<MemAccumulator>& stat)
bool Recording::hasValue(const StatType<MemAccumulator::AllocationFacet>& stat)
{
- update();
+ update();
const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
return accumulator.mAllocations.hasValue() || (active_accumulator ? active_accumulator->mAllocations.hasValue() : false);
@@ -313,7 +321,7 @@ bool Recording::hasValue(const StatType<MemAccumulator::AllocationFacet>& stat)
F64Kilobytes Recording::getSum(const StatType<MemAccumulator::AllocationFacet>& stat)
{
- update();
+ update();
const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
return F64Bytes(accumulator.mAllocations.getSum() + (active_accumulator ? active_accumulator->mAllocations.getSum() : 0));
@@ -321,7 +329,7 @@ F64Kilobytes Recording::getSum(const StatType<MemAccumulator::AllocationFacet>&
F64Kilobytes Recording::getPerSec(const StatType<MemAccumulator::AllocationFacet>& stat)
{
- update();
+ update();
const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
return F64Bytes((accumulator.mAllocations.getSum() + (active_accumulator ? active_accumulator->mAllocations.getSum() : 0)) / mElapsedSeconds.value());
@@ -329,7 +337,7 @@ F64Kilobytes Recording::getPerSec(const StatType<MemAccumulator::AllocationFacet
S32 Recording::getSampleCount(const StatType<MemAccumulator::AllocationFacet>& stat)
{
- update();
+ update();
const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
return accumulator.mAllocations.getSampleCount() + (active_accumulator ? active_accumulator->mAllocations.getSampleCount() : 0);
@@ -337,7 +345,7 @@ S32 Recording::getSampleCount(const StatType<MemAccumulator::AllocationFacet>& s
bool Recording::hasValue(const StatType<MemAccumulator::DeallocationFacet>& stat)
{
- update();
+ update();
const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
return accumulator.mDeallocations.hasValue() || (active_accumulator ? active_accumulator->mDeallocations.hasValue() : false);
@@ -346,7 +354,7 @@ bool Recording::hasValue(const StatType<MemAccumulator::DeallocationFacet>& stat
F64Kilobytes Recording::getSum(const StatType<MemAccumulator::DeallocationFacet>& stat)
{
- update();
+ update();
const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
return F64Bytes(accumulator.mDeallocations.getSum() + (active_accumulator ? active_accumulator->mDeallocations.getSum() : 0));
@@ -354,7 +362,7 @@ F64Kilobytes Recording::getSum(const StatType<MemAccumulator::DeallocationFacet>
F64Kilobytes Recording::getPerSec(const StatType<MemAccumulator::DeallocationFacet>& stat)
{
- update();
+ update();
const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
return F64Bytes((accumulator.mDeallocations.getSum() + (active_accumulator ? active_accumulator->mDeallocations.getSum() : 0)) / mElapsedSeconds.value());
@@ -362,7 +370,7 @@ F64Kilobytes Recording::getPerSec(const StatType<MemAccumulator::DeallocationFac
S32 Recording::getSampleCount(const StatType<MemAccumulator::DeallocationFacet>& stat)
{
- update();
+ update();
const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
return accumulator.mDeallocations.getSampleCount() + (active_accumulator ? active_accumulator->mDeallocations.getSampleCount() : 0);
@@ -370,7 +378,7 @@ S32 Recording::getSampleCount(const StatType<MemAccumulator::DeallocationFacet>&
bool Recording::hasValue(const StatType<CountAccumulator>& stat)
{
- update();
+ update();
const CountAccumulator& accumulator = mBuffers->mCounts[stat.getIndex()];
const CountAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mCounts[stat.getIndex()] : NULL;
return accumulator.hasValue() || (active_accumulator ? active_accumulator->hasValue() : false);
@@ -378,7 +386,7 @@ bool Recording::hasValue(const StatType<CountAccumulator>& stat)
F64 Recording::getSum(const StatType<CountAccumulator>& stat)
{
- update();
+ update();
const CountAccumulator& accumulator = mBuffers->mCounts[stat.getIndex()];
const CountAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mCounts[stat.getIndex()] : NULL;
return accumulator.getSum() + (active_accumulator ? active_accumulator->getSum() : 0);
@@ -386,7 +394,7 @@ F64 Recording::getSum(const StatType<CountAccumulator>& stat)
F64 Recording::getPerSec( const StatType<CountAccumulator>& stat )
{
- update();
+ update();
const CountAccumulator& accumulator = mBuffers->mCounts[stat.getIndex()];
const CountAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mCounts[stat.getIndex()] : NULL;
F64 sum = accumulator.getSum() + (active_accumulator ? active_accumulator->getSum() : 0);
@@ -395,7 +403,7 @@ F64 Recording::getPerSec( const StatType<CountAccumulator>& stat )
S32 Recording::getSampleCount( const StatType<CountAccumulator>& stat )
{
- update();
+ update();
const CountAccumulator& accumulator = mBuffers->mCounts[stat.getIndex()];
const CountAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mCounts[stat.getIndex()] : NULL;
return accumulator.getSampleCount() + (active_accumulator ? active_accumulator->getSampleCount() : 0);
@@ -403,7 +411,7 @@ S32 Recording::getSampleCount( const StatType<CountAccumulator>& stat )
bool Recording::hasValue(const StatType<SampleAccumulator>& stat)
{
- update();
+ update();
const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];
const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;
return accumulator.hasValue() || (active_accumulator && active_accumulator->hasValue());
@@ -411,7 +419,7 @@ bool Recording::hasValue(const StatType<SampleAccumulator>& stat)
F64 Recording::getMin( const StatType<SampleAccumulator>& stat )
{
- update();
+ update();
const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];
const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;
return llmin(accumulator.getMin(), active_accumulator && active_accumulator->hasValue() ? active_accumulator->getMin() : F32_MAX);
@@ -419,7 +427,7 @@ F64 Recording::getMin( const StatType<SampleAccumulator>& stat )
F64 Recording::getMax( const StatType<SampleAccumulator>& stat )
{
- update();
+ update();
const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];
const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;
return llmax(accumulator.getMax(), active_accumulator && active_accumulator->hasValue() ? active_accumulator->getMax() : F32_MIN);
@@ -427,7 +435,7 @@ F64 Recording::getMax( const StatType<SampleAccumulator>& stat )
F64 Recording::getMean( const StatType<SampleAccumulator>& stat )
{
- update();
+ update();
const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];
const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;
if (active_accumulator && active_accumulator->hasValue())
@@ -448,7 +456,7 @@ F64 Recording::getMean( const StatType<SampleAccumulator>& stat )
F64 Recording::getStandardDeviation( const StatType<SampleAccumulator>& stat )
{
- update();
+ update();
const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];
const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;
@@ -465,7 +473,7 @@ F64 Recording::getStandardDeviation( const StatType<SampleAccumulator>& stat )
F64 Recording::getLastValue( const StatType<SampleAccumulator>& stat )
{
- update();
+ update();
const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];
const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;
return (active_accumulator && active_accumulator->hasValue() ? active_accumulator->getLastValue() : accumulator.getLastValue());
@@ -473,7 +481,7 @@ F64 Recording::getLastValue( const StatType<SampleAccumulator>& stat )
S32 Recording::getSampleCount( const StatType<SampleAccumulator>& stat )
{
- update();
+ update();
const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];
const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;
return accumulator.getSampleCount() + (active_accumulator && active_accumulator->hasValue() ? active_accumulator->getSampleCount() : 0);
@@ -481,7 +489,7 @@ S32 Recording::getSampleCount( const StatType<SampleAccumulator>& stat )
bool Recording::hasValue(const StatType<EventAccumulator>& stat)
{
- update();
+ update();
const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;
return accumulator.hasValue() || (active_accumulator && active_accumulator->hasValue());
@@ -489,7 +497,7 @@ bool Recording::hasValue(const StatType<EventAccumulator>& stat)
F64 Recording::getSum( const StatType<EventAccumulator>& stat)
{
- update();
+ update();
const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;
return (F64)(accumulator.getSum() + (active_accumulator && active_accumulator->hasValue() ? active_accumulator->getSum() : 0));
@@ -497,7 +505,7 @@ F64 Recording::getSum( const StatType<EventAccumulator>& stat)
F64 Recording::getMin( const StatType<EventAccumulator>& stat )
{
- update();
+ update();
const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;
return llmin(accumulator.getMin(), active_accumulator && active_accumulator->hasValue() ? active_accumulator->getMin() : F32_MAX);
@@ -505,7 +513,7 @@ F64 Recording::getMin( const StatType<EventAccumulator>& stat )
F64 Recording::getMax( const StatType<EventAccumulator>& stat )
{
- update();
+ update();
const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;
return llmax(accumulator.getMax(), active_accumulator && active_accumulator->hasValue() ? active_accumulator->getMax() : F32_MIN);
@@ -513,7 +521,7 @@ F64 Recording::getMax( const StatType<EventAccumulator>& stat )
F64 Recording::getMean( const StatType<EventAccumulator>& stat )
{
- update();
+ update();
const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;
if (active_accumulator && active_accumulator->hasValue())
@@ -534,7 +542,7 @@ F64 Recording::getMean( const StatType<EventAccumulator>& stat )
F64 Recording::getStandardDeviation( const StatType<EventAccumulator>& stat )
{
- update();
+ update();
const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;
@@ -551,7 +559,7 @@ F64 Recording::getStandardDeviation( const StatType<EventAccumulator>& stat )
F64 Recording::getLastValue( const StatType<EventAccumulator>& stat )
{
- update();
+ update();
const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;
return active_accumulator ? active_accumulator->getLastValue() : accumulator.getLastValue();
@@ -559,7 +567,7 @@ F64 Recording::getLastValue( const StatType<EventAccumulator>& stat )
S32 Recording::getSampleCount( const StatType<EventAccumulator>& stat )
{
- update();
+ update();
const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;
return accumulator.getSampleCount() + (active_accumulator ? active_accumulator->getSampleCount() : 0);
@@ -575,17 +583,20 @@ PeriodicRecording::PeriodicRecording( S32 num_periods, EPlayState state)
mNumRecordedPeriods(0),
mRecordingPeriods(num_periods ? num_periods : 1)
{
+ LL_PROFILE_ZONE_SCOPED;
setPlayState(state);
claim_alloc(gTraceMemStat, this);
}
PeriodicRecording::~PeriodicRecording()
{
+ LL_PROFILE_ZONE_SCOPED;
disclaim_alloc(gTraceMemStat, this);
}
void PeriodicRecording::nextPeriod()
{
+ LL_PROFILE_ZONE_SCOPED;
if (mAutoResize)
{
mRecordingPeriods.push_back(Recording());
@@ -600,6 +611,7 @@ void PeriodicRecording::nextPeriod()
void PeriodicRecording::appendRecording(Recording& recording)
{
+ LL_PROFILE_ZONE_SCOPED;
getCurRecording().appendRecording(recording);
nextPeriod();
}
@@ -607,6 +619,7 @@ void PeriodicRecording::appendRecording(Recording& recording)
void PeriodicRecording::appendPeriodicRecording( PeriodicRecording& other )
{
+ LL_PROFILE_ZONE_SCOPED;
if (other.mRecordingPeriods.empty()) return;
getCurRecording().update();
@@ -680,6 +693,7 @@ void PeriodicRecording::appendPeriodicRecording( PeriodicRecording& other )
F64Seconds PeriodicRecording::getDuration() const
{
+ LL_PROFILE_ZONE_SCOPED;
F64Seconds duration;
S32 num_periods = mRecordingPeriods.size();
for (S32 i = 1; i <= num_periods; i++)
@@ -693,6 +707,7 @@ F64Seconds PeriodicRecording::getDuration() const
LLTrace::Recording PeriodicRecording::snapshotCurRecording() const
{
+ LL_PROFILE_ZONE_SCOPED;
Recording recording_copy(getCurRecording());
recording_copy.stop();
return recording_copy;
@@ -735,16 +750,19 @@ const Recording& PeriodicRecording::getPrevRecording( S32 offset ) const
void PeriodicRecording::handleStart()
{
+ LL_PROFILE_ZONE_SCOPED;
getCurRecording().start();
}
void PeriodicRecording::handleStop()
{
+ LL_PROFILE_ZONE_SCOPED;
getCurRecording().pause();
}
void PeriodicRecording::handleReset()
{
+ LL_PROFILE_ZONE_SCOPED;
getCurRecording().stop();
if (mAutoResize)
@@ -768,11 +786,13 @@ void PeriodicRecording::handleReset()
void PeriodicRecording::handleSplitTo(PeriodicRecording& other)
{
+ LL_PROFILE_ZONE_SCOPED;
getCurRecording().splitTo(other.getCurRecording());
}
F64 PeriodicRecording::getPeriodMin( const StatType<EventAccumulator>& stat, S32 num_periods /*= S32_MAX*/ )
{
+ LL_PROFILE_ZONE_SCOPED;
num_periods = llmin(num_periods, getNumRecordedPeriods());
bool has_value = false;
@@ -794,6 +814,7 @@ F64 PeriodicRecording::getPeriodMin( const StatType<EventAccumulator>& stat, S32
F64 PeriodicRecording::getPeriodMax( const StatType<EventAccumulator>& stat, S32 num_periods /*= S32_MAX*/ )
{
+ LL_PROFILE_ZONE_SCOPED;
num_periods = llmin(num_periods, getNumRecordedPeriods());
bool has_value = false;
@@ -816,6 +837,7 @@ F64 PeriodicRecording::getPeriodMax( const StatType<EventAccumulator>& stat, S32
// calculates means using aggregates per period
F64 PeriodicRecording::getPeriodMean( const StatType<EventAccumulator>& stat, S32 num_periods /*= S32_MAX*/ )
{
+ LL_PROFILE_ZONE_SCOPED;
num_periods = llmin(num_periods, getNumRecordedPeriods());
F64 mean = 0;
@@ -839,6 +861,7 @@ F64 PeriodicRecording::getPeriodMean( const StatType<EventAccumulator>& stat, S3
F64 PeriodicRecording::getPeriodStandardDeviation( const StatType<EventAccumulator>& stat, S32 num_periods /*= S32_MAX*/ )
{
+ LL_PROFILE_ZONE_SCOPED;
num_periods = llmin(num_periods, getNumRecordedPeriods());
F64 period_mean = getPeriodMean(stat, num_periods);
@@ -863,6 +886,7 @@ F64 PeriodicRecording::getPeriodStandardDeviation( const StatType<EventAccumulat
F64 PeriodicRecording::getPeriodMin( const StatType<SampleAccumulator>& stat, S32 num_periods /*= S32_MAX*/ )
{
+ LL_PROFILE_ZONE_SCOPED;
num_periods = llmin(num_periods, getNumRecordedPeriods());
bool has_value = false;
@@ -884,6 +908,7 @@ F64 PeriodicRecording::getPeriodMin( const StatType<SampleAccumulator>& stat, S3
F64 PeriodicRecording::getPeriodMax(const StatType<SampleAccumulator>& stat, S32 num_periods /*= S32_MAX*/)
{
+ LL_PROFILE_ZONE_SCOPED;
num_periods = llmin(num_periods, getNumRecordedPeriods());
bool has_value = false;
@@ -906,6 +931,7 @@ F64 PeriodicRecording::getPeriodMax(const StatType<SampleAccumulator>& stat, S32
F64 PeriodicRecording::getPeriodMean( const StatType<SampleAccumulator>& stat, S32 num_periods /*= S32_MAX*/ )
{
+ LL_PROFILE_ZONE_SCOPED;
num_periods = llmin(num_periods, getNumRecordedPeriods());
S32 valid_period_count = 0;
@@ -928,6 +954,7 @@ F64 PeriodicRecording::getPeriodMean( const StatType<SampleAccumulator>& stat, S
F64 PeriodicRecording::getPeriodStandardDeviation( const StatType<SampleAccumulator>& stat, S32 num_periods /*= S32_MAX*/ )
{
+ LL_PROFILE_ZONE_SCOPED;
num_periods = llmin(num_periods, getNumRecordedPeriods());
F64 period_mean = getPeriodMean(stat, num_periods);
@@ -953,6 +980,7 @@ F64 PeriodicRecording::getPeriodStandardDeviation( const StatType<SampleAccumula
F64Kilobytes PeriodicRecording::getPeriodMin( const StatType<MemAccumulator>& stat, S32 num_periods /*= S32_MAX*/ )
{
+ LL_PROFILE_ZONE_SCOPED;
num_periods = llmin(num_periods, getNumRecordedPeriods());
F64Kilobytes min_val(std::numeric_limits<F64>::max());
@@ -972,6 +1000,7 @@ F64Kilobytes PeriodicRecording::getPeriodMin(const MemStatHandle& stat, S32 num_
F64Kilobytes PeriodicRecording::getPeriodMax(const StatType<MemAccumulator>& stat, S32 num_periods /*= S32_MAX*/)
{
+ LL_PROFILE_ZONE_SCOPED;
num_periods = llmin(num_periods, getNumRecordedPeriods());
F64Kilobytes max_val(0.0);
@@ -991,6 +1020,7 @@ F64Kilobytes PeriodicRecording::getPeriodMax(const MemStatHandle& stat, S32 num_
F64Kilobytes PeriodicRecording::getPeriodMean( const StatType<MemAccumulator>& stat, S32 num_periods /*= S32_MAX*/ )
{
+ LL_PROFILE_ZONE_SCOPED;
num_periods = llmin(num_periods, getNumRecordedPeriods());
F64Kilobytes mean(0);
@@ -1011,6 +1041,7 @@ F64Kilobytes PeriodicRecording::getPeriodMean(const MemStatHandle& stat, S32 num
F64Kilobytes PeriodicRecording::getPeriodStandardDeviation( const StatType<MemAccumulator>& stat, S32 num_periods /*= S32_MAX*/ )
{
+ LL_PROFILE_ZONE_SCOPED;
num_periods = llmin(num_periods, getNumRecordedPeriods());
F64Kilobytes period_mean = getPeriodMean(stat, num_periods);
@@ -1044,6 +1075,7 @@ F64Kilobytes PeriodicRecording::getPeriodStandardDeviation(const MemStatHandle&
void ExtendableRecording::extend()
{
+ LL_PROFILE_ZONE_SCOPED;
// push the data back to accepted recording
mAcceptedRecording.appendRecording(mPotentialRecording);
// flush data, so we can start from scratch
@@ -1052,22 +1084,26 @@ void ExtendableRecording::extend()
void ExtendableRecording::handleStart()
{
+ LL_PROFILE_ZONE_SCOPED;
mPotentialRecording.start();
}
void ExtendableRecording::handleStop()
{
+ LL_PROFILE_ZONE_SCOPED;
mPotentialRecording.pause();
}
void ExtendableRecording::handleReset()
{
+ LL_PROFILE_ZONE_SCOPED;
mAcceptedRecording.reset();
mPotentialRecording.reset();
}
void ExtendableRecording::handleSplitTo(ExtendableRecording& other)
{
+ LL_PROFILE_ZONE_SCOPED;
mPotentialRecording.splitTo(other.mPotentialRecording);
}
@@ -1084,6 +1120,7 @@ ExtendablePeriodicRecording::ExtendablePeriodicRecording()
void ExtendablePeriodicRecording::extend()
{
+ LL_PROFILE_ZONE_SCOPED;
// push the data back to accepted recording
mAcceptedRecording.appendPeriodicRecording(mPotentialRecording);
// flush data, so we can start from scratch
@@ -1093,22 +1130,26 @@ void ExtendablePeriodicRecording::extend()
void ExtendablePeriodicRecording::handleStart()
{
+ LL_PROFILE_ZONE_SCOPED;
mPotentialRecording.start();
}
void ExtendablePeriodicRecording::handleStop()
{
+ LL_PROFILE_ZONE_SCOPED;
mPotentialRecording.pause();
}
void ExtendablePeriodicRecording::handleReset()
{
+ LL_PROFILE_ZONE_SCOPED;
mAcceptedRecording.reset();
mPotentialRecording.reset();
}
void ExtendablePeriodicRecording::handleSplitTo(ExtendablePeriodicRecording& other)
{
+ LL_PROFILE_ZONE_SCOPED;
mPotentialRecording.splitTo(other.mPotentialRecording);
}
@@ -1123,6 +1164,7 @@ PeriodicRecording& get_frame_recording()
void LLStopWatchControlsMixinCommon::start()
{
+ LL_PROFILE_ZONE_SCOPED;
switch (mPlayState)
{
case STOPPED:
@@ -1144,6 +1186,7 @@ void LLStopWatchControlsMixinCommon::start()
void LLStopWatchControlsMixinCommon::stop()
{
+ LL_PROFILE_ZONE_SCOPED;
switch (mPlayState)
{
case STOPPED:
@@ -1163,6 +1206,7 @@ void LLStopWatchControlsMixinCommon::stop()
void LLStopWatchControlsMixinCommon::pause()
{
+ LL_PROFILE_ZONE_SCOPED;
switch (mPlayState)
{
case STOPPED:
@@ -1182,6 +1226,7 @@ void LLStopWatchControlsMixinCommon::pause()
void LLStopWatchControlsMixinCommon::unpause()
{
+ LL_PROFILE_ZONE_SCOPED;
switch (mPlayState)
{
case STOPPED:
@@ -1201,6 +1246,7 @@ void LLStopWatchControlsMixinCommon::unpause()
void LLStopWatchControlsMixinCommon::resume()
{
+ LL_PROFILE_ZONE_SCOPED;
switch (mPlayState)
{
case STOPPED:
@@ -1221,6 +1267,7 @@ void LLStopWatchControlsMixinCommon::resume()
void LLStopWatchControlsMixinCommon::restart()
{
+ LL_PROFILE_ZONE_SCOPED;
switch (mPlayState)
{
case STOPPED:
@@ -1244,11 +1291,13 @@ void LLStopWatchControlsMixinCommon::restart()
void LLStopWatchControlsMixinCommon::reset()
{
+ LL_PROFILE_ZONE_SCOPED;
handleReset();
}
void LLStopWatchControlsMixinCommon::setPlayState( EPlayState state )
{
+ LL_PROFILE_ZONE_SCOPED;
switch(state)
{
case STOPPED:
diff --git a/indra/llcommon/lltracerecording.h b/indra/llcommon/lltracerecording.h
index d0b4a842a6..6715104613 100644
--- a/indra/llcommon/lltracerecording.h
+++ b/indra/llcommon/lltracerecording.h
@@ -355,6 +355,7 @@ namespace LLTrace
template <typename T>
S32 getSampleCount(const StatType<T>& stat, S32 num_periods = S32_MAX)
{
+ LL_PROFILE_ZONE_SCOPED;
num_periods = llmin(num_periods, getNumRecordedPeriods());
S32 num_samples = 0;
@@ -374,6 +375,7 @@ namespace LLTrace
template <typename T>
typename T::value_t getPeriodMin(const StatType<T>& stat, S32 num_periods = S32_MAX)
{
+ LL_PROFILE_ZONE_SCOPED;
num_periods = llmin(num_periods, getNumRecordedPeriods());
bool has_value = false;
@@ -396,6 +398,7 @@ namespace LLTrace
template<typename T>
T getPeriodMin(const CountStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
+ LL_PROFILE_ZONE_SCOPED;
return T(getPeriodMin(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));
}
@@ -403,6 +406,7 @@ namespace LLTrace
template<typename T>
T getPeriodMin(const SampleStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
+ LL_PROFILE_ZONE_SCOPED;
return T(getPeriodMin(static_cast<const StatType<SampleAccumulator>&>(stat), num_periods));
}
@@ -410,6 +414,7 @@ namespace LLTrace
template<typename T>
T getPeriodMin(const EventStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
+ LL_PROFILE_ZONE_SCOPED;
return T(getPeriodMin(static_cast<const StatType<EventAccumulator>&>(stat), num_periods));
}
@@ -419,6 +424,7 @@ namespace LLTrace
template <typename T>
typename RelatedTypes<typename T::value_t>::fractional_t getPeriodMinPerSec(const StatType<T>& stat, S32 num_periods = S32_MAX)
{
+ LL_PROFILE_ZONE_SCOPED;
num_periods = llmin(num_periods, getNumRecordedPeriods());
typename RelatedTypes<typename T::value_t>::fractional_t min_val(std::numeric_limits<F64>::max());
@@ -433,6 +439,7 @@ namespace LLTrace
template<typename T>
typename RelatedTypes<T>::fractional_t getPeriodMinPerSec(const CountStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
+ LL_PROFILE_ZONE_SCOPED;
return typename RelatedTypes<T>::fractional_t(getPeriodMinPerSec(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));
}
@@ -444,6 +451,7 @@ namespace LLTrace
template <typename T>
typename T::value_t getPeriodMax(const StatType<T>& stat, S32 num_periods = S32_MAX)
{
+ LL_PROFILE_ZONE_SCOPED;
num_periods = llmin(num_periods, getNumRecordedPeriods());
bool has_value = false;
@@ -466,6 +474,7 @@ namespace LLTrace
template<typename T>
T getPeriodMax(const CountStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
+ LL_PROFILE_ZONE_SCOPED;
return T(getPeriodMax(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));
}
@@ -473,6 +482,7 @@ namespace LLTrace
template<typename T>
T getPeriodMax(const SampleStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
+ LL_PROFILE_ZONE_SCOPED;
return T(getPeriodMax(static_cast<const StatType<SampleAccumulator>&>(stat), num_periods));
}
@@ -480,6 +490,7 @@ namespace LLTrace
template<typename T>
T getPeriodMax(const EventStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
+ LL_PROFILE_ZONE_SCOPED;
return T(getPeriodMax(static_cast<const StatType<EventAccumulator>&>(stat), num_periods));
}
@@ -489,6 +500,7 @@ namespace LLTrace
template <typename T>
typename RelatedTypes<typename T::value_t>::fractional_t getPeriodMaxPerSec(const StatType<T>& stat, S32 num_periods = S32_MAX)
{
+ LL_PROFILE_ZONE_SCOPED;
num_periods = llmin(num_periods, getNumRecordedPeriods());
F64 max_val = std::numeric_limits<F64>::min();
@@ -503,6 +515,7 @@ namespace LLTrace
template<typename T>
typename RelatedTypes<T>::fractional_t getPeriodMaxPerSec(const CountStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
+ LL_PROFILE_ZONE_SCOPED;
return typename RelatedTypes<T>::fractional_t(getPeriodMaxPerSec(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));
}
@@ -514,6 +527,7 @@ namespace LLTrace
template <typename T>
typename RelatedTypes<typename T::value_t>::fractional_t getPeriodMean(const StatType<T >& stat, S32 num_periods = S32_MAX)
{
+ LL_PROFILE_ZONE_SCOPED;
num_periods = llmin(num_periods, getNumRecordedPeriods());
typename RelatedTypes<typename T::value_t>::fractional_t mean(0);
@@ -534,12 +548,14 @@ namespace LLTrace
template<typename T>
typename RelatedTypes<T>::fractional_t getPeriodMean(const CountStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
+ LL_PROFILE_ZONE_SCOPED;
return typename RelatedTypes<T>::fractional_t(getPeriodMean(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));
}
F64 getPeriodMean(const StatType<SampleAccumulator>& stat, S32 num_periods = S32_MAX);
template<typename T>
typename RelatedTypes<T>::fractional_t getPeriodMean(const SampleStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
+ LL_PROFILE_ZONE_SCOPED;
return typename RelatedTypes<T>::fractional_t(getPeriodMean(static_cast<const StatType<SampleAccumulator>&>(stat), num_periods));
}
@@ -547,6 +563,7 @@ namespace LLTrace
template<typename T>
typename RelatedTypes<T>::fractional_t getPeriodMean(const EventStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
+ LL_PROFILE_ZONE_SCOPED;
return typename RelatedTypes<T>::fractional_t(getPeriodMean(static_cast<const StatType<EventAccumulator>&>(stat), num_periods));
}
@@ -556,6 +573,7 @@ namespace LLTrace
template <typename T>
typename RelatedTypes<typename T::value_t>::fractional_t getPeriodMeanPerSec(const StatType<T>& stat, S32 num_periods = S32_MAX)
{
+ LL_PROFILE_ZONE_SCOPED;
num_periods = llmin(num_periods, getNumRecordedPeriods());
typename RelatedTypes<typename T::value_t>::fractional_t mean = 0;
@@ -577,6 +595,7 @@ namespace LLTrace
template<typename T>
typename RelatedTypes<T>::fractional_t getPeriodMeanPerSec(const CountStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
+ LL_PROFILE_ZONE_SCOPED;
return typename RelatedTypes<T>::fractional_t(getPeriodMeanPerSec(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));
}
@@ -589,6 +608,7 @@ namespace LLTrace
template<typename T>
typename RelatedTypes<T>::fractional_t getPeriodStandardDeviation(const SampleStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
+ LL_PROFILE_ZONE_SCOPED;
return typename RelatedTypes<T>::fractional_t(getPeriodStandardDeviation(static_cast<const StatType<SampleAccumulator>&>(stat), num_periods));
}
@@ -596,6 +616,7 @@ namespace LLTrace
template<typename T>
typename RelatedTypes<T>::fractional_t getPeriodStandardDeviation(const EventStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
+ LL_PROFILE_ZONE_SCOPED;
return typename RelatedTypes<T>::fractional_t(getPeriodStandardDeviation(static_cast<const StatType<EventAccumulator>&>(stat), num_periods));
}
diff --git a/indra/llcommon/lltracethreadrecorder.cpp b/indra/llcommon/lltracethreadrecorder.cpp
index 025dc57044..7ae1e72784 100644
--- a/indra/llcommon/lltracethreadrecorder.cpp
+++ b/indra/llcommon/lltracethreadrecorder.cpp
@@ -274,12 +274,10 @@ void ThreadRecorder::pushToParent()
}
-static LLTrace::BlockTimerStatHandle FTM_PULL_TRACE_DATA_FROM_CHILDREN("Pull child thread trace data");
-
void ThreadRecorder::pullFromChildren()
{
#if LL_TRACE_ENABLED
- LL_RECORD_BLOCK_TIME(FTM_PULL_TRACE_DATA_FROM_CHILDREN);
+ LL_PROFILE_ZONE_SCOPED;
if (mActiveRecordings.empty()) return;
{ LLMutexLock lock(&mChildListMutex);