Diffstat (limited to 'indra/llcommon')
-rw-r--r--  indra/llcommon/CMakeLists.txt            |   1
-rw-r--r--  indra/llcommon/llcommon.cpp              |   4
-rw-r--r--  indra/llcommon/llerror.cpp               |  18
-rw-r--r--  indra/llcommon/llfasttimer.cpp           |   4
-rw-r--r--  indra/llcommon/llmemory.h                |  20
-rw-r--r--  indra/llcommon/llmutex.cpp               |  24
-rw-r--r--  indra/llcommon/llprofiler.h              |  40
-rw-r--r--  indra/llcommon/llprofilercategories.h    | 280
-rw-r--r--  indra/llcommon/llsd.cpp                  |  22
-rw-r--r--  indra/llcommon/llsd.h                    |  14
-rw-r--r--  indra/llcommon/llsingleton.h             |   2
-rw-r--r--  indra/llcommon/llstring.cpp              |   4
-rw-r--r--  indra/llcommon/llthread.cpp              |  14
-rw-r--r--  indra/llcommon/llthreadsafequeue.h       |  36
-rw-r--r--  indra/llcommon/lltrace.h                 |  18
-rw-r--r--  indra/llcommon/lltraceaccumulators.cpp   |  20
-rw-r--r--  indra/llcommon/lltraceaccumulators.h     |  26
-rw-r--r--  indra/llcommon/lltracerecording.cpp      | 100
-rw-r--r--  indra/llcommon/lltracerecording.h        |  46
-rw-r--r--  indra/llcommon/lltracethreadrecorder.cpp |   2
-rw-r--r--  indra/llcommon/threadsafeschedule.h      |  44
-rw-r--r--  indra/llcommon/workqueue.cpp             |   8
22 files changed, 534 insertions(+), 213 deletions(-)
diff --git a/indra/llcommon/CMakeLists.txt b/indra/llcommon/CMakeLists.txt
index 782f656406..ca8b5e946f 100644
--- a/indra/llcommon/CMakeLists.txt
+++ b/indra/llcommon/CMakeLists.txt
@@ -202,6 +202,7 @@ set(llcommon_HEADER_FILES
llnametable.h
llpointer.h
llprofiler.h
+ llprofilercategories.h
llpounceable.h
llpredicate.h
llpreprocessor.h
diff --git a/indra/llcommon/llcommon.cpp b/indra/llcommon/llcommon.cpp
index 25a809dad2..d2c4e66160 100644
--- a/indra/llcommon/llcommon.cpp
+++ b/indra/llcommon/llcommon.cpp
@@ -42,7 +42,7 @@ void *operator new(size_t size)
void* ptr;
if (gProfilerEnabled)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
ptr = (malloc)(size);
}
else
@@ -62,7 +62,7 @@ void operator delete(void *ptr) noexcept
TracyFree(ptr);
if (gProfilerEnabled)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
(free)(ptr);
}
else
diff --git a/indra/llcommon/llerror.cpp b/indra/llcommon/llerror.cpp
index 17a5ec5776..2fae9fdfaa 100644
--- a/indra/llcommon/llerror.cpp
+++ b/indra/llcommon/llerror.cpp
@@ -109,7 +109,7 @@ namespace {
virtual void recordMessage(LLError::ELevel level,
const std::string& message) override
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LOGGING
int syslogPriority = LOG_CRIT;
switch (level) {
case LLError::LEVEL_DEBUG: syslogPriority = LOG_DEBUG; break;
@@ -167,7 +167,7 @@ namespace {
virtual void recordMessage(LLError::ELevel level,
const std::string& message) override
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LOGGING
if (LLError::getAlwaysFlush())
{
mFile << message << std::endl;
@@ -234,7 +234,7 @@ namespace {
virtual void recordMessage(LLError::ELevel level,
const std::string& message) override
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LOGGING
// The default colors for error, warn and debug are now a bit more pastel
// and easier to read on the default (black) terminal background but you
// now have the option to set the color of each via environment variables:
@@ -274,7 +274,7 @@ namespace {
LL_FORCE_INLINE void writeANSI(const std::string& ansi_code, const std::string& message)
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LOGGING
static std::string s_ansi_bold = createBoldANSI(); // bold text
static std::string s_ansi_reset = createResetANSI(); // reset
// ANSI color code escape sequence, message, and reset in one fprintf call
@@ -311,7 +311,7 @@ namespace {
virtual void recordMessage(LLError::ELevel level,
const std::string& message) override
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LOGGING
mBuffer->addLine(message);
}
@@ -338,7 +338,7 @@ namespace {
virtual void recordMessage(LLError::ELevel level,
const std::string& message) override
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LOGGING
debugger_print(message);
}
};
@@ -1220,7 +1220,7 @@ namespace
void writeToRecorders(const LLError::CallSite& site, const std::string& message)
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LOGGING
LLError::ELevel level = site.mLevel;
SettingsConfigPtr s = Globals::getInstance()->getSettingsConfig();
@@ -1355,7 +1355,7 @@ namespace LLError
bool Log::shouldLog(CallSite& site)
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LOGGING
LLMutexTrylock lock(getMutex<LOG_MUTEX>(), 5);
if (!lock.isLocked())
{
@@ -1400,7 +1400,7 @@ namespace LLError
void Log::flush(const std::ostringstream& out, const CallSite& site)
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LOGGING
LLMutexTrylock lock(getMutex<LOG_MUTEX>(),5);
if (!lock.isLocked())
{
diff --git a/indra/llcommon/llfasttimer.cpp b/indra/llcommon/llfasttimer.cpp
index d38946004f..2612d0f07c 100644
--- a/indra/llcommon/llfasttimer.cpp
+++ b/indra/llcommon/llfasttimer.cpp
@@ -222,7 +222,7 @@ void BlockTimer::bootstrapTimerTree()
// this preserves partial order derived from current frame's observations
void BlockTimer::incrementalUpdateTimerTree()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
for(block_timer_tree_df_post_iterator_t it = begin_block_timer_tree_df_post(BlockTimer::getRootTimeBlock());
it != end_block_timer_tree_df_post();
++it)
@@ -263,7 +263,7 @@ void BlockTimer::incrementalUpdateTimerTree()
void BlockTimer::updateTimes()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
// walk up stack of active timers and accumulate current time while leaving timing structures active
BlockTimerStackRecord* stack_record = LLThreadLocalSingletonPointer<BlockTimerStackRecord>::getInstance();
if (!stack_record) return;
diff --git a/indra/llcommon/llmemory.h b/indra/llcommon/llmemory.h
index 41023b4ba4..ac6c969d70 100644
--- a/indra/llcommon/llmemory.h
+++ b/indra/llcommon/llmemory.h
@@ -136,7 +136,7 @@ public: \
#else
inline void* ll_aligned_malloc_fallback( size_t size, int align )
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
#if defined(LL_WINDOWS)
void* ret = _aligned_malloc(size, align);
#else
@@ -157,7 +157,7 @@ public: \
inline void ll_aligned_free_fallback( void* ptr )
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
LL_PROFILE_FREE(ptr);
#if defined(LL_WINDOWS)
_aligned_free(ptr);
@@ -174,7 +174,7 @@ public: \
inline void* ll_aligned_malloc_16(size_t size) // returned hunk MUST be freed with ll_aligned_free_16().
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
#if defined(LL_WINDOWS)
void* ret = _aligned_malloc(size, 16);
#elif defined(LL_DARWIN)
@@ -190,7 +190,7 @@ inline void* ll_aligned_malloc_16(size_t size) // returned hunk MUST be freed wi
inline void ll_aligned_free_16(void *p)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
LL_PROFILE_FREE(p);
#if defined(LL_WINDOWS)
_aligned_free(p);
@@ -203,7 +203,7 @@ inline void ll_aligned_free_16(void *p)
inline void* ll_aligned_realloc_16(void* ptr, size_t size, size_t old_size) // returned hunk MUST be freed with ll_aligned_free_16().
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
LL_PROFILE_FREE(ptr);
#if defined(LL_WINDOWS)
void* ret = _aligned_realloc(ptr, size, 16);
@@ -228,7 +228,7 @@ inline void* ll_aligned_realloc_16(void* ptr, size_t size, size_t old_size) // r
inline void* ll_aligned_malloc_32(size_t size) // returned hunk MUST be freed with ll_aligned_free_32().
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
#if defined(LL_WINDOWS)
void* ret = _aligned_malloc(size, 32);
#elif defined(LL_DARWIN)
@@ -244,7 +244,7 @@ inline void* ll_aligned_malloc_32(size_t size) // returned hunk MUST be freed wi
inline void ll_aligned_free_32(void *p)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
LL_PROFILE_FREE(p);
#if defined(LL_WINDOWS)
_aligned_free(p);
@@ -259,7 +259,7 @@ inline void ll_aligned_free_32(void *p)
template<size_t ALIGNMENT>
LL_FORCE_INLINE void* ll_aligned_malloc(size_t size)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
void* ret;
if (LL_DEFAULT_HEAP_ALIGN % ALIGNMENT == 0)
{
@@ -284,7 +284,7 @@ LL_FORCE_INLINE void* ll_aligned_malloc(size_t size)
template<size_t ALIGNMENT>
LL_FORCE_INLINE void ll_aligned_free(void* ptr)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
if (ALIGNMENT == LL_DEFAULT_HEAP_ALIGN)
{
LL_PROFILE_FREE(ptr);
@@ -309,7 +309,7 @@ LL_FORCE_INLINE void ll_aligned_free(void* ptr)
//
inline void ll_memcpy_nonaliased_aligned_16(char* __restrict dst, const char* __restrict src, size_t bytes)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
assert(src != NULL);
assert(dst != NULL);
assert(bytes > 0);
diff --git a/indra/llcommon/llmutex.cpp b/indra/llcommon/llmutex.cpp
index a49002b5dc..0273dd5970 100644
--- a/indra/llcommon/llmutex.cpp
+++ b/indra/llcommon/llmutex.cpp
@@ -44,7 +44,7 @@ LLMutex::~LLMutex()
void LLMutex::lock()
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
if(isSelfLocked())
{ //redundant lock
mCount++;
@@ -66,7 +66,7 @@ void LLMutex::lock()
void LLMutex::unlock()
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
if (mCount > 0)
{ //not the root unlock
mCount--;
@@ -87,7 +87,7 @@ void LLMutex::unlock()
bool LLMutex::isLocked()
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
if (!mMutex.try_lock())
{
return true;
@@ -111,7 +111,7 @@ LLThread::id_t LLMutex::lockingThread() const
bool LLMutex::trylock()
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
if(isSelfLocked())
{ //redundant lock
mCount++;
@@ -150,20 +150,20 @@ LLCondition::~LLCondition()
void LLCondition::wait()
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
std::unique_lock< std::mutex > lock(mMutex);
mCond.wait(lock);
}
void LLCondition::signal()
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
mCond.notify_one();
}
void LLCondition::broadcast()
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
mCond.notify_all();
}
@@ -173,7 +173,7 @@ LLMutexTrylock::LLMutexTrylock(LLMutex* mutex)
: mMutex(mutex),
mLocked(false)
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
if (mMutex)
mLocked = mMutex->trylock();
}
@@ -182,7 +182,7 @@ LLMutexTrylock::LLMutexTrylock(LLMutex* mutex, U32 aTries, U32 delay_ms)
: mMutex(mutex),
mLocked(false)
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
if (!mMutex)
return;
@@ -197,7 +197,7 @@ LLMutexTrylock::LLMutexTrylock(LLMutex* mutex, U32 aTries, U32 delay_ms)
LLMutexTrylock::~LLMutexTrylock()
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
if (mMutex && mLocked)
mMutex->unlock();
}
@@ -209,7 +209,7 @@ LLMutexTrylock::~LLMutexTrylock()
//
LLScopedLock::LLScopedLock(std::mutex* mutex) : mMutex(mutex)
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
if(mutex)
{
mutex->lock();
@@ -228,7 +228,7 @@ LLScopedLock::~LLScopedLock()
void LLScopedLock::unlock()
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
if(mLocked)
{
mMutex->unlock();
diff --git a/indra/llcommon/llprofiler.h b/indra/llcommon/llprofiler.h
index ca60d23248..f9d7ae7ce4 100644
--- a/indra/llcommon/llprofiler.h
+++ b/indra/llcommon/llprofiler.h
@@ -27,6 +27,44 @@
#ifndef LL_PROFILER_H
#define LL_PROFILER_H
+// If you use the default macros LL_PROFILE_ZONE_SCOPED and LL_PROFILE_ZONE_NAMED to profile code ...
+//
+// void foo()
+// {
+// LL_PROFILE_ZONE_SCOPED;
+// :
+//
+// {
+// LL_PROFILE_ZONE_NAMED("widget bar");
+// :
+// }
+// {
+// LL_PROFILE_ZONE_NAMED("widget qux");
+// :
+// }
+// }
+//
+// ... please be aware that ALL of these will show up in a Tracy capture, which can quickly exhaust memory.
+// Instead, use LL_PROFILE_ZONE_SCOPED_CATEGORY_* and LL_PROFILE_ZONE_NAMED_CATEGORY_* to profile code ...
+//
+// void foo()
+// {
+// LL_PROFILE_ZONE_SCOPED_CATEGORY_UI;
+// :
+//
+// {
+// LL_PROFILE_ZONE_NAMED_CATEGORY_UI("widget bar");
+// :
+// }
+// {
+// LL_PROFILE_ZONE_NAMED_CATEGORY_UI("widget qux");
+// :
+// }
+// }
+//
+// ... as these can be selectively turned on/off. This will minimize memory usage and visual clutter in a Tracy capture.
+// See llprofilercategories.h for more details on profiling categories.
+
#define LL_PROFILER_CONFIG_NONE 0 // No profiling
#define LL_PROFILER_CONFIG_FAST_TIMER 1 // Profiling on: Only Fast Timers
#define LL_PROFILER_CONFIG_TRACY 2 // Profiling on: Only Tracy
@@ -108,4 +146,6 @@ extern thread_local bool gProfilerEnabled;
#define LL_PROFILER_SET_THREAD_NAME( name ) (void)(name)
#endif // LL_PROFILER
+#include "llprofilercategories.h"
+
#endif // LL_PROFILER_H
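
Taken together, the llprofiler.h changes give callers a drop-in replacement for the uncategorized macros. A minimal usage sketch, assuming a translation unit inside the viewer tree with "llprofiler.h" on the include path (the function and zone names here are hypothetical, not part of this commit):

    #include "llprofiler.h"

    void drawToolbar()  // hypothetical UI function
    {
        LL_PROFILE_ZONE_SCOPED_CATEGORY_UI;  // whole-function zone, tagged UI

        {
            LL_PROFILE_ZONE_NAMED_CATEGORY_UI("toolbar buttons");  // named sub-zone
            // ... lay out and draw the buttons ...
        }
    }

When LL_PROFILER_CATEGORY_ENABLE_UI is 0, both macros expand to nothing, so the zones vanish from Tracy captures at zero runtime cost; at 1 they behave exactly like LL_PROFILE_ZONE_SCOPED / LL_PROFILE_ZONE_NAMED.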
diff --git a/indra/llcommon/llprofilercategories.h b/indra/llcommon/llprofilercategories.h
new file mode 100644
index 0000000000..8db29468cc
--- /dev/null
+++ b/indra/llcommon/llprofilercategories.h
@@ -0,0 +1,280 @@
+/**
+ * @file llprofilercategories.h
+ * @brief Profiling categories to minimize Tracy memory usage when viewing captures.
+ *
+ * $LicenseInfo:firstyear=2022&license=viewerlgpl$
+ * Second Life Viewer Source Code
+ * Copyright (C) 2022, Linden Research, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License only.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
+ * $/LicenseInfo$
+ */
+
+#ifndef LL_PROFILER_CATEGORIES_H
+#define LL_PROFILER_CATEGORIES_H
+
+// A Tracy capture can quickly consume memory. Use these defines to turn Tracy profiling on or off per category.
+// The categories that use the most memory are:
+//
+// LL_PROFILER_CATEGORY_ENABLE_DRAWPOOL
+// LL_PROFILER_CATEGORY_ENABLE_LLSD
+// LL_PROFILER_CATEGORY_ENABLE_MEMORY
+// LL_PROFILER_CATEGORY_ENABLE_SHADER
+//
+// NOTE: You can still use the uncategorized macros directly:
+// LL_PROFILE_ZONE_SCOPED;
+// LL_PROFILE_ZONE_NAMED("name");
+// but be aware that those zones will ALWAYS show up in a Tracy capture,
+// a) using more memory, and
+// b) adding visual clutter.
+#define LL_PROFILER_CATEGORY_ENABLE_APP 1
+#define LL_PROFILER_CATEGORY_ENABLE_AVATAR 1
+#define LL_PROFILER_CATEGORY_ENABLE_DISPLAY 1
+#define LL_PROFILER_CATEGORY_ENABLE_DRAWABLE 1
+#define LL_PROFILER_CATEGORY_ENABLE_DRAWPOOL 1
+#define LL_PROFILER_CATEGORY_ENABLE_ENVIRONMENT 1
+#define LL_PROFILER_CATEGORY_ENABLE_FACE 1
+#define LL_PROFILER_CATEGORY_ENABLE_LLSD 1
+#define LL_PROFILER_CATEGORY_ENABLE_LOGGING 1
+#define LL_PROFILER_CATEGORY_ENABLE_MATERIAL 1
+#define LL_PROFILER_CATEGORY_ENABLE_MEDIA 1
+#define LL_PROFILER_CATEGORY_ENABLE_MEMORY 1
+#define LL_PROFILER_CATEGORY_ENABLE_NETWORK 1
+#define LL_PROFILER_CATEGORY_ENABLE_OCTREE 1
+#define LL_PROFILER_CATEGORY_ENABLE_PIPELINE 1
+#define LL_PROFILER_CATEGORY_ENABLE_SHADER 1
+#define LL_PROFILER_CATEGORY_ENABLE_SPATIAL 1
+#define LL_PROFILER_CATEGORY_ENABLE_STATS 1
+#define LL_PROFILER_CATEGORY_ENABLE_STRING 1
+#define LL_PROFILER_CATEGORY_ENABLE_TEXTURE 1
+#define LL_PROFILER_CATEGORY_ENABLE_THREAD 1
+#define LL_PROFILER_CATEGORY_ENABLE_UI 1
+#define LL_PROFILER_CATEGORY_ENABLE_VERTEX 1
+#define LL_PROFILER_CATEGORY_ENABLE_VIEWER 1
+#define LL_PROFILER_CATEGORY_ENABLE_VOLUME 1
+#define LL_PROFILER_CATEGORY_ENABLE_WIN32 1
+
+#if LL_PROFILER_CATEGORY_ENABLE_APP
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_APP LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_APP LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_APP(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_APP
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_AVATAR
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_AVATAR LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_AVATAR LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_AVATAR(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_AVATAR
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_DISPLAY
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_DISPLAY LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_DISPLAY LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_DISPLAY(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_DISPLAY
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_DRAWABLE
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_DRAWABLE LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_DRAWABLE LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_DRAWABLE(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_DRAWABLE
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_DRAWPOOL
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_DRAWPOOL LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_DRAWPOOL LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_DRAWPOOL(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_DRAWPOOL
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_ENVIRONMENT
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_ENVIRONMENT LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_ENVIRONMENT LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_ENVIRONMENT(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_ENVIRONMENT
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_FACE
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_FACE LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_FACE LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_FACE(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_FACE
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_LLSD
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_LLSD LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_LLSD(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_LOGGING
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_LOGGING LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_LOGGING LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_LOGGING(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_LOGGING
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_MATERIAL
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_MATERIAL LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_MATERIAL LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_MATERIAL(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_MATERIAL
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_MEDIA
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_MEDIA LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_MEDIA LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_MEDIA(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_MEDIA
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_MEMORY
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_MEMORY LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_MEMORY(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_NETWORK
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_NETWORK LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_NETWORK(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_OCTREE
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_OCTREE LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_OCTREE LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_OCTREE(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_OCTREE
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_PIPELINE
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_PIPELINE LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_PIPELINE LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_PIPELINE(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_PIPELINE
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_SHADER
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_SHADER LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_SHADER LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_SHADER(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_SHADER
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_SPATIAL
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_SPATIAL LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_SPATIAL LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_SPATIAL(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_SPATIAL
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_STATS
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_STATS LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_STATS(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_STRING
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_STRING LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_STRING LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_STRING(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_STRING
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_TEXTURE
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_TEXTURE LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_TEXTURE LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_TEXTURE(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_TEXTURE
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_THREAD
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_THREAD LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_THREAD(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_UI
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_UI LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_UI LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_UI(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_UI
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_VERTEX
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_VERTEX LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_VERTEX LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_VERTEX(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_VERTEX
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_VIEWER
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_VIEWER LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_VIEWER LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_VIEWER(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_VIEWER
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_VOLUME
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_VOLUME LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_VOLUME LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_VOLUME(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_VOLUME
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_WIN32
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_WIN32 LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_WIN32 LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_WIN32(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_WIN32
+#endif
+
+#endif // LL_PROFILER_CATEGORIES_H
+
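
Because every category macro funnels through the enable defines above, trimming a capture is a one-line, recompile-time change. A hypothetical local edit (not part of this commit), shown here for the LLSD category:

    // llprofilercategories.h -- drop LLSD zones from Tracy captures
    #define LL_PROFILER_CATEGORY_ENABLE_LLSD 0

With that define at 0, LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD expands to nothing and LL_PROFILE_ZONE_NAMED_CATEGORY_LLSD(name) swallows its argument, so the heavily trafficked LLSD accessors instrumented in the llsd.cpp hunks below stop emitting zones entirely.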
diff --git a/indra/llcommon/llsd.cpp b/indra/llcommon/llsd.cpp
index 605f6bf0e3..807b3d13f8 100644
--- a/indra/llcommon/llsd.cpp
+++ b/indra/llcommon/llsd.cpp
@@ -400,7 +400,7 @@ namespace
ImplMap& ImplMap::makeMap(LLSD::Impl*& var)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
if (shared())
{
ImplMap* i = new ImplMap(mData);
@@ -415,21 +415,21 @@ namespace
bool ImplMap::has(const LLSD::String& k) const
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
DataMap::const_iterator i = mData.find(k);
return i != mData.end();
}
LLSD ImplMap::get(const LLSD::String& k) const
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
DataMap::const_iterator i = mData.find(k);
return (i != mData.end()) ? i->second : LLSD();
}
LLSD ImplMap::getKeys() const
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
LLSD keys = LLSD::emptyArray();
DataMap::const_iterator iter = mData.begin();
while (iter != mData.end())
@@ -442,13 +442,13 @@ namespace
void ImplMap::insert(const LLSD::String& k, const LLSD& v)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
mData.insert(DataMap::value_type(k, v));
}
void ImplMap::erase(const LLSD::String& k)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
mData.erase(k);
}
@@ -690,7 +690,7 @@ const LLSD::Impl& LLSD::Impl::safe(const Impl* impl)
ImplMap& LLSD::Impl::makeMap(Impl*& var)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
ImplMap* im = new ImplMap;
reset(var, im);
return *im;
@@ -896,12 +896,12 @@ void LLSD::erase(const String& k) { makeMap(impl).erase(k); }
LLSD& LLSD::operator[](const String& k)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
return makeMap(impl).ref(k);
}
const LLSD& LLSD::operator[](const String& k) const
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
return safe(impl).ref(k);
}
@@ -928,12 +928,12 @@ void LLSD::erase(Integer i) { makeArray(impl).erase(i); }
LLSD& LLSD::operator[](Integer i)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
return makeArray(impl).ref(i);
}
const LLSD& LLSD::operator[](Integer i) const
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
return safe(impl).ref(i);
}
diff --git a/indra/llcommon/llsd.h b/indra/llcommon/llsd.h
index b8ddf21596..24cb9bbce1 100644
--- a/indra/llcommon/llsd.h
+++ b/indra/llcommon/llsd.h
@@ -290,16 +290,16 @@ public:
LLSD& with(const String&, const LLSD&);
LLSD& operator[](const String&);
- LLSD& operator[](const char* c)
- {
- LL_PROFILE_ZONE_SCOPED;
- return (*this)[String(c)];
+ LLSD& operator[](const char* c)
+ {
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
+ return (*this)[String(c)];
}
const LLSD& operator[](const String&) const;
const LLSD& operator[](const char* c) const
- {
- LL_PROFILE_ZONE_SCOPED;
- return (*this)[String(c)];
+ {
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
+ return (*this)[String(c)];
}
//@}
diff --git a/indra/llcommon/llsingleton.h b/indra/llcommon/llsingleton.h
index 6042c0906c..51ef514cf7 100644
--- a/indra/llcommon/llsingleton.h
+++ b/indra/llcommon/llsingleton.h
@@ -455,7 +455,7 @@ public:
static DERIVED_TYPE* getInstance()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
// We know the viewer has LLSingleton dependency circularities. If you
// feel strongly motivated to eliminate them, cheers and good luck.
// (At that point we could consider a much simpler locking mechanism.)
diff --git a/indra/llcommon/llstring.cpp b/indra/llcommon/llstring.cpp
index bdea1e76ea..7f501f2e77 100644
--- a/indra/llcommon/llstring.cpp
+++ b/indra/llcommon/llstring.cpp
@@ -1317,7 +1317,7 @@ bool LLStringUtil::formatDatetime(std::string& replacement, std::string token,
template<>
S32 LLStringUtil::format(std::string& s, const format_map_t& substitutions)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STRING;
S32 res = 0;
std::string output;
@@ -1390,7 +1390,7 @@ S32 LLStringUtil::format(std::string& s, const format_map_t& substitutions)
template<>
S32 LLStringUtil::format(std::string& s, const LLSD& substitutions)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STRING;
S32 res = 0;
if (!substitutions.isMap())
diff --git a/indra/llcommon/llthread.cpp b/indra/llcommon/llthread.cpp
index 11f5a015f1..a807acc56e 100644
--- a/indra/llcommon/llthread.cpp
+++ b/indra/llcommon/llthread.cpp
@@ -333,7 +333,7 @@ bool LLThread::runCondition(void)
// Stop thread execution if requested until unpaused.
void LLThread::checkPause()
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
mDataLock->lock();
// This is in a while loop because the pthread API allows for spurious wakeups.
@@ -365,20 +365,20 @@ void LLThread::setQuitting()
// static
LLThread::id_t LLThread::currentID()
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
return std::this_thread::get_id();
}
// static
void LLThread::yield()
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
std::this_thread::yield();
}
void LLThread::wake()
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
mDataLock->lock();
if(!shouldSleep())
{
@@ -389,7 +389,7 @@ void LLThread::wake()
void LLThread::wakeLocked()
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
if(!shouldSleep())
{
mRunCondition->signal();
@@ -398,13 +398,13 @@ void LLThread::wakeLocked()
void LLThread::lockData()
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
mDataLock->lock();
}
void LLThread::unlockData()
{
- LL_PROFILE_ZONE_SCOPED
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
mDataLock->unlock();
}
diff --git a/indra/llcommon/llthreadsafequeue.h b/indra/llcommon/llthreadsafequeue.h
index 2806506550..68d79cdd12 100644
--- a/indra/llcommon/llthreadsafequeue.h
+++ b/indra/llcommon/llthreadsafequeue.h
@@ -275,7 +275,7 @@ template <typename ElementT, typename QueueT>
template <typename CALLABLE>
bool LLThreadSafeQueue<ElementT, QueueT>::tryLock(CALLABLE&& callable)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
lock_t lock1(mLock, std::defer_lock);
if (!lock1.try_lock())
return false;
@@ -292,7 +292,7 @@ bool LLThreadSafeQueue<ElementT, QueueT>::tryLockUntil(
const std::chrono::time_point<Clock, Duration>& until,
CALLABLE&& callable)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
lock_t lock1(mLock, std::defer_lock);
if (!lock1.try_lock_until(until))
return false;
@@ -306,7 +306,7 @@ template <typename ElementT, typename QueueT>
template <typename T>
bool LLThreadSafeQueue<ElementT, QueueT>::push_(lock_t& lock, T&& element)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
if (mStorage.size() >= mCapacity)
return false;
@@ -322,7 +322,7 @@ template <typename ElementT, typename QueueT>
template <typename T>
bool LLThreadSafeQueue<ElementT, QueueT>::pushIfOpen(T&& element)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
lock_t lock1(mLock);
while (true)
{
@@ -345,7 +345,7 @@ template <typename ElementT, typename QueueT>
template<typename T>
void LLThreadSafeQueue<ElementT, QueueT>::push(T&& element)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
if (! pushIfOpen(std::forward<T>(element)))
{
LLTHROW(LLThreadSafeQueueInterrupt());
@@ -357,7 +357,7 @@ template<typename ElementT, typename QueueT>
template<typename T>
bool LLThreadSafeQueue<ElementT, QueueT>::tryPush(T&& element)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
return tryLock(
[this, element=std::move(element)](lock_t& lock)
{
@@ -374,7 +374,7 @@ bool LLThreadSafeQueue<ElementT, QueueT>::tryPushFor(
const std::chrono::duration<Rep, Period>& timeout,
T&& element)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
// Convert duration to time_point: passing the same timeout duration to
// each of multiple calls is wrong.
return tryPushUntil(std::chrono::steady_clock::now() + timeout,
@@ -388,7 +388,7 @@ bool LLThreadSafeQueue<ElementT, QueueT>::tryPushUntil(
const std::chrono::time_point<Clock, Duration>& until,
T&& element)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
return tryLockUntil(
until,
[this, until, element=std::move(element)](lock_t& lock)
@@ -421,7 +421,7 @@ template <typename ElementT, typename QueueT>
typename LLThreadSafeQueue<ElementT, QueueT>::pop_result
LLThreadSafeQueue<ElementT, QueueT>::pop_(lock_t& lock, ElementT& element)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
// If mStorage is empty, there's no head element.
if (mStorage.empty())
return mClosed? DONE : EMPTY;
@@ -443,7 +443,7 @@ LLThreadSafeQueue<ElementT, QueueT>::pop_(lock_t& lock, ElementT& element)
template<typename ElementT, typename QueueT>
ElementT LLThreadSafeQueue<ElementT, QueueT>::pop(void)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
lock_t lock1(mLock);
ElementT value;
while (true)
@@ -472,7 +472,7 @@ ElementT LLThreadSafeQueue<ElementT, QueueT>::pop(void)
template<typename ElementT, typename QueueT>
bool LLThreadSafeQueue<ElementT, QueueT>::tryPop(ElementT & element)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
return tryLock(
[this, &element](lock_t& lock)
{
@@ -490,7 +490,7 @@ bool LLThreadSafeQueue<ElementT, QueueT>::tryPopFor(
const std::chrono::duration<Rep, Period>& timeout,
ElementT& element)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
// Convert duration to time_point: passing the same timeout duration to
// each of multiple calls is wrong.
return tryPopUntil(std::chrono::steady_clock::now() + timeout, element);
@@ -503,7 +503,7 @@ bool LLThreadSafeQueue<ElementT, QueueT>::tryPopUntil(
const std::chrono::time_point<Clock, Duration>& until,
ElementT& element)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
return tryLockUntil(
until,
[this, until, &element](lock_t& lock)
@@ -523,7 +523,7 @@ LLThreadSafeQueue<ElementT, QueueT>::tryPopUntil_(
const std::chrono::time_point<Clock, Duration>& until,
ElementT& element)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
while (true)
{
pop_result popped = pop_(lock, element);
@@ -550,7 +550,7 @@ LLThreadSafeQueue<ElementT, QueueT>::tryPopUntil_(
template<typename ElementT, typename QueueT>
size_t LLThreadSafeQueue<ElementT, QueueT>::size(void)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
lock_t lock(mLock);
return mStorage.size();
}
@@ -559,7 +559,7 @@ size_t LLThreadSafeQueue<ElementT, QueueT>::size(void)
template<typename ElementT, typename QueueT>
void LLThreadSafeQueue<ElementT, QueueT>::close()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
lock_t lock(mLock);
mClosed = true;
lock.unlock();
@@ -573,7 +573,7 @@ void LLThreadSafeQueue<ElementT, QueueT>::close()
template<typename ElementT, typename QueueT>
bool LLThreadSafeQueue<ElementT, QueueT>::isClosed()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
lock_t lock(mLock);
return mClosed;
}
@@ -582,7 +582,7 @@ bool LLThreadSafeQueue<ElementT, QueueT>::isClosed()
template<typename ElementT, typename QueueT>
bool LLThreadSafeQueue<ElementT, QueueT>::done()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
lock_t lock(mLock);
return mClosed && mStorage.empty();
}
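
The recurring comment in tryPushFor()/tryPopFor() -- "Convert duration to time_point: passing the same timeout duration to each of multiple calls is wrong" -- encodes a general retry rule: compute one absolute deadline up front and test against it on every attempt. A self-contained C++ sketch of that pattern (generic, not viewer code):

    #include <chrono>
    #include <functional>

    // Retry `attempt` until it succeeds or `timeout` has elapsed in total.
    bool retryFor(std::chrono::milliseconds timeout,
                  const std::function<bool()>& attempt)
    {
        // One deadline for the whole loop; re-passing the same relative
        // timeout to each attempt would let the total wait grow unboundedly.
        const auto deadline = std::chrono::steady_clock::now() + timeout;
        do
        {
            if (attempt())
                return true;
        } while (std::chrono::steady_clock::now() < deadline);
        return false;
    }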
diff --git a/indra/llcommon/lltrace.h b/indra/llcommon/lltrace.h
index 4051c558a4..fcd8753f75 100644
--- a/indra/llcommon/lltrace.h
+++ b/indra/llcommon/lltrace.h
@@ -227,7 +227,7 @@ public:
void setName(const char* name)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
mName = name;
setKey(name);
}
@@ -236,13 +236,13 @@ public:
StatType<MemAccumulator::AllocationFacet>& allocations()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
return static_cast<StatType<MemAccumulator::AllocationFacet>&>(*(StatType<MemAccumulator>*)this);
}
StatType<MemAccumulator::DeallocationFacet>& deallocations()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
return static_cast<StatType<MemAccumulator::DeallocationFacet>&>(*(StatType<MemAccumulator>*)this);
}
};
@@ -264,7 +264,7 @@ struct MeasureMem<T, typename T::mem_trackable_tag_t, IS_BYTES>
{
static size_t measureFootprint(const T& value)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
return sizeof(T) + value.getMemFootprint();
}
};
@@ -274,7 +274,7 @@ struct MeasureMem<T, IS_MEM_TRACKABLE, typename T::is_unit_t>
{
static size_t measureFootprint(const T& value)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
return U32Bytes(value).value();
}
};
@@ -284,7 +284,7 @@ struct MeasureMem<T*, IS_MEM_TRACKABLE, IS_BYTES>
{
static size_t measureFootprint(const T* value)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
if (!value)
{
return 0;
@@ -329,7 +329,7 @@ struct MeasureMem<std::basic_string<T>, IS_MEM_TRACKABLE, IS_BYTES>
{
static size_t measureFootprint(const std::basic_string<T>& value)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
return value.capacity() * sizeof(T);
}
};
@@ -338,7 +338,7 @@ struct MeasureMem<std::basic_string<T>, IS_MEM_TRACKABLE, IS_BYTES>
template<typename T>
inline void claim_alloc(MemStatHandle& measurement, const T& value)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
#if LL_TRACE_ENABLED
S32 size = MeasureMem<T>::measureFootprint(value);
if(size == 0) return;
@@ -351,7 +351,7 @@ inline void claim_alloc(MemStatHandle& measurement, const T& value)
template<typename T>
inline void disclaim_alloc(MemStatHandle& measurement, const T& value)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
#if LL_TRACE_ENABLED
S32 size = MeasureMem<T>::measureFootprint(value);
if(size == 0) return;
diff --git a/indra/llcommon/lltraceaccumulators.cpp b/indra/llcommon/lltraceaccumulators.cpp
index 8e9aaee0e6..34299f5a29 100644
--- a/indra/llcommon/lltraceaccumulators.cpp
+++ b/indra/llcommon/lltraceaccumulators.cpp
@@ -41,7 +41,7 @@ extern MemStatHandle gTraceMemStat;
AccumulatorBufferGroup::AccumulatorBufferGroup()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
claim_alloc(gTraceMemStat, mCounts.capacity() * sizeof(CountAccumulator));
claim_alloc(gTraceMemStat, mSamples.capacity() * sizeof(SampleAccumulator));
claim_alloc(gTraceMemStat, mEvents.capacity() * sizeof(EventAccumulator));
@@ -56,7 +56,7 @@ AccumulatorBufferGroup::AccumulatorBufferGroup(const AccumulatorBufferGroup& oth
mStackTimers(other.mStackTimers),
mMemStats(other.mMemStats)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
claim_alloc(gTraceMemStat, mCounts.capacity() * sizeof(CountAccumulator));
claim_alloc(gTraceMemStat, mSamples.capacity() * sizeof(SampleAccumulator));
claim_alloc(gTraceMemStat, mEvents.capacity() * sizeof(EventAccumulator));
@@ -66,7 +66,7 @@ AccumulatorBufferGroup::AccumulatorBufferGroup(const AccumulatorBufferGroup& oth
AccumulatorBufferGroup::~AccumulatorBufferGroup()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
disclaim_alloc(gTraceMemStat, mCounts.capacity() * sizeof(CountAccumulator));
disclaim_alloc(gTraceMemStat, mSamples.capacity() * sizeof(SampleAccumulator));
disclaim_alloc(gTraceMemStat, mEvents.capacity() * sizeof(EventAccumulator));
@@ -76,7 +76,7 @@ AccumulatorBufferGroup::~AccumulatorBufferGroup()
void AccumulatorBufferGroup::handOffTo(AccumulatorBufferGroup& other)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
other.mCounts.reset(&mCounts);
other.mSamples.reset(&mSamples);
other.mEvents.reset(&mEvents);
@@ -86,7 +86,7 @@ void AccumulatorBufferGroup::handOffTo(AccumulatorBufferGroup& other)
void AccumulatorBufferGroup::makeCurrent()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
mCounts.makeCurrent();
mSamples.makeCurrent();
mEvents.makeCurrent();
@@ -109,7 +109,7 @@ void AccumulatorBufferGroup::makeCurrent()
//static
void AccumulatorBufferGroup::clearCurrent()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
AccumulatorBuffer<CountAccumulator>::clearCurrent();
AccumulatorBuffer<SampleAccumulator>::clearCurrent();
AccumulatorBuffer<EventAccumulator>::clearCurrent();
@@ -124,7 +124,7 @@ bool AccumulatorBufferGroup::isCurrent() const
void AccumulatorBufferGroup::append( const AccumulatorBufferGroup& other )
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
mCounts.addSamples(other.mCounts, SEQUENTIAL);
mSamples.addSamples(other.mSamples, SEQUENTIAL);
mEvents.addSamples(other.mEvents, SEQUENTIAL);
@@ -134,7 +134,7 @@ void AccumulatorBufferGroup::append( const AccumulatorBufferGroup& other )
void AccumulatorBufferGroup::merge( const AccumulatorBufferGroup& other)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
mCounts.addSamples(other.mCounts, NON_SEQUENTIAL);
mSamples.addSamples(other.mSamples, NON_SEQUENTIAL);
mEvents.addSamples(other.mEvents, NON_SEQUENTIAL);
@@ -145,7 +145,7 @@ void AccumulatorBufferGroup::merge( const AccumulatorBufferGroup& other)
void AccumulatorBufferGroup::reset(AccumulatorBufferGroup* other)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
mCounts.reset(other ? &other->mCounts : NULL);
mSamples.reset(other ? &other->mSamples : NULL);
mEvents.reset(other ? &other->mEvents : NULL);
@@ -155,7 +155,7 @@ void AccumulatorBufferGroup::reset(AccumulatorBufferGroup* other)
void AccumulatorBufferGroup::sync()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
if (isCurrent())
{
F64SecondsImplicit time_stamp = LLTimer::getTotalSeconds();
diff --git a/indra/llcommon/lltraceaccumulators.h b/indra/llcommon/lltraceaccumulators.h
index b183fcd14a..7267a44300 100644
--- a/indra/llcommon/lltraceaccumulators.h
+++ b/indra/llcommon/lltraceaccumulators.h
@@ -66,7 +66,7 @@ namespace LLTrace
: mStorageSize(0),
mStorage(NULL)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
const AccumulatorBuffer& other = *getDefaultBuffer();
resize(sNextStorageSlot);
for (S32 i = 0; i < sNextStorageSlot; i++)
@@ -77,7 +77,7 @@ namespace LLTrace
~AccumulatorBuffer()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
if (isCurrent())
{
LLThreadLocalSingletonPointer<ACCUMULATOR>::setInstance(NULL);
@@ -100,7 +100,7 @@ namespace LLTrace
: mStorageSize(0),
mStorage(NULL)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
resize(sNextStorageSlot);
for (S32 i = 0; i < sNextStorageSlot; i++)
{
@@ -110,7 +110,7 @@ namespace LLTrace
void addSamples(const AccumulatorBuffer<ACCUMULATOR>& other, EBufferAppendType append_type)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
llassert(mStorageSize >= sNextStorageSlot && other.mStorageSize >= sNextStorageSlot);
for (size_t i = 0; i < sNextStorageSlot; i++)
{
@@ -120,7 +120,7 @@ namespace LLTrace
void copyFrom(const AccumulatorBuffer<ACCUMULATOR>& other)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
llassert(mStorageSize >= sNextStorageSlot && other.mStorageSize >= sNextStorageSlot);
for (size_t i = 0; i < sNextStorageSlot; i++)
{
@@ -130,7 +130,7 @@ namespace LLTrace
void reset(const AccumulatorBuffer<ACCUMULATOR>* other = NULL)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
llassert(mStorageSize >= sNextStorageSlot);
for (size_t i = 0; i < sNextStorageSlot; i++)
{
@@ -140,7 +140,7 @@ namespace LLTrace
void sync(F64SecondsImplicit time_stamp)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
llassert(mStorageSize >= sNextStorageSlot);
for (size_t i = 0; i < sNextStorageSlot; i++)
{
@@ -166,7 +166,7 @@ namespace LLTrace
// NOTE: this is not thread-safe. We assume that slots are reserved in the main thread before any child threads are spawned
size_t reserveSlot()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
size_t next_slot = sNextStorageSlot++;
if (next_slot >= mStorageSize)
{
@@ -180,7 +180,7 @@ namespace LLTrace
void resize(size_t new_size)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
if (new_size <= mStorageSize) return;
ACCUMULATOR* old_storage = mStorage;
@@ -221,7 +221,7 @@ namespace LLTrace
static self_t* getDefaultBuffer()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
static bool sInitialized = false;
if (!sInitialized)
{
@@ -336,7 +336,7 @@ namespace LLTrace
void sample(F64 value)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
F64SecondsImplicit time_stamp = LLTimer::getTotalSeconds();
// store effect of last value
@@ -550,7 +550,7 @@ namespace LLTrace
void addSamples(const MemAccumulator& other, EBufferAppendType append_type)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
mAllocations.addSamples(other.mAllocations, append_type);
mDeallocations.addSamples(other.mDeallocations, append_type);
@@ -569,7 +569,7 @@ namespace LLTrace
void reset(const MemAccumulator* other)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
mSize.reset(other ? &other->mSize : NULL);
mAllocations.reset(other ? &other->mAllocations : NULL);
mDeallocations.reset(other ? &other->mDeallocations : NULL);
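
The reserveSlot() NOTE above ("this is not thread-safe. We assume that slots are reserved in the main thread before any child threads are spawned") is worth spelling out: sNextStorageSlot is a plain post-incremented counter, so two threads reserving concurrently could receive the same slot. A generic C++ sketch of the required ordering (hypothetical names, not viewer code):

    #include <thread>

    static int sNextSlot = 0;                  // analogous to sNextStorageSlot: no lock

    int reserveSlot() { return sNextSlot++; }  // main thread, pre-spawn only

    int main()
    {
        // Phase 1: main thread reserves every slot it will ever need.
        const int fpsSlot = reserveSlot();
        const int memSlot = reserveSlot();

        // Phase 2: only now is it safe to start threads that use the slots.
        std::thread worker([=] { /* read/write accumulators at fpsSlot, memSlot */ });
        worker.join();
        return 0;
    }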
diff --git a/indra/llcommon/lltracerecording.cpp b/indra/llcommon/lltracerecording.cpp
index 5ce1b337fe..1613af1dcf 100644
--- a/indra/llcommon/lltracerecording.cpp
+++ b/indra/llcommon/lltracerecording.cpp
@@ -50,7 +50,7 @@ Recording::Recording(EPlayState state)
: mElapsedSeconds(0),
mActiveBuffers(NULL)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
claim_alloc(gTraceMemStat, this);
mBuffers = new AccumulatorBufferGroup();
claim_alloc(gTraceMemStat, mBuffers);
@@ -60,14 +60,14 @@ Recording::Recording(EPlayState state)
Recording::Recording( const Recording& other )
: mActiveBuffers(NULL)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
claim_alloc(gTraceMemStat, this);
*this = other;
}
Recording& Recording::operator = (const Recording& other)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
// this will allow us to seamlessly start without affecting any data we've acquired from other
setPlayState(PAUSED);
@@ -88,7 +88,7 @@ Recording& Recording::operator = (const Recording& other)
Recording::~Recording()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
disclaim_alloc(gTraceMemStat, this);
disclaim_alloc(gTraceMemStat, mBuffers);
@@ -107,7 +107,7 @@ void Recording::update()
#if LL_TRACE_ENABLED
if (isStarted())
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
mElapsedSeconds += mSamplingTimer.getElapsedTimeF64();
// must have
@@ -128,7 +128,7 @@ void Recording::update()
void Recording::handleReset()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
#if LL_TRACE_ENABLED
mBuffers.write()->reset();
@@ -139,7 +139,7 @@ void Recording::handleReset()
void Recording::handleStart()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
#if LL_TRACE_ENABLED
mSamplingTimer.reset();
mBuffers.setStayUnique(true);
@@ -151,7 +151,7 @@ void Recording::handleStart()
void Recording::handleStop()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
#if LL_TRACE_ENABLED
mElapsedSeconds += mSamplingTimer.getElapsedTimeF64();
// must have thread recorder running on this thread
@@ -583,20 +583,20 @@ PeriodicRecording::PeriodicRecording( S32 num_periods, EPlayState state)
mNumRecordedPeriods(0),
mRecordingPeriods(num_periods ? num_periods : 1)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
setPlayState(state);
claim_alloc(gTraceMemStat, this);
}
PeriodicRecording::~PeriodicRecording()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
disclaim_alloc(gTraceMemStat, this);
}
void PeriodicRecording::nextPeriod()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
if (mAutoResize)
{
mRecordingPeriods.push_back(Recording());
@@ -611,7 +611,7 @@ void PeriodicRecording::nextPeriod()
void PeriodicRecording::appendRecording(Recording& recording)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
getCurRecording().appendRecording(recording);
nextPeriod();
}
@@ -619,7 +619,7 @@ void PeriodicRecording::appendRecording(Recording& recording)
void PeriodicRecording::appendPeriodicRecording( PeriodicRecording& other )
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
if (other.mRecordingPeriods.empty()) return;
getCurRecording().update();
@@ -693,7 +693,7 @@ void PeriodicRecording::appendPeriodicRecording( PeriodicRecording& other )
F64Seconds PeriodicRecording::getDuration() const
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
F64Seconds duration;
S32 num_periods = mRecordingPeriods.size();
for (S32 i = 1; i <= num_periods; i++)
@@ -707,7 +707,7 @@ F64Seconds PeriodicRecording::getDuration() const
LLTrace::Recording PeriodicRecording::snapshotCurRecording() const
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
Recording recording_copy(getCurRecording());
recording_copy.stop();
return recording_copy;
@@ -750,19 +750,19 @@ const Recording& PeriodicRecording::getPrevRecording( S32 offset ) const
void PeriodicRecording::handleStart()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
getCurRecording().start();
}
void PeriodicRecording::handleStop()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
getCurRecording().pause();
}
void PeriodicRecording::handleReset()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
getCurRecording().stop();
if (mAutoResize)
@@ -786,13 +786,13 @@ void PeriodicRecording::handleReset()
void PeriodicRecording::handleSplitTo(PeriodicRecording& other)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
getCurRecording().splitTo(other.getCurRecording());
}
F64 PeriodicRecording::getPeriodMin( const StatType<EventAccumulator>& stat, S32 num_periods /*= S32_MAX*/ )
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
num_periods = llmin(num_periods, getNumRecordedPeriods());
bool has_value = false;
@@ -814,7 +814,7 @@ F64 PeriodicRecording::getPeriodMin( const StatType<EventAccumulator>& stat, S32
F64 PeriodicRecording::getPeriodMax( const StatType<EventAccumulator>& stat, S32 num_periods /*= S32_MAX*/ )
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
num_periods = llmin(num_periods, getNumRecordedPeriods());
bool has_value = false;
@@ -837,7 +837,7 @@ F64 PeriodicRecording::getPeriodMax( const StatType<EventAccumulator>& stat, S32
// calculates means using aggregates per period
F64 PeriodicRecording::getPeriodMean( const StatType<EventAccumulator>& stat, S32 num_periods /*= S32_MAX*/ )
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
num_periods = llmin(num_periods, getNumRecordedPeriods());
F64 mean = 0;
@@ -860,7 +860,7 @@ F64 PeriodicRecording::getPeriodMean( const StatType<EventAccumulator>& stat, S3
F64 PeriodicRecording::getPeriodStandardDeviation( const StatType<EventAccumulator>& stat, S32 num_periods /*= S32_MAX*/ )
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
num_periods = llmin(num_periods, getNumRecordedPeriods());
F64 period_mean = getPeriodMean(stat, num_periods);
@@ -885,7 +885,7 @@ F64 PeriodicRecording::getPeriodStandardDeviation( const StatType<EventAccumulat
F64 PeriodicRecording::getPeriodMin( const StatType<SampleAccumulator>& stat, S32 num_periods /*= S32_MAX*/ )
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
num_periods = llmin(num_periods, getNumRecordedPeriods());
bool has_value = false;
@@ -907,7 +907,7 @@ F64 PeriodicRecording::getPeriodMin( const StatType<SampleAccumulator>& stat, S3
F64 PeriodicRecording::getPeriodMax(const StatType<SampleAccumulator>& stat, S32 num_periods /*= S32_MAX*/)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
num_periods = llmin(num_periods, getNumRecordedPeriods());
bool has_value = false;
@@ -930,7 +930,7 @@ F64 PeriodicRecording::getPeriodMax(const StatType<SampleAccumulator>& stat, S32
F64 PeriodicRecording::getPeriodMean( const StatType<SampleAccumulator>& stat, S32 num_periods /*= S32_MAX*/ )
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
num_periods = llmin(num_periods, getNumRecordedPeriods());
S32 valid_period_count = 0;
@@ -953,7 +953,7 @@ F64 PeriodicRecording::getPeriodMean( const StatType<SampleAccumulator>& stat, S
F64 PeriodicRecording::getPeriodMedian( const StatType<SampleAccumulator>& stat, S32 num_periods /*= S32_MAX*/ )
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
num_periods = llmin(num_periods, getNumRecordedPeriods());
std::vector<F64> buf;
@@ -979,7 +979,7 @@ F64 PeriodicRecording::getPeriodMedian( const StatType<SampleAccumulator>& stat,
F64 PeriodicRecording::getPeriodStandardDeviation( const StatType<SampleAccumulator>& stat, S32 num_periods /*= S32_MAX*/ )
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
num_periods = llmin(num_periods, getNumRecordedPeriods());
F64 period_mean = getPeriodMean(stat, num_periods);
@@ -1005,7 +1005,7 @@ F64 PeriodicRecording::getPeriodStandardDeviation( const StatType<SampleAccumula
F64Kilobytes PeriodicRecording::getPeriodMin( const StatType<MemAccumulator>& stat, S32 num_periods /*= S32_MAX*/ )
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
num_periods = llmin(num_periods, getNumRecordedPeriods());
F64Kilobytes min_val(std::numeric_limits<F64>::max());
@@ -1025,7 +1025,7 @@ F64Kilobytes PeriodicRecording::getPeriodMin(const MemStatHandle& stat, S32 num_
F64Kilobytes PeriodicRecording::getPeriodMax(const StatType<MemAccumulator>& stat, S32 num_periods /*= S32_MAX*/)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
num_periods = llmin(num_periods, getNumRecordedPeriods());
F64Kilobytes max_val(0.0);
@@ -1045,7 +1045,7 @@ F64Kilobytes PeriodicRecording::getPeriodMax(const MemStatHandle& stat, S32 num_
F64Kilobytes PeriodicRecording::getPeriodMean( const StatType<MemAccumulator>& stat, S32 num_periods /*= S32_MAX*/ )
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
num_periods = llmin(num_periods, getNumRecordedPeriods());
F64Kilobytes mean(0);
@@ -1066,7 +1066,7 @@ F64Kilobytes PeriodicRecording::getPeriodMean(const MemStatHandle& stat, S32 num
F64Kilobytes PeriodicRecording::getPeriodStandardDeviation( const StatType<MemAccumulator>& stat, S32 num_periods /*= S32_MAX*/ )
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
num_periods = llmin(num_periods, getNumRecordedPeriods());
F64Kilobytes period_mean = getPeriodMean(stat, num_periods);
@@ -1100,7 +1100,7 @@ F64Kilobytes PeriodicRecording::getPeriodStandardDeviation(const MemStatHandle&
void ExtendableRecording::extend()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
// push the data back to accepted recording
mAcceptedRecording.appendRecording(mPotentialRecording);
// flush data, so we can start from scratch
@@ -1109,26 +1109,26 @@ void ExtendableRecording::extend()
void ExtendableRecording::handleStart()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
mPotentialRecording.start();
}
void ExtendableRecording::handleStop()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
mPotentialRecording.pause();
}
void ExtendableRecording::handleReset()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
mAcceptedRecording.reset();
mPotentialRecording.reset();
}
void ExtendableRecording::handleSplitTo(ExtendableRecording& other)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
mPotentialRecording.splitTo(other.mPotentialRecording);
}
@@ -1145,7 +1145,7 @@ ExtendablePeriodicRecording::ExtendablePeriodicRecording()
void ExtendablePeriodicRecording::extend()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
// push the data back to accepted recording
mAcceptedRecording.appendPeriodicRecording(mPotentialRecording);
// flush data, so we can start from scratch
@@ -1155,26 +1155,26 @@ void ExtendablePeriodicRecording::extend()
void ExtendablePeriodicRecording::handleStart()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
mPotentialRecording.start();
}
void ExtendablePeriodicRecording::handleStop()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
mPotentialRecording.pause();
}
void ExtendablePeriodicRecording::handleReset()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
mAcceptedRecording.reset();
mPotentialRecording.reset();
}
void ExtendablePeriodicRecording::handleSplitTo(ExtendablePeriodicRecording& other)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
mPotentialRecording.splitTo(other.mPotentialRecording);
}
@@ -1189,7 +1189,7 @@ PeriodicRecording& get_frame_recording()
void LLStopWatchControlsMixinCommon::start()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
switch (mPlayState)
{
case STOPPED:
@@ -1211,7 +1211,7 @@ void LLStopWatchControlsMixinCommon::start()
void LLStopWatchControlsMixinCommon::stop()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
switch (mPlayState)
{
case STOPPED:
@@ -1231,7 +1231,7 @@ void LLStopWatchControlsMixinCommon::stop()
void LLStopWatchControlsMixinCommon::pause()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
switch (mPlayState)
{
case STOPPED:
@@ -1251,7 +1251,7 @@ void LLStopWatchControlsMixinCommon::pause()
void LLStopWatchControlsMixinCommon::unpause()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
switch (mPlayState)
{
case STOPPED:
@@ -1271,7 +1271,7 @@ void LLStopWatchControlsMixinCommon::unpause()
void LLStopWatchControlsMixinCommon::resume()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
switch (mPlayState)
{
case STOPPED:
@@ -1292,7 +1292,7 @@ void LLStopWatchControlsMixinCommon::resume()
void LLStopWatchControlsMixinCommon::restart()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
switch (mPlayState)
{
case STOPPED:
@@ -1316,13 +1316,13 @@ void LLStopWatchControlsMixinCommon::restart()
void LLStopWatchControlsMixinCommon::reset()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
handleReset();
}
void LLStopWatchControlsMixinCommon::setPlayState( EPlayState state )
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
switch(state)
{
case STOPPED:
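Every zone in lltracerecording.cpp now lands in the STATS category. A plausible shape for these category macros, assuming the new llprofilercategories.h gates each category behind a compile-time switch (the header itself is not shown in this section, so every name below other than LL_PROFILE_ZONE_SCOPED is an assumption):

    // Hypothetical per-category gate; only LL_PROFILE_ZONE_SCOPED is taken
    // from this diff, the rest is an assumed sketch.
    #define LL_PROFILER_CATEGORY_ENABLE_STATS 1

    #if LL_PROFILER_CATEGORY_ENABLE_STATS
        // enabled: behaves exactly like an uncategorized zone
        #define LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS LL_PROFILE_ZONE_SCOPED
    #else
        // disabled: the zone compiles away entirely
        #define LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS
    #endif

Gating at compile time means a disabled category costs nothing at runtime, which matters for accessors as hot as these.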
diff --git a/indra/llcommon/lltracerecording.h b/indra/llcommon/lltracerecording.h
index 1f3d37336a..556b7470cf 100644
--- a/indra/llcommon/lltracerecording.h
+++ b/indra/llcommon/lltracerecording.h
@@ -355,7 +355,7 @@ namespace LLTrace
template <typename T>
S32 getSampleCount(const StatType<T>& stat, S32 num_periods = S32_MAX)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
num_periods = llmin(num_periods, getNumRecordedPeriods());
S32 num_samples = 0;
@@ -375,7 +375,7 @@ namespace LLTrace
template <typename T>
typename T::value_t getPeriodMin(const StatType<T>& stat, S32 num_periods = S32_MAX)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
num_periods = llmin(num_periods, getNumRecordedPeriods());
bool has_value = false;
@@ -398,7 +398,7 @@ namespace LLTrace
template<typename T>
T getPeriodMin(const CountStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
return T(getPeriodMin(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));
}
@@ -406,7 +406,7 @@ namespace LLTrace
template<typename T>
T getPeriodMin(const SampleStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
return T(getPeriodMin(static_cast<const StatType<SampleAccumulator>&>(stat), num_periods));
}
@@ -414,7 +414,7 @@ namespace LLTrace
template<typename T>
T getPeriodMin(const EventStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
return T(getPeriodMin(static_cast<const StatType<EventAccumulator>&>(stat), num_periods));
}
@@ -424,7 +424,7 @@ namespace LLTrace
template <typename T>
typename RelatedTypes<typename T::value_t>::fractional_t getPeriodMinPerSec(const StatType<T>& stat, S32 num_periods = S32_MAX)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
num_periods = llmin(num_periods, getNumRecordedPeriods());
typename RelatedTypes<typename T::value_t>::fractional_t min_val(std::numeric_limits<F64>::max());
@@ -439,7 +439,7 @@ namespace LLTrace
template<typename T>
typename RelatedTypes<T>::fractional_t getPeriodMinPerSec(const CountStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
return typename RelatedTypes<T>::fractional_t(getPeriodMinPerSec(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));
}
@@ -451,7 +451,7 @@ namespace LLTrace
template <typename T>
typename T::value_t getPeriodMax(const StatType<T>& stat, S32 num_periods = S32_MAX)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
num_periods = llmin(num_periods, getNumRecordedPeriods());
bool has_value = false;
@@ -474,7 +474,7 @@ namespace LLTrace
template<typename T>
T getPeriodMax(const CountStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
return T(getPeriodMax(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));
}
@@ -482,7 +482,7 @@ namespace LLTrace
template<typename T>
T getPeriodMax(const SampleStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
return T(getPeriodMax(static_cast<const StatType<SampleAccumulator>&>(stat), num_periods));
}
@@ -490,7 +490,7 @@ namespace LLTrace
template<typename T>
T getPeriodMax(const EventStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
return T(getPeriodMax(static_cast<const StatType<EventAccumulator>&>(stat), num_periods));
}
@@ -500,7 +500,7 @@ namespace LLTrace
template <typename T>
typename RelatedTypes<typename T::value_t>::fractional_t getPeriodMaxPerSec(const StatType<T>& stat, S32 num_periods = S32_MAX)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
num_periods = llmin(num_periods, getNumRecordedPeriods());
F64 max_val = std::numeric_limits<F64>::min();
@@ -515,7 +515,7 @@ namespace LLTrace
template<typename T>
typename RelatedTypes<T>::fractional_t getPeriodMaxPerSec(const CountStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
return typename RelatedTypes<T>::fractional_t(getPeriodMaxPerSec(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));
}
@@ -527,7 +527,7 @@ namespace LLTrace
template <typename T>
typename RelatedTypes<typename T::value_t>::fractional_t getPeriodMean(const StatType<T>& stat, S32 num_periods = S32_MAX)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
num_periods = llmin(num_periods, getNumRecordedPeriods());
typename RelatedTypes<typename T::value_t>::fractional_t mean(0);
@@ -548,14 +548,14 @@ namespace LLTrace
template<typename T>
typename RelatedTypes<T>::fractional_t getPeriodMean(const CountStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
return typename RelatedTypes<T>::fractional_t(getPeriodMean(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));
}
F64 getPeriodMean(const StatType<SampleAccumulator>& stat, S32 num_periods = S32_MAX);
template<typename T>
typename RelatedTypes<T>::fractional_t getPeriodMean(const SampleStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
return typename RelatedTypes<T>::fractional_t(getPeriodMean(static_cast<const StatType<SampleAccumulator>&>(stat), num_periods));
}
@@ -563,7 +563,7 @@ namespace LLTrace
template<typename T>
typename RelatedTypes<T>::fractional_t getPeriodMean(const EventStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
return typename RelatedTypes<T>::fractional_t(getPeriodMean(static_cast<const StatType<EventAccumulator>&>(stat), num_periods));
}
@@ -573,7 +573,7 @@ namespace LLTrace
template <typename T>
typename RelatedTypes<typename T::value_t>::fractional_t getPeriodMeanPerSec(const StatType<T>& stat, S32 num_periods = S32_MAX)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
num_periods = llmin(num_periods, getNumRecordedPeriods());
typename RelatedTypes<typename T::value_t>::fractional_t mean = 0;
@@ -595,7 +595,7 @@ namespace LLTrace
template<typename T>
typename RelatedTypes<T>::fractional_t getPeriodMeanPerSec(const CountStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
return typename RelatedTypes<T>::fractional_t(getPeriodMeanPerSec(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));
}
@@ -604,7 +604,7 @@ namespace LLTrace
template <typename T>
typename RelatedTypes<typename T::value_t>::fractional_t getPeriodMedianPerSec(const StatType<T>& stat, S32 num_periods = S32_MAX)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
num_periods = llmin(num_periods, getNumRecordedPeriods());
std::vector<typename RelatedTypes<typename T::value_t>::fractional_t> buf;
@@ -624,7 +624,7 @@ namespace LLTrace
template<typename T>
typename RelatedTypes<T>::fractional_t getPeriodMedianPerSec(const CountStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
return typename RelatedTypes<T>::fractional_t(getPeriodMedianPerSec(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));
}
@@ -637,7 +637,7 @@ namespace LLTrace
template<typename T>
typename RelatedTypes<T>::fractional_t getPeriodStandardDeviation(const SampleStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
return typename RelatedTypes<T>::fractional_t(getPeriodStandardDeviation(static_cast<const StatType<SampleAccumulator>&>(stat), num_periods));
}
@@ -645,7 +645,7 @@ namespace LLTrace
template<typename T>
typename RelatedTypes<T>::fractional_t getPeriodStandardDeviation(const EventStatHandle<T>& stat, S32 num_periods = S32_MAX)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
return typename RelatedTypes<T>::fractional_t(getPeriodStandardDeviation(static_cast<const StatType<EventAccumulator>&>(stat), num_periods));
}
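A usage sketch for the period accessors above; the stat handle and its name are illustrative, not stats defined anywhere in the viewer:

    using namespace LLTrace;
    static SampleStatHandle<F64Seconds> sFrameTimeDemo("frame_time_demo"); // hypothetical

    void report(PeriodicRecording& rec) // e.g. the result of get_frame_recording()
    {
        // each call aggregates over (up to) the last 200 recorded periods
        auto mean = rec.getPeriodMean(sFrameTimeDemo, 200);
        auto sd   = rec.getPeriodStandardDeviation(sFrameTimeDemo, 200);
        auto peak = rec.getPeriodMax(sFrameTimeDemo, 200);
    }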
diff --git a/indra/llcommon/lltracethreadrecorder.cpp b/indra/llcommon/lltracethreadrecorder.cpp
index 7ae1e72784..090d3297a0 100644
--- a/indra/llcommon/lltracethreadrecorder.cpp
+++ b/indra/llcommon/lltracethreadrecorder.cpp
@@ -277,7 +277,7 @@ void ThreadRecorder::pushToParent()
void ThreadRecorder::pullFromChildren()
{
#if LL_TRACE_ENABLED
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
if (mActiveRecordings.empty()) return;
{ LLMutexLock lock(&mChildListMutex);
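pullFromChildren() is the master half of a common per-thread stats pattern: each child thread accumulates locally, and the master periodically drains the children under a mutex. A std-only sketch of the idea, with illustrative types rather than ThreadRecorder's actual members:

    #include <mutex>
    #include <vector>

    struct AccumulatorSketch
    {
        double mSum = 0;
        void merge(const AccumulatorSketch& other) { mSum += other.mSum; }
    };

    class MasterRecorderSketch
    {
        std::mutex mChildListMutex;
        std::vector<AccumulatorSketch*> mChildren; // owned by the child threads
    public:
        void pullFromChildren(AccumulatorSketch& master)
        {
            std::lock_guard<std::mutex> lock(mChildListMutex);
            for (AccumulatorSketch* child : mChildren)
            {
                master.merge(*child);          // fold child totals into the master
                *child = AccumulatorSketch();  // reset for the next interval
            }
        }
    };

The sketch glosses over per-child synchronization on the accumulators themselves, which a real recorder must handle.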
diff --git a/indra/llcommon/threadsafeschedule.h b/indra/llcommon/threadsafeschedule.h
index 601681d550..3e0da94c02 100644
--- a/indra/llcommon/threadsafeschedule.h
+++ b/indra/llcommon/threadsafeschedule.h
@@ -98,14 +98,14 @@ namespace LL
// we could minimize redundancy by breaking out a common base class...
void push(const DataTuple& tuple)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
push(tuple_cons(Clock::now(), tuple));
}
/// individually pass each component of the TimeTuple
void push(const TimePoint& time, Args&&... args)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
push(TimeTuple(time, std::forward<Args>(args)...));
}
@@ -116,7 +116,7 @@ namespace LL
// and call that overload.
void push(Args&&... args)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
push(Clock::now(), std::forward<Args>(args)...);
}
@@ -127,21 +127,21 @@ namespace LL
/// DataTuple with implicit now
bool tryPush(const DataTuple& tuple)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
return tryPush(tuple_cons(Clock::now(), tuple));
}
/// individually pass components
bool tryPush(const TimePoint& time, Args&&... args)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
return tryPush(TimeTuple(time, std::forward<Args>(args)...));
}
/// individually pass components with implicit now
bool tryPush(Args&&... args)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
return tryPush(Clock::now(), std::forward<Args>(args)...);
}
@@ -154,7 +154,7 @@ namespace LL
bool tryPushFor(const std::chrono::duration<Rep, Period>& timeout,
const DataTuple& tuple)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
return tryPushFor(timeout, tuple_cons(Clock::now(), tuple));
}
@@ -163,7 +163,7 @@ namespace LL
bool tryPushFor(const std::chrono::duration<Rep, Period>& timeout,
const TimePoint& time, Args&&... args)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
return tryPushFor(timeout, TimeTuple(time, std::forward<Args>(args)...));
}
@@ -172,7 +172,7 @@ namespace LL
bool tryPushFor(const std::chrono::duration<Rep, Period>& timeout,
Args&&... args)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
return tryPushFor(timeout, Clock::now(), std::forward<Args>(args)...);
}
@@ -185,7 +185,7 @@ namespace LL
bool tryPushUntil(const std::chrono::time_point<Clock, Duration>& until,
const DataTuple& tuple)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
return tryPushUntil(until, tuple_cons(Clock::now(), tuple));
}
@@ -194,7 +194,7 @@ namespace LL
bool tryPushUntil(const std::chrono::time_point<Clock, Duration>& until,
const TimePoint& time, Args&&... args)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
return tryPushUntil(until, TimeTuple(time, std::forward<Args>(args)...));
}
@@ -203,7 +203,7 @@ namespace LL
bool tryPushUntil(const std::chrono::time_point<Clock, Duration>& until,
Args&&... args)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
return tryPushUntil(until, Clock::now(), std::forward<Args>(args)...);
}
@@ -221,14 +221,14 @@ namespace LL
// haven't yet jumped through those hoops.
DataTuple pop()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
return tuple_cdr(popWithTime());
}
/// pop TimeTuple by value
TimeTuple popWithTime()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
lock_t lock(super::mLock);
// We can't just sit around waiting forever, given that there may
// be items in the queue that are not yet ready but will *become*
@@ -268,7 +268,7 @@ namespace LL
/// tryPop(DataTuple&)
bool tryPop(DataTuple& tuple)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
TimeTuple tt;
if (! super::tryPop(tt))
return false;
@@ -279,7 +279,7 @@ namespace LL
/// for when Args has exactly one type
bool tryPop(typename std::tuple_element<1, TimeTuple>::type& value)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
TimeTuple tt;
if (! super::tryPop(tt))
return false;
@@ -291,7 +291,7 @@ namespace LL
template <typename Rep, typename Period, typename Tuple>
bool tryPopFor(const std::chrono::duration<Rep, Period>& timeout, Tuple& tuple)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
// It's important to use OUR tryPopUntil() implementation, rather
// than delegating immediately to our base class.
return tryPopUntil(Clock::now() + timeout, tuple);
@@ -302,7 +302,7 @@ namespace LL
bool tryPopUntil(const std::chrono::time_point<Clock, Duration>& until,
TimeTuple& tuple)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
// super::tryPopUntil() wakes up when an item becomes available or
// we hit 'until', whichever comes first. Thing is, the current
// head of the queue could become ready sooner than either of
@@ -322,7 +322,7 @@ namespace LL
pop_result tryPopUntil_(lock_t& lock, const TimePoint& until, TimeTuple& tuple)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
TimePoint adjusted = until;
if (! super::mStorage.empty())
{
@@ -350,7 +350,7 @@ namespace LL
bool tryPopUntil(const std::chrono::time_point<Clock, Duration>& until,
DataTuple& tuple)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
TimeTuple tt;
if (! tryPopUntil(until, tt))
return false;
@@ -363,7 +363,7 @@ namespace LL
bool tryPopUntil(const std::chrono::time_point<Clock, Duration>& until,
typename std::tuple_element<1, TimeTuple>::type& value)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
TimeTuple tt;
if (! tryPopUntil(until, tt))
return false;
@@ -387,7 +387,7 @@ namespace LL
// considering whether to deliver the current head element
bool canPop(const TimeTuple& head) const override
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
// an item with a future timestamp isn't yet ready to pop
// (should we add some slop for overhead?)
return std::get<0>(head) <= Clock::now();
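The comments in popWithTime() and tryPopUntil_() above carry the key idea: never sleep past the head item's own timestamp, because it may become ready before the caller's deadline. A self-contained, std-only sketch of that deadline adjustment (not the viewer's implementation, which layers this onto LLThreadSafeQueue):

    #include <algorithm>
    #include <chrono>
    #include <condition_variable>
    #include <mutex>
    #include <queue>
    #include <utility>
    #include <vector>

    using Clock = std::chrono::steady_clock;

    // Sketch: a queue whose items carry a timestamp and only become
    // poppable once that timestamp arrives.
    template <typename T>
    class TimedQueueSketch
    {
        using Item = std::pair<Clock::time_point, T>;
        struct Later
        {
            bool operator()(const Item& a, const Item& b) const
            { return a.first > b.first; } // min-heap on timestamp
        };
        std::mutex mMutex;
        std::condition_variable mCond;
        std::priority_queue<Item, std::vector<Item>, Later> mHeap;

    public:
        void push(Clock::time_point when, T value)
        {
            {
                std::lock_guard<std::mutex> lock(mMutex);
                mHeap.emplace(when, std::move(value));
            }
            mCond.notify_one(); // a new head may be ready sooner than before
        }

        bool tryPopUntil(Clock::time_point until, T& out)
        {
            std::unique_lock<std::mutex> lock(mMutex);
            for (;;)
            {
                if (!mHeap.empty() && mHeap.top().first <= Clock::now())
                {
                    out = mHeap.top().second; // copy out, then discard
                    mHeap.pop();
                    return true;
                }
                // the key trick: wake at the caller's deadline OR the head
                // item's timestamp, whichever comes first
                Clock::time_point adjusted = until;
                if (!mHeap.empty())
                    adjusted = std::min(adjusted, mHeap.top().first);
                if (mCond.wait_until(lock, adjusted) == std::cv_status::timeout
                    && Clock::now() >= until)
                    return false; // real deadline passed with no ready item
            }
        }
    };

push() notifies even when the new item is not yet ready, since a new head can move the wake-up deadline earlier; that is the same reason the loop recomputes the adjusted deadline on every pass.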
diff --git a/indra/llcommon/workqueue.cpp b/indra/llcommon/workqueue.cpp
index c74dada2e4..eb06890468 100644
--- a/indra/llcommon/workqueue.cpp
+++ b/indra/llcommon/workqueue.cpp
@@ -60,7 +60,7 @@ void LL::WorkQueue::runUntilClose()
{
for (;;)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
callWork(mQueue.pop());
}
}
@@ -71,7 +71,7 @@ void LL::WorkQueue::runUntilClose()
bool LL::WorkQueue::runPending()
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
for (Work work; mQueue.tryPop(work); )
{
callWork(work);
@@ -91,7 +91,7 @@ bool LL::WorkQueue::runOne()
bool LL::WorkQueue::runUntil(const TimePoint& until)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
// Should we subtract some slop to allow for typical Work execution time?
// How much slop?
// runUntil() is simply a time-bounded runPending().
@@ -129,7 +129,7 @@ void LL::WorkQueue::callWork(const Queue::DataTuple& work)
void LL::WorkQueue::callWork(const Work& work)
{
- LL_PROFILE_ZONE_SCOPED;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
try
{
work();