From 8d20480c5f77fe1fab8149d3cda79bdd61e77656 Mon Sep 17 00:00:00 2001
From: Dave Parks
Date: Thu, 28 Oct 2021 18:06:21 +0000
Subject: SL-16148 SL-16244 SL-16270 SL-16253 Remove most BlockTimers, remove LLMemTracked, introduce alignas, hook most/all remaining allocs, disable synchronous occlusion, and convert frequently accessed LLSingletons to LLSimpleton

---
 indra/llcommon/lltraceaccumulators.h | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/indra/llcommon/lltraceaccumulators.h b/indra/llcommon/lltraceaccumulators.h
index 8eb5338a2a..b183fcd14a 100644
--- a/indra/llcommon/lltraceaccumulators.h
+++ b/indra/llcommon/lltraceaccumulators.h
@@ -66,6 +66,7 @@ namespace LLTrace
             : mStorageSize(0),
             mStorage(NULL)
         {
+            LL_PROFILE_ZONE_SCOPED;
             const AccumulatorBuffer& other = *getDefaultBuffer();
             resize(sNextStorageSlot);
             for (S32 i = 0; i < sNextStorageSlot; i++)
@@ -76,6 +77,7 @@ namespace LLTrace
         ~AccumulatorBuffer()
         {
+            LL_PROFILE_ZONE_SCOPED;
             if (isCurrent())
             {
                 LLThreadLocalSingletonPointer<ACCUMULATOR>::setInstance(NULL);
             }
@@ -98,6 +100,7 @@ namespace LLTrace
             : mStorageSize(0),
             mStorage(NULL)
         {
+            LL_PROFILE_ZONE_SCOPED;
             resize(sNextStorageSlot);
             for (S32 i = 0; i < sNextStorageSlot; i++)
             {
@@ -107,6 +110,7 @@ namespace LLTrace

         void addSamples(const AccumulatorBuffer& other, EBufferAppendType append_type)
         {
+            LL_PROFILE_ZONE_SCOPED;
             llassert(mStorageSize >= sNextStorageSlot && other.mStorageSize >= sNextStorageSlot);
             for (size_t i = 0; i < sNextStorageSlot; i++)
             {
@@ -116,6 +120,7 @@ namespace LLTrace

         void copyFrom(const AccumulatorBuffer& other)
         {
+            LL_PROFILE_ZONE_SCOPED;
             llassert(mStorageSize >= sNextStorageSlot && other.mStorageSize >= sNextStorageSlot);
             for (size_t i = 0; i < sNextStorageSlot; i++)
             {
@@ -125,6 +130,7 @@ namespace LLTrace

         void reset(const AccumulatorBuffer* other = NULL)
         {
+            LL_PROFILE_ZONE_SCOPED;
             llassert(mStorageSize >= sNextStorageSlot);
             for (size_t i = 0; i < sNextStorageSlot; i++)
             {
@@ -134,6 +140,7 @@ namespace LLTrace

         void sync(F64SecondsImplicit time_stamp)
         {
+            LL_PROFILE_ZONE_SCOPED;
             llassert(mStorageSize >= sNextStorageSlot);
             for (size_t i = 0; i < sNextStorageSlot; i++)
             {
@@ -153,12 +160,13 @@ namespace LLTrace

         static void clearCurrent()
         {
-            LLThreadLocalSingletonPointer<ACCUMULATOR>::setInstance(NULL);
+            LLThreadLocalSingletonPointer<ACCUMULATOR>::setInstance(NULL);
         }

         // NOTE: this is not thread-safe. We assume that slots are reserved in the main thread before any child threads are spawned
         size_t reserveSlot()
         {
+            LL_PROFILE_ZONE_SCOPED;
             size_t next_slot = sNextStorageSlot++;
             if (next_slot >= mStorageSize)
             {
@@ -172,6 +180,7 @@ namespace LLTrace

         void resize(size_t new_size)
         {
+            LL_PROFILE_ZONE_SCOPED;
             if (new_size <= mStorageSize) return;

             ACCUMULATOR* old_storage = mStorage;
@@ -212,6 +221,7 @@ namespace LLTrace

         static self_t* getDefaultBuffer()
         {
+            LL_PROFILE_ZONE_SCOPED;
             static bool sInitialized = false;
             if (!sInitialized)
             {
@@ -326,6 +336,7 @@ namespace LLTrace

         void sample(F64 value)
         {
+            LL_PROFILE_ZONE_SCOPED;
             F64SecondsImplicit time_stamp = LLTimer::getTotalSeconds();

             // store effect of last value
@@ -444,9 +455,9 @@ namespace LLTrace
         S32 mNumSamples;
     };

-    class TimeBlockAccumulator
+    class alignas(32) TimeBlockAccumulator
     {
-    public:
+    public:
         typedef F64Seconds value_t;
         static F64Seconds getDefaultValue() { return F64Seconds(0); }

@@ -539,6 +550,7 @@ namespace LLTrace

         void addSamples(const MemAccumulator& other, EBufferAppendType append_type)
         {
+            LL_PROFILE_ZONE_SCOPED;
             mAllocations.addSamples(other.mAllocations, append_type);
             mDeallocations.addSamples(other.mDeallocations, append_type);

@@ -557,6 +569,7 @@ namespace LLTrace

         void reset(const MemAccumulator* other)
         {
+            LL_PROFILE_ZONE_SCOPED;
             mSize.reset(other ? &other->mSize : NULL);
             mAllocations.reset(other ? &other->mAllocations : NULL);
             mDeallocations.reset(other ? &other->mDeallocations : NULL);
--
cgit v1.2.3


From 31b0e8cef83780de19fc713791a30f56108b75f6 Mon Sep 17 00:00:00 2001
From: Ptolemy
Date: Thu, 13 Jan 2022 12:47:54 -0800
Subject: SL-16606: Add profiler category STATS

---
 indra/llcommon/lltraceaccumulators.h | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/indra/llcommon/lltraceaccumulators.h b/indra/llcommon/lltraceaccumulators.h
index b183fcd14a..7267a44300 100644
--- a/indra/llcommon/lltraceaccumulators.h
+++ b/indra/llcommon/lltraceaccumulators.h
@@ -66,7 +66,7 @@ namespace LLTrace
             : mStorageSize(0),
             mStorage(NULL)
         {
-            LL_PROFILE_ZONE_SCOPED;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             const AccumulatorBuffer& other = *getDefaultBuffer();
             resize(sNextStorageSlot);
             for (S32 i = 0; i < sNextStorageSlot; i++)
@@ -77,7 +77,7 @@ namespace LLTrace
         ~AccumulatorBuffer()
         {
-            LL_PROFILE_ZONE_SCOPED;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             if (isCurrent())
             {
                 LLThreadLocalSingletonPointer<ACCUMULATOR>::setInstance(NULL);
             }
@@ -100,7 +100,7 @@ namespace LLTrace
             : mStorageSize(0),
             mStorage(NULL)
         {
-            LL_PROFILE_ZONE_SCOPED;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             resize(sNextStorageSlot);
             for (S32 i = 0; i < sNextStorageSlot; i++)
             {
@@ -110,7 +110,7 @@ namespace LLTrace

         void addSamples(const AccumulatorBuffer& other, EBufferAppendType append_type)
         {
-            LL_PROFILE_ZONE_SCOPED;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             llassert(mStorageSize >= sNextStorageSlot && other.mStorageSize >= sNextStorageSlot);
             for (size_t i = 0; i < sNextStorageSlot; i++)
             {
@@ -120,7 +120,7 @@ namespace LLTrace

         void copyFrom(const AccumulatorBuffer& other)
         {
-            LL_PROFILE_ZONE_SCOPED;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             llassert(mStorageSize >= sNextStorageSlot && other.mStorageSize >= sNextStorageSlot);
             for (size_t i = 0; i < sNextStorageSlot; i++)
             {
@@ -130,7 +130,7 @@ namespace LLTrace

         void reset(const AccumulatorBuffer* other = NULL)
         {
-            LL_PROFILE_ZONE_SCOPED;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             llassert(mStorageSize >= sNextStorageSlot);
             for (size_t i = 0; i < sNextStorageSlot; i++)
             {
@@ -140,7 +140,7 @@ namespace LLTrace

         void sync(F64SecondsImplicit time_stamp)
         {
-            LL_PROFILE_ZONE_SCOPED;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             llassert(mStorageSize >= sNextStorageSlot);
             for (size_t i = 0; i < sNextStorageSlot; i++)
             {
@@ -166,7 +166,7 @@ namespace LLTrace
         // NOTE: this is not thread-safe. We assume that slots are reserved in the main thread before any child threads are spawned
         size_t reserveSlot()
         {
-            LL_PROFILE_ZONE_SCOPED;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             size_t next_slot = sNextStorageSlot++;
             if (next_slot >= mStorageSize)
             {
@@ -180,7 +180,7 @@ namespace LLTrace

         void resize(size_t new_size)
         {
-            LL_PROFILE_ZONE_SCOPED;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             if (new_size <= mStorageSize) return;

             ACCUMULATOR* old_storage = mStorage;
@@ -221,7 +221,7 @@ namespace LLTrace

         static self_t* getDefaultBuffer()
         {
-            LL_PROFILE_ZONE_SCOPED;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             static bool sInitialized = false;
             if (!sInitialized)
             {
@@ -336,7 +336,7 @@ namespace LLTrace

         void sample(F64 value)
         {
-            LL_PROFILE_ZONE_SCOPED;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             F64SecondsImplicit time_stamp = LLTimer::getTotalSeconds();

             // store effect of last value
@@ -550,7 +550,7 @@ namespace LLTrace

         void addSamples(const MemAccumulator& other, EBufferAppendType append_type)
         {
-            LL_PROFILE_ZONE_SCOPED;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             mAllocations.addSamples(other.mAllocations, append_type);
             mDeallocations.addSamples(other.mDeallocations, append_type);

@@ -569,7 +569,7 @@ namespace LLTrace

         void reset(const MemAccumulator* other)
         {
-            LL_PROFILE_ZONE_SCOPED;
+            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
             mSize.reset(other ? &other->mSize : NULL);
             mAllocations.reset(other ? &other->mAllocations : NULL);
             mDeallocations.reset(other ? &other->mDeallocations : NULL);
--
cgit v1.2.3
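
Both commits above touch only the call sites of LL_PROFILE_ZONE_SCOPED and LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; the macro definitions live in the viewer's profiler header, not in this file. The following is a minimal sketch of the general pattern such macros follow: an RAII object opens a timing zone for the enclosing scope, and a category-suffixed variant can be compiled out in one place. The ScopedZone class and all PROFILE_* names here are invented for illustration, and the real viewer macros forward to an external profiler backend such as Tracy rather than printing timings.

// Illustrative sketch only; ScopedZone and the PROFILE_* macros are invented
// names, not the viewer's actual profiler definitions.
#include <chrono>
#include <cstdio>

class ScopedZone
{
public:
    explicit ScopedZone(const char* label)
        : mLabel(label)
        , mStart(std::chrono::steady_clock::now())
    {
    }

    // The zone "closes" when the enclosing scope ends.
    ~ScopedZone()
    {
        const auto end = std::chrono::steady_clock::now();
        const long long us =
            std::chrono::duration_cast<std::chrono::microseconds>(end - mStart).count();
        std::printf("%s: %lld us\n", mLabel, us);
    }

private:
    const char* mLabel;
    std::chrono::steady_clock::time_point mStart;
};

// Two-step concatenation so __LINE__ expands before pasting.
#define PROFILE_CONCAT_INNER(a, b) a##b
#define PROFILE_CONCAT(a, b) PROFILE_CONCAT_INNER(a, b)

// Uncategorized zone: always emitted.
#define PROFILE_ZONE_SCOPED \
    ScopedZone PROFILE_CONCAT(profile_zone_, __LINE__)(__func__)

// Category-gated zone: the STATS category can be switched off at compile time
// without editing every call site, which is what the second commit's split enables.
#define PROFILE_CATEGORY_ENABLE_STATS 1
#if PROFILE_CATEGORY_ENABLE_STATS
    #define PROFILE_ZONE_SCOPED_CATEGORY_STATS PROFILE_ZONE_SCOPED
#else
    #define PROFILE_ZONE_SCOPED_CATEGORY_STATS
#endif

// Usage mirrors the patched code: the macro on the first line of a function
// body times everything up to the closing brace. The function name is hypothetical.
void sync_all_buffers()
{
    PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    // ... work to be measured ...
}

int main()
{
    sync_all_buffers();  // prints something like "sync_all_buffers: 0 us"
    return 0;
}

In this sketch, setting the gate to 0 makes the _CATEGORY_STATS variant expand to nothing, so heavily instrumented stats paths like the accumulator constructors, addSamples(), and sync() above would carry no profiling overhead in a build that disables the category.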
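
The first commit also changes class TimeBlockAccumulator to class alignas(32) TimeBlockAccumulator. The short example below shows what alignas on a class type does to alignment, size, and array stride; the Plain/Aligned structs and their members are placeholders, not TimeBlockAccumulator's real fields, and the 32-byte figure simply follows the patch.

// Illustrative only: how alignas on a class affects layout. The member names
// below are placeholders, not TimeBlockAccumulator's real fields.
#include <cstddef>
#include <cstdio>

struct Plain
{
    double mTotalTime;
    int    mCalls;
};

struct alignas(32) Aligned   // every instance starts on a 32-byte boundary
{
    double mTotalTime;
    int    mCalls;
};

static_assert(alignof(Aligned) == 32, "alignas raises the type's alignment");
static_assert(sizeof(Aligned) % 32 == 0, "size is padded to a multiple of the alignment");

int main()
{
    // In an array of accumulators, the padding keeps each element on its own
    // 32-byte boundary, so adjacent elements occupy distinct aligned blocks.
    Aligned buffer[4];
    const size_t stride = static_cast<size_t>(
        reinterpret_cast<const char*>(&buffer[1]) -
        reinterpret_cast<const char*>(&buffer[0]));

    std::printf("alignof(Plain)=%zu sizeof(Plain)=%zu\n", alignof(Plain), sizeof(Plain));
    std::printf("alignof(Aligned)=%zu sizeof(Aligned)=%zu\n", alignof(Aligned), sizeof(Aligned));
    std::printf("array stride=%zu bytes\n", stride);
    return 0;
}

One common reason for this kind of change is to keep frequently updated accumulators on predictable cache-line boundaries; the commit message does not spell out the motivation beyond "introduce alignas", so treat that reading as conjecture.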