author     Andrey Lihatskiy <alihatskiy@productengine.com>   2024-05-15 11:16:27 +0300
committer  Andrey Lihatskiy <alihatskiy@productengine.com>   2024-05-15 11:16:27 +0300
commit     bccc10db9a90d365c353baebf443fde2030ce970 (patch)
tree       2c2e1fd94b29667a809f8d7285d049f5ff5d424d /indra/llcommon/lltraceaccumulators.cpp
parent     531cd34f670170ade57f8813fe48012b61a1d3c2 (diff)
parent     bb3c36f5cbc0c3b542045fd27255eee24e03da22 (diff)
Merge branch 'main' into marchcat/x-b-merge
# Conflicts:
# autobuild.xml
# indra/cmake/ConfigurePkgConfig.cmake
# indra/cmake/ICU4C.cmake
# indra/media_plugins/gstreamer010/llmediaimplgstreamer_syms.cpp
# indra/media_plugins/gstreamer010/llmediaimplgstreamer_syms.h
# indra/media_plugins/gstreamer010/llmediaimplgstreamertriviallogging.h
# indra/media_plugins/gstreamer010/llmediaimplgstreamervidplug.cpp
# indra/media_plugins/gstreamer010/llmediaimplgstreamervidplug.h
# indra/media_plugins/gstreamer010/media_plugin_gstreamer010.cpp
# indra/newview/llappviewerlinux_api.h
# indra/newview/llappviewerlinux_api_dbus.cpp
# indra/newview/llappviewerlinux_api_dbus.h
# indra/newview/llfloateremojipicker.cpp
# indra/newview/lloutfitslist.cpp
Diffstat (limited to 'indra/llcommon/lltraceaccumulators.cpp')
-rw-r--r--   indra/llcommon/lltraceaccumulators.cpp   350
1 file changed, 175 insertions, 175 deletions
diff --git a/indra/llcommon/lltraceaccumulators.cpp b/indra/llcommon/lltraceaccumulators.cpp
index b5b32cba38..8741087f3a 100644
--- a/indra/llcommon/lltraceaccumulators.cpp
+++ b/indra/llcommon/lltraceaccumulators.cpp
@@ -38,246 +38,246 @@ namespace LLTrace

All 175 removed lines reappear among the 175 added lines unchanged apart from whitespace, so the code touched by this hunk is shown once below:

AccumulatorBufferGroup::AccumulatorBufferGroup()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
}

AccumulatorBufferGroup::AccumulatorBufferGroup(const AccumulatorBufferGroup& other)
:   mCounts(other.mCounts),
    mSamples(other.mSamples),
    mEvents(other.mEvents),
    mStackTimers(other.mStackTimers)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
}

AccumulatorBufferGroup::~AccumulatorBufferGroup()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
}

void AccumulatorBufferGroup::handOffTo(AccumulatorBufferGroup& other)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    other.mCounts.reset(&mCounts);
    other.mSamples.reset(&mSamples);
    other.mEvents.reset(&mEvents);
    other.mStackTimers.reset(&mStackTimers);
}

void AccumulatorBufferGroup::makeCurrent()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    mCounts.makeCurrent();
    mSamples.makeCurrent();
    mEvents.makeCurrent();
    mStackTimers.makeCurrent();

    ThreadRecorder* thread_recorder = get_thread_recorder();
    AccumulatorBuffer<TimeBlockAccumulator>& timer_accumulator_buffer = mStackTimers;
    // update stacktimer parent pointers
    for (size_t i = 0, end_i = mStackTimers.size(); i < end_i; i++)
    {
        TimeBlockTreeNode* tree_node = thread_recorder->getTimeBlockTreeNode(narrow<size_t>(i));
        if (tree_node)
        {
            timer_accumulator_buffer[i].mParent = tree_node->mParent;
        }
    }
}

//static
void AccumulatorBufferGroup::clearCurrent()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    AccumulatorBuffer<CountAccumulator>::clearCurrent();
    AccumulatorBuffer<SampleAccumulator>::clearCurrent();
    AccumulatorBuffer<EventAccumulator>::clearCurrent();
    AccumulatorBuffer<TimeBlockAccumulator>::clearCurrent();
}

bool AccumulatorBufferGroup::isCurrent() const
{
    return mCounts.isCurrent();
}

void AccumulatorBufferGroup::append( const AccumulatorBufferGroup& other )
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    mCounts.addSamples(other.mCounts, SEQUENTIAL);
    mSamples.addSamples(other.mSamples, SEQUENTIAL);
    mEvents.addSamples(other.mEvents, SEQUENTIAL);
    mStackTimers.addSamples(other.mStackTimers, SEQUENTIAL);
}

void AccumulatorBufferGroup::merge( const AccumulatorBufferGroup& other)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    mCounts.addSamples(other.mCounts, NON_SEQUENTIAL);
    mSamples.addSamples(other.mSamples, NON_SEQUENTIAL);
    mEvents.addSamples(other.mEvents, NON_SEQUENTIAL);
    // for now, hold out timers from merge, need to be displayed per thread
    //mStackTimers.addSamples(other.mStackTimers, NON_SEQUENTIAL);
}

void AccumulatorBufferGroup::reset(AccumulatorBufferGroup* other)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    mCounts.reset(other ? &other->mCounts : NULL);
    mSamples.reset(other ? &other->mSamples : NULL);
    mEvents.reset(other ? &other->mEvents : NULL);
    mStackTimers.reset(other ? &other->mStackTimers : NULL);
}

void AccumulatorBufferGroup::sync()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    if (isCurrent())
    {
        F64SecondsImplicit time_stamp = LLTimer::getTotalSeconds();
        mSamples.sync(time_stamp);
    }
}

F64 SampleAccumulator::mergeSumsOfSquares(const SampleAccumulator& a, const SampleAccumulator& b)
{
    const F64 epsilon = 0.0000001;

    if (a.getSamplingTime() > epsilon && b.getSamplingTime() > epsilon)
    {
        // combine variance (and hence standard deviation) of 2 different sized sample groups using
        // the following formula: http://www.mrc-bsu.cam.ac.uk/cochrane/handbook/chapter_7/7_7_3_8_combining_groups.htm
        F64 n_1 = a.getSamplingTime(),
            n_2 = b.getSamplingTime();
        F64 m_1 = a.getMean(),
            m_2 = b.getMean();
        F64 v_1 = a.getSumOfSquares() / a.getSamplingTime(),
            v_2 = b.getSumOfSquares() / b.getSamplingTime();
        if (n_1 < epsilon)
        {
            return b.getSumOfSquares();
        }
        else
        {
            return a.getSamplingTime()
                * ((((n_1 - epsilon) * v_1)
                    + ((n_2 - epsilon) * v_2)
                    + (((n_1 * n_2) / (n_1 + n_2))
                        * ((m_1 * m_1) + (m_2 * m_2) - (2.f * m_1 * m_2))))
                   / (n_1 + n_2 - epsilon));
        }
    }

    return a.getSumOfSquares();
}

void SampleAccumulator::addSamples( const SampleAccumulator& other, EBufferAppendType append_type )
{
    if (append_type == NON_SEQUENTIAL)
    {
        return;
    }

    if (!mHasValue)
    {
        *this = other;

        if (append_type == NON_SEQUENTIAL)
        {
            // restore own last value state
            mLastValue = NaN;
            mHasValue = false;
        }
    }
    else if (other.mHasValue)
    {
        mSum += other.mSum;

        if (other.mMin < mMin) { mMin = other.mMin; }
        if (other.mMax > mMax) { mMax = other.mMax; }

        mSumOfSquares = mergeSumsOfSquares(*this, other);

        if (append_type == SEQUENTIAL)
        {
            mLastValue = other.mLastValue;
            mLastSampleTimeStamp = other.mLastSampleTimeStamp;
        }
    }
}

void SampleAccumulator::reset( const SampleAccumulator* other )
{
    mLastValue = other ? other->mLastValue : NaN;
    mHasValue = other ? other->mHasValue : false;
    mNumSamples = 0;
    mSum = 0;
    mMin = mLastValue;
    mMax = mLastValue;
    mMean = mLastValue;
    llassert(!mHasValue || mMean < 0 || mMean >= 0);
    mSumOfSquares = 0;
    mLastSampleTimeStamp = LLTimer::getTotalSeconds();
    mTotalSamplingTime = 0;
}

F64 EventAccumulator::mergeSumsOfSquares(const EventAccumulator& a, const EventAccumulator& b)
{
    if (a.mNumSamples && b.mNumSamples)
    {
        // combine variance (and hence standard deviation) of 2 different sized sample groups using
        // the following formula: http://www.mrc-bsu.cam.ac.uk/cochrane/handbook/chapter_7/7_7_3_8_combining_groups.htm
        F64 n_1 = a.mNumSamples,
            n_2 = b.mNumSamples;
        F64 m_1 = a.mMean,
            m_2 = b.mMean;
        F64 v_1 = a.mSumOfSquares / a.mNumSamples,
            v_2 = b.mSumOfSquares / b.mNumSamples;
        return (F64)a.mNumSamples
            * ((((n_1 - 1.f) * v_1)
                + ((n_2 - 1.f) * v_2)
                + (((n_1 * n_2) / (n_1 + n_2))
                    * ((m_1 * m_1) + (m_2 * m_2) - (2.f * m_1 * m_2))))
               / (n_1 + n_2 - 1.f));
    }

    return a.mSumOfSquares;
}

void EventAccumulator::addSamples( const EventAccumulator& other, EBufferAppendType append_type )
{
    if (other.mNumSamples)
    {
        if (!mNumSamples)
        {
            *this = other;
        }
        else
        {
            mSum += other.mSum;

            // NOTE: both conditions will hold first time through
            if (other.mMin < mMin) { mMin = other.mMin; }
            if (other.mMax > mMax) { mMax = other.mMax; }

            mSumOfSquares = mergeSumsOfSquares(*this, other);

            F64 weight = (F64)mNumSamples / (F64)(mNumSamples + other.mNumSamples);
            mNumSamples += other.mNumSamples;
            mMean = mMean * weight + other.mMean * (1.f - weight);
            if (append_type == SEQUENTIAL) mLastValue = other.mLastValue;
        }
    }
}

void EventAccumulator::reset( const EventAccumulator* other )
{
    mNumSamples = 0;
    mSum = 0;
    mMin = F32(NaN);
    mMax = F32(NaN);
    mMean = NaN;
    mSumOfSquares = 0;
    mLastValue = other ? other->mLastValue : NaN;
}

}
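For reference, both mergeSumsOfSquares() implementations above follow the combined-groups variance formula cited in their comments (Cochrane Handbook, section 7.7.3.8). A sketch of that relation, where n_1 and n_2 are the group sizes (sample counts for EventAccumulator, sampling times for SampleAccumulator), m_1 and m_2 the group means, and v_1 and v_2 the per-group variances:

    % Combined variance of two groups (Cochrane Handbook 7.7.3.8); the code
    % converts it back to a sum of squared deviations by scaling by the group size.
    s^2 = \frac{(n_1 - 1)\,v_1 + (n_2 - 1)\,v_2
                + \frac{n_1 n_2}{n_1 + n_2}\,(m_1^2 + m_2^2 - 2 m_1 m_2)}
               {n_1 + n_2 - 1}

Each accumulator stores a sum of squared deviations rather than a variance, so the code rescales the combined variance by the first group's size, and SampleAccumulator substitutes a small epsilon for the integer 1 because its "counts" are continuous sampling times rather than discrete sample counts.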