From 955b967623983cb50ba09f7b82e5f01f2c6bcebb Mon Sep 17 00:00:00 2001
From: Nat Goodspeed
Date: Tue, 5 Oct 2021 17:31:53 -0400
Subject: SL-16024: Add ThreadSafeSchedule, a timestamped LLThreadSafeQueue.

ThreadSafeSchedule orders its items by timestamp, which can be passed either
implicitly or explicitly. The timestamp specifies earliest delivery time: an
item cannot be popped until that time.

Add initial tests.

Tweak the LLThreadSafeQueue base class to support ThreadSafeSchedule:
introduce virtual canPop() method to report whether the current head item is
available to pop. The base class unconditionally says yes, ThreadSafeSchedule
says it depends on whether its timestamp is still in the future.

This replaces the protected pop_() overload accepting a predicate. Rather
than explicitly passing a predicate through a couple levels of function call,
use canPop() at the level it matters. Runtime behavior that varies depending
on an object's leaf class is what virtual functions were invented for.

Give pop_() a three-state enum return so pop() can distinguish between
"closed and empty" (throws exception) versus "closed, not yet drained because
we're not yet ready to pop the head item" (waits).

Also break out protected tryPopUntil_() method, the body logic of
tryPopUntil(). The public method locks the data structure, the protected
method requires that its caller has already done so.

Add chrono.h with a more full-featured LL::time_point_cast() function than
the one found in <chrono>, which only converts between time_point durations,
not between time_points based on different clocks.
---
 indra/llcommon/threadsafeschedule.h | 334 ++++++++++++++++++++++++++++++++++++
 1 file changed, 334 insertions(+)
 create mode 100644 indra/llcommon/threadsafeschedule.h

(limited to 'indra/llcommon/threadsafeschedule.h')

diff --git a/indra/llcommon/threadsafeschedule.h b/indra/llcommon/threadsafeschedule.h
new file mode 100644
index 0000000000..545c820f53
--- /dev/null
+++ b/indra/llcommon/threadsafeschedule.h
@@ -0,0 +1,334 @@
+/**
+ * @file   threadsafeschedule.h
+ * @author Nat Goodspeed
+ * @date   2021-10-02
+ * @brief  ThreadSafeSchedule is an ordered queue in which every item has an
+ *         associated timestamp.
+ *
+ * $LicenseInfo:firstyear=2021&license=viewerlgpl$
+ * Copyright (c) 2021, Linden Research, Inc.
+ * $/LicenseInfo$
+ */
+
+#if ! defined(LL_THREADSAFESCHEDULE_H)
+#define LL_THREADSAFESCHEDULE_H
+
+#include "chrono.h"
+#include "llexception.h"
+#include "llthreadsafequeue.h"
+#include "tuple.h"
+#include <chrono>
+#include <tuple>
+
+namespace LL
+{
+    namespace ThreadSafeSchedulePrivate
+    {
+        using TimePoint = std::chrono::steady_clock::time_point;
+        // Bundle consumer's data with a TimePoint to order items by timestamp.
+        template <typename... Args>
+        using TimestampedTuple = std::tuple<TimePoint, Args...>;
+
+        // comparison functor for TimedTuples -- see TimedQueue comments
+        struct ReverseTupleOrder
+        {
+            template <typename Tuple>
+            bool operator()(const Tuple& left, const Tuple& right) const
+            {
+                return std::get<0>(left) > std::get<0>(right);
+            }
+        };
+
+        template <typename... Args>
+        using TimedQueue = PriorityQueueAdapter<
+            TimestampedTuple<Args...>,
+            // std::vector is the default storage for std::priority_queue,
+            // have to restate to specify comparison template parameter
+            std::vector<TimestampedTuple<Args...>>,
+            // std::priority_queue uses a counterintuitive comparison
+            // behavior: the default std::less comparator is used to present
+            // the *highest* value as top(). So to sort by earliest timestamp,
+            // we must invert by using >.
+            ReverseTupleOrder>;
+    } // namespace ThreadSafeSchedulePrivate
+
+    /**
+     * ThreadSafeSchedule is an ordered LLThreadSafeQueue in which every item
+     * is given an associated timestamp. That is, TimePoint is implicitly
+     * prepended to the std::tuple with the specified types.
+     *
+     * Items are popped in increasing chronological order. Moreover, any item
+     * with a timestamp in the future is held back until
+     * std::chrono::steady_clock reaches that timestamp.
+     */
+    template <typename... Args>
+    class ThreadSafeSchedule:
+        public LLThreadSafeQueue<ThreadSafeSchedulePrivate::TimestampedTuple<Args...>,
+                                 ThreadSafeSchedulePrivate::TimedQueue<Args...>>
+    {
+    public:
+        using DataTuple = std::tuple<Args...>;
+        using TimeTuple = ThreadSafeSchedulePrivate::TimestampedTuple<Args...>;
+
+    private:
+        using super = LLThreadSafeQueue<TimeTuple, ThreadSafeSchedulePrivate::TimedQueue<Args...>>;
+        using lock_t = typename super::lock_t;
+        using super::pop_;
+        using super::push_;
+        using super::mClosed;
+        using super::mEmptyCond;
+        using super::mCapacityCond;
+
+    public:
+        using TimePoint = ThreadSafeSchedulePrivate::TimePoint;
+        using Clock = TimePoint::clock;
+
+        ThreadSafeSchedule(U32 capacity=1024):
+            super(capacity)
+        {}
+
+        /*----------------------------- push() -----------------------------*/
+        /// explicitly pass TimeTuple
+        using super::push;
+
+        /// pass DataTuple with implicit now
+        void push(const DataTuple& tuple)
+        {
+            push(tuple_cons(Clock::now(), tuple));
+        }
+
+        /// individually pass each component of the TimeTuple
+        void push(const TimePoint& time, Args&&... args)
+        {
+            push(TimeTuple(time, std::forward<Args>(args)...));
+        }
+
+        /// individually pass every component except the TimePoint (implies
+        /// now) -- could be ambiguous if the first specified template
+        /// parameter type is also TimePoint -- we could try to disambiguate,
+        /// but a simpler approach would be for the caller to explicitly
+        /// construct DataTuple and call that overload
+        void push(Args&&... args)
+        {
+            push(Clock::now(), std::forward<Args>(args)...);
+        }
+
+        /*--------------------------- tryPush() ----------------------------*/
+        /// explicit TimeTuple
+        using super::tryPush;
+
+        /// DataTuple with implicit now
+        bool tryPush(const DataTuple& tuple)
+        {
+            return tryPush(tuple_cons(Clock::now(), tuple));
+        }
+
+        /// individually pass components
+        bool tryPush(const TimePoint& time, Args&&... args)
+        {
+            return tryPush(TimeTuple(time, std::forward<Args>(args)...));
+        }
+
+        /// individually pass components with implicit now
+        bool tryPush(Args&&... args)
+        {
+            return tryPush(Clock::now(), std::forward<Args>(args)...);
+        }
+
+        /*-------------------------- tryPushFor() --------------------------*/
+        /// explicit TimeTuple
+        using super::tryPushFor;
+
+        /// DataTuple with implicit now
+        template <typename Rep, typename Period>
+        bool tryPushFor(const std::chrono::duration<Rep, Period>& timeout,
+                        const DataTuple& tuple)
+        {
+            return tryPushFor(timeout, tuple_cons(Clock::now(), tuple));
+        }
+
+        /// individually pass components
+        template <typename Rep, typename Period>
+        bool tryPushFor(const std::chrono::duration<Rep, Period>& timeout,
+                        const TimePoint& time, Args&&... args)
+        {
+            return tryPushFor(TimeTuple(time, std::forward<Args>(args)...));
+        }
+
+        /// individually pass components with implicit now
+        template <typename Rep, typename Period>
+        bool tryPushFor(const std::chrono::duration<Rep, Period>& timeout,
+                        Args&&... args)
+        {
+            return tryPushFor(Clock::now(), std::forward<Args>(args)...);
+        }
+
+        /*------------------------- tryPushUntil() -------------------------*/
+        /// explicit TimeTuple
+        using super::tryPushUntil;
+
+        /// DataTuple with implicit now
+        template <typename Clock, typename Duration>
+        bool tryPushUntil(const std::chrono::time_point<Clock, Duration>& until,
+                          const DataTuple& tuple)
+        {
+            return tryPushUntil(until, tuple_cons(Clock::now(), tuple));
+        }
+
+        /// individually pass components
+        template <typename Clock, typename Duration>
+        bool tryPushUntil(const std::chrono::time_point<Clock, Duration>& until,
+                          const TimePoint& time, Args&&... args)
+        {
+            return tryPushUntil(until, TimeTuple(time, std::forward<Args>(args)...));
+        }
+
+        /// individually pass components with implicit now
+        template <typename Clock, typename Duration>
+        bool tryPushUntil(const std::chrono::time_point<Clock, Duration>& until,
+                          Args&&... args)
+        {
+            return tryPushUntil(until, Clock::now(), std::forward<Args>(args)...);
+        }
+
+        /*----------------------------- pop() ------------------------------*/
+        // Our consumer may or may not care about the timestamp associated
+        // with each popped item, so we allow retrieving either DataTuple or
+        // TimeTuple. One potential use would be to observe, and possibly
+        // adjust for, the time lag between the item time and the actual
+        // current time.
+
+        /// pop DataTuple by value
+        DataTuple pop()
+        {
+            return tuple_cdr(popWithTime());
+        }
+
+        /// pop TimeTuple by value
+        TimeTuple popWithTime()
+        {
+            lock_t lock(super::mLock);
+            // We can't just sit around waiting forever, given that there may
+            // be items in the queue that are not yet ready but will *become*
+            // ready in the near future. So in fact, with this class, every
+            // pop() becomes a tryPopUntil(), constrained to the timestamp of
+            // the head item. It almost doesn't matter what we specify for the
+            // caller's time constraint -- all we really care about is the
+            // head item's timestamp. Since pop() and popWithTime() are
+            // defined to wait until either an item becomes available or the
+            // queue is closed, loop until one of those things happens. The
+            // constraint we pass just determines how often we'll loop while
+            // waiting.
+            TimeTuple tt;
+            while (true)
+            {
+                // Pick a point suitably far into the future.
+                TimePoint until = TimePoint::clock::now() + std::chrono::hours(24);
+                if (tryPopUntil_(lock, until, tt))
+                    return std::move(tt);
+
+                // empty and closed: throw, just as super::pop() does
+                if (super::mStorage.empty() && super::mClosed)
+                {
+                    LLTHROW(LLThreadSafeQueueInterrupt());
+                }
+                // If not empty, we've still got items to drain.
+                // If not closed, it's worth waiting for more items.
+                // Either way, loop back to wait.
+            }
+        }
+
+        // We can use tryPop(TimeTuple&) just as it stands; the only behavior
+        // difference is in our canPop() override method.
+        using super::tryPop;
+
+        /// tryPop(DataTuple&)
+        bool tryPop(DataTuple& tuple)
+        {
+            TimeTuple tt;
+            if (! super::tryPop(tt))
+                return false;
+            tuple = tuple_cdr(std::move(tt));
+            return true;
+        }
+
+        /// tryPopFor()
+        template <typename Rep, typename Period, typename Tuple>
+        bool tryPopFor(const std::chrono::duration<Rep, Period>& timeout, Tuple& tuple)
+        {
+            // It's important to use OUR tryPopUntil() implementation, rather
+            // than delegating immediately to our base class.
+            return tryPopUntil(Clock::now() + timeout, tuple);
+        }
+
+        /// tryPopUntil(TimeTuple&)
+        template <typename Clock, typename Duration>
+        bool tryPopUntil(const std::chrono::time_point<Clock, Duration>& until,
+                         TimeTuple& tuple)
+        {
+            // super::tryPopUntil() wakes up when an item becomes available or
+            // we hit 'until', whichever comes first.
+            // Thing is, the current head of the queue could become ready
+            // sooner than either of those events, and we need to deliver it
+            // as soon as it does. Don't wait past the TimePoint of the head
+            // item. Naturally, lock the queue before peeking at mStorage.
+            return super::tryLockUntil(
+                until,
+                [this, until, &tuple](lock_t& lock)
+                {
+                    // Use our time_point_cast to allow for 'until' that's a
+                    // time_point type other than TimePoint.
+                    return tryPopUntil_(lock, time_point_cast<TimePoint>(until), tuple);
+                });
+        }
+
+        bool tryPopUntil_(lock_t& lock, const TimePoint& until, TimeTuple& tuple)
+        {
+            TimePoint adjusted = until;
+            if (! super::mStorage.empty())
+            {
+                // use whichever is earlier: the head item's timestamp, or
+                // the caller's limit
+                adjusted = min(std::get<0>(super::mStorage.front()), adjusted);
+            }
+            // now delegate to base-class tryPopUntil_()
+            return super::tryPopUntil_(lock, adjusted, tuple);
+        }
+
+        /// tryPopUntil(DataTuple&)
+        template <typename Clock, typename Duration>
+        bool tryPopUntil(const std::chrono::time_point<Clock, Duration>& until,
+                         DataTuple& tuple)
+        {
+            TimeTuple tt;
+            if (! tryPopUntil(until, tt))
+                return false;
+            tuple = tuple_cdr(std::move(tt));
+            return true;
+        }
+
+        /*------------------------------ etc. ------------------------------*/
+        // We can't hide items that aren't yet ready because we can't traverse
+        // the underlying priority_queue: it has no iterators, only top(). So
+        // a consumer could observe size() > 0 and yet tryPop() returns false.
+        // Shrug, in a multi-consumer scenario that would be expected behavior.
+        using super::size;
+        // open/closed state
+        using super::close;
+        using super::isClosed;
+        using super::done;
+
+    private:
+        // this method is called by base class pop_() every time we're
+        // considering whether to deliver the current head element
+        bool canPop(const TimeTuple& head) const override
+        {
+            // an item with a future timestamp isn't yet ready to pop
+            // (should we add some slop for overhead?)
+            return std::get<0>(head) <= Clock::now();
+        }
+    };
+
+} // namespace LL
+
+#endif /* ! defined(LL_THREADSAFESCHEDULE_H) */
--
cgit v1.2.3


From cf70766b4504f7ee745822926c526ed9c86c9339 Mon Sep 17 00:00:00 2001
From: Nat Goodspeed
Date: Wed, 6 Oct 2021 12:54:29 -0400
Subject: SL-16024: Fix ThreadSafeSchedule::tryPopFor(), tryPopUntil().

ThreadSafeSchedule::tryPopUntil() (and therefore tryPopFor()) was simply
delegating to LLThreadSafeQueue::tryPopUntil(), with an adjusted timeout
since we want to wake up as soon as the head item, if any, becomes ready. But
then we have to loop back to retry the pop to actually deal with that head
item.

In addition, ThreadSafeSchedule::popWithTime() was spinning rather than
properly blocking on a timed condition variable. Fixed.
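The pattern behind this fix, in isolation: wait on a condition variable no
later than the earlier of the caller's deadline and the head item's own
timestamp, then loop back and re-check the head rather than spinning or
giving up early. A minimal standalone sketch of that pattern follows;
TinySchedule and everything in it is invented for illustration and is not
viewer code.

#include <algorithm>
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <optional>
#include <queue>
#include <utility>
#include <vector>

// Illustrative only: a tiny timestamped queue, not the viewer's class.
struct TinySchedule
{
    using Clock = std::chrono::steady_clock;
    using Item  = std::pair<Clock::time_point, int>;

    std::mutex mMutex;
    std::condition_variable mCond;
    // std::greater puts the earliest timestamp on top()
    std::priority_queue<Item, std::vector<Item>, std::greater<Item>> mQueue;
    bool mClosed = false;

    void push(Clock::time_point when, int value)
    {
        {
            std::lock_guard<std::mutex> lk(mMutex);
            mQueue.push({when, value});
        }
        mCond.notify_one();
    }

    void close()
    {
        {
            std::lock_guard<std::mutex> lk(mMutex);
            mClosed = true;
        }
        mCond.notify_all();
    }

    // Block no later than min(until, head timestamp), then re-check the
    // head; loop until an item is delivered, the deadline passes, or the
    // queue is closed. The wait blocks on the condition variable rather
    // than spinning.
    std::optional<int> tryPopUntil(Clock::time_point until)
    {
        std::unique_lock<std::mutex> lk(mMutex);
        while (true)
        {
            if (! mQueue.empty() && mQueue.top().first <= Clock::now())
            {
                int value = mQueue.top().second;    // head item is ready
                mQueue.pop();
                return value;
            }
            if (mClosed || Clock::now() >= until)
                return std::nullopt;                // closed or timed out
            Clock::time_point wake = until;
            if (! mQueue.empty())
                wake = std::min(wake, mQueue.top().first);
            mCond.wait_until(lk, wake);             // then loop back and re-check
        }
    }
};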
---
 indra/llcommon/threadsafeschedule.h | 72 +++++++++++++++++++++++++++----------
 1 file changed, 54 insertions(+), 18 deletions(-)

(limited to 'indra/llcommon/threadsafeschedule.h')

diff --git a/indra/llcommon/threadsafeschedule.h b/indra/llcommon/threadsafeschedule.h
index 545c820f53..8ab4311ca1 100644
--- a/indra/llcommon/threadsafeschedule.h
+++ b/indra/llcommon/threadsafeschedule.h
@@ -73,11 +73,7 @@ namespace LL
     private:
         using super = LLThreadSafeQueue<TimeTuple, ThreadSafeSchedulePrivate::TimedQueue<Args...>>;
         using lock_t = typename super::lock_t;
-        using super::pop_;
-        using super::push_;
-        using super::mClosed;
-        using super::mEmptyCond;
-        using super::mCapacityCond;
+        using pop_result = typename super::pop_result;
 
     public:
         using TimePoint = ThreadSafeSchedulePrivate::TimePoint;
         using Clock = TimePoint::clock;
@@ -92,6 +88,11 @@ namespace LL
         using super::push;
 
         /// pass DataTuple with implicit now
+        // This could be ambiguous for Args with a single type. Unfortunately
+        // we can't enable_if an individual method with a condition based on
+        // the *class* template arguments, only on that method's template
+        // arguments. We could specialize this class for the single-Args case;
+        // we could minimize redundancy by breaking out a common base class...
         void push(const DataTuple& tuple)
         {
             push(tuple_cons(Clock::now(), tuple));
@@ -103,11 +104,11 @@ namespace LL
             push(TimeTuple(time, std::forward<Args>(args)...));
         }
 
-        /// individually pass every component except the TimePoint (implies
-        /// now) -- could be ambiguous if the first specified template
-        /// parameter type is also TimePoint -- we could try to disambiguate,
-        /// but a simpler approach would be for the caller to explicitly
-        /// construct DataTuple and call that overload
+        /// individually pass every component except the TimePoint (implies now)
+        // This could be ambiguous if the first specified template parameter
+        // type is also TimePoint. We could try to disambiguate, but a simpler
+        // approach would be for the caller to explicitly construct DataTuple
+        // and call that overload.
         void push(Args&&... args)
         {
             push(Clock::now(), std::forward<Args>(args)...);
@@ -199,6 +200,10 @@ namespace LL
         // current time.
 
         /// pop DataTuple by value
+        // It would be great to notice when sizeof...(Args) == 1 and directly
+        // return the first (only) value, instead of making pop()'s caller
+        // call std::get<0>(value). See push(DataTuple) remarks for why we
+        // haven't yet jumped through those hoops.
         DataTuple pop()
         {
             return tuple_cdr(popWithTime());
@@ -224,16 +229,17 @@ namespace LL
             {
                 // Pick a point suitably far into the future.
                 TimePoint until = TimePoint::clock::now() + std::chrono::hours(24);
-                if (tryPopUntil_(lock, until, tt))
+                pop_result popped = tryPopUntil_(lock, until, tt);
+                if (popped == super::POPPED)
                     return std::move(tt);
 
-                // empty and closed: throw, just as super::pop() does
-                if (super::mStorage.empty() && super::mClosed)
+                // DONE: throw, just as super::pop() does
+                if (popped == super::DONE)
                 {
                     LLTHROW(LLThreadSafeQueueInterrupt());
                 }
-                // If not empty, we've still got items to drain.
-                // If not closed, it's worth waiting for more items.
+                // WAITING: we've still got items to drain.
+                // EMPTY: not closed, so it's worth waiting for more items.
                 // Either way, loop back to wait.
             }
         }
@@ -252,6 +258,16 @@ namespace LL
             return true;
         }
 
+        /// for when Args has exactly one type
+        bool tryPop(typename std::tuple_element<1, TimeTuple>::type& value)
+        {
+            TimeTuple tt;
+            if (! super::tryPop(tt))
+                return false;
+            value = std::get<1>(std::move(tt));
+            return true;
+        }
+
         /// tryPopFor()
         template <typename Rep, typename Period, typename Tuple>
         bool tryPopFor(const std::chrono::duration<Rep, Period>& timeout, Tuple& tuple)
@@ -278,11 +294,12 @@ namespace LL
                 {
                     // Use our time_point_cast to allow for 'until' that's a
                     // time_point type other than TimePoint.
-                    return tryPopUntil_(lock, time_point_cast<TimePoint>(until), tuple);
+                    return super::POPPED ==
+                        tryPopUntil_(lock, LL::time_point_cast<TimePoint>(until), tuple);
                 });
         }
 
-        bool tryPopUntil_(lock_t& lock, const TimePoint& until, TimeTuple& tuple)
+        pop_result tryPopUntil_(lock_t& lock, const TimePoint& until, TimeTuple& tuple)
         {
             TimePoint adjusted = until;
             if (! super::mStorage.empty())
@@ -292,7 +309,14 @@ namespace LL
                 adjusted = min(std::get<0>(super::mStorage.front()), adjusted);
             }
             // now delegate to base-class tryPopUntil_()
-            return super::tryPopUntil_(lock, adjusted, tuple);
+            pop_result popped;
+            while ((popped = super::tryPopUntil_(lock, adjusted, tuple)) == super::WAITING)
+            {
+                // If super::tryPopUntil_() returns WAITING, it means there's
+                // a head item, but it's not yet time. But it's worth looping
+                // back to recheck.
+            }
+            return popped;
         }
 
         /// tryPopUntil(DataTuple&)
@@ -307,6 +331,18 @@ namespace LL
             return true;
         }
 
+        /// for when Args has exactly one type
+        template <typename Clock, typename Duration>
+        bool tryPopUntil(const std::chrono::time_point<Clock, Duration>& until,
+                         typename std::tuple_element<1, TimeTuple>::type& value)
+        {
+            TimeTuple tt;
+            if (! tryPopUntil(until, tt))
+                return false;
+            value = std::get<1>(std::move(tt));
+            return true;
+        }
+
         /*------------------------------ etc. ------------------------------*/
         // We can't hide items that aren't yet ready because we can't traverse
         // the underlying priority_queue: it has no iterators, only top(). So
--
cgit v1.2.3


From 1ef78e2afa9e8424dd5d84b2b104b31e72e9e95a Mon Sep 17 00:00:00 2001
From: Nat Goodspeed
Date: Wed, 6 Oct 2021 15:28:58 -0400
Subject: SL-16024: Work around VS bug regarding base-class enum.

---
 indra/llcommon/threadsafeschedule.h | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

(limited to 'indra/llcommon/threadsafeschedule.h')

diff --git a/indra/llcommon/threadsafeschedule.h b/indra/llcommon/threadsafeschedule.h
index 8ab4311ca1..0e70d30714 100644
--- a/indra/llcommon/threadsafeschedule.h
+++ b/indra/llcommon/threadsafeschedule.h
@@ -73,7 +73,9 @@ namespace LL
     private:
         using super = LLThreadSafeQueue<TimeTuple, ThreadSafeSchedulePrivate::TimedQueue<Args...>>;
         using lock_t = typename super::lock_t;
-        using pop_result = typename super::pop_result;
+        // VS 2017 needs this due to a bug:
+        // https://developercommunity.visualstudio.com/t/cannot-access-protected-enumerator-of-enclosing-cl/203430
+        enum pop_result { EMPTY=super::EMPTY, DONE=super::DONE, WAITING=super::WAITING, POPPED=super::POPPED };
 
     public:
         using TimePoint = ThreadSafeSchedulePrivate::TimePoint;
@@ -230,11 +232,11 @@ namespace LL
                 // Pick a point suitably far into the future.
                 TimePoint until = TimePoint::clock::now() + std::chrono::hours(24);
                 pop_result popped = tryPopUntil_(lock, until, tt);
-                if (popped == super::POPPED)
+                if (popped == POPPED)
                     return std::move(tt);
 
-                // DONE: throw, just as super::pop() does
-                if (popped == super::DONE)
+                // DONE: throw, just as super::pop() does
+                if (popped == DONE)
                 {
                     LLTHROW(LLThreadSafeQueueInterrupt());
                 }
@@ -294,7 +296,7 @@ namespace LL
                 {
                     // Use our time_point_cast to allow for 'until' that's a
                     // time_point type other than TimePoint.
-                    return super::POPPED ==
+                    return POPPED ==
                         tryPopUntil_(lock, LL::time_point_cast<TimePoint>(until), tuple);
                 });
         }
@@ -310,7 +312,7 @@ namespace LL
             }
             // now delegate to base-class tryPopUntil_()
             pop_result popped;
-            while ((popped = super::tryPopUntil_(lock, adjusted, tuple)) == super::WAITING)
+            while ((popped = pop_result(super::tryPopUntil_(lock, adjusted, tuple))) == WAITING)
             {
                 // If super::tryPopUntil_() returns WAITING, it means there's
                 // a head item, but it's not yet time. But it's worth looping
--
cgit v1.2.3


From 623ac79120a417ec445ce5c106a907fe46734309 Mon Sep 17 00:00:00 2001
From: Nat Goodspeed
Date: Thu, 7 Oct 2021 15:32:51 -0400
Subject: SL-16024: Add LL::WorkQueue for passing work items between threads.

A typical WorkQueue has a string name, which can be used to find it to post
work to it. "Work" is a nullary callable.

WorkQueue is a multi-producer, multi-consumer thread-safe queue: multiple
threads can service the WorkQueue, multiple threads can post work to it.

Work can be scheduled in the future by submitting with a timestamp. In
addition, a given work item can be scheduled to run on a recurring basis.

A requesting thread servicing a WorkQueue of its own, such as the viewer's
main thread, can submit work to another WorkQueue along with a callback to be
passed the result (of arbitrary type) of the first work item. The callback is
posted to the originating WorkQueue, permitting safe data exchange between
participating threads.

Methods are provided for different kinds of servicing threads. runUntilClose()
is useful for a simple worker thread. runFor(duration) devotes no more than a
specified time slice to that WorkQueue, e.g. for use by the main thread.
---
 indra/llcommon/threadsafeschedule.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'indra/llcommon/threadsafeschedule.h')

diff --git a/indra/llcommon/threadsafeschedule.h b/indra/llcommon/threadsafeschedule.h
index 0e70d30714..c8ad23532b 100644
--- a/indra/llcommon/threadsafeschedule.h
+++ b/indra/llcommon/threadsafeschedule.h
@@ -78,6 +78,7 @@ namespace LL
         enum pop_result { EMPTY=super::EMPTY, DONE=super::DONE, WAITING=super::WAITING, POPPED=super::POPPED };
 
     public:
+        using Closed = LLThreadSafeQueueInterrupt;
         using TimePoint = ThreadSafeSchedulePrivate::TimePoint;
         using Clock = TimePoint::clock;
--
cgit v1.2.3
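For orientation, here is a hypothetical usage sketch of ThreadSafeSchedule as
built up by the patches above. It assumes the viewer's llcommon headers and
build environment, uses only the API shown in these diffs, and is not a test
from the tree.

// Hypothetical usage sketch; assumes the viewer's llcommon headers.
#include "threadsafeschedule.h"

#include <chrono>
#include <iostream>
#include <string>
#include <tuple>

int main()
{
    using Schedule = LL::ThreadSafeSchedule<std::string>;
    Schedule schedule;

    // One item deliverable immediately, one half a second from now.
    schedule.push(Schedule::DataTuple("immediate"));
    schedule.push(Schedule::Clock::now() + std::chrono::milliseconds(500),
                  std::string("delayed"));

    // pop() blocks until the head item's timestamp is reached, so
    // "immediate" arrives right away and "delayed" after about 500ms.
    for (int i = 0; i < 2; ++i)
    {
        Schedule::DataTuple item = schedule.pop();
        std::cout << std::get<0>(item) << '\n';
    }

    // Once the queue is closed and drained, a further pop() would throw
    // Schedule::Closed (the alias added in the last patch above).
    schedule.close();
    return 0;
}

Because pop() itself waits for the head item's timestamp, the consumer thread
needs no timer of its own; the delivery schedule lives entirely in the queue.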