1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
|
/**
* @file llthreadsafequeue.h
* @brief Queue protected with mutexes for cross-thread use
*
* $LicenseInfo:firstyear=2004&license=viewerlgpl$
* Second Life Viewer Source Code
* Copyright (C) 2010, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation;
* version 2.1 of the License only.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
* $/LicenseInfo$
*/
#ifndef LL_LLTHREADSAFEQUEUE_H
#define LL_LLTHREADSAFEQUEUE_H
#include "llcoros.h"
#include LLCOROS_MUTEX_HEADER
#include <boost/fiber/timed_mutex.hpp>
#include LLCOROS_CONDVAR_HEADER
#include "llexception.h"
#include "mutex.h"
#include <chrono>
#include <queue>
#include <string>
/*****************************************************************************
* LLThreadSafeQueue
*****************************************************************************/
//
// A general queue exception.
//
/// Base exception type for all LLThreadSafeQueue failures.
class LL_COMMON_API LLThreadSafeQueueError:
    public LLException
{
public:
    LLThreadSafeQueueError(const std::string& message):
        LLException(message)
    {}
};
//
// An exception raised when blocking operations are interrupted.
//
/// Thrown when a blocking queue operation is cut short by close().
class LL_COMMON_API LLThreadSafeQueueInterrupt:
    public LLThreadSafeQueueError
{
public:
    LLThreadSafeQueueInterrupt():
        LLThreadSafeQueueError("queue operation interrupted")
    {}
};
/**
 * Implements a thread safe FIFO with a bounded capacity.
 *
 * Producers push() (blocking while full) and consumers pop() (blocking while
 * empty); try/timed variants of both are provided. close() immediately stops
 * the producer end; the consumer end keeps draining until empty, then fails.
 * The mutex and condition variables are Boost.Fiber types, so blocking
 * suspends the calling fiber/coroutine rather than only the OS thread.
 */
// Let the default std::queue default to underlying std::deque. Override if
// desired.
template<typename ElementT, typename QueueT=std::queue<ElementT>>
class LLThreadSafeQueue
{
public:
typedef ElementT value_type;
// Limiting the number of pending items prevents unbounded growth of the
// underlying queue.
LLThreadSafeQueue(size_t capacity = 1024);
virtual ~LLThreadSafeQueue() {}
// Add an element to the queue (will block if the queue has reached
// capacity).
//
// This call will raise an interrupt error if the queue is closed while
// the caller is blocked.
template <typename T>
void push(T&& element);
// legacy name
void pushFront(ElementT const & element) { return push(element); }
// Add an element to the queue (will block if the queue has reached
// capacity). Return false if the queue is closed before push is possible.
template <typename T>
bool pushIfOpen(T&& element);
// Try to add an element to the queue without blocking. Returns
// true only if the element was actually added.
template <typename T>
bool tryPush(T&& element);
// legacy name
bool tryPushFront(ElementT const & element) { return tryPush(element); }
// Try to add an element to the queue, blocking if full but with timeout
// after specified duration. Returns true if the element was added.
// There are potentially two different timeouts involved: how long to try
// to lock the mutex, versus how long to wait for the queue to stop being
// full. Careful settings for each timeout might be orders of magnitude
// apart. However, this method conflates them.
template <typename Rep, typename Period, typename T>
bool tryPushFor(const std::chrono::duration<Rep, Period>& timeout,
T&& element);
// legacy name
template <typename Rep, typename Period>
bool tryPushFrontFor(const std::chrono::duration<Rep, Period>& timeout,
ElementT const & element) { return tryPushFor(timeout, element); }
// Try to add an element to the queue, blocking if full but with
// timeout at specified time_point. Returns true if the element was added.
template <typename Clock, typename Duration, typename T>
bool tryPushUntil(const std::chrono::time_point<Clock, Duration>& until,
T&& element);
// no legacy name because this is a newer method
// Pop the element at the head of the queue (will block if the queue is
// empty).
//
// This call will raise an interrupt error if the queue is closed while
// the caller is blocked.
ElementT pop(void);
// legacy name
ElementT popBack(void) { return pop(); }
// Pop an element from the head of the queue if there is one available.
// Returns true only if an element was popped.
bool tryPop(ElementT & element);
// legacy name
bool tryPopBack(ElementT & element) { return tryPop(element); }
// Pop the element at the head of the queue, blocking if empty, with
// timeout after specified duration. Returns true if an element was popped.
template <typename Rep, typename Period>
bool tryPopFor(const std::chrono::duration<Rep, Period>& timeout, ElementT& element);
// no legacy name because this is a newer method
// Pop the element at the head of the queue, blocking if empty, with
// timeout at specified time_point. Returns true if an element was popped.
template <typename Clock, typename Duration>
bool tryPopUntil(const std::chrono::time_point<Clock, Duration>& until,
ElementT& element);
// no legacy name because this is a newer method
// Returns the size of the queue.
size_t size();
// Returns the capacity of the queue.
// NOTE(review): mCapacity is size_t but this returns U32, so a capacity
// above ~4 billion would truncate -- harmless at current sizes, but
// confirm before widening capacities.
U32 capacity() { return mCapacity; }
// closes the queue:
// - every subsequent push() call will throw LLThreadSafeQueueInterrupt
// - every subsequent tryPush() call will return false
// - pop() calls will return normally until the queue is drained, then
// every subsequent pop() will throw LLThreadSafeQueueInterrupt
// - tryPop() calls will return normally until the queue is drained,
// then every subsequent tryPop() call will return false
void close();
// producer end: are we prevented from pushing any additional items?
bool isClosed();
// consumer end: are we done, is the queue entirely drained?
bool done();
protected:
typedef QueueT queue_type;
// underlying container; only ever touched with mLock held
QueueT mStorage;
// maximum number of pending elements before push() blocks
size_t mCapacity;
// set once by close(); never cleared
bool mClosed;
// timed_mutex (rather than plain mutex) supports the tryLockUntil() family
boost::fibers::timed_mutex mLock;
typedef std::unique_lock<decltype(mLock)> lock_t;
// signaled when an element is popped (space now available)
boost::fibers::condition_variable_any mCapacityCond;
// signaled when an element is pushed (data now available)
boost::fibers::condition_variable_any mEmptyCond;
enum pop_result { EMPTY, DONE, WAITING, POPPED };
// implementation logic, suitable for passing to tryLockUntil()
template <typename Clock, typename Duration>
pop_result tryPopUntil_(lock_t& lock,
const std::chrono::time_point<Clock, Duration>& until,
ElementT& element);
// if we're able to lock immediately, do so and run the passed callable,
// which must accept lock_t& and return bool
template <typename CALLABLE>
bool tryLock(CALLABLE&& callable);
// if we're able to lock before the passed time_point, do so and run the
// passed callable, which must accept lock_t& and return bool
template <typename Clock, typename Duration, typename CALLABLE>
bool tryLockUntil(const std::chrono::time_point<Clock, Duration>& until,
CALLABLE&& callable);
// while lock is locked, really push the passed element, if we can
template <typename T>
bool push_(lock_t& lock, T&& element);
// while lock is locked, really pop the head element, if we can
pop_result pop_(lock_t& lock, ElementT& element);
// Is the current head element ready to pop? We say yes; subclass can
// override as needed.
virtual bool canPop(const ElementT& head) const { return true; }
};
/*****************************************************************************
* PriorityQueueAdapter
*****************************************************************************/
namespace LL
{
    /**
     * Adapter giving std::priority_queue the slice of the std::queue API
     * that LLThreadSafeQueue relies on. The one real difference between the
     * two is that priority_queue exposes its next element as top() rather
     * than front(); everything else simply forwards.
     */
    template <typename T, typename Container=std::vector<T>,
              typename Compare=std::less<typename Container::value_type>>
    class PriorityQueueAdapter
    {
    public:
        // republish the wrapped queue's type vocabulary
        typedef std::priority_queue<T, Container, Compare> queue_type;
        typedef typename queue_type::container_type container_type;
        typedef typename queue_type::value_compare value_compare;
        typedef typename queue_type::value_type value_type;
        typedef typename queue_type::size_type size_type;
        typedef typename queue_type::reference reference;
        typedef typename queue_type::const_reference const_reference;

        // std::priority_queue defines only const top(), so front() is
        // likewise const-only here. (priority_queue has no back(); happily
        // LLThreadSafeQueue never asks for it.)
        const_reference front() const { return mQueue.top(); }

        // straight pass-throughs to the underlying priority_queue
        bool empty() const { return mQueue.empty(); }
        size_type size() const { return mQueue.size(); }
        void push(const value_type& value) { mQueue.push(value); }
        void push(value_type&& value) { mQueue.push(std::move(value)); }
        template <typename... Args>
        void emplace(Args&&... args) { mQueue.emplace(std::forward<Args>(args)...); }
        void pop() { mQueue.pop(); }

    private:
        queue_type mQueue;
    };
} // namespace LL
/*****************************************************************************
* LLThreadSafeQueue implementation
*****************************************************************************/
// Construct an open queue that admits at most `capacity` pending elements
// before push() blocks.
template<typename ElementT, typename QueueT>
LLThreadSafeQueue<ElementT, QueueT>::LLThreadSafeQueue(size_t capacity) :
mCapacity(capacity),
mClosed(false)
{
}
// Acquire the mutex only if that is possible without blocking; on success,
// run `callable` (which takes lock_t& and returns bool) and propagate its
// result. Returns false when the lock could not be taken immediately.
template <typename ElementT, typename QueueT>
template <typename CALLABLE>
bool LLThreadSafeQueue<ElementT, QueueT>::tryLock(CALLABLE&& callable)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
    lock_t attempt(mLock, std::defer_lock);
    if (attempt.try_lock())
    {
        return std::forward<CALLABLE>(callable)(attempt);
    }
    return false;
}
// Acquire the mutex with an absolute deadline; on success, run `callable`
// (which takes lock_t& and returns bool) and propagate its result. Returns
// false when the deadline passes before the lock is obtained.
template <typename ElementT, typename QueueT>
template <typename Clock, typename Duration, typename CALLABLE>
bool LLThreadSafeQueue<ElementT, QueueT>::tryLockUntil(
    const std::chrono::time_point<Clock, Duration>& until,
    CALLABLE&& callable)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
    lock_t attempt(mLock, std::defer_lock);
    if (attempt.try_lock_until(until))
    {
        return std::forward<CALLABLE>(callable)(attempt);
    }
    return false;
}
// While `lock` holds mLock, try to append `element` to mStorage.
// Returns false -- leaving the lock HELD and `element` untouched -- when the
// queue is at capacity, so the caller can wait and retry. On success the
// lock is RELEASED before notifying, so a woken consumer can acquire it
// without immediately blocking again; returns true with the lock unlocked.
template <typename ElementT, typename QueueT>
template <typename T>
bool LLThreadSafeQueue<ElementT, QueueT>::push_(lock_t& lock, T&& element)
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
if (mStorage.size() >= mCapacity)
return false;
mStorage.push(std::forward<T>(element));
lock.unlock();
// now that we've pushed, if somebody's been waiting to pop, signal them
mEmptyCond.notify_one();
return true;
}
// Append `element`, blocking while the queue is full. Unlike push(), a
// closed queue is reported by returning false rather than by throwing.
template <typename ElementT, typename QueueT>
template <typename T>
bool LLThreadSafeQueue<ElementT, QueueT>::pushIfOpen(T&& element)
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
lock_t lock1(mLock);
while (true)
{
// On the producer side, it doesn't matter whether the queue has been
// drained or not: the moment either end calls close(), further push()
// operations will fail.
if (mClosed)
return false;
// Forwarding on every iteration is safe: push_() only moves from
// `element` when it actually stores it (success ends the loop), and
// on success it has already released lock1 for us.
if (push_(lock1, std::forward<T>(element)))
return true;
// Storage Full. Wait for signal (wait() reacquires lock1 on wakeup).
mCapacityCond.wait(lock1);
}
}
// Append an element, blocking while the queue is full.
// Throws LLThreadSafeQueueInterrupt if the queue is (or becomes) closed
// while the caller is waiting.
template <typename ElementT, typename QueueT>
template<typename T>
void LLThreadSafeQueue<ElementT, QueueT>::push(T&& element)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
    bool pushed = pushIfOpen(std::forward<T>(element));
    if (! pushed)
    {
        // pushIfOpen() reports a closed queue with false; promote that to
        // the exception our contract promises.
        LLTHROW(LLThreadSafeQueueInterrupt());
    }
}
// Try to append an element without blocking. Returns true only if the
// element was actually added (lock acquired immediately, queue open, and
// not at capacity).
template<typename ElementT, typename QueueT>
template<typename T>
bool LLThreadSafeQueue<ElementT, QueueT>::tryPush(T&& element)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
    // Capture with std::forward, not std::move: T is a forwarding
    // reference, and std::move would silently steal a caller's non-const
    // lvalue argument even when the push fails. std::forward copies from
    // lvalues and moves only from rvalues.
    // The lambda is mutable so the inner std::move can really move out of
    // the capture (moving a non-mutable -- hence const -- capture would
    // degrade to a copy).
    return tryLock(
        [this, element=std::forward<T>(element)](lock_t& lock) mutable
        {
            if (mClosed)
                return false;
            // push_() only consumes `element` on success, and we don't retry.
            return push_(lock, std::move(element));
        });
}
// Bounded-wait push: translate the relative timeout into an absolute
// deadline exactly once, so the retries inside tryPushUntil() don't each
// restart the clock.
template <typename ElementT, typename QueueT>
template <typename Rep, typename Period, typename T>
bool LLThreadSafeQueue<ElementT, QueueT>::tryPushFor(
    const std::chrono::duration<Rep, Period>& timeout,
    T&& element)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
    auto deadline = std::chrono::steady_clock::now() + timeout;
    return tryPushUntil(deadline, std::forward<T>(element));
}
// Try to append an element, blocking while the queue is full but giving up
// at the absolute deadline `until`. Returns true only if the element was
// actually added.
template <typename ElementT, typename QueueT>
template <typename Clock, typename Duration, typename T>
bool LLThreadSafeQueue<ElementT, QueueT>::tryPushUntil(
    const std::chrono::time_point<Clock, Duration>& until,
    T&& element)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
    // Capture with std::forward, not std::move: T is a forwarding
    // reference, and std::move would silently steal a caller's non-const
    // lvalue argument even when the push times out or the queue is closed.
    // `mutable` lets the inner std::move really move out of the capture.
    return tryLockUntil(
        until,
        [this, until, element=std::forward<T>(element)](lock_t& lock) mutable
        {
            while (true)
            {
                if (mClosed)
                {
                    return false;
                }
                // push_() only consumes `element` on success, so retrying
                // with std::move on each iteration is safe.
                if (push_(lock, std::move(element)))
                    return true;
                // Storage Full. Wait for signal.
                if (LLCoros::cv_status::timeout == mCapacityCond.wait_until(lock, until))
                {
                    // timed out -- formally we might recheck both conditions above
                    return false;
                }
                // If we didn't time out, we were notified for some reason. Loop back
                // to check.
            }
        });
}
// While `lock` holds mLock, try to pop the head element into `element`.
// Returns:
//  - DONE    queue empty and closed: nothing more will ever arrive
//  - EMPTY   queue empty but still open (caller may wait for a producer)
//  - WAITING head element exists but canPop() vetoed it
//  - POPPED  `element` was populated -- in which case the lock has been
//            RELEASED before notifying, so a woken producer can acquire it
//            without immediately blocking again.
template <typename ElementT, typename QueueT>
typename LLThreadSafeQueue<ElementT, QueueT>::pop_result
LLThreadSafeQueue<ElementT, QueueT>::pop_(lock_t& lock, ElementT& element)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
    // If mStorage is empty, there's no head element.
    if (mStorage.empty())
        return mClosed? DONE : EMPTY;
    // If there's a head element, pass it to canPop() to see if it's ready to pop.
    if (! canPop(mStorage.front()))
        return WAITING;
    // front() is the element about to pop(); move rather than copy it out,
    // since pop() is about to destroy it anyway. (Containers whose front()
    // is const -- e.g. PriorityQueueAdapter -- harmlessly degrade to a copy.)
    element = std::move(mStorage.front());
    mStorage.pop();
    lock.unlock();
    // now that we've popped, if somebody's been waiting to push, signal them
    mCapacityCond.notify_one();
    return POPPED;
}
// Remove and return the head element, blocking while the queue is empty.
// Throws LLThreadSafeQueueInterrupt once the queue is closed AND fully
// drained; elements already queued before close() are still delivered.
template<typename ElementT, typename QueueT>
ElementT LLThreadSafeQueue<ElementT, QueueT>::pop(void)
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
lock_t lock1(mLock);
ElementT value;
while (true)
{
// On the consumer side, we always try to pop before checking mClosed
// so we can finish draining the queue.
pop_result popped = pop_(lock1, value);
if (popped == POPPED)
// don't use std::move when returning local value because
// it prevents the compiler from optimizing with copy elision
//return std::move(value);
return value;
// Once the queue is DONE, there will never be any more coming.
if (popped == DONE)
{
LLTHROW(LLThreadSafeQueueInterrupt());
}
// If we didn't pop because WAITING, i.e. canPop() returned false,
// then even if the producer end has been closed, there's still at
// least one item to drain: wait for it. Or we might be EMPTY, with
// the queue still open. Either way, wait for signal.
mEmptyCond.wait(lock1);
}
}
// Pop the head element into `element` without blocking. Returns true only
// if something was actually popped.
template<typename ElementT, typename QueueT>
bool LLThreadSafeQueue<ElementT, QueueT>::tryPop(ElementT & element)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
    auto popper = [this, &element](lock_t& lock)
    {
        // EMPTY, DONE and WAITING all collapse to "nothing popped":
        // tryPop() on a closed queue fails simply because nothing new can
        // ever be pushed into it.
        return POPPED == pop_(lock, element);
    };
    return tryLock(popper);
}
// Bounded-wait pop: translate the relative timeout into an absolute
// deadline exactly once, so the retries inside tryPopUntil() don't each
// restart the clock.
template <typename ElementT, typename QueueT>
template <typename Rep, typename Period>
bool LLThreadSafeQueue<ElementT, QueueT>::tryPopFor(
    const std::chrono::duration<Rep, Period>& timeout,
    ElementT& element)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
    auto deadline = std::chrono::steady_clock::now() + timeout;
    return tryPopUntil(deadline, element);
}
// Pop the head element into `element`, blocking while empty but giving up
// at the absolute deadline `until`. Returns true only if something was
// actually popped.
template <typename ElementT, typename QueueT>
template <typename Clock, typename Duration>
bool LLThreadSafeQueue<ElementT, QueueT>::tryPopUntil(
    const std::chrono::time_point<Clock, Duration>& until,
    ElementT& element)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
    auto worker = [this, until, &element](lock_t& lock)
    {
        // EMPTY, DONE and WAITING all collapse to "nothing popped"
        return POPPED == tryPopUntil_(lock, until, element);
    };
    return tryLockUntil(until, worker);
}
// body of tryPopUntil(), called once we have the lock
// Loops until an element is popped (POPPED), the queue is found closed and
// drained (DONE), or the deadline passes -- in which case the last pop_()
// result (EMPTY or WAITING) is returned so the caller can distinguish.
template <typename ElementT, typename QueueT>
template <typename Clock, typename Duration>
typename LLThreadSafeQueue<ElementT, QueueT>::pop_result
LLThreadSafeQueue<ElementT, QueueT>::tryPopUntil_(
lock_t& lock,
const std::chrono::time_point<Clock, Duration>& until,
ElementT& element)
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
while (true)
{
pop_result popped = pop_(lock, element);
if (popped == POPPED || popped == DONE)
{
// If we succeeded, great! If we've drained the last item, so be
// it. Either way, break the loop and tell caller.
return popped;
}
// EMPTY or WAITING: wait for signal.
if (LLCoros::cv_status::timeout == mEmptyCond.wait_until(lock, until))
{
// timed out -- formally we might recheck
// as it is, break loop
return popped;
}
// If we didn't time out, we were notified for some reason. Loop back
// to check.
}
}
// Number of elements currently pending (a snapshot: may change the moment
// the lock is released).
template<typename ElementT, typename QueueT>
size_t LLThreadSafeQueue<ElementT, QueueT>::size(void)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
    lock_t hold(mLock);
    return mStorage.size();
}
// Close the queue: producers fail immediately; consumers may keep draining
// whatever is already queued. Safe to call from either end, more than once.
template<typename ElementT, typename QueueT>
void LLThreadSafeQueue<ElementT, QueueT>::close()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
    {
        lock_t hold(mLock);
        mClosed = true;
    }
    // With the lock dropped, rouse every waiter so they notice the closure:
    // blocked pop() calls...
    mEmptyCond.notify_all();
    // ...and blocked push() calls.
    mCapacityCond.notify_all();
}
// Producer's view: has close() been called, i.e. are further pushes refused?
template<typename ElementT, typename QueueT>
bool LLThreadSafeQueue<ElementT, QueueT>::isClosed()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
    lock_t hold(mLock);
    bool closed = mClosed;
    return closed;
}
// Consumer's view: closed AND drained, so no element will ever arrive again.
template<typename ElementT, typename QueueT>
bool LLThreadSafeQueue<ElementT, QueueT>::done()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
    lock_t hold(mLock);
    bool finished = mClosed && mStorage.empty();
    return finished;
}
#endif
|