path: root/indra/llcommon/llmutex.cpp
Diffstat (limited to 'indra/llcommon/llmutex.cpp')
-rw-r--r--  indra/llcommon/llmutex.cpp  225
1 file changed, 208 insertions(+), 17 deletions(-)
diff --git a/indra/llcommon/llmutex.cpp b/indra/llcommon/llmutex.cpp
index 0273dd5970..c90c00b5f4 100644
--- a/indra/llcommon/llmutex.cpp
+++ b/indra/llcommon/llmutex.cpp
@@ -28,23 +28,35 @@
#include "llmutex.h"
#include "llthread.h"
#include "lltimer.h"
+#include "llcoros.h"
-//============================================================================
+//---------------------------------------------------------------------
+//
+// LLMutex
+//
LLMutex::LLMutex() :
mCount(0)
{
}
-
LLMutex::~LLMutex()
{
}
-
void LLMutex::lock()
{
- LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
+
+ // LLMutex is not coroutine aware and should not be used from a coroutine
+ // If your code is running in a coroutine, you should use LLCoros::Mutex instead
+ // NOTE: If the stack trace you're staring at contains non-thread-safe code,
+ // you should use LLAppViewer::instance().postToMainThread() to shuttle execution
+ // back to the main loop.
+ // NOTE: If you got here from seeing this assert in your log and you're not seeing
+ // a stack trace that points here, put a breakpoint in on_main_coro and try again.
+ llassert(LLCoros::on_main_coro());
+
if(isSelfLocked())
{ //redundant lock
mCount++;
@@ -56,9 +68,9 @@ void LLMutex::lock()
#if MUTEX_DEBUG
// Have to have the lock before we can access the debug info
auto id = LLThread::currentID();
- if (mIsLocked[id] != FALSE)
+ if (mIsLocked[id] != false)
LL_ERRS() << "Already locked in Thread: " << id << LL_ENDL;
- mIsLocked[id] = TRUE;
+ mIsLocked[id] = true;
#endif
mLockingThread = LLThread::currentID();
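
A minimal sketch of the coroutine guidance in the comment above (illustrative only, not part of this diff). It assumes LLCoros::Mutex is a std-style lockable and that postToMainThread() accepts an arbitrary callable; check llcoros.h and llappviewer.h for the real declarations.

#include <mutex>
#include "llcoros.h"        // LLCoros::Mutex, named in the comment above
#include "llappviewer.h"    // LLAppViewer::instance() (assumed header)

static LLCoros::Mutex sCoroDataMutex;   // hypothetical coroutine-aware mutex

void exampleCoroutineBody()
{
    // Inside a coroutine: take the coroutine-aware mutex instead of LLMutex,
    // so a contended lock suspends only this coroutine, not the whole thread.
    std::unique_lock<LLCoros::Mutex> lock(sCoroDataMutex);
    // ... touch coroutine-shared state ...
}

void exampleShuttleToMainLoop()
{
    // If the guarded code is not thread-safe at all, run it back on the main loop
    // (postToMainThread() usage assumed from the comment above).
    LLAppViewer::instance().postToMainThread([]
    {
        // ... main-thread-only work ...
    });
}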
@@ -66,7 +78,8 @@ void LLMutex::lock()
void LLMutex::unlock()
{
- LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
+
if (mCount > 0)
{ //not the root unlock
mCount--;
@@ -76,9 +89,9 @@ void LLMutex::unlock()
#if MUTEX_DEBUG
// Access the debug info while we have the lock
auto id = LLThread::currentID();
- if (mIsLocked[id] != TRUE)
- LL_ERRS() << "Not locked in Thread: " << id << LL_ENDL;
- mIsLocked[id] = FALSE;
+ if (mIsLocked[id] != true)
+ LL_ERRS() << "Not locked in Thread: " << id << LL_ENDL;
+ mIsLocked[id] = false;
#endif
mLockingThread = LLThread::id_t();
@@ -112,7 +125,7 @@ LLThread::id_t LLMutex::lockingThread() const
bool LLMutex::trylock()
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
- if(isSelfLocked())
+ if (isSelfLocked())
{ //redundant lock
mCount++;
return true;
@@ -126,28 +139,203 @@ bool LLMutex::trylock()
#if MUTEX_DEBUG
// Have to have the lock before we can access the debug info
auto id = LLThread::currentID();
- if (mIsLocked[id] != FALSE)
+ if (mIsLocked[id] != false)
LL_ERRS() << "Already locked in Thread: " << id << LL_ENDL;
- mIsLocked[id] = TRUE;
+ mIsLocked[id] = true;
#endif
mLockingThread = LLThread::currentID();
return true;
}
-//============================================================================
+//---------------------------------------------------------------------
+//
+// LLSharedMutex
+//
+LLSharedMutex::LLSharedMutex()
+: mLockingThreads(2) // Reserve 2 slots in the map hash table
+, mIsShared(false)
+{
+}
+
+bool LLSharedMutex::isLocked() const
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
+ std::lock_guard<std::mutex> lock(mLockMutex);
+
+ return !mLockingThreads.empty();
+}
+
+bool LLSharedMutex::isThreadLocked() const
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
+ LLThread::id_t current_thread = LLThread::currentID();
+ std::lock_guard<std::mutex> lock(mLockMutex);
+
+ const_iterator it = mLockingThreads.find(current_thread);
+ return it != mLockingThreads.end();
+}
+
+void LLSharedMutex::lockShared()
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
+ LLThread::id_t current_thread = LLThread::currentID();
+
+ mLockMutex.lock();
+ iterator it = mLockingThreads.find(current_thread);
+ if (it != mLockingThreads.end())
+ {
+ it->second++;
+ }
+ else
+ {
+ // Acquire the mutex immediately if the mutex is not locked exclusively
+ // or enter a locking state if the mutex is already locked exclusively
+ mLockMutex.unlock();
+ mSharedMutex.lock_shared();
+ mLockMutex.lock();
+ // Continue after acquiring the mutex
+ mLockingThreads.emplace(std::make_pair(current_thread, 1));
+ mIsShared = true;
+ }
+ mLockMutex.unlock();
+}
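
A minimal sketch (not part of this diff) of the per-thread recursion handled by the branch above: the same thread may call lockShared() repeatedly, and the shared lock is only released after the matching number of unlockShared() calls.

LLSharedMutex gExampleMutex;        // hypothetical instance for illustration

void nestedReaders()
{
    gExampleMutex.lockShared();     // first call: thread registered in mLockingThreads with count 1
    gExampleMutex.lockShared();     // same thread again: only the count bumps to 2, mSharedMutex is untouched
    // ... read shared state ...
    gExampleMutex.unlockShared();   // count drops back to 1
    gExampleMutex.unlockShared();   // count reaches 0: entry erased and mSharedMutex released
}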
+
+void LLSharedMutex::lockExclusive()
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
+ LLThread::id_t current_thread = LLThread::currentID();
+
+ mLockMutex.lock();
+ iterator it = mLockingThreads.find(current_thread);
+ if (it != mLockingThreads.end())
+ {
+ if (mIsShared)
+ {
+ // The mutex is already locked in the current thread
+ // but this lock is SHARED (not EXCLUSIVE)
+ // We can't lock it again, the lock stays shared
+ // This can lead to a collision (theoretically)
+ llassert_always(!"The current thread is already locked SHARED and can't be locked EXCLUSIVE");
+ }
+ it->second++;
+ }
+ else
+ {
+ // Acquire the mutex immediately if mLockingThreads is empty
+ // or enter a locking state if mLockingThreads is not empty
+ mLockMutex.unlock();
+ mSharedMutex.lock();
+ mLockMutex.lock();
+ // Continue after acquiring the mutex (and possibly exiting the locking state)
+ mLockingThreads.emplace(std::make_pair(current_thread, 1));
+ mIsShared = false;
+ }
+ mLockMutex.unlock();
+}
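
A minimal caller-side sketch (not part of this diff) of the upgrade restriction asserted above: a thread that already holds the shared lock must release it before taking the exclusive lock.

LLSharedMutex gExampleMutex;        // hypothetical instance for illustration

void readerThenWriter()
{
    // Wrong: lockExclusive() after lockShared() on the same thread trips the
    // llassert_always above, because the existing lock stays shared.
    //     gExampleMutex.lockShared();
    //     gExampleMutex.lockExclusive();

    // Right: drop the shared lock first, then take the exclusive one.
    gExampleMutex.lockShared();
    bool needs_update = /* inspect shared state */ true;
    gExampleMutex.unlockShared();

    if (needs_update)
    {
        gExampleMutex.lockExclusive();
        // ... mutate shared state ...
        gExampleMutex.unlockExclusive();
    }
}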
+
+bool LLSharedMutex::trylockShared()
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
+ LLThread::id_t current_thread = LLThread::currentID();
+ std::lock_guard<std::mutex> lock(mLockMutex);
+
+ iterator it = mLockingThreads.find(current_thread);
+ if (it != mLockingThreads.end())
+ {
+ it->second++;
+ }
+ else
+ {
+ if (!mSharedMutex.try_lock_shared())
+ return false;
+
+ mLockingThreads.emplace(std::make_pair(current_thread, 1));
+ mIsShared = true;
+ }
+
+ return true;
+}
+
+bool LLSharedMutex::trylockExclusive()
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
+ LLThread::id_t current_thread = LLThread::currentID();
+ std::lock_guard<std::mutex> lock(mLockMutex);
+
+ if (mLockingThreads.size() == 1 && mLockingThreads.begin()->first == current_thread)
+ {
+ mLockingThreads.begin()->second++;
+ }
+ else
+ {
+ if (!mSharedMutex.try_lock())
+ return false;
+
+ mLockingThreads.emplace(std::make_pair(current_thread, 1));
+ mIsShared = false;
+ }
+
+ return true;
+}
+
+void LLSharedMutex::unlockShared()
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
+ LLThread::id_t current_thread = LLThread::currentID();
+ std::lock_guard<std::mutex> lock(mLockMutex);
+
+ iterator it = mLockingThreads.find(current_thread);
+ if (it != mLockingThreads.end())
+ {
+ if (it->second > 1)
+ {
+ it->second--;
+ }
+ else
+ {
+ mLockingThreads.erase(it);
+ mSharedMutex.unlock_shared();
+ }
+ }
+}
+
+void LLSharedMutex::unlockExclusive()
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
+ LLThread::id_t current_thread = LLThread::currentID();
+ std::lock_guard<std::mutex> lock(mLockMutex);
+
+ iterator it = mLockingThreads.find(current_thread);
+ if (it != mLockingThreads.end())
+ {
+ if (it->second > 1)
+ {
+ it->second--;
+ }
+ else
+ {
+ mLockingThreads.erase(it);
+ mSharedMutex.unlock();
+ }
+ }
+}
+
+
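
A minimal reader/writer usage sketch for the class implemented above (illustrative only, not part of this diff); the RAII guard here is hypothetical, and any lock guards actually declared in llmutex.h should be preferred.

#include <map>
#include <string>

// Hypothetical scope guard for the shared side; llmutex.h may already provide one.
class ExampleSharedLock
{
public:
    explicit ExampleSharedLock(LLSharedMutex& mutex) : mMutex(mutex) { mMutex.lockShared(); }
    ~ExampleSharedLock() { mMutex.unlockShared(); }
private:
    LLSharedMutex& mMutex;
};

LLSharedMutex gCacheMutex;              // hypothetical instances for illustration
std::map<int, std::string> gCache;

std::string readEntry(int key)
{
    ExampleSharedLock lock(gCacheMutex);    // many readers can hold the shared lock at once
    auto it = gCache.find(key);
    return it != gCache.end() ? it->second : std::string();
}

void writeEntry(int key, const std::string& value)
{
    gCacheMutex.lockExclusive();            // a writer gets sole access
    gCache[key] = value;
    gCacheMutex.unlockExclusive();
}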
+//---------------------------------------------------------------------
+//
+// LLCondition
+//
LLCondition::LLCondition() :
LLMutex()
{
}
-
LLCondition::~LLCondition()
{
}
-
void LLCondition::wait()
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
@@ -168,7 +356,10 @@ void LLCondition::broadcast()
}
-
+//---------------------------------------------------------------------
+//
+// LLMutexTrylock
+//
LLMutexTrylock::LLMutexTrylock(LLMutex* mutex)
: mMutex(mutex),
mLocked(false)