Diffstat (limited to 'indra/llcommon')
-rw-r--r--  indra/llcommon/CMakeLists.txt               |    4
-rw-r--r--  indra/llcommon/commoncontrol.cpp            |  106
-rw-r--r--  indra/llcommon/commoncontrol.h              |   75
-rw-r--r--  indra/llcommon/llapr.cpp                    |    2
-rw-r--r--  indra/llcommon/llassettype.cpp              |    1
-rw-r--r--  indra/llcommon/llassettype.h                |    5
-rw-r--r--  indra/llcommon/llcallstack.h                |   12
-rw-r--r--  indra/llcommon/llcommon.cpp                 |   29
-rw-r--r--  indra/llcommon/llerror.cpp                  |   14
-rw-r--r--  indra/llcommon/llerror.h                    |   27
-rw-r--r--  indra/llcommon/llframetimer.cpp             |    5
-rw-r--r--  indra/llcommon/llinstancetracker.h          |   97
-rw-r--r--  indra/llcommon/llinstancetrackersubclass.h  |   98
-rw-r--r--  indra/llcommon/llmemory.cpp                 |   45
-rw-r--r--  indra/llcommon/llmutex.h                    |    5
-rw-r--r--  indra/llcommon/llpointer.h                  |   24
-rw-r--r--  indra/llcommon/llprofiler.h                 |   46
-rw-r--r--  indra/llcommon/llprofilercategories.h       |    2
-rw-r--r--  indra/llcommon/llqueuedthread.cpp           |  303
-rw-r--r--  indra/llcommon/llqueuedthread.h             |   51
-rw-r--r--  indra/llcommon/llsdserialize.cpp            |    6
-rw-r--r--  indra/llcommon/llsdserialize_xml.cpp        |    2
-rw-r--r--  indra/llcommon/llsys.cpp                    |   24
-rw-r--r--  indra/llcommon/llsys.h                      |    5
-rw-r--r--  indra/llcommon/llthread.cpp                 |   10
-rw-r--r--  indra/llcommon/lltimer.cpp                  |   43
-rw-r--r--  indra/llcommon/lluuid.cpp                   | 1423
-rw-r--r--  indra/llcommon/llworkerthread.cpp           |   29
-rw-r--r--  indra/llcommon/llworkerthread.h             |   11
-rw-r--r--  indra/llcommon/tests/workqueue_test.cpp     |   26
-rw-r--r--  indra/llcommon/threadpool.cpp               |  122
-rw-r--r--  indra/llcommon/threadpool.h                 |   80
-rw-r--r--  indra/llcommon/threadpool_fwd.h             |   25
-rw-r--r--  indra/llcommon/workqueue.cpp                |  179
-rw-r--r--  indra/llcommon/workqueue.h                  |  441
35 files changed, 2097 insertions, 1280 deletions
diff --git a/indra/llcommon/CMakeLists.txt b/indra/llcommon/CMakeLists.txt
index ef4899978e..fa9c6eaa79 100644
--- a/indra/llcommon/CMakeLists.txt
+++ b/indra/llcommon/CMakeLists.txt
@@ -16,6 +16,7 @@ include(Tracy)
set(llcommon_SOURCE_FILES
+ commoncontrol.cpp
indra_constants.cpp
llallocator.cpp
llallocator_heap_profile.cpp
@@ -116,6 +117,7 @@ set(llcommon_HEADER_FILES
chrono.h
classic_callback.h
+ commoncontrol.h
ctype_workaround.h
fix_macros.h
indra_constants.h
@@ -172,6 +174,7 @@ set(llcommon_HEADER_FILES
llinitdestroyclass.h
llinitparam.h
llinstancetracker.h
+ llinstancetrackersubclass.h
llkeybind.h
llkeythrottle.h
llleap.h
@@ -245,6 +248,7 @@ set(llcommon_HEADER_FILES
stdtypes.h
stringize.h
threadpool.h
+ threadpool_fwd.h
threadsafeschedule.h
timer.h
tuple.h
diff --git a/indra/llcommon/commoncontrol.cpp b/indra/llcommon/commoncontrol.cpp
new file mode 100644
index 0000000000..81e66baf8c
--- /dev/null
+++ b/indra/llcommon/commoncontrol.cpp
@@ -0,0 +1,106 @@
+/**
+ * @file commoncontrol.cpp
+ * @author Nat Goodspeed
+ * @date 2022-06-08
+ * @brief Implementation for commoncontrol.
+ *
+ * $LicenseInfo:firstyear=2022&license=viewerlgpl$
+ * Copyright (c) 2022, Linden Research, Inc.
+ * $/LicenseInfo$
+ */
+
+// Precompiled header
+#include "linden_common.h"
+// associated header
+#include "commoncontrol.h"
+// STL headers
+// std headers
+// external library headers
+// other Linden headers
+#include "llevents.h"
+#include "llsdutil.h"
+
+LLSD LL::CommonControl::access(const LLSD& params)
+{
+ // We can't actually introduce a link-time dependency on llxml, or on any
+ // global LLControlGroup (*koff* gSavedSettings *koff*) but we can issue a
+ // runtime query. If we're running as part of a viewer with
+ // LLViewerControlListener, we can use that to interact with any
+ // instantiated LLControlGroup.
+ LLSD response;
+ {
+ LLEventStream reply("reply");
+ LLTempBoundListener connection = reply.listen("listener",
+ [&response] (const LLSD& event)
+ {
+ response = event;
+ return false;
+ });
+ LLSD rparams{ params };
+ rparams["reply"] = reply.getName();
+ LLEventPumps::instance().obtain("LLViewerControl").post(rparams);
+ }
+ // LLViewerControlListener responds immediately. If it's listening at all,
+ // it will already have set response.
+ if (! response.isDefined())
+ {
+ LLTHROW(NoListener("No LLViewerControl listener instantiated"));
+ }
+ LLSD error{ response["error"] };
+ if (error.isDefined())
+ {
+ LLTHROW(ParamError(error));
+ }
+ response.erase("error");
+ response.erase("reqid");
+ return response;
+}
+
+/// set control group.key to defined default value
+LLSD LL::CommonControl::set_default(const std::string& group, const std::string& key)
+{
+ return access(llsd::map("op", "set",
+ "group", group, "key", key))["value"];
+}
+
+/// set control group.key to specified value
+LLSD LL::CommonControl::set(const std::string& group, const std::string& key, const LLSD& value)
+{
+ return access(llsd::map("op", "set",
+ "group", group, "key", key, "value", value))["value"];
+}
+
+/// toggle boolean control group.key
+LLSD LL::CommonControl::toggle(const std::string& group, const std::string& key)
+{
+ return access(llsd::map("op", "toggle",
+ "group", group, "key", key))["value"];
+}
+
+/// get the definition for control group.key, (! isDefined()) if bad
+/// ["name"], ["type"], ["value"], ["comment"]
+LLSD LL::CommonControl::get_def(const std::string& group, const std::string& key)
+{
+ return access(llsd::map("op", "get",
+ "group", group, "key", key));
+}
+
+/// get the value of control group.key
+LLSD LL::CommonControl::get(const std::string& group, const std::string& key)
+{
+ return access(llsd::map("op", "get",
+ "group", group, "key", key))["value"];
+}
+
+/// get defined groups
+std::vector<std::string> LL::CommonControl::get_groups()
+{
+ auto groups{ access(llsd::map("op", "groups"))["groups"] };
+ return { groups.beginArray(), groups.endArray() };
+}
+
+/// get definitions for all variables in group
+LLSD LL::CommonControl::get_vars(const std::string& group)
+{
+ return access(llsd::map("op", "vars", "group", group))["vars"];
+}
diff --git a/indra/llcommon/commoncontrol.h b/indra/llcommon/commoncontrol.h
new file mode 100644
index 0000000000..07d4a45ac5
--- /dev/null
+++ b/indra/llcommon/commoncontrol.h
@@ -0,0 +1,75 @@
+/**
+ * @file commoncontrol.h
+ * @author Nat Goodspeed
+ * @date 2022-06-08
+ * @brief Access LLViewerControl LLEventAPI, if process has one.
+ *
+ * $LicenseInfo:firstyear=2022&license=viewerlgpl$
+ * Copyright (c) 2022, Linden Research, Inc.
+ * $/LicenseInfo$
+ */
+
+#if ! defined(LL_COMMONCONTROL_H)
+#define LL_COMMONCONTROL_H
+
+#include <vector>
+#include "llexception.h"
+#include "llsd.h"
+
+namespace LL
+{
+ class CommonControl
+ {
+ public:
+ struct Error: public LLException
+ {
+ Error(const std::string& what): LLException(what) {}
+ };
+
+ /// Exception thrown if there's no LLViewerControl LLEventAPI
+ struct NoListener: public Error
+ {
+ NoListener(const std::string& what): Error(what) {}
+ };
+
+ struct ParamError: public Error
+ {
+ ParamError(const std::string& what): Error(what) {}
+ };
+
+ /// set control group.key to defined default value
+ static
+ LLSD set_default(const std::string& group, const std::string& key);
+
+ /// set control group.key to specified value
+ static
+ LLSD set(const std::string& group, const std::string& key, const LLSD& value);
+
+ /// toggle boolean control group.key
+ static
+ LLSD toggle(const std::string& group, const std::string& key);
+
+ /// get the definition for control group.key, (! isDefined()) if bad
+ /// ["name"], ["type"], ["value"], ["comment"]
+ static
+ LLSD get_def(const std::string& group, const std::string& key);
+
+ /// get the value of control group.key
+ static
+ LLSD get(const std::string& group, const std::string& key);
+
+ /// get defined groups
+ static
+ std::vector<std::string> get_groups();
+
+ /// get definitions for all variables in group
+ static
+ LLSD get_vars(const std::string& group);
+
+ private:
+ static
+ LLSD access(const LLSD& params);
+ };
+} // namespace LL
+
+#endif /* ! defined(LL_COMMONCONTROL_H) */
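
A minimal usage sketch for the new CommonControl API (illustrative only; the "Global" / "RenderFarClip" group and key names below are placeholders, not taken from this changeset):

    #include "commoncontrol.h"
    #include "llerror.h"

    void example_common_control()
    {
        try
        {
            // Read, write and toggle a control; group/key are placeholders.
            LLSD far_clip = LL::CommonControl::get("Global", "RenderFarClip");
            LL::CommonControl::set("Global", "RenderFarClip", 256);

            // Enumerate groups and their variable definitions.
            for (const std::string& group : LL::CommonControl::get_groups())
            {
                LLSD vars = LL::CommonControl::get_vars(group);
                LL_INFOS() << group << ": " << vars.size() << " variables" << LL_ENDL;
            }
        }
        catch (const LL::CommonControl::Error& err)
        {
            // NoListener: no LLViewerControl LLEventAPI in this process.
            // ParamError: unknown group/key or bad value.
            LL_WARNS() << "CommonControl failed: " << err.what() << LL_ENDL;
        }
    }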
diff --git a/indra/llcommon/llapr.cpp b/indra/llcommon/llapr.cpp
index 69466df2d1..575c524219 100644
--- a/indra/llcommon/llapr.cpp
+++ b/indra/llcommon/llapr.cpp
@@ -528,6 +528,7 @@ S32 LLAPRFile::seek(apr_file_t* file_handle, apr_seek_where_t where, S32 offset)
//static
S32 LLAPRFile::readEx(const std::string& filename, void *buf, S32 offset, S32 nbytes, LLVolatileAPRPool* pool)
{
+ LL_PROFILE_ZONE_SCOPED;
//*****************************************
LLAPRFilePoolScope scope(pool);
apr_file_t* file_handle = open(filename, scope.getVolatileAPRPool(), APR_READ|APR_BINARY);
@@ -572,6 +573,7 @@ S32 LLAPRFile::readEx(const std::string& filename, void *buf, S32 offset, S32 nb
//static
S32 LLAPRFile::writeEx(const std::string& filename, void *buf, S32 offset, S32 nbytes, LLVolatileAPRPool* pool)
{
+ LL_PROFILE_ZONE_SCOPED;
apr_int32_t flags = APR_CREATE|APR_WRITE|APR_BINARY;
if (offset < 0)
{
diff --git a/indra/llcommon/llassettype.cpp b/indra/llcommon/llassettype.cpp
index 4c84223dad..6492d888c1 100644
--- a/indra/llcommon/llassettype.cpp
+++ b/indra/llcommon/llassettype.cpp
@@ -96,6 +96,7 @@ LLAssetDictionary::LLAssetDictionary()
addEntry(LLAssetType::AT_WIDGET, new AssetEntry("WIDGET", "widget", "widget", false, false, false));
addEntry(LLAssetType::AT_PERSON, new AssetEntry("PERSON", "person", "person", false, false, false));
addEntry(LLAssetType::AT_SETTINGS, new AssetEntry("SETTINGS", "settings", "settings blob", true, true, true));
+ addEntry(LLAssetType::AT_MATERIAL, new AssetEntry("MATERIAL", "material", "render material", true, true, true));
addEntry(LLAssetType::AT_UNKNOWN, new AssetEntry("UNKNOWN", "invalid", NULL, false, false, false));
addEntry(LLAssetType::AT_NONE, new AssetEntry("NONE", "-1", NULL, FALSE, FALSE, FALSE));
diff --git a/indra/llcommon/llassettype.h b/indra/llcommon/llassettype.h
index 652c548d59..e8df8574f7 100644
--- a/indra/llcommon/llassettype.h
+++ b/indra/llcommon/llassettype.h
@@ -127,8 +127,9 @@ public:
AT_RESERVED_6 = 55,
AT_SETTINGS = 56, // Collection of settings
-
- AT_COUNT = 57,
+ AT_MATERIAL = 57, // Render Material
+
+ AT_COUNT = 58,
// +*********************************************************+
// | TO ADD AN ELEMENT TO THIS ENUM: |
diff --git a/indra/llcommon/llcallstack.h b/indra/llcommon/llcallstack.h
index 5acf04a49f..d5a2b7b157 100644
--- a/indra/llcommon/llcallstack.h
+++ b/indra/llcommon/llcallstack.h
@@ -79,9 +79,9 @@ struct LLContextStatus
LL_COMMON_API std::ostream& operator<<(std::ostream& s, const LLContextStatus& context_status);
-#define dumpStack(tag) \
- if (debugLoggingEnabled(tag)) \
- { \
- LLCallStack cs; \
- LL_DEBUGS(tag) << "STACK:\n" << "====================\n" << cs << "====================" << LL_ENDL; \
- }
+#define dumpStack(tag) \
+ LL_DEBUGS(tag) << "STACK:\n" \
+ << "====================\n" \
+ << LLCallStack() \
+ << "====================" \
+ << LL_ENDL;
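
Call sites are unchanged; a one-line usage sketch (the tag is arbitrary). With the new definition the stack capture only happens when the corresponding LL_DEBUGS site is enabled:

    // Logs the current call stack under the "Avatar" debug tag; the
    // (expensive) LLCallStack is only constructed when that tag is enabled.
    dumpStack("Avatar");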
diff --git a/indra/llcommon/llcommon.cpp b/indra/llcommon/llcommon.cpp
index d2c4e66160..6e988260a9 100644
--- a/indra/llcommon/llcommon.cpp
+++ b/indra/llcommon/llcommon.cpp
@@ -37,12 +37,13 @@ thread_local bool gProfilerEnabled = false;
#if (TRACY_ENABLE)
// Override new/delete for tracy memory profiling
-void *operator new(size_t size)
+
+void* ll_tracy_new(size_t size)
{
void* ptr;
if (gProfilerEnabled)
{
- LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
+ //LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
ptr = (malloc)(size);
}
else
@@ -57,12 +58,22 @@ void *operator new(size_t size)
return ptr;
}
-void operator delete(void *ptr) noexcept
+void* operator new(size_t size)
+{
+ return ll_tracy_new(size);
+}
+
+void* operator new[](std::size_t count)
+{
+ return ll_tracy_new(count);
+}
+
+void ll_tracy_delete(void* ptr)
{
TracyFree(ptr);
if (gProfilerEnabled)
{
- LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
+ //LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
(free)(ptr);
}
else
@@ -71,6 +82,16 @@ void operator delete(void *ptr) noexcept
}
}
+void operator delete(void *ptr) noexcept
+{
+ ll_tracy_delete(ptr);
+}
+
+void operator delete[](void* ptr) noexcept
+{
+ ll_tracy_delete(ptr);
+}
+
// C-style malloc/free can't be so easily overridden, so we define tracy versions and use
// a pre-processor #define in linden_common.h to redirect to them. The parens around the native
// functions below prevents recursive substitution by the preprocessor.
diff --git a/indra/llcommon/llerror.cpp b/indra/llcommon/llerror.cpp
index 05e719b494..414515854a 100644
--- a/indra/llcommon/llerror.cpp
+++ b/indra/llcommon/llerror.cpp
@@ -1603,20 +1603,6 @@ namespace LLError
}
}
-bool debugLoggingEnabled(const std::string& tag)
-{
- LLMutexTrylock lock(getMutex<LOG_MUTEX>(), 5);
- if (!lock.isLocked())
- {
- return false;
- }
-
- SettingsConfigPtr s = Globals::getInstance()->getSettingsConfig();
- LLError::ELevel level = LLError::LEVEL_DEBUG;
- bool res = checkLevelMap(s->mTagLevelMap, tag, level);
- return res;
-}
-
void crashdriver(void (*callback)(int*))
{
// The LLERROR_CRASH macro used to have inline code of the form:
diff --git a/indra/llcommon/llerror.h b/indra/llcommon/llerror.h
index 624a5fb37a..05dd88ee51 100644
--- a/indra/llcommon/llerror.h
+++ b/indra/llcommon/llerror.h
@@ -82,9 +82,11 @@ const int LL_ERR_NOERR = 0;
#ifdef SHOW_ASSERT
#define llassert(func) llassert_always_msg(func, #func)
+#define llassert_msg(func, msg) llassert_always_msg(func, msg)
#define llverify(func) llassert_always_msg(func, #func)
#else
#define llassert(func)
+#define llassert_msg(func, msg)
#define llverify(func) do {if (func) {}} while(0)
#endif
@@ -462,8 +464,31 @@ typedef LLError::NoClassInfo _LL_CLASS_TO_LOG;
LLError::CallSite& _site(_sites[which]); \
lllog_test_()
-// Check at run-time whether logging is enabled, without generating output
+/*
+// Check at run-time whether logging is enabled, without generating output.
+Resist the temptation to add a function like this because it incurs the
+expense of locking and map-searching every time control reaches it.
bool debugLoggingEnabled(const std::string& tag);
+
+Instead of:
+
+if (debugLoggingEnabled("SomeTag"))
+{
+ // ... presumably expensive operation ...
+ LL_DEBUGS("SomeTag") << ... << LL_ENDL;
+}
+
+Use this:
+
+LL_DEBUGS("SomeTag");
+// ... presumably expensive operation ...
+LL_CONT << ...;
+LL_ENDL;
+
+LL_DEBUGS("SomeTag") performs the locking and map-searching ONCE, then caches
+the result in a static variable.
+*/
+
// used by LLERROR_CRASH
void crashdriver(void (*)(int*));
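
A compilable form of the pattern recommended in the comment above (sketch only; build_expensive_report() is a hypothetical helper standing in for the "presumably expensive operation"):

    LL_DEBUGS("SomeTag");
    // Everything between LL_DEBUGS and LL_ENDL is skipped entirely when the
    // "SomeTag" debug level is disabled, so the expensive work costs nothing.
    std::string report = build_expensive_report();   // hypothetical helper
    LL_CONT << "report: " << report;
    LL_ENDL;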
diff --git a/indra/llcommon/llframetimer.cpp b/indra/llcommon/llframetimer.cpp
index c54029e8b4..1e9920746b 100644
--- a/indra/llcommon/llframetimer.cpp
+++ b/indra/llcommon/llframetimer.cpp
@@ -29,11 +29,6 @@
#include "llframetimer.h"
-// We don't bother building a stand alone lib; we just need to include the one source file for Tracy support
-#if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY || LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY_FAST_TIMER
- #include "TracyClient.cpp"
-#endif // LL_PROFILER_CONFIGURATION
-
// Static members
//LLTimer LLFrameTimer::sInternalTimer;
U64 LLFrameTimer::sStartTotalTime = totalTime();
diff --git a/indra/llcommon/llinstancetracker.h b/indra/llcommon/llinstancetracker.h
index 34f2a5985a..27422e1266 100644
--- a/indra/llcommon/llinstancetracker.h
+++ b/indra/llcommon/llinstancetracker.h
@@ -104,22 +104,26 @@ public:
return LockStatic()->mMap.size();
}
- // snapshot of std::pair<const KEY, std::shared_ptr<T>> pairs
- class snapshot
+ // snapshot of std::pair<const KEY, std::shared_ptr<SUBCLASS>> pairs, for
+ // some SUBCLASS derived from T
+ template <typename SUBCLASS>
+ class snapshot_of
{
// It's very important that what we store in this snapshot are
// weak_ptrs, NOT shared_ptrs. That's how we discover whether any
// instance has been deleted during the lifespan of a snapshot.
typedef std::vector<std::pair<const KEY, weak_t>> VectorType;
- // Dereferencing our iterator produces a std::shared_ptr for each
- // instance that still exists. Since we store weak_ptrs, that involves
- // two chained transformations:
+ // Dereferencing the iterator we publish produces a
+ // std::shared_ptr<SUBCLASS> for each instance that still exists.
+ // Since we store weak_ptr<T>, that involves two chained
+ // transformations:
// - a transform_iterator to lock the weak_ptr and return a shared_ptr
- // - a filter_iterator to skip any shared_ptr that has become invalid.
+ // - a filter_iterator to skip any shared_ptr<T> that has become
+ // invalid or references any T instance that isn't SUBCLASS.
// It is very important that we filter lazily, that is, during
// traversal. Any one of our stored weak_ptrs might expire during
// traversal.
- typedef std::pair<const KEY, ptr_t> strong_pair;
+ typedef std::pair<const KEY, std::shared_ptr<SUBCLASS>> strong_pair;
// Note for future reference: nat has not yet had any luck (up to
// Boost 1.67) trying to use boost::transform_iterator with a hand-
// coded functor, only with actual functions. In my experience, an
@@ -127,7 +131,7 @@ public:
// result_type typedef. But this works.
static strong_pair strengthen(typename VectorType::value_type& pair)
{
- return { pair.first, pair.second.lock() };
+ return { pair.first, std::dynamic_pointer_cast<SUBCLASS>(pair.second.lock()) };
}
static bool dead_skipper(const strong_pair& pair)
{
@@ -135,7 +139,7 @@ public:
}
public:
- snapshot():
+ snapshot_of():
// populate our vector with a snapshot of (locked!) InstanceMap
// note, this assigns pair<KEY, shared_ptr> to pair<KEY, weak_ptr>
mData(mLock->mMap.begin(), mLock->mMap.end())
@@ -184,44 +188,51 @@ public:
#endif // LL_WINDOWS
VectorType mData;
};
+ using snapshot = snapshot_of<T>;
- // iterate over this for references to each instance
- class instance_snapshot: public snapshot
+ // iterate over this for references to each SUBCLASS instance
+ template <typename SUBCLASS>
+ class instance_snapshot_of: public snapshot_of<SUBCLASS>
{
private:
- static T& instance_getter(typename snapshot::iterator::reference pair)
+ using super = snapshot_of<SUBCLASS>;
+ static T& instance_getter(typename super::iterator::reference pair)
{
return *pair.second;
}
public:
typedef boost::transform_iterator<decltype(instance_getter)*,
- typename snapshot::iterator> iterator;
- iterator begin() { return iterator(snapshot::begin(), instance_getter); }
- iterator end() { return iterator(snapshot::end(), instance_getter); }
+ typename super::iterator> iterator;
+ iterator begin() { return iterator(super::begin(), instance_getter); }
+ iterator end() { return iterator(super::end(), instance_getter); }
void deleteAll()
{
- for (auto it(snapshot::begin()), end(snapshot::end()); it != end; ++it)
+ for (auto it(super::begin()), end(super::end()); it != end; ++it)
{
delete it->second.get();
}
}
- };
+ };
+ using instance_snapshot = instance_snapshot_of<T>;
// iterate over this for each key
- class key_snapshot: public snapshot
+ template <typename SUBCLASS>
+ class key_snapshot_of: public snapshot_of<SUBCLASS>
{
private:
- static KEY key_getter(typename snapshot::iterator::reference pair)
+ using super = snapshot_of<SUBCLASS>;
+ static KEY key_getter(typename super::iterator::reference pair)
{
return pair.first;
}
public:
typedef boost::transform_iterator<decltype(key_getter)*,
- typename snapshot::iterator> iterator;
- iterator begin() { return iterator(snapshot::begin(), key_getter); }
- iterator end() { return iterator(snapshot::end(), key_getter); }
+ typename super::iterator> iterator;
+ iterator begin() { return iterator(super::begin(), key_getter); }
+ iterator end() { return iterator(super::end(), key_getter); }
};
+ using key_snapshot = key_snapshot_of<T>;
static ptr_t getInstance(const KEY& k)
{
@@ -368,22 +379,25 @@ public:
return LockStatic()->mSet.size();
}
- // snapshot of std::shared_ptr<T> pointers
- class snapshot
+ // snapshot of std::shared_ptr<SUBCLASS> pointers
+ template <typename SUBCLASS>
+ class snapshot_of
{
// It's very important that what we store in this snapshot are
// weak_ptrs, NOT shared_ptrs. That's how we discover whether any
// instance has been deleted during the lifespan of a snapshot.
typedef std::vector<weak_t> VectorType;
- // Dereferencing our iterator produces a std::shared_ptr for each
- // instance that still exists. Since we store weak_ptrs, that involves
- // two chained transformations:
+ // Dereferencing the iterator we publish produces a
+ // std::shared_ptr<SUBCLASS> for each instance that still exists.
+ // Since we store weak_ptrs, that involves two chained
+ // transformations:
// - a transform_iterator to lock the weak_ptr and return a shared_ptr
- // - a filter_iterator to skip any shared_ptr that has become invalid.
- typedef std::shared_ptr<T> strong_ptr;
+ // - a filter_iterator to skip any shared_ptr that has become invalid
+ // or references any T instance that isn't SUBCLASS.
+ typedef std::shared_ptr<SUBCLASS> strong_ptr;
static strong_ptr strengthen(typename VectorType::value_type& ptr)
{
- return ptr.lock();
+ return std::dynamic_pointer_cast<SUBCLASS>(ptr.lock());
}
static bool dead_skipper(const strong_ptr& ptr)
{
@@ -391,7 +405,7 @@ public:
}
public:
- snapshot():
+ snapshot_of():
// populate our vector with a snapshot of (locked!) InstanceSet
// note, this assigns stored shared_ptrs to weak_ptrs for snapshot
mData(mLock->mSet.begin(), mLock->mSet.end())
@@ -437,22 +451,33 @@ public:
#endif // LL_WINDOWS
VectorType mData;
};
+ using snapshot = snapshot_of<T>;
// iterate over this for references to each instance
- struct instance_snapshot: public snapshot
+ template <typename SUBCLASS>
+ class instance_snapshot_of: public snapshot_of<SUBCLASS>
{
- typedef boost::indirect_iterator<typename snapshot::iterator> iterator;
- iterator begin() { return iterator(snapshot::begin()); }
- iterator end() { return iterator(snapshot::end()); }
+ private:
+ using super = snapshot_of<SUBCLASS>;
+
+ public:
+ typedef boost::indirect_iterator<typename super::iterator> iterator;
+ iterator begin() { return iterator(super::begin()); }
+ iterator end() { return iterator(super::end()); }
void deleteAll()
{
- for (auto it(snapshot::begin()), end(snapshot::end()); it != end; ++it)
+ for (auto it(super::begin()), end(super::end()); it != end; ++it)
{
delete it->get();
}
}
};
+ using instance_snapshot = instance_snapshot_of<T>;
+ // key_snapshot_of isn't really meaningful, but define it anyway to avoid
+ // requiring two different LLInstanceTrackerSubclass implementations.
+ template <typename SUBCLASS>
+ using key_snapshot_of = instance_snapshot_of<SUBCLASS>;
protected:
LLInstanceTracker()
diff --git a/indra/llcommon/llinstancetrackersubclass.h b/indra/llcommon/llinstancetrackersubclass.h
new file mode 100644
index 0000000000..ea9a38200f
--- /dev/null
+++ b/indra/llcommon/llinstancetrackersubclass.h
@@ -0,0 +1,98 @@
+/**
+ * @file llinstancetrackersubclass.h
+ * @author Nat Goodspeed
+ * @date 2022-12-09
+ * @brief Intermediate class to get subclass-specific types from
+ * LLInstanceTracker instance-retrieval methods.
+ *
+ * $LicenseInfo:firstyear=2022&license=viewerlgpl$
+ * Copyright (c) 2022, Linden Research, Inc.
+ * $/LicenseInfo$
+ */
+
+#if ! defined(LL_LLINSTANCETRACKERSUBCLASS_H)
+#define LL_LLINSTANCETRACKERSUBCLASS_H
+
+#include <memory> // std::shared_ptr, std::weak_ptr
+
+/**
+ * Derive your subclass S of a subclass T of LLInstanceTracker<T> from
+ * LLInstanceTrackerSubclass<S, T> to perform appropriate downcasting and
+ * filtering for LLInstanceTracker access methods.
+ *
+ * LLInstanceTracker<T> uses CRTP, so that getWeak(), getInstance(), snapshot
+ * and instance_snapshot return pointers and references to T. The trouble is
+ * that subclasses T0 and T1 derived from T also get pointers and references
+ * to their base class T, requiring explicit downcasting. Moreover,
+ * T0::getInstance() shouldn't find an instance of any T subclass other than
+ * T0. Nor should T0::snapshot.
+ *
+ * @code
+ * class Tracked: public LLInstanceTracker<Tracked, std::string>
+ * {
+ * private:
+ * using super = LLInstanceTracker<Tracked, std::string>;
+ * public:
+ * Tracked(const std::string& name): super(name) {}
+ * // All references to Tracked::ptr_t, Tracked::getInstance() etc.
+ * // appropriately use Tracked.
+ * // ...
+ * };
+ *
+ * // But now we derive SubTracked from Tracked. We need SubTracked::ptr_t,
+ * // SubTracked::getInstance() etc. to use SubTracked, not Tracked.
+ * // This LLInstanceTrackerSubclass specialization is itself derived from
+ * // Tracked.
+ * class SubTracked: public LLInstanceTrackerSubclass<SubTracked, Tracked>
+ * {
+ * private:
+ * using super = LLInstanceTrackerSubclass<SubTracked, Tracked>;
+ * public:
+ * // LLInstanceTrackerSubclass's constructor forwards to Tracked's.
+ * SubTracked(const std::string& name): super(name) {}
+ * // SubTracked::getInstance() returns std::shared_ptr<SubTracked>, etc.
+ * // ...
+ * @endcode
+ */
+template <typename SUBCLASS, typename T>
+class LLInstanceTrackerSubclass: public T
+{
+public:
+ using ptr_t = std::shared_ptr<SUBCLASS>;
+ using weak_t = std::weak_ptr<SUBCLASS>;
+
+ // forward any constructor call to the corresponding T ctor
+ template <typename... ARGS>
+ LLInstanceTrackerSubclass(ARGS&&... args):
+ T(std::forward<ARGS>(args)...)
+ {}
+
+ weak_t getWeak()
+ {
+ // call base-class getWeak(), try to lock, downcast to SUBCLASS
+ return std::dynamic_pointer_cast<SUBCLASS>(T::getWeak().lock());
+ }
+
+ template <typename KEY>
+ static ptr_t getInstance(const KEY& k)
+ {
+ return std::dynamic_pointer_cast<SUBCLASS>(T::getInstance(k));
+ }
+
+ using snapshot = typename T::template snapshot_of<SUBCLASS>;
+ using instance_snapshot = typename T::template instance_snapshot_of<SUBCLASS>;
+ using key_snapshot = typename T::template key_snapshot_of<SUBCLASS>;
+
+ static size_t instanceCount()
+ {
+ // T::instanceCount() lies because our snapshot, et al., won't
+ // necessarily return all the T instances -- only those that are also
+ // SUBCLASS instances. Count those.
+ size_t count = 0;
+ for (const auto& pair : snapshot())
+ ++count;
+ return count;
+ }
+};
+
+#endif /* ! defined(LL_LLINSTANCETRACKERSUBCLASS_H) */
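
A sketch of how the subclass-scoped accessors behave, using the Tracked / SubTracked classes from the doc comment above (illustrative, not part of this changeset):

    void example_subclass_tracking()
    {
        SubTracked sub("first");
        Tracked    base("second");

        // Lookup is scoped to the subclass: returns std::shared_ptr<SubTracked>
        // and only matches instances that really are SubTracked.
        auto found = SubTracked::getInstance("first");   // non-null
        auto miss  = SubTracked::getInstance("second");  // null: "second" is a plain Tracked

        // Snapshot iteration visits only the SubTracked instances.
        for (auto& tracked : SubTracked::instance_snapshot())
        {
            // every element here is guaranteed to be a SubTracked
        }

        // Counts only SubTracked instances: 1 here, while
        // Tracked::instanceCount() would report 2.
        size_t subclass_count = SubTracked::instanceCount();
    }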
diff --git a/indra/llcommon/llmemory.cpp b/indra/llcommon/llmemory.cpp
index d6ae1284d3..7cdf7254ff 100644
--- a/indra/llcommon/llmemory.cpp
+++ b/indra/llcommon/llmemory.cpp
@@ -35,6 +35,7 @@
# include <sys/types.h>
# include <mach/task.h>
# include <mach/mach_init.h>
+#include <mach/mach_host.h>
#elif LL_LINUX
# include <unistd.h>
#endif
@@ -109,6 +110,50 @@ void LLMemory::updateMemoryInfo()
{
sAvailPhysicalMemInKB = U32Kilobytes(0);
}
+
+#elif defined(LL_DARWIN)
+ task_vm_info info;
+ mach_msg_type_number_t infoCount = TASK_VM_INFO_COUNT;
+ // MACH_TASK_BASIC_INFO reports the same resident_size, but does not tell us the reusable bytes or phys_footprint.
+ if (task_info(mach_task_self(), TASK_VM_INFO, reinterpret_cast<task_info_t>(&info), &infoCount) == KERN_SUCCESS)
+ {
+ // Our Windows definition of PagefileUsage is documented by Microsoft as "the total amount of
+ // memory that the memory manager has committed for a running process", which is rss.
+ sAllocatedPageSizeInKB = U32Bytes(info.resident_size);
+
+ // Activity Monitor => Inspect Process => Real Memory Size appears to report resident_size
+ // Activity monitor => main window memory column appears to report phys_footprint, which spot checks as at least 30% less.
+ // I think that is because of compression, which isn't going to give us a consistent measurement. We want uncompressed totals.
+ //
+ // In between is resident_size - reusable. This is what Chrome source code uses, with source comments saying it is 'the "Real Memory" value
+ // reported for the app by the Memory Monitor in Instruments.' It is still about 8% bigger than phys_footprint.
+ //
+ // (On Windows, we use WorkingSetSize.)
+ sAllocatedMemInKB = U32Bytes(info.resident_size - info.reusable);
+ }
+ else
+ {
+ LL_WARNS() << "task_info failed" << LL_ENDL;
+ }
+
+ // Total installed and available physical memory are properties of the host, not just our process.
+ vm_statistics64_data_t vmstat;
+ mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
+ mach_port_t host = mach_host_self();
+ vm_size_t page_size;
+ host_page_size(host, &page_size);
+ kern_return_t result = host_statistics64(host, HOST_VM_INFO64, reinterpret_cast<host_info_t>(&vmstat), &count);
+ if (result == KERN_SUCCESS) {
+ // This is what Chrome reports as 'the "Physical Memory Free" value reported by the Memory Monitor in Instruments.'
+ // Note though that inactive pages are not included here and not yet free, but could become so under memory pressure.
+ sAvailPhysicalMemInKB = U32Bytes(vmstat.free_count * page_size);
+ sMaxPhysicalMemInKB = LLMemoryInfo::getHardwareMemSize();
+ }
+ else
+ {
+ LL_WARNS() << "task_info failed" << LL_ENDL;
+ }
+
#else
//not valid for other systems for now.
sAllocatedMemInKB = U64Bytes(LLMemory::getCurrentRSS());
diff --git a/indra/llcommon/llmutex.h b/indra/llcommon/llmutex.h
index 838d7d34c0..0d70da6178 100644
--- a/indra/llcommon/llmutex.h
+++ b/indra/llcommon/llmutex.h
@@ -36,7 +36,8 @@
//============================================================================
-#define MUTEX_DEBUG (LL_DEBUG || LL_RELEASE_WITH_DEBUG_INFO)
+//#define MUTEX_DEBUG (LL_DEBUG || LL_RELEASE_WITH_DEBUG_INFO)
+#define MUTEX_DEBUG 0 //disable mutex debugging as it's interfering with profiles
#if MUTEX_DEBUG
#include <map>
@@ -61,7 +62,7 @@ protected:
mutable LLThread::id_t mLockingThread;
#if MUTEX_DEBUG
- std::map<LLThread::id_t, BOOL> mIsLocked;
+ std::unordered_map<LLThread::id_t, BOOL> mIsLocked;
#endif
};
diff --git a/indra/llcommon/llpointer.h b/indra/llcommon/llpointer.h
index 96ccfb481e..64aceddf32 100644
--- a/indra/llcommon/llpointer.h
+++ b/indra/llcommon/llpointer.h
@@ -341,4 +341,28 @@ private:
bool mStayUnique;
};
+
+// boost hash adapter
+template <class Type>
+struct boost::hash<LLPointer<Type>>
+{
+ typedef LLPointer<Type> argument_type;
+ typedef std::size_t result_type;
+ result_type operator()(argument_type const& s) const
+ {
+ return (std::size_t) s.get();
+ }
+};
+
+// Adapt boost hash to std hash
+namespace std
+{
+ template<class Type> struct hash<LLPointer<Type>>
+ {
+ std::size_t operator()(LLPointer<Type> const& s) const noexcept
+ {
+ return boost::hash<LLPointer<Type>>()(s);
+ }
+ };
+}
#endif
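
A brief sketch of what the new hash adapters enable (LLFoo is a placeholder LLRefCount subclass, not from this changeset):

    #include <unordered_map>
    #include "llpointer.h"
    #include "llrefcount.h"

    class LLFoo : public LLRefCount {};

    void example_pointer_hash()
    {
        // std::hash<LLPointer<T>> (delegating to boost::hash) hashes on pointer
        // identity, so LLPointer can now key unordered containers directly.
        std::unordered_map<LLPointer<LLFoo>, int> use_counts;

        LLPointer<LLFoo> foo = new LLFoo();
        ++use_counts[foo];
    }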
diff --git a/indra/llcommon/llprofiler.h b/indra/llcommon/llprofiler.h
index f9d7ae7ce4..af5e5777bf 100644
--- a/indra/llcommon/llprofiler.h
+++ b/indra/llcommon/llprofiler.h
@@ -86,8 +86,12 @@ extern thread_local bool gProfilerEnabled;
#define TRACY_ONLY_IPV4 1
#include "Tracy.hpp"
- // Mutually exclusive with detailed memory tracing
+ // Enable OpenGL profiling
#define LL_PROFILER_ENABLE_TRACY_OPENGL 0
+
+ // Enable RenderDoc labeling
+ #define LL_PROFILER_ENABLE_RENDER_DOC 0
+
#endif
#if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY
@@ -104,14 +108,13 @@ extern thread_local bool gProfilerEnabled;
#define LL_PROFILE_ZONE_ERR(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0XFF0000 ) // RGB yellow
#define LL_PROFILE_ZONE_INFO(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0X00FFFF ) // RGB cyan
#define LL_PROFILE_ZONE_WARN(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0x0FFFF00 ) // RGB red
- #define LL_PROFILE_ALLOC(ptr, size) TracyAlloc(ptr, size)
- #define LL_PROFILE_FREE(ptr) TracyFree(ptr)
#endif
#if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_FAST_TIMER
#define LL_PROFILER_FRAME_END
#define LL_PROFILER_SET_THREAD_NAME( name ) (void)(name)
#define LL_RECORD_BLOCK_TIME(name) const LLTrace::BlockTimer& LL_GLUE_TOKENS(block_time_recorder, __LINE__)(LLTrace::timeThisBlock(name)); (void)LL_GLUE_TOKENS(block_time_recorder, __LINE__);
#define LL_PROFILE_ZONE_NAMED(name) // LL_PROFILE_ZONE_NAMED is a no-op when Tracy is disabled
+ #define LL_PROFILE_ZONE_NAMED_COLOR(name,color) // LL_PROFILE_ZONE_NAMED_COLOR is a no-op when Tracy is disabled
#define LL_PROFILE_ZONE_SCOPED // LL_PROFILE_ZONE_SCOPED is a no-op when Tracy is disabled
#define LL_PROFILE_ZONE_COLOR(name,color) // LL_RECORD_BLOCK_TIME(name)
@@ -121,8 +124,6 @@ extern thread_local bool gProfilerEnabled;
#define LL_PROFILE_ZONE_ERR(name) (void)(name); // Not supported
#define LL_PROFILE_ZONE_INFO(name) (void)(name); // Not supported
#define LL_PROFILE_ZONE_WARN(name) (void)(name); // Not supported
- #define LL_PROFILE_ALLOC(ptr, size) (void)(ptr); (void)(size);
- #define LL_PROFILE_FREE(ptr) (void)(ptr);
#endif
#if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY_FAST_TIMER
#define LL_PROFILER_FRAME_END FrameMark
@@ -138,14 +139,45 @@ extern thread_local bool gProfilerEnabled;
#define LL_PROFILE_ZONE_ERR(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0XFF0000 ) // RGB yellow
#define LL_PROFILE_ZONE_INFO(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0X00FFFF ) // RGB cyan
#define LL_PROFILE_ZONE_WARN(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0x0FFFF00 ) // RGB red
- #define LL_PROFILE_ALLOC(ptr, size) TracyAlloc(ptr, size)
- #define LL_PROFILE_FREE(ptr) TracyFree(ptr)
#endif
#else
#define LL_PROFILER_FRAME_END
#define LL_PROFILER_SET_THREAD_NAME( name ) (void)(name)
#endif // LL_PROFILER
+#if LL_PROFILER_ENABLE_TRACY_OPENGL
+#define LL_PROFILE_GPU_ZONE(name) TracyGpuZone(name)
+#define LL_PROFILE_GPU_ZONEC(name,color) TracyGpuZoneC(name,color)
+#define LL_PROFILER_GPU_COLLECT TracyGpuCollect
+#define LL_PROFILER_GPU_CONTEXT TracyGpuContext
+
+// disable memory tracking (incompatible with GPU tracing)
+#define LL_PROFILE_ALLOC(ptr, size) (void)(ptr); (void)(size);
+#define LL_PROFILE_FREE(ptr) (void)(ptr);
+#else
+#define LL_PROFILE_GPU_ZONE(name) (void)name;
+#define LL_PROFILE_GPU_ZONEC(name,color) (void)name;(void)color;
+#define LL_PROFILER_GPU_COLLECT
+#define LL_PROFILER_GPU_CONTEXT
+
+#define LL_LABEL_OBJECT_GL(type, name, length, label)
+
+#if LL_PROFILER_CONFIGURATION > 1
+#define LL_PROFILE_ALLOC(ptr, size) TracyAlloc(ptr, size)
+#define LL_PROFILE_FREE(ptr) TracyFree(ptr)
+#else
+#define LL_PROFILE_ALLOC(ptr, size) (void)(ptr); (void)(size);
+#define LL_PROFILE_FREE(ptr) (void)(ptr);
+#endif
+
+#endif
+
+#if LL_PROFILER_ENABLE_RENDER_DOC
+#define LL_LABEL_OBJECT_GL(type, name, length, label) glObjectLabel(type, name, length, label)
+#else
+#define LL_LABEL_OBJECT_GL(type, name, length, label)
+#endif
+
#include "llprofilercategories.h"
#endif // LL_PROFILER_H
diff --git a/indra/llcommon/llprofilercategories.h b/indra/llcommon/llprofilercategories.h
index 8db29468cc..617431f629 100644
--- a/indra/llcommon/llprofilercategories.h
+++ b/indra/llcommon/llprofilercategories.h
@@ -52,7 +52,7 @@
#define LL_PROFILER_CATEGORY_ENABLE_LOGGING 1
#define LL_PROFILER_CATEGORY_ENABLE_MATERIAL 1
#define LL_PROFILER_CATEGORY_ENABLE_MEDIA 1
-#define LL_PROFILER_CATEGORY_ENABLE_MEMORY 1
+#define LL_PROFILER_CATEGORY_ENABLE_MEMORY 0
#define LL_PROFILER_CATEGORY_ENABLE_NETWORK 1
#define LL_PROFILER_CATEGORY_ENABLE_OCTREE 1
#define LL_PROFILER_CATEGORY_ENABLE_PIPELINE 1
diff --git a/indra/llcommon/llqueuedthread.cpp b/indra/llcommon/llqueuedthread.cpp
index b06fee0ec2..394212ee0d 100644
--- a/indra/llcommon/llqueuedthread.cpp
+++ b/indra/llcommon/llqueuedthread.cpp
@@ -26,20 +26,26 @@
#include "linden_common.h"
#include "llqueuedthread.h"
+#include <chrono>
+
#include "llstl.h"
#include "lltimer.h" // ms_sleep()
-#include "lltracethreadrecorder.h"
+#include "llmutex.h"
//============================================================================
// MAIN THREAD
LLQueuedThread::LLQueuedThread(const std::string& name, bool threaded, bool should_pause) :
- LLThread(name),
- mThreaded(threaded),
- mIdleThread(TRUE),
- mNextHandle(0),
- mStarted(FALSE)
+ LLThread(name),
+ mIdleThread(TRUE),
+ mNextHandle(0),
+ mStarted(FALSE),
+ mThreaded(threaded),
+ mRequestQueue(name, 1024 * 1024)
{
+ llassert(threaded); // non-threaded implementation is deprecated
+ mMainQueue = LL::WorkQueue::getInstance("mainloop");
+
if (mThreaded)
{
if(should_pause)
@@ -69,6 +75,11 @@ void LLQueuedThread::shutdown()
unpause(); // MAIN THREAD
if (mThreaded)
{
+ if (mRequestQueue.size() == 0)
+ {
+ mRequestQueue.close();
+ }
+
S32 timeout = 100;
for ( ; timeout>0; timeout--)
{
@@ -104,6 +115,8 @@ void LLQueuedThread::shutdown()
{
LL_WARNS() << "~LLQueuedThread() called with active requests: " << active_count << LL_ENDL;
}
+
+ mRequestQueue.close();
}
//----------------------------------------------------------------------------
@@ -112,6 +125,7 @@ void LLQueuedThread::shutdown()
// virtual
size_t LLQueuedThread::update(F32 max_time_ms)
{
+ LL_PROFILE_ZONE_SCOPED;
if (!mStarted)
{
if (!mThreaded)
@@ -125,29 +139,34 @@ size_t LLQueuedThread::update(F32 max_time_ms)
size_t LLQueuedThread::updateQueue(F32 max_time_ms)
{
- F64 max_time = (F64)max_time_ms * .001;
- LLTimer timer;
- size_t pending = 1;
-
+ LL_PROFILE_ZONE_SCOPED;
// Frame Update
if (mThreaded)
{
- pending = getPending();
- if(pending > 0)
+ // schedule a call to threadedUpdate for every call to updateQueue
+ if (!isQuitting())
+ {
+ mRequestQueue.post([=]()
+ {
+ LL_PROFILE_ZONE_NAMED_CATEGORY_THREAD("qt - update");
+ mIdleThread = FALSE;
+ threadedUpdate();
+ mIdleThread = TRUE;
+ }
+ );
+ }
+
+ if(getPending() > 0)
{
- unpause();
- }
+ unpause();
+ }
}
else
{
- while (pending > 0)
- {
- pending = processNextRequest();
- if (max_time && timer.getElapsedTimeF64() > max_time)
- break;
- }
+ mRequestQueue.runFor(std::chrono::microseconds((int) (max_time_ms*1000.f)));
+ threadedUpdate();
}
- return pending;
+ return getPending();
}
void LLQueuedThread::incQueue()
@@ -166,11 +185,7 @@ void LLQueuedThread::incQueue()
// May be called from any thread
size_t LLQueuedThread::getPending()
{
- size_t res;
- lockData();
- res = mRequestQueue.size();
- unlockData();
- return res;
+ return mRequestQueue.size();
}
// MAIN thread
@@ -195,35 +210,28 @@ void LLQueuedThread::waitOnPending()
// MAIN thread
void LLQueuedThread::printQueueStats()
{
- lockData();
- if (!mRequestQueue.empty())
+ U32 size = mRequestQueue.size();
+ if (size > 0)
{
- QueuedRequest *req = *mRequestQueue.begin();
- LL_INFOS() << llformat("Pending Requests:%d Current status:%d", mRequestQueue.size(), req->getStatus()) << LL_ENDL;
+ LL_INFOS() << llformat("Pending Requests:%d ", mRequestQueue.size()) << LL_ENDL;
}
else
{
LL_INFOS() << "Queued Thread Idle" << LL_ENDL;
}
- unlockData();
}
// MAIN thread
LLQueuedThread::handle_t LLQueuedThread::generateHandle()
{
- lockData();
- while ((mNextHandle == nullHandle()) || (mRequestHash.find(mNextHandle)))
- {
- mNextHandle++;
- }
- const LLQueuedThread::handle_t res = mNextHandle++;
- unlockData();
+ U32 res = ++mNextHandle;
return res;
}
// MAIN thread
bool LLQueuedThread::addRequest(QueuedRequest* req)
{
+ LL_PROFILE_ZONE_SCOPED;
if (mStatus == QUITTING)
{
return false;
@@ -231,14 +239,14 @@ bool LLQueuedThread::addRequest(QueuedRequest* req)
lockData();
req->setStatus(STATUS_QUEUED);
- mRequestQueue.insert(req);
- mRequestHash.insert(req);
+ mRequestHash.insert(req);
#if _DEBUG
// LL_INFOS() << llformat("LLQueuedThread::Added req [%08d]",handle) << LL_ENDL;
#endif
unlockData();
- incQueue();
+ llassert(!mDataLock->isSelfLocked());
+ mRequestQueue.post([this, req]() { processRequest(req); });
return true;
}
@@ -246,6 +254,7 @@ bool LLQueuedThread::addRequest(QueuedRequest* req)
// MAIN thread
bool LLQueuedThread::waitForResult(LLQueuedThread::handle_t handle, bool auto_complete)
{
+ LL_PROFILE_ZONE_SCOPED;
llassert (handle != nullHandle());
bool res = false;
bool waspaused = isPaused();
@@ -312,6 +321,7 @@ LLQueuedThread::status_t LLQueuedThread::getRequestStatus(handle_t handle)
void LLQueuedThread::abortRequest(handle_t handle, bool autocomplete)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
lockData();
QueuedRequest* req = (QueuedRequest*)mRequestHash.find(handle);
if (req)
@@ -333,30 +343,9 @@ void LLQueuedThread::setFlags(handle_t handle, U32 flags)
unlockData();
}
-void LLQueuedThread::setPriority(handle_t handle, U32 priority)
-{
- lockData();
- QueuedRequest* req = (QueuedRequest*)mRequestHash.find(handle);
- if (req)
- {
- if(req->getStatus() == STATUS_INPROGRESS)
- {
- // not in list
- req->setPriority(priority);
- }
- else if(req->getStatus() == STATUS_QUEUED)
- {
- // remove from list then re-insert
- llverify(mRequestQueue.erase(req) == 1);
- req->setPriority(priority);
- mRequestQueue.insert(req);
- }
- }
- unlockData();
-}
-
bool LLQueuedThread::completeRequest(handle_t handle)
{
+ LL_PROFILE_ZONE_SCOPED;
bool res = false;
lockData();
QueuedRequest* req = (QueuedRequest*)mRequestHash.find(handle);
@@ -399,88 +388,120 @@ bool LLQueuedThread::check()
//============================================================================
// Runs on its OWN thread
-size_t LLQueuedThread::processNextRequest()
+void LLQueuedThread::processRequest(LLQueuedThread::QueuedRequest* req)
{
- QueuedRequest *req;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
+
+ mIdleThread = FALSE;
+ //threadedUpdate();
+
// Get next request from pool
lockData();
- while(1)
+ if ((req->getFlags() & FLAG_ABORT) || (mStatus == QUITTING))
{
- req = NULL;
- if (mRequestQueue.empty())
+ LL_PROFILE_ZONE_NAMED_CATEGORY_THREAD("qtpr - abort");
+ req->setStatus(STATUS_ABORTED);
+ req->finishRequest(false);
+ if (req->getFlags() & FLAG_AUTO_COMPLETE)
{
- break;
- }
- req = *mRequestQueue.begin();
- mRequestQueue.erase(mRequestQueue.begin());
- if ((req->getFlags() & FLAG_ABORT) || (mStatus == QUITTING))
- {
- req->setStatus(STATUS_ABORTED);
- req->finishRequest(false);
- if (req->getFlags() & FLAG_AUTO_COMPLETE)
- {
- mRequestHash.erase(req);
- req->deleteRequest();
+ mRequestHash.erase(req);
+ req->deleteRequest();
// check();
- }
- continue;
}
- llassert_always(req->getStatus() == STATUS_QUEUED);
- break;
- }
- U32 start_priority = 0 ;
- if (req)
- {
- req->setStatus(STATUS_INPROGRESS);
- start_priority = req->getPriority();
- }
- unlockData();
-
- // This is the only place we will call req->setStatus() after
- // it has initially been seet to STATUS_QUEUED, so it is
- // safe to access req.
- if (req)
- {
- // process request
- bool complete = req->processRequest();
-
- if (complete)
- {
- lockData();
- req->setStatus(STATUS_COMPLETE);
- req->finishRequest(true);
- if (req->getFlags() & FLAG_AUTO_COMPLETE)
- {
- mRequestHash.erase(req);
- req->deleteRequest();
-// check();
- }
- unlockData();
- }
- else
- {
- lockData();
- req->setStatus(STATUS_QUEUED);
- mRequestQueue.insert(req);
- unlockData();
- if (mThreaded && start_priority < PRIORITY_NORMAL)
- {
- ms_sleep(1); // sleep the thread a little
- }
- }
-
- LLTrace::get_thread_recorder()->pushToParent();
+ unlockData();
}
+ else
+ {
+ llassert_always(req->getStatus() == STATUS_QUEUED);
+
+ if (req)
+ {
+ req->setStatus(STATUS_INPROGRESS);
+ }
+ unlockData();
+
+ // This is the only place we will call req->setStatus() after
+ // it has initially been set to STATUS_QUEUED, so it is
+ // safe to access req.
+ if (req)
+ {
+ // process request
+ bool complete = req->processRequest();
+
+ if (complete)
+ {
+ LL_PROFILE_ZONE_NAMED_CATEGORY_THREAD("qtpr - complete");
+ lockData();
+ req->setStatus(STATUS_COMPLETE);
+ req->finishRequest(true);
+ if (req->getFlags() & FLAG_AUTO_COMPLETE)
+ {
+ mRequestHash.erase(req);
+ req->deleteRequest();
+ // check();
+ }
+ unlockData();
+ }
+ else
+ {
+ LL_PROFILE_ZONE_NAMED_CATEGORY_THREAD("qtpr - retry");
+ //put back on queue and try again after a short delay
+ lockData();
+ req->setStatus(STATUS_QUEUED);
+
+ unlockData();
+
+ llassert(!mDataLock->isSelfLocked());
+
+#if 0
+ // try again on next frame
+ // NOTE: tried using "post" with a time in the future, but this
+ // would invariably cause this thread to wait for a long time (10+ ms)
+ // while work is pending
+ bool ret = LL::WorkQueue::postMaybe(
+ mMainQueue,
+ [=]()
+ {
+ LL_PROFILE_ZONE_NAMED("processRequest - retry");
+ mRequestQueue.post([=]()
+ {
+ LL_PROFILE_ZONE_NAMED("processRequest - retry"); // <-- not redundant, track retry on both queues
+ processRequest(req);
+ });
+ });
+ llassert(ret);
+#else
+ using namespace std::chrono_literals;
+ auto retry_time = LL::WorkQueue::TimePoint::clock::now() + 16ms;
+ mRequestQueue.post([=]
+ {
+ LL_PROFILE_ZONE_NAMED("processRequest - retry");
+ if (LL::WorkQueue::TimePoint::clock::now() < retry_time)
+ {
+ auto sleep_time = std::chrono::duration_cast<std::chrono::milliseconds>(retry_time - LL::WorkQueue::TimePoint::clock::now());
+
+ if (sleep_time.count() > 0)
+ {
+ ms_sleep(sleep_time.count());
+ }
+ }
+ processRequest(req);
+ });
+#endif
+
+ }
+ }
+ }
- return getPending();
+ mIdleThread = TRUE;
}
// virtual
bool LLQueuedThread::runCondition()
{
// mRunCondition must be locked here
- if (mRequestQueue.empty() && mIdleThread)
+ if (mRequestQueue.size() == 0 && mIdleThread)
return false;
else
return true;
@@ -494,18 +515,13 @@ void LLQueuedThread::run()
startThread();
mStarted = TRUE;
- while (1)
+
+ /*while (1)
{
+ LL_PROFILE_ZONE_SCOPED;
// this will block on the condition until runCondition() returns true, the thread is unpaused, or the thread leaves the RUNNING state.
checkPause();
- if (isQuitting())
- {
- LLTrace::get_thread_recorder()->pushToParent();
- endThread();
- break;
- }
-
mIdleThread = FALSE;
threadedUpdate();
@@ -514,12 +530,18 @@ void LLQueuedThread::run()
if (pending_work == 0)
{
+ //LL_PROFILE_ZONE_NAMED("LLQueuedThread - sleep");
mIdleThread = TRUE;
- ms_sleep(1);
+ //ms_sleep(1);
}
//LLThread::yield(); // thread should yield after each request
- }
+ }*/
+ mRequestQueue.runUntilClose();
+
+ endThread();
LL_INFOS() << "LLQueuedThread " << mName << " EXITING." << LL_ENDL;
+
+
}
// virtual
@@ -539,10 +561,9 @@ void LLQueuedThread::threadedUpdate()
//============================================================================
-LLQueuedThread::QueuedRequest::QueuedRequest(LLQueuedThread::handle_t handle, U32 priority, U32 flags) :
+LLQueuedThread::QueuedRequest::QueuedRequest(LLQueuedThread::handle_t handle, U32 flags) :
LLSimpleHashEntry<LLQueuedThread::handle_t>(handle),
mStatus(STATUS_UNKNOWN),
- mPriority(priority),
mFlags(flags)
{
}
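
Condensed sketch of the producer/consumer flow LLQueuedThread now delegates to LL::WorkQueue (method names are the ones used in this diff; the rest is illustrative):

    #include <chrono>
    #include "workqueue.h"

    void example_workqueue_flow()
    {
        LL::WorkQueue queue("example", 1024);   // name, capacity (as in the ctor above)

        // addRequest(): enqueue work as a closure instead of inserting into a
        // priority-ordered std::set.
        queue.post([]() { /* processRequest(req) equivalent */ });

        // updateQueue(), non-threaded path: drain work for a bounded time slice.
        queue.runFor(std::chrono::microseconds(1000));

        // shutdown(): close the queue so the worker's runUntilClose() returns.
        queue.close();
    }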
diff --git a/indra/llcommon/llqueuedthread.h b/indra/llcommon/llqueuedthread.h
index 90fce3dc5d..814dbc4c38 100644
--- a/indra/llcommon/llqueuedthread.h
+++ b/indra/llcommon/llqueuedthread.h
@@ -36,6 +36,7 @@
#include "llthread.h"
#include "llsimplehash.h"
+#include "workqueue.h"
//============================================================================
// Note: ~LLQueuedThread is O(N) N=# of queued threads, assumed to be small
@@ -45,15 +46,6 @@ class LL_COMMON_API LLQueuedThread : public LLThread
{
//------------------------------------------------------------------------
public:
- enum priority_t {
- PRIORITY_IMMEDIATE = 0x7FFFFFFF,
- PRIORITY_URGENT = 0x40000000,
- PRIORITY_HIGH = 0x30000000,
- PRIORITY_NORMAL = 0x20000000,
- PRIORITY_LOW = 0x10000000,
- PRIORITY_LOWBITS = 0x0FFFFFFF,
- PRIORITY_HIGHBITS = 0x70000000
- };
enum status_t {
STATUS_EXPIRED = -1,
STATUS_UNKNOWN = 0,
@@ -82,28 +74,17 @@ public:
virtual ~QueuedRequest(); // use deleteRequest()
public:
- QueuedRequest(handle_t handle, U32 priority, U32 flags = 0);
+ QueuedRequest(handle_t handle, U32 flags = 0);
status_t getStatus()
{
return mStatus;
}
- U32 getPriority() const
- {
- return mPriority;
- }
U32 getFlags() const
{
return mFlags;
}
- bool higherPriority(const QueuedRequest& second) const
- {
- if ( mPriority == second.mPriority)
- return mHashKey < second.mHashKey;
- else
- return mPriority > second.mPriority;
- }
-
+
protected:
status_t setStatus(status_t newstatus)
{
@@ -121,28 +102,11 @@ public:
virtual void finishRequest(bool completed); // Always called from thread after request has completed or aborted
virtual void deleteRequest(); // Only method to delete a request
- void setPriority(U32 pri)
- {
- // Only do this on a request that is not in a queued list!
- mPriority = pri;
- };
-
protected:
LLAtomicBase<status_t> mStatus;
- U32 mPriority;
U32 mFlags;
};
-protected:
- struct queued_request_less
- {
- bool operator()(const QueuedRequest* lhs, const QueuedRequest* rhs) const
- {
- return lhs->higherPriority(*rhs); // higher priority in front of queue (set)
- }
- };
-
-
//------------------------------------------------------------------------
public:
@@ -167,7 +131,7 @@ private:
protected:
handle_t generateHandle();
bool addRequest(QueuedRequest* req);
- size_t processNextRequest(void);
+ void processRequest(QueuedRequest* req);
void incQueue();
public:
@@ -186,7 +150,6 @@ public:
status_t getRequestStatus(handle_t handle);
void abortRequest(handle_t handle, bool autocomplete);
void setFlags(handle_t handle, U32 flags);
- void setPriority(handle_t handle, U32 priority);
bool completeRequest(handle_t handle);
// This is public for support classes like LLWorkerThread,
// but generally the methods above should be used.
@@ -200,8 +163,10 @@ protected:
BOOL mStarted; // required when mThreaded is false to call startThread() from update()
LLAtomicBool mIdleThread; // request queue is empty (or we are quitting) and the thread is idle
- typedef std::set<QueuedRequest*, queued_request_less> request_queue_t;
- request_queue_t mRequestQueue;
+ //typedef std::set<QueuedRequest*, queued_request_less> request_queue_t;
+ //request_queue_t mRequestQueue;
+ LL::WorkQueue mRequestQueue;
+ LL::WorkQueue::weak_t mMainQueue;
enum { REQUEST_HASH_SIZE = 512 }; // must be power of 2
typedef LLSimpleHash<handle_t, REQUEST_HASH_SIZE> request_hash_t;
diff --git a/indra/llcommon/llsdserialize.cpp b/indra/llcommon/llsdserialize.cpp
index 3db456ddb3..a475be6293 100644
--- a/indra/llcommon/llsdserialize.cpp
+++ b/indra/llcommon/llsdserialize.cpp
@@ -475,6 +475,7 @@ LLSDNotationParser::~LLSDNotationParser()
// virtual
S32 LLSDNotationParser::doParse(std::istream& istr, LLSD& data, S32 max_depth) const
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
// map: { string:object, string:object }
// array: [ object, object, object ]
// undef: !
@@ -734,6 +735,7 @@ S32 LLSDNotationParser::doParse(std::istream& istr, LLSD& data, S32 max_depth) c
S32 LLSDNotationParser::parseMap(std::istream& istr, LLSD& map, S32 max_depth) const
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
// map: { string:object, string:object }
map = LLSD::emptyMap();
S32 parse_count = 0;
@@ -794,6 +796,7 @@ S32 LLSDNotationParser::parseMap(std::istream& istr, LLSD& map, S32 max_depth) c
S32 LLSDNotationParser::parseArray(std::istream& istr, LLSD& array, S32 max_depth) const
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
// array: [ object, object, object ]
array = LLSD::emptyArray();
S32 parse_count = 0;
@@ -833,6 +836,7 @@ S32 LLSDNotationParser::parseArray(std::istream& istr, LLSD& array, S32 max_dept
bool LLSDNotationParser::parseString(std::istream& istr, LLSD& data) const
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
std::string value;
auto count = deserialize_string(istr, value, mMaxBytesLeft);
if(PARSE_FAILURE == count) return false;
@@ -843,6 +847,7 @@ bool LLSDNotationParser::parseString(std::istream& istr, LLSD& data) const
bool LLSDNotationParser::parseBinary(std::istream& istr, LLSD& data) const
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
// binary: b##"ff3120ab1"
// or: b(len)"..."
@@ -945,6 +950,7 @@ LLSDBinaryParser::~LLSDBinaryParser()
// virtual
S32 LLSDBinaryParser::doParse(std::istream& istr, LLSD& data, S32 max_depth) const
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
/**
* Undefined: '!'<br>
* Boolean: '1' for true '0' for false<br>
diff --git a/indra/llcommon/llsdserialize_xml.cpp b/indra/llcommon/llsdserialize_xml.cpp
index ac128c9f86..38b11eb32b 100644
--- a/indra/llcommon/llsdserialize_xml.cpp
+++ b/indra/llcommon/llsdserialize_xml.cpp
@@ -923,6 +923,8 @@ void LLSDXMLParser::parsePart(const char *buf, llssize len)
// virtual
S32 LLSDXMLParser::doParse(std::istream& input, LLSD& data, S32 max_depth) const
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
+
#ifdef XML_PARSER_PERFORMANCE_TESTS
XML_Timer timer( &parseTime );
#endif // XML_PARSER_PERFORMANCE_TESTS
diff --git a/indra/llcommon/llsys.cpp b/indra/llcommon/llsys.cpp
index 91cb65b815..938685bae6 100644
--- a/indra/llcommon/llsys.cpp
+++ b/indra/llcommon/llsys.cpp
@@ -771,20 +771,28 @@ static U32Kilobytes LLMemoryAdjustKBResult(U32Kilobytes inKB)
}
#endif
+#if LL_DARWIN
+// static
+U32Kilobytes LLMemoryInfo::getHardwareMemSize()
+{
+ // This might work on Linux as well. Someone check...
+ uint64_t phys = 0;
+ int mib[2] = { CTL_HW, HW_MEMSIZE };
+
+ size_t len = sizeof(phys);
+ sysctl(mib, 2, &phys, &len, NULL, 0);
+
+ return U64Bytes(phys);
+}
+#endif
+
U32Kilobytes LLMemoryInfo::getPhysicalMemoryKB() const
{
#if LL_WINDOWS
return LLMemoryAdjustKBResult(U32Kilobytes(mStatsMap["Total Physical KB"].asInteger()));
#elif LL_DARWIN
- // This might work on Linux as well. Someone check...
- uint64_t phys = 0;
- int mib[2] = { CTL_HW, HW_MEMSIZE };
-
- size_t len = sizeof(phys);
- sysctl(mib, 2, &phys, &len, NULL, 0);
-
- return U64Bytes(phys);
+ return getHardwareMemSize();
#elif LL_LINUX
U64 phys = 0;
diff --git a/indra/llcommon/llsys.h b/indra/llcommon/llsys.h
index 70a03810c5..08d4abffa2 100644
--- a/indra/llcommon/llsys.h
+++ b/indra/llcommon/llsys.h
@@ -129,7 +129,10 @@ public:
LLMemoryInfo(); ///< Default constructor
void stream(std::ostream& s) const; ///< output text info to s
- U32Kilobytes getPhysicalMemoryKB() const;
+ U32Kilobytes getPhysicalMemoryKB() const;
+#if LL_DARWIN
+ static U32Kilobytes getHardwareMemSize(); // Because some Mac linkers won't let us reference extern gSysMemory from a different lib.
+#endif
//get the available memory infomation in KiloBytes.
static void getAvailableMemoryKB(U32Kilobytes& avail_physical_mem_kb, U32Kilobytes& avail_virtual_mem_kb);
diff --git a/indra/llcommon/llthread.cpp b/indra/llcommon/llthread.cpp
index a807acc56e..4eaa05c335 100644
--- a/indra/llcommon/llthread.cpp
+++ b/indra/llcommon/llthread.cpp
@@ -42,6 +42,7 @@
#ifdef LL_WINDOWS
+
const DWORD MS_VC_EXCEPTION=0x406D1388;
#pragma pack(push,8)
@@ -133,6 +134,15 @@ void LLThread::threadRun()
{
#ifdef LL_WINDOWS
set_thread_name(-1, mName.c_str());
+
+#if 0 // probably a bad idea (see usage of SetThreadIdealProcessor in LLWindowWin32)
+ HANDLE hThread = GetCurrentThread();
+ if (hThread)
+ {
+ SetThreadAffinityMask(hThread, (DWORD_PTR) 0xFFFFFFFFFFFFFFFE);
+ }
+#endif
+
#endif
LL_PROFILER_SET_THREAD_NAME( mName.c_str() );
diff --git a/indra/llcommon/lltimer.cpp b/indra/llcommon/lltimer.cpp
index 58bedacf43..1a99ac2886 100644
--- a/indra/llcommon/lltimer.cpp
+++ b/indra/llcommon/lltimer.cpp
@@ -30,6 +30,9 @@
#include "u64.h"
+#include <chrono>
+#include <thread>
+
#if LL_WINDOWS
# include "llwin32headerslean.h"
#elif LL_LINUX || LL_DARWIN
@@ -62,9 +65,18 @@ LLTimer* LLTimer::sTimer = NULL;
//---------------------------------------------------------------------------
#if LL_WINDOWS
+
+
+#if 0
void ms_sleep(U32 ms)
{
- Sleep(ms);
+ LL_PROFILE_ZONE_SCOPED;
+ using TimePoint = std::chrono::steady_clock::time_point;
+ auto resume_time = TimePoint::clock::now() + std::chrono::milliseconds(ms);
+ while (TimePoint::clock::now() < resume_time)
+ {
+ std::this_thread::yield(); //note: don't use LLThread::yield here to avoid yielding for too long
+ }
}
U32 micro_sleep(U64 us, U32 max_yields)
@@ -74,6 +86,35 @@ U32 micro_sleep(U64 us, U32 max_yields)
ms_sleep((U32)(us / 1000));
return 0;
}
+
+#else
+
+U32 micro_sleep(U64 us, U32 max_yields)
+{
+ LL_PROFILE_ZONE_SCOPED
+#if 0
+ LARGE_INTEGER ft;
+ ft.QuadPart = -static_cast<S64>(us * 10); // '-' using relative time
+
+ HANDLE timer = CreateWaitableTimer(NULL, TRUE, NULL);
+ SetWaitableTimer(timer, &ft, 0, NULL, NULL, 0);
+ WaitForSingleObject(timer, INFINITE);
+ CloseHandle(timer);
+#else
+ Sleep(us / 1000);
+#endif
+
+ return 0;
+}
+
+void ms_sleep(U32 ms)
+{
+ LL_PROFILE_ZONE_SCOPED
+ micro_sleep(ms * 1000, 0);
+}
+
+#endif
+
#elif LL_LINUX || LL_DARWIN
static void _sleep_loop(struct timespec& thiswait)
{
diff --git a/indra/llcommon/lluuid.cpp b/indra/llcommon/lluuid.cpp
index 5655e8e2f2..200add404f 100644
--- a/indra/llcommon/lluuid.cpp
+++ b/indra/llcommon/lluuid.cpp
@@ -1,31 +1,31 @@
-/**
+/**
* @file lluuid.cpp
*
* $LicenseInfo:firstyear=2000&license=viewerlgpl$
* Second Life Viewer Source Code
* Copyright (C) 2010, Linden Research, Inc.
- *
+ *
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation;
* version 2.1 of the License only.
- *
+ *
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
- *
+ *
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
+ *
* Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
* $/LicenseInfo$
*/
#include "linden_common.h"
-// We can't use WIN32_LEAN_AND_MEAN here, needs lots of includes.
+ // We can't use WIN32_LEAN_AND_MEAN here, needs lots of includes.
#if LL_WINDOWS
#include "llwin32headers.h"
// ugh, this is ugly. We need to straighten out our linking for this library
@@ -51,7 +51,7 @@ const LLUUID LLUUID::null;
const LLTransactionID LLTransactionID::tnull;
// static
-LLMutex * LLUUID::mMutex = NULL;
+LLMutex* LLUUID::mMutex = NULL;
@@ -60,15 +60,15 @@ LLMutex * LLUUID::mMutex = NULL;
NOT DONE YET!!!
static char BASE85_TABLE[] = {
- '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
- 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
- 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
- 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd',
- 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
- 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x',
- 'y', 'z', '!', '#', '$', '%', '&', '(', ')', '*',
- '+', '-', ';', '[', '=', '>', '?', '@', '^', '_',
- '`', '{', '|', '}', '~', '\0'
+ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
+ 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
+ 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd',
+ 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
+ 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x',
+ 'y', 'z', '!', '#', '$', '%', '&', '(', ')', '*',
+ '+', '-', ';', '[', '=', '>', '?', '@', '^', '_',
+ '`', '{', '|', '}', '~', '\0'
};
@@ -94,28 +94,28 @@ return ret;
void LLUUID::toBase85(char* out)
{
- U32* me = (U32*)&(mData[0]);
- for(S32 i = 0; i < 4; ++i)
- {
- char* o = &out[i*i];
- for(S32 j = 0; j < 5; ++j)
- {
- o[4-j] = BASE85_TABLE[ me[i] % 85];
- word /= 85;
- }
- }
+ U32* me = (U32*)&(mData[0]);
+ for(S32 i = 0; i < 4; ++i)
+ {
+ char* o = &out[i*i];
+ for(S32 j = 0; j < 5; ++j)
+ {
+ o[4-j] = BASE85_TABLE[ me[i] % 85];
+ word /= 85;
+ }
+ }
}
unsigned int decode( char const * fiveChars ) throw( bad_input_data )
{
- unsigned int ret = 0;
- for( S32 ix = 0; ix < 5; ++ix )
- {
- char * s = strchr( encodeTable, fiveChars[ ix ] );
- ret = ret * 85 + (s-encodeTable);
- }
- return ret;
-}
+ unsigned int ret = 0;
+ for( S32 ix = 0; ix < 5; ++ix )
+ {
+ char * s = strchr( encodeTable, fiveChars[ ix ] );
+ ret = ret * 85 + (s-encodeTable);
+ }
+ return ret;
+}
*/
#define LL_USE_JANKY_RANDOM_NUMBER_GENERATOR 0
@@ -130,8 +130,8 @@ static U64 sJankyRandomSeed(LLUUID::getRandomSeed());
*/
U32 janky_fast_random_bytes()
{
- sJankyRandomSeed = U64L(1664525) * sJankyRandomSeed + U64L(1013904223);
- return (U32)sJankyRandomSeed;
+ sJankyRandomSeed = U64L(1664525) * sJankyRandomSeed + U64L(1013904223);
+ return (U32)sJankyRandomSeed;
}
/**
@@ -139,8 +139,8 @@ U32 janky_fast_random_bytes()
*/
U32 janky_fast_random_byes_range(U32 val)
{
- sJankyRandomSeed = U64L(1664525) * sJankyRandomSeed + U64L(1013904223);
- return (U32)(sJankyRandomSeed) % val;
+ sJankyRandomSeed = U64L(1664525) * sJankyRandomSeed + U64L(1013904223);
+ return (U32)(sJankyRandomSeed) % val;
}
/**
@@ -148,257 +148,257 @@ U32 janky_fast_random_byes_range(U32 val)
*/
U32 janky_fast_random_seeded_bytes(U32 seed, U32 val)
{
- seed = U64L(1664525) * (U64)(seed) + U64L(1013904223);
- return (U32)(seed) % val;
+ seed = U64L(1664525) * (U64)(seed)+U64L(1013904223);
+ return (U32)(seed) % val;
}
#endif
// Common to all UUID implementations
void LLUUID::toString(std::string& out) const
{
- out = llformat(
- "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
- (U8)(mData[0]),
- (U8)(mData[1]),
- (U8)(mData[2]),
- (U8)(mData[3]),
- (U8)(mData[4]),
- (U8)(mData[5]),
- (U8)(mData[6]),
- (U8)(mData[7]),
- (U8)(mData[8]),
- (U8)(mData[9]),
- (U8)(mData[10]),
- (U8)(mData[11]),
- (U8)(mData[12]),
- (U8)(mData[13]),
- (U8)(mData[14]),
- (U8)(mData[15]));
+ out = llformat(
+ "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
+ (U8)(mData[0]),
+ (U8)(mData[1]),
+ (U8)(mData[2]),
+ (U8)(mData[3]),
+ (U8)(mData[4]),
+ (U8)(mData[5]),
+ (U8)(mData[6]),
+ (U8)(mData[7]),
+ (U8)(mData[8]),
+ (U8)(mData[9]),
+ (U8)(mData[10]),
+ (U8)(mData[11]),
+ (U8)(mData[12]),
+ (U8)(mData[13]),
+ (U8)(mData[14]),
+ (U8)(mData[15]));
}
// *TODO: deprecate
-void LLUUID::toString(char *out) const
+void LLUUID::toString(char* out) const
{
- std::string buffer;
- toString(buffer);
- strcpy(out,buffer.c_str()); /* Flawfinder: ignore */
+ std::string buffer;
+ toString(buffer);
+ strcpy(out, buffer.c_str()); /* Flawfinder: ignore */
}
void LLUUID::toCompressedString(std::string& out) const
{
- char bytes[UUID_BYTES+1];
- memcpy(bytes, mData, UUID_BYTES); /* Flawfinder: ignore */
- bytes[UUID_BYTES] = '\0';
- out.assign(bytes, UUID_BYTES);
+ char bytes[UUID_BYTES + 1];
+ memcpy(bytes, mData, UUID_BYTES); /* Flawfinder: ignore */
+ bytes[UUID_BYTES] = '\0';
+ out.assign(bytes, UUID_BYTES);
}
// *TODO: deprecate
-void LLUUID::toCompressedString(char *out) const
+void LLUUID::toCompressedString(char* out) const
{
- memcpy(out, mData, UUID_BYTES); /* Flawfinder: ignore */
- out[UUID_BYTES] = '\0';
+ memcpy(out, mData, UUID_BYTES); /* Flawfinder: ignore */
+ out[UUID_BYTES] = '\0';
}
std::string LLUUID::getString() const
{
- return asString();
+ return asString();
}
std::string LLUUID::asString() const
{
- std::string str;
- toString(str);
- return str;
+ std::string str;
+ toString(str);
+ return str;
}
BOOL LLUUID::set(const char* in_string, BOOL emit)
{
- return set(ll_safe_string(in_string),emit);
+ return set(ll_safe_string(in_string), emit);
}
BOOL LLUUID::set(const std::string& in_string, BOOL emit)
{
- BOOL broken_format = FALSE;
-
- // empty strings should make NULL uuid
- if (in_string.empty())
- {
- setNull();
- return TRUE;
- }
-
- if (in_string.length() != (UUID_STR_LENGTH - 1)) /* Flawfinder: ignore */
- {
- // I'm a moron. First implementation didn't have the right UUID format.
- // Shouldn't see any of these any more
- if (in_string.length() == (UUID_STR_LENGTH - 2)) /* Flawfinder: ignore */
- {
- if(emit)
- {
- LL_WARNS() << "Warning! Using broken UUID string format" << LL_ENDL;
- }
- broken_format = TRUE;
- }
- else
- {
- // Bad UUID string. Spam as INFO, as most cases we don't care.
- if(emit)
- {
- //don't spam the logs because a resident can't spell.
- LL_WARNS() << "Bad UUID string: " << in_string << LL_ENDL;
- }
- setNull();
- return FALSE;
- }
- }
-
- U8 cur_pos = 0;
- S32 i;
- for (i = 0; i < UUID_BYTES; i++)
- {
- if ((i == 4) || (i == 6) || (i == 8) || (i == 10))
- {
- cur_pos++;
- if (broken_format && (i==10))
- {
- // Missing - in the broken format
- cur_pos--;
- }
- }
-
- mData[i] = 0;
-
- if ((in_string[cur_pos] >= '0') && (in_string[cur_pos] <= '9'))
- {
- mData[i] += (U8)(in_string[cur_pos] - '0');
- }
- else if ((in_string[cur_pos] >= 'a') && (in_string[cur_pos] <='f'))
- {
- mData[i] += (U8)(10 + in_string[cur_pos] - 'a');
- }
- else if ((in_string[cur_pos] >= 'A') && (in_string[cur_pos] <='F'))
- {
- mData[i] += (U8)(10 + in_string[cur_pos] - 'A');
- }
- else
- {
- if(emit)
- {
- LL_WARNS() << "Invalid UUID string character" << LL_ENDL;
- }
- setNull();
- return FALSE;
- }
-
- mData[i] = mData[i] << 4;
- cur_pos++;
-
- if ((in_string[cur_pos] >= '0') && (in_string[cur_pos] <= '9'))
- {
- mData[i] += (U8)(in_string[cur_pos] - '0');
- }
- else if ((in_string[cur_pos] >= 'a') && (in_string[cur_pos] <='f'))
- {
- mData[i] += (U8)(10 + in_string[cur_pos] - 'a');
- }
- else if ((in_string[cur_pos] >= 'A') && (in_string[cur_pos] <='F'))
- {
- mData[i] += (U8)(10 + in_string[cur_pos] - 'A');
- }
- else
- {
- if(emit)
- {
- LL_WARNS() << "Invalid UUID string character" << LL_ENDL;
- }
- setNull();
- return FALSE;
- }
- cur_pos++;
- }
-
- return TRUE;
+ BOOL broken_format = FALSE;
+
+ // empty strings should make NULL uuid
+ if (in_string.empty())
+ {
+ setNull();
+ return TRUE;
+ }
+
+ if (in_string.length() != (UUID_STR_LENGTH - 1)) /* Flawfinder: ignore */
+ {
+ // I'm a moron. First implementation didn't have the right UUID format.
+ // Shouldn't see any of these any more
+ if (in_string.length() == (UUID_STR_LENGTH - 2)) /* Flawfinder: ignore */
+ {
+ if (emit)
+ {
+ LL_WARNS() << "Warning! Using broken UUID string format" << LL_ENDL;
+ }
+ broken_format = TRUE;
+ }
+ else
+ {
+ // Bad UUID string. Spam as INFO, as most cases we don't care.
+ if (emit)
+ {
+ //don't spam the logs because a resident can't spell.
+ LL_WARNS() << "Bad UUID string: " << in_string << LL_ENDL;
+ }
+ setNull();
+ return FALSE;
+ }
+ }
+
+ U8 cur_pos = 0;
+ S32 i;
+ for (i = 0; i < UUID_BYTES; i++)
+ {
+ if ((i == 4) || (i == 6) || (i == 8) || (i == 10))
+ {
+ cur_pos++;
+ if (broken_format && (i == 10))
+ {
+ // Missing - in the broken format
+ cur_pos--;
+ }
+ }
+
+ mData[i] = 0;
+
+ if ((in_string[cur_pos] >= '0') && (in_string[cur_pos] <= '9'))
+ {
+ mData[i] += (U8)(in_string[cur_pos] - '0');
+ }
+ else if ((in_string[cur_pos] >= 'a') && (in_string[cur_pos] <= 'f'))
+ {
+ mData[i] += (U8)(10 + in_string[cur_pos] - 'a');
+ }
+ else if ((in_string[cur_pos] >= 'A') && (in_string[cur_pos] <= 'F'))
+ {
+ mData[i] += (U8)(10 + in_string[cur_pos] - 'A');
+ }
+ else
+ {
+ if (emit)
+ {
+ LL_WARNS() << "Invalid UUID string character" << LL_ENDL;
+ }
+ setNull();
+ return FALSE;
+ }
+
+ mData[i] = mData[i] << 4;
+ cur_pos++;
+
+ if ((in_string[cur_pos] >= '0') && (in_string[cur_pos] <= '9'))
+ {
+ mData[i] += (U8)(in_string[cur_pos] - '0');
+ }
+ else if ((in_string[cur_pos] >= 'a') && (in_string[cur_pos] <= 'f'))
+ {
+ mData[i] += (U8)(10 + in_string[cur_pos] - 'a');
+ }
+ else if ((in_string[cur_pos] >= 'A') && (in_string[cur_pos] <= 'F'))
+ {
+ mData[i] += (U8)(10 + in_string[cur_pos] - 'A');
+ }
+ else
+ {
+ if (emit)
+ {
+ LL_WARNS() << "Invalid UUID string character" << LL_ENDL;
+ }
+ setNull();
+ return FALSE;
+ }
+ cur_pos++;
+ }
+
+ return TRUE;
}
BOOL LLUUID::validate(const std::string& in_string)
{
- BOOL broken_format = FALSE;
- if (in_string.length() != (UUID_STR_LENGTH - 1)) /* Flawfinder: ignore */
- {
- // I'm a moron. First implementation didn't have the right UUID format.
- if (in_string.length() == (UUID_STR_LENGTH - 2)) /* Flawfinder: ignore */
- {
- broken_format = TRUE;
- }
- else
- {
- return FALSE;
- }
- }
-
- U8 cur_pos = 0;
- for (U32 i = 0; i < 16; i++)
- {
- if ((i == 4) || (i == 6) || (i == 8) || (i == 10))
- {
- cur_pos++;
- if (broken_format && (i==10))
- {
- // Missing - in the broken format
- cur_pos--;
- }
- }
-
- if ((in_string[cur_pos] >= '0') && (in_string[cur_pos] <= '9'))
- {
- }
- else if ((in_string[cur_pos] >= 'a') && (in_string[cur_pos] <='f'))
- {
- }
- else if ((in_string[cur_pos] >= 'A') && (in_string[cur_pos] <='F'))
- {
- }
- else
- {
- return FALSE;
- }
-
- cur_pos++;
-
- if ((in_string[cur_pos] >= '0') && (in_string[cur_pos] <= '9'))
- {
- }
- else if ((in_string[cur_pos] >= 'a') && (in_string[cur_pos] <='f'))
- {
- }
- else if ((in_string[cur_pos] >= 'A') && (in_string[cur_pos] <='F'))
- {
- }
- else
- {
- return FALSE;
- }
- cur_pos++;
- }
- return TRUE;
+ BOOL broken_format = FALSE;
+ if (in_string.length() != (UUID_STR_LENGTH - 1)) /* Flawfinder: ignore */
+ {
+ // I'm a moron. First implementation didn't have the right UUID format.
+ if (in_string.length() == (UUID_STR_LENGTH - 2)) /* Flawfinder: ignore */
+ {
+ broken_format = TRUE;
+ }
+ else
+ {
+ return FALSE;
+ }
+ }
+
+ U8 cur_pos = 0;
+ for (U32 i = 0; i < 16; i++)
+ {
+ if ((i == 4) || (i == 6) || (i == 8) || (i == 10))
+ {
+ cur_pos++;
+ if (broken_format && (i == 10))
+ {
+ // Missing - in the broken format
+ cur_pos--;
+ }
+ }
+
+ if ((in_string[cur_pos] >= '0') && (in_string[cur_pos] <= '9'))
+ {
+ }
+ else if ((in_string[cur_pos] >= 'a') && (in_string[cur_pos] <= 'f'))
+ {
+ }
+ else if ((in_string[cur_pos] >= 'A') && (in_string[cur_pos] <= 'F'))
+ {
+ }
+ else
+ {
+ return FALSE;
+ }
+
+ cur_pos++;
+
+ if ((in_string[cur_pos] >= '0') && (in_string[cur_pos] <= '9'))
+ {
+ }
+ else if ((in_string[cur_pos] >= 'a') && (in_string[cur_pos] <= 'f'))
+ {
+ }
+ else if ((in_string[cur_pos] >= 'A') && (in_string[cur_pos] <= 'F'))
+ {
+ }
+ else
+ {
+ return FALSE;
+ }
+ cur_pos++;
+ }
+ return TRUE;
}
const LLUUID& LLUUID::operator^=(const LLUUID& rhs)
{
- U32* me = (U32*)&(mData[0]);
- const U32* other = (U32*)&(rhs.mData[0]);
- for(S32 i = 0; i < 4; ++i)
- {
- me[i] = me[i] ^ other[i];
- }
- return *this;
+ U32* me = (U32*)&(mData[0]);
+ const U32* other = (U32*)&(rhs.mData[0]);
+ for (S32 i = 0; i < 4; ++i)
+ {
+ me[i] = me[i] ^ other[i];
+ }
+ return *this;
}
LLUUID LLUUID::operator^(const LLUUID& rhs) const
{
- LLUUID id(*this);
- id ^= rhs;
- return id;
+ LLUUID id(*this);
+ id ^= rhs;
+ return id;
}
// WARNING: this algorithm SHALL NOT be changed. It is also used by the server
@@ -406,107 +406,107 @@ LLUUID LLUUID::operator^(const LLUUID& rhs) const
// it would cause invalid assets.
void LLUUID::combine(const LLUUID& other, LLUUID& result) const
{
- LLMD5 md5_uuid;
- md5_uuid.update((unsigned char*)mData, 16);
- md5_uuid.update((unsigned char*)other.mData, 16);
- md5_uuid.finalize();
- md5_uuid.raw_digest(result.mData);
+ LLMD5 md5_uuid;
+ md5_uuid.update((unsigned char*)mData, 16);
+ md5_uuid.update((unsigned char*)other.mData, 16);
+ md5_uuid.finalize();
+ md5_uuid.raw_digest(result.mData);
}
-LLUUID LLUUID::combine(const LLUUID &other) const
+LLUUID LLUUID::combine(const LLUUID& other) const
{
- LLUUID combination;
- combine(other, combination);
- return combination;
+ LLUUID combination;
+ combine(other, combination);
+ return combination;
}
-std::ostream& operator<<(std::ostream& s, const LLUUID &uuid)
+std::ostream& operator<<(std::ostream& s, const LLUUID& uuid)
{
- std::string uuid_str;
- uuid.toString(uuid_str);
- s << uuid_str;
- return s;
+ std::string uuid_str;
+ uuid.toString(uuid_str);
+ s << uuid_str;
+ return s;
}
-std::istream& operator>>(std::istream &s, LLUUID &uuid)
+std::istream& operator>>(std::istream& s, LLUUID& uuid)
{
- U32 i;
- char uuid_str[UUID_STR_LENGTH]; /* Flawfinder: ignore */
- for (i = 0; i < UUID_STR_LENGTH-1; i++)
- {
- s >> uuid_str[i];
- }
- uuid_str[i] = '\0';
- uuid.set(std::string(uuid_str));
- return s;
+ U32 i;
+ char uuid_str[UUID_STR_LENGTH]; /* Flawfinder: ignore */
+ for (i = 0; i < UUID_STR_LENGTH - 1; i++)
+ {
+ s >> uuid_str[i];
+ }
+ uuid_str[i] = '\0';
+ uuid.set(std::string(uuid_str));
+ return s;
}
-static void get_random_bytes(void *buf, int nbytes)
+static void get_random_bytes(void* buf, int nbytes)
{
- int i;
- char *cp = (char *) buf;
-
- // *NOTE: If we are not using the janky generator ll_rand()
- // generates at least 3 good bytes of data since it is 0 to
- // RAND_MAX. This could be made more efficient by copying all the
- // bytes.
- for (i=0; i < nbytes; i++)
+ int i;
+ char* cp = (char*)buf;
+
+ // *NOTE: If we are not using the janky generator ll_rand()
+ // generates at least 3 good bytes of data since it is 0 to
+ // RAND_MAX. This could be made more efficient by copying all the
+ // bytes.
+ for (i = 0; i < nbytes; i++)
#if LL_USE_JANKY_RANDOM_NUMBER_GENERATOR
- *cp++ = janky_fast_random_bytes() & 0xFF;
+ * cp++ = janky_fast_random_bytes() & 0xFF;
#else
- *cp++ = ll_rand() & 0xFF;
+ * cp++ = ll_rand() & 0xFF;
#endif
- return;
+ return;
}
#if LL_WINDOWS
typedef struct _ASTAT_
{
- ADAPTER_STATUS adapt;
- NAME_BUFFER NameBuff [30];
+ ADAPTER_STATUS adapt;
+ NAME_BUFFER NameBuff[30];
}ASTAT, * PASTAT;
// static
-S32 LLUUID::getNodeID(unsigned char *node_id)
+S32 LLUUID::getNodeID(unsigned char* node_id)
{
- ASTAT Adapter;
- NCB Ncb;
- UCHAR uRetCode;
- LANA_ENUM lenum;
- int i;
- int retval = 0;
-
- memset( &Ncb, 0, sizeof(Ncb) );
- Ncb.ncb_command = NCBENUM;
- Ncb.ncb_buffer = (UCHAR *)&lenum;
- Ncb.ncb_length = sizeof(lenum);
- uRetCode = Netbios( &Ncb );
-
- for(i=0; i < lenum.length ;i++)
- {
- memset( &Ncb, 0, sizeof(Ncb) );
- Ncb.ncb_command = NCBRESET;
- Ncb.ncb_lana_num = lenum.lana[i];
-
- uRetCode = Netbios( &Ncb );
-
- memset( &Ncb, 0, sizeof (Ncb) );
- Ncb.ncb_command = NCBASTAT;
- Ncb.ncb_lana_num = lenum.lana[i];
-
- strcpy( (char *)Ncb.ncb_callname, "* " ); /* Flawfinder: ignore */
- Ncb.ncb_buffer = (unsigned char *)&Adapter;
- Ncb.ncb_length = sizeof(Adapter);
-
- uRetCode = Netbios( &Ncb );
- if ( uRetCode == 0 )
- {
- memcpy(node_id,Adapter.adapt.adapter_address,6); /* Flawfinder: ignore */
- retval = 1;
- }
- }
- return retval;
+ ASTAT Adapter;
+ NCB Ncb;
+ UCHAR uRetCode;
+ LANA_ENUM lenum;
+ int i;
+ int retval = 0;
+
+ memset(&Ncb, 0, sizeof(Ncb));
+ Ncb.ncb_command = NCBENUM;
+ Ncb.ncb_buffer = (UCHAR*)&lenum;
+ Ncb.ncb_length = sizeof(lenum);
+ uRetCode = Netbios(&Ncb);
+
+ for (i = 0; i < lenum.length; i++)
+ {
+ memset(&Ncb, 0, sizeof(Ncb));
+ Ncb.ncb_command = NCBRESET;
+ Ncb.ncb_lana_num = lenum.lana[i];
+
+ uRetCode = Netbios(&Ncb);
+
+ memset(&Ncb, 0, sizeof(Ncb));
+ Ncb.ncb_command = NCBASTAT;
+ Ncb.ncb_lana_num = lenum.lana[i];
+
+ strcpy((char*)Ncb.ncb_callname, "* "); /* Flawfinder: ignore */
+ Ncb.ncb_buffer = (unsigned char*)&Adapter;
+ Ncb.ncb_length = sizeof(Adapter);
+
+ uRetCode = Netbios(&Ncb);
+ if (uRetCode == 0)
+ {
+ memcpy(node_id, Adapter.adapt.adapter_address, 6); /* Flawfinder: ignore */
+ retval = 1;
+ }
+ }
+ return retval;
}
#elif LL_DARWIN
@@ -525,66 +525,66 @@ S32 LLUUID::getNodeID(unsigned char *node_id)
#include <net/route.h>
#include <ifaddrs.h>
-// static
-S32 LLUUID::getNodeID(unsigned char *node_id)
+ // static
+S32 LLUUID::getNodeID(unsigned char* node_id)
{
- int i;
- unsigned char *a = NULL;
- struct ifaddrs *ifap, *ifa;
- int rv;
- S32 result = 0;
-
- if ((rv=getifaddrs(&ifap))==-1)
- {
- return -1;
- }
- if (ifap == NULL)
- {
- return -1;
- }
-
- for (ifa = ifap; ifa != NULL; ifa = ifa->ifa_next)
- {
-// printf("Interface %s, address family %d, ", ifa->ifa_name, ifa->ifa_addr->sa_family);
- for(i=0; i< ifa->ifa_addr->sa_len; i++)
- {
-// printf("%02X ", (unsigned char)ifa->ifa_addr->sa_data[i]);
- }
-// printf("\n");
-
- if(ifa->ifa_addr->sa_family == AF_LINK)
- {
- // This is a link-level address
- struct sockaddr_dl *lla = (struct sockaddr_dl *)ifa->ifa_addr;
-
-// printf("\tLink level address, type %02X\n", lla->sdl_type);
-
- if(lla->sdl_type == IFT_ETHER)
- {
- // Use the first ethernet MAC in the list.
- // For some reason, the macro LLADDR() defined in net/if_dl.h doesn't expand correctly. This is what it would do.
- a = (unsigned char *)&((lla)->sdl_data);
- a += (lla)->sdl_nlen;
-
- if (!a[0] && !a[1] && !a[2] && !a[3] && !a[4] && !a[5])
- {
- continue;
- }
-
- if (node_id)
- {
- memcpy(node_id, a, 6);
- result = 1;
- }
-
- // We found one.
- break;
- }
- }
- }
- freeifaddrs(ifap);
-
- return result;
+ int i;
+ unsigned char* a = NULL;
+ struct ifaddrs* ifap, * ifa;
+ int rv;
+ S32 result = 0;
+
+ if ((rv = getifaddrs(&ifap)) == -1)
+ {
+ return -1;
+ }
+ if (ifap == NULL)
+ {
+ return -1;
+ }
+
+ for (ifa = ifap; ifa != NULL; ifa = ifa->ifa_next)
+ {
+ // printf("Interface %s, address family %d, ", ifa->ifa_name, ifa->ifa_addr->sa_family);
+ for (i = 0; i < ifa->ifa_addr->sa_len; i++)
+ {
+ // printf("%02X ", (unsigned char)ifa->ifa_addr->sa_data[i]);
+ }
+ // printf("\n");
+
+ if (ifa->ifa_addr->sa_family == AF_LINK)
+ {
+ // This is a link-level address
+ struct sockaddr_dl* lla = (struct sockaddr_dl*)ifa->ifa_addr;
+
+ // printf("\tLink level address, type %02X\n", lla->sdl_type);
+
+ if (lla->sdl_type == IFT_ETHER)
+ {
+ // Use the first ethernet MAC in the list.
+ // For some reason, the macro LLADDR() defined in net/if_dl.h doesn't expand correctly. This is what it would do.
+ a = (unsigned char*)&((lla)->sdl_data);
+ a += (lla)->sdl_nlen;
+
+ if (!a[0] && !a[1] && !a[2] && !a[3] && !a[4] && !a[5])
+ {
+ continue;
+ }
+
+ if (node_id)
+ {
+ memcpy(node_id, a, 6);
+ result = 1;
+ }
+
+ // We found one.
+ break;
+ }
+ }
+ }
+ freeifaddrs(ifap);
+
+ return result;
}
#else
@@ -611,22 +611,22 @@ S32 LLUUID::getNodeID(unsigned char *node_id)
#endif
#endif
-// static
-S32 LLUUID::getNodeID(unsigned char *node_id)
+ // static
+S32 LLUUID::getNodeID(unsigned char* node_id)
{
- int sd;
- struct ifreq ifr, *ifrp;
- struct ifconf ifc;
- char buf[1024];
- int n, i;
- unsigned char *a;
-
-/*
- * BSD 4.4 defines the size of an ifreq to be
- * max(sizeof(ifreq), sizeof(ifreq.ifr_name)+ifreq.ifr_addr.sa_len
- * However, under earlier systems, sa_len isn't present, so the size is
- * just sizeof(struct ifreq)
- */
+ int sd;
+ struct ifreq ifr, * ifrp;
+ struct ifconf ifc;
+ char buf[1024];
+ int n, i;
+ unsigned char* a;
+
+ /*
+ * BSD 4.4 defines the size of an ifreq to be
+ * max(sizeof(ifreq), sizeof(ifreq.ifr_name)+ifreq.ifr_addr.sa_len
+ * However, under earlier systems, sa_len isn't present, so the size is
+ * just sizeof(struct ifreq)
+ */
#ifdef HAVE_SA_LEN
#ifndef max
#define max(a,b) ((a) > (b) ? (a) : (b))
@@ -637,252 +637,253 @@ S32 LLUUID::getNodeID(unsigned char *node_id)
#define ifreq_size(i) sizeof(struct ifreq)
#endif /* HAVE_SA_LEN*/
- sd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
- if (sd < 0) {
- return -1;
- }
- memset(buf, 0, sizeof(buf));
- ifc.ifc_len = sizeof(buf);
- ifc.ifc_buf = buf;
- if (ioctl (sd, SIOCGIFCONF, (char *)&ifc) < 0) {
- close(sd);
- return -1;
- }
- n = ifc.ifc_len;
- for (i = 0; i < n; i+= ifreq_size(*ifr) ) {
- ifrp = (struct ifreq *)((char *) ifc.ifc_buf+i);
- strncpy(ifr.ifr_name, ifrp->ifr_name, IFNAMSIZ); /* Flawfinder: ignore */
+ sd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
+ if (sd < 0) {
+ return -1;
+ }
+ memset(buf, 0, sizeof(buf));
+ ifc.ifc_len = sizeof(buf);
+ ifc.ifc_buf = buf;
+ if (ioctl(sd, SIOCGIFCONF, (char*)&ifc) < 0) {
+ close(sd);
+ return -1;
+ }
+ n = ifc.ifc_len;
+ for (i = 0; i < n; i += ifreq_size(*ifr)) {
+ ifrp = (struct ifreq*)((char*)ifc.ifc_buf + i);
+ strncpy(ifr.ifr_name, ifrp->ifr_name, IFNAMSIZ); /* Flawfinder: ignore */
#ifdef SIOCGIFHWADDR
- if (ioctl(sd, SIOCGIFHWADDR, &ifr) < 0)
- continue;
- a = (unsigned char *) &ifr.ifr_hwaddr.sa_data;
+ if (ioctl(sd, SIOCGIFHWADDR, &ifr) < 0)
+ continue;
+ a = (unsigned char*)&ifr.ifr_hwaddr.sa_data;
#else
#ifdef SIOCGENADDR
- if (ioctl(sd, SIOCGENADDR, &ifr) < 0)
- continue;
- a = (unsigned char *) ifr.ifr_enaddr;
+ if (ioctl(sd, SIOCGENADDR, &ifr) < 0)
+ continue;
+ a = (unsigned char*)ifr.ifr_enaddr;
#else
- /*
- * XXX we don't have a way of getting the hardware
- * address
- */
- close(sd);
- return 0;
+ /*
+ * XXX we don't have a way of getting the hardware
+ * address
+ */
+ close(sd);
+ return 0;
#endif /* SIOCGENADDR */
#endif /* SIOCGIFHWADDR */
- if (!a[0] && !a[1] && !a[2] && !a[3] && !a[4] && !a[5])
- continue;
- if (node_id) {
- memcpy(node_id, a, 6); /* Flawfinder: ignore */
- close(sd);
- return 1;
- }
- }
- close(sd);
- return 0;
+ if (!a[0] && !a[1] && !a[2] && !a[3] && !a[4] && !a[5])
+ continue;
+ if (node_id) {
+ memcpy(node_id, a, 6); /* Flawfinder: ignore */
+ close(sd);
+ return 1;
+ }
+ }
+ close(sd);
+ return 0;
}
#endif
-S32 LLUUID::cmpTime(uuid_time_t *t1, uuid_time_t *t2)
+S32 LLUUID::cmpTime(uuid_time_t* t1, uuid_time_t* t2)
{
- // Compare two time values.
+ // Compare two time values.
- if (t1->high < t2->high) return -1;
- if (t1->high > t2->high) return 1;
- if (t1->low < t2->low) return -1;
- if (t1->low > t2->low) return 1;
- return 0;
+ if (t1->high < t2->high) return -1;
+ if (t1->high > t2->high) return 1;
+ if (t1->low < t2->low) return -1;
+ if (t1->low > t2->low) return 1;
+ return 0;
}
-void LLUUID::getSystemTime(uuid_time_t *timestamp)
+void LLUUID::getSystemTime(uuid_time_t* timestamp)
{
- // Get system time with 100ns precision. Time is since Oct 15, 1582.
+ // Get system time with 100ns precision. Time is since Oct 15, 1582.
#if LL_WINDOWS
- ULARGE_INTEGER time;
- GetSystemTimeAsFileTime((FILETIME *)&time);
- // NT keeps time in FILETIME format which is 100ns ticks since
- // Jan 1, 1601. UUIDs use time in 100ns ticks since Oct 15, 1582.
- // The difference is 17 Days in Oct + 30 (Nov) + 31 (Dec)
- // + 18 years and 5 leap days.
- time.QuadPart +=
- (unsigned __int64) (1000*1000*10) // seconds
- * (unsigned __int64) (60 * 60 * 24) // days
- * (unsigned __int64) (17+30+31+365*18+5); // # of days
-
- timestamp->high = time.HighPart;
- timestamp->low = time.LowPart;
+ ULARGE_INTEGER time;
+ GetSystemTimeAsFileTime((FILETIME*)&time);
+ // NT keeps time in FILETIME format which is 100ns ticks since
+ // Jan 1, 1601. UUIDs use time in 100ns ticks since Oct 15, 1582.
+ // The difference is 17 Days in Oct + 30 (Nov) + 31 (Dec)
+ // + 18 years and 5 leap days.
+ time.QuadPart +=
+ (unsigned __int64)(1000 * 1000 * 10) // seconds
+ * (unsigned __int64)(60 * 60 * 24) // days
+ * (unsigned __int64)(17 + 30 + 31 + 365 * 18 + 5); // # of days
+
+ timestamp->high = time.HighPart;
+ timestamp->low = time.LowPart;
#else
- struct timeval tp;
- gettimeofday(&tp, 0);
-
- // Offset between UUID formatted times and Unix formatted times.
- // UUID UTC base time is October 15, 1582.
- // Unix base time is January 1, 1970.
- U64 uuid_time = ((U64)tp.tv_sec * 10000000) + (tp.tv_usec * 10) +
- U64L(0x01B21DD213814000);
- timestamp->high = (U32) (uuid_time >> 32);
- timestamp->low = (U32) (uuid_time & 0xFFFFFFFF);
+ struct timeval tp;
+ gettimeofday(&tp, 0);
+
+ // Offset between UUID formatted times and Unix formatted times.
+ // UUID UTC base time is October 15, 1582.
+ // Unix base time is January 1, 1970.
+ U64 uuid_time = ((U64)tp.tv_sec * 10000000) + (tp.tv_usec * 10) +
+ U64L(0x01B21DD213814000);
+ timestamp->high = (U32)(uuid_time >> 32);
+ timestamp->low = (U32)(uuid_time & 0xFFFFFFFF);
#endif
}
-void LLUUID::getCurrentTime(uuid_time_t *timestamp)
+void LLUUID::getCurrentTime(uuid_time_t* timestamp)
{
- // Get current time as 60 bit 100ns ticks since whenever.
- // Compensate for the fact that real clock resolution is less
- // than 100ns.
-
- const U32 uuids_per_tick = 1024;
-
- static uuid_time_t time_last;
- static U32 uuids_this_tick;
- static BOOL init = FALSE;
-
- if (!init) {
- getSystemTime(&time_last);
- uuids_this_tick = uuids_per_tick;
- init = TRUE;
- mMutex = new LLMutex();
- }
-
- uuid_time_t time_now = {0,0};
-
- while (1) {
- getSystemTime(&time_now);
-
- // if clock reading changed since last UUID generated
- if (cmpTime(&time_last, &time_now)) {
- // reset count of uuid's generated with this clock reading
- uuids_this_tick = 0;
- break;
- }
- if (uuids_this_tick < uuids_per_tick) {
- uuids_this_tick++;
- break;
- }
- // going too fast for our clock; spin
- }
-
- time_last = time_now;
-
- if (uuids_this_tick != 0) {
- if (time_now.low & 0x80000000) {
- time_now.low += uuids_this_tick;
- if (!(time_now.low & 0x80000000))
- time_now.high++;
- } else
- time_now.low += uuids_this_tick;
- }
-
- timestamp->high = time_now.high;
- timestamp->low = time_now.low;
+ // Get current time as 60 bit 100ns ticks since whenever.
+ // Compensate for the fact that real clock resolution is less
+ // than 100ns.
+
+ const U32 uuids_per_tick = 1024;
+
+ static uuid_time_t time_last;
+ static U32 uuids_this_tick;
+ static BOOL init = FALSE;
+
+ if (!init) {
+ getSystemTime(&time_last);
+ uuids_this_tick = uuids_per_tick;
+ init = TRUE;
+ mMutex = new LLMutex();
+ }
+
+ uuid_time_t time_now = { 0,0 };
+
+ while (1) {
+ getSystemTime(&time_now);
+
+ // if clock reading changed since last UUID generated
+ if (cmpTime(&time_last, &time_now)) {
+ // reset count of uuid's generated with this clock reading
+ uuids_this_tick = 0;
+ break;
+ }
+ if (uuids_this_tick < uuids_per_tick) {
+ uuids_this_tick++;
+ break;
+ }
+ // going too fast for our clock; spin
+ }
+
+ time_last = time_now;
+
+ if (uuids_this_tick != 0) {
+ if (time_now.low & 0x80000000) {
+ time_now.low += uuids_this_tick;
+ if (!(time_now.low & 0x80000000))
+ time_now.high++;
+ }
+ else
+ time_now.low += uuids_this_tick;
+ }
+
+ timestamp->high = time_now.high;
+ timestamp->low = time_now.low;
}
void LLUUID::generate()
{
- // Create a UUID.
- uuid_time_t timestamp;
-
- static unsigned char node_id[6]; /* Flawfinder: ignore */
- static int has_init = 0;
-
- // Create a UUID.
- static uuid_time_t time_last = {0,0};
- static U16 clock_seq = 0;
+ // Create a UUID.
+ uuid_time_t timestamp;
+
+ static unsigned char node_id[6]; /* Flawfinder: ignore */
+ static int has_init = 0;
+
+ // Create a UUID.
+ static uuid_time_t time_last = { 0,0 };
+ static U16 clock_seq = 0;
#if LL_USE_JANKY_RANDOM_NUMBER_GENERATOR
- static U32 seed = 0L; // dummy seed. reset it below
+ static U32 seed = 0L; // dummy seed. reset it below
#endif
- if (!has_init)
- {
- has_init = 1;
- if (getNodeID(node_id) <= 0)
- {
- get_random_bytes(node_id, 6);
- /*
- * Set multicast bit, to prevent conflicts
- * with IEEE 802 addresses obtained from
- * network cards
- */
- node_id[0] |= 0x80;
- }
-
- getCurrentTime(&time_last);
+ if (!has_init)
+ {
+ has_init = 1;
+ if (getNodeID(node_id) <= 0)
+ {
+ get_random_bytes(node_id, 6);
+ /*
+ * Set multicast bit, to prevent conflicts
+ * with IEEE 802 addresses obtained from
+ * network cards
+ */
+ node_id[0] |= 0x80;
+ }
+
+ getCurrentTime(&time_last);
#if LL_USE_JANKY_RANDOM_NUMBER_GENERATOR
- seed = time_last.low;
+ seed = time_last.low;
#endif
#if LL_USE_JANKY_RANDOM_NUMBER_GENERATOR
- clock_seq = (U16)janky_fast_random_seeded_bytes(seed, 65536);
+ clock_seq = (U16)janky_fast_random_seeded_bytes(seed, 65536);
#else
- clock_seq = (U16)ll_rand(65536);
+ clock_seq = (U16)ll_rand(65536);
#endif
- }
-
- // get current time
- getCurrentTime(&timestamp);
- U16 our_clock_seq = clock_seq;
-
- // if clock hasn't changed or went backward, change clockseq
- if (cmpTime(&timestamp, &time_last) != 1)
- {
- LLMutexLock lock(mMutex);
- clock_seq = (clock_seq + 1) & 0x3FFF;
- if (clock_seq == 0)
- clock_seq++;
- our_clock_seq = clock_seq; // Ensure we're using a different clock_seq value from previous time
- }
+ }
+
+ // get current time
+ getCurrentTime(&timestamp);
+ U16 our_clock_seq = clock_seq;
+
+ // if clock hasn't changed or went backward, change clockseq
+ if (cmpTime(&timestamp, &time_last) != 1)
+ {
+ LLMutexLock lock(mMutex);
+ clock_seq = (clock_seq + 1) & 0x3FFF;
+ if (clock_seq == 0)
+ clock_seq++;
+ our_clock_seq = clock_seq; // Ensure we're using a different clock_seq value from previous time
+ }
time_last = timestamp;
- memcpy(mData+10, node_id, 6); /* Flawfinder: ignore */
- U32 tmp;
- tmp = timestamp.low;
- mData[3] = (unsigned char) tmp;
- tmp >>= 8;
- mData[2] = (unsigned char) tmp;
- tmp >>= 8;
- mData[1] = (unsigned char) tmp;
- tmp >>= 8;
- mData[0] = (unsigned char) tmp;
-
- tmp = (U16) timestamp.high;
- mData[5] = (unsigned char) tmp;
- tmp >>= 8;
- mData[4] = (unsigned char) tmp;
-
- tmp = (timestamp.high >> 16) | 0x1000;
- mData[7] = (unsigned char) tmp;
- tmp >>= 8;
- mData[6] = (unsigned char) tmp;
-
- tmp = our_clock_seq;
-
- mData[9] = (unsigned char) tmp;
- tmp >>= 8;
- mData[8] = (unsigned char) tmp;
-
- HBXXH128::digest(*this, (const void*)mData, 16);
+ memcpy(mData + 10, node_id, 6); /* Flawfinder: ignore */
+ U32 tmp;
+ tmp = timestamp.low;
+ mData[3] = (unsigned char)tmp;
+ tmp >>= 8;
+ mData[2] = (unsigned char)tmp;
+ tmp >>= 8;
+ mData[1] = (unsigned char)tmp;
+ tmp >>= 8;
+ mData[0] = (unsigned char)tmp;
+
+ tmp = (U16)timestamp.high;
+ mData[5] = (unsigned char)tmp;
+ tmp >>= 8;
+ mData[4] = (unsigned char)tmp;
+
+ tmp = (timestamp.high >> 16) | 0x1000;
+ mData[7] = (unsigned char)tmp;
+ tmp >>= 8;
+ mData[6] = (unsigned char)tmp;
+
+ tmp = our_clock_seq;
+
+ mData[9] = (unsigned char)tmp;
+ tmp >>= 8;
+ mData[8] = (unsigned char)tmp;
+
+ HBXXH128::digest(*this, (const void*)mData, 16);
}
void LLUUID::generate(const std::string& hash_string)
{
- HBXXH128::digest(*this, hash_string);
+ HBXXH128::digest(*this, hash_string);
}
U32 LLUUID::getRandomSeed()
{
- static unsigned char seed[16]; /* Flawfinder: ignore */
-
- getNodeID(&seed[0]);
+ static unsigned char seed[16]; /* Flawfinder: ignore */
+
+ getNodeID(&seed[0]);
- // Incorporate the pid into the seed to prevent
- // processes that start on the same host at the same
- // time from generating the same seed.
- pid_t pid = LLApp::getPid();
+ // Incorporate the pid into the seed to prevent
+ // processes that start on the same host at the same
+ // time from generating the same seed.
+ pid_t pid = LLApp::getPid();
- seed[6]=(unsigned char)(pid >> 8);
- seed[7]=(unsigned char)(pid);
- getSystemTime((uuid_time_t *)(&seed[8]));
+ seed[6] = (unsigned char)(pid >> 8);
+ seed[7] = (unsigned char)(pid);
+ getSystemTime((uuid_time_t*)(&seed[8]));
U64 seed64 = HBXXH64::digest((const void*)seed, 16);
return U32(seed64) ^ U32(seed64 >> 32);
@@ -890,92 +891,92 @@ U32 LLUUID::getRandomSeed()
BOOL LLUUID::parseUUID(const std::string& buf, LLUUID* value)
{
- if( buf.empty() || value == NULL)
- {
- return FALSE;
- }
-
- std::string temp( buf );
- LLStringUtil::trim(temp);
- if( LLUUID::validate( temp ) )
- {
- value->set( temp );
- return TRUE;
- }
- return FALSE;
+ if (buf.empty() || value == NULL)
+ {
+ return FALSE;
+ }
+
+ std::string temp(buf);
+ LLStringUtil::trim(temp);
+ if (LLUUID::validate(temp))
+ {
+ value->set(temp);
+ return TRUE;
+ }
+ return FALSE;
}
//static
LLUUID LLUUID::generateNewID(std::string hash_string)
{
- LLUUID new_id;
- if (hash_string.empty())
- {
- new_id.generate();
- }
- else
- {
- new_id.generate(hash_string);
- }
- return new_id;
+ LLUUID new_id;
+ if (hash_string.empty())
+ {
+ new_id.generate();
+ }
+ else
+ {
+ new_id.generate(hash_string);
+ }
+ return new_id;
}
LLAssetID LLTransactionID::makeAssetID(const LLUUID& session) const
{
- LLAssetID result;
- if (isNull())
- {
- result.setNull();
- }
- else
- {
- combine(session, result);
- }
- return result;
+ LLAssetID result;
+ if (isNull())
+ {
+ result.setNull();
+ }
+ else
+ {
+ combine(session, result);
+ }
+ return result;
}
// Construct
LLUUID::LLUUID()
{
- setNull();
+ setNull();
}
// Faster than copying from memory
- void LLUUID::setNull()
+void LLUUID::setNull()
{
- U32 *word = (U32 *)mData;
- word[0] = 0;
- word[1] = 0;
- word[2] = 0;
- word[3] = 0;
+ U32* word = (U32*)mData;
+ word[0] = 0;
+ word[1] = 0;
+ word[2] = 0;
+ word[3] = 0;
}
// Compare
- bool LLUUID::operator==(const LLUUID& rhs) const
+bool LLUUID::operator==(const LLUUID& rhs) const
{
- U32 *tmp = (U32 *)mData;
- U32 *rhstmp = (U32 *)rhs.mData;
- // Note: binary & to avoid branching
- return
- (tmp[0] == rhstmp[0]) &
- (tmp[1] == rhstmp[1]) &
- (tmp[2] == rhstmp[2]) &
- (tmp[3] == rhstmp[3]);
+ U32* tmp = (U32*)mData;
+ U32* rhstmp = (U32*)rhs.mData;
+ // Note: binary & to avoid branching
+ return
+ (tmp[0] == rhstmp[0]) &
+ (tmp[1] == rhstmp[1]) &
+ (tmp[2] == rhstmp[2]) &
+ (tmp[3] == rhstmp[3]);
}
- bool LLUUID::operator!=(const LLUUID& rhs) const
+bool LLUUID::operator!=(const LLUUID& rhs) const
{
- U32 *tmp = (U32 *)mData;
- U32 *rhstmp = (U32 *)rhs.mData;
- // Note: binary | to avoid branching
- return
- (tmp[0] != rhstmp[0]) |
- (tmp[1] != rhstmp[1]) |
- (tmp[2] != rhstmp[2]) |
- (tmp[3] != rhstmp[3]);
+ U32* tmp = (U32*)mData;
+ U32* rhstmp = (U32*)rhs.mData;
+ // Note: binary | to avoid branching
+ return
+ (tmp[0] != rhstmp[0]) |
+ (tmp[1] != rhstmp[1]) |
+ (tmp[2] != rhstmp[2]) |
+ (tmp[3] != rhstmp[3]);
}
/*
@@ -983,94 +984,94 @@ LLUUID::LLUUID()
// to integers, among other things. Use isNull() or notNull().
LLUUID::operator bool() const
{
- U32 *word = (U32 *)mData;
- return (word[0] | word[1] | word[2] | word[3]) > 0;
+ U32 *word = (U32 *)mData;
+ return (word[0] | word[1] | word[2] | word[3]) > 0;
}
*/
- BOOL LLUUID::notNull() const
+BOOL LLUUID::notNull() const
{
- U32 *word = (U32 *)mData;
- return (word[0] | word[1] | word[2] | word[3]) > 0;
+ U32* word = (U32*)mData;
+ return (word[0] | word[1] | word[2] | word[3]) > 0;
}
// Faster than == LLUUID::null because doesn't require
// as much memory access.
- BOOL LLUUID::isNull() const
+BOOL LLUUID::isNull() const
{
- U32 *word = (U32 *)mData;
- // If all bits are zero, return !0 == TRUE
- return !(word[0] | word[1] | word[2] | word[3]);
+ U32* word = (U32*)mData;
+ // If all bits are zero, return !0 == TRUE
+ return !(word[0] | word[1] | word[2] | word[3]);
}
- LLUUID::LLUUID(const char *in_string)
+LLUUID::LLUUID(const char* in_string)
{
- if (!in_string || in_string[0] == 0)
- {
- setNull();
- return;
- }
-
- set(in_string);
+ if (!in_string || in_string[0] == 0)
+ {
+ setNull();
+ return;
+ }
+
+ set(in_string);
}
- LLUUID::LLUUID(const std::string& in_string)
+LLUUID::LLUUID(const std::string& in_string)
{
- if (in_string.empty())
- {
- setNull();
- return;
- }
+ if (in_string.empty())
+ {
+ setNull();
+ return;
+ }
- set(in_string);
+ set(in_string);
}
// IW: DON'T "optimize" these w/ U32s or you'll scoogie the sort order
// IW: this will make me very sad
- bool LLUUID::operator<(const LLUUID &rhs) const
+bool LLUUID::operator<(const LLUUID& rhs) const
{
- U32 i;
- for( i = 0; i < (UUID_BYTES - 1); i++ )
- {
- if( mData[i] != rhs.mData[i] )
- {
- return (mData[i] < rhs.mData[i]);
- }
- }
- return (mData[UUID_BYTES - 1] < rhs.mData[UUID_BYTES - 1]);
+ U32 i;
+ for (i = 0; i < (UUID_BYTES - 1); i++)
+ {
+ if (mData[i] != rhs.mData[i])
+ {
+ return (mData[i] < rhs.mData[i]);
+ }
+ }
+ return (mData[UUID_BYTES - 1] < rhs.mData[UUID_BYTES - 1]);
}
- bool LLUUID::operator>(const LLUUID &rhs) const
+bool LLUUID::operator>(const LLUUID& rhs) const
{
- U32 i;
- for( i = 0; i < (UUID_BYTES - 1); i++ )
- {
- if( mData[i] != rhs.mData[i] )
- {
- return (mData[i] > rhs.mData[i]);
- }
- }
- return (mData[UUID_BYTES - 1] > rhs.mData[UUID_BYTES - 1]);
+ U32 i;
+ for (i = 0; i < (UUID_BYTES - 1); i++)
+ {
+ if (mData[i] != rhs.mData[i])
+ {
+ return (mData[i] > rhs.mData[i]);
+ }
+ }
+ return (mData[UUID_BYTES - 1] > rhs.mData[UUID_BYTES - 1]);
}
- U16 LLUUID::getCRC16() const
+U16 LLUUID::getCRC16() const
{
- // A UUID is 16 bytes, or 8 shorts.
- U16 *short_data = (U16*)mData;
- U16 out = 0;
- out += short_data[0];
- out += short_data[1];
- out += short_data[2];
- out += short_data[3];
- out += short_data[4];
- out += short_data[5];
- out += short_data[6];
- out += short_data[7];
- return out;
+ // A UUID is 16 bytes, or 8 shorts.
+ U16* short_data = (U16*)mData;
+ U16 out = 0;
+ out += short_data[0];
+ out += short_data[1];
+ out += short_data[2];
+ out += short_data[3];
+ out += short_data[4];
+ out += short_data[5];
+ out += short_data[6];
+ out += short_data[7];
+ return out;
}
- U32 LLUUID::getCRC32() const
+U32 LLUUID::getCRC32() const
{
- U32 *tmp = (U32*)mData;
- return tmp[0] + tmp[1] + tmp[2] + tmp[3];
+ U32* tmp = (U32*)mData;
+ return tmp[0] + tmp[1] + tmp[2] + tmp[3];
}
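
Nearly all of the lluuid.cpp hunk is whitespace-only re-indentation, but it does restate the string layout that toString(), set() and validate() agree on: two lowercase hex digits per byte, with a dash inserted before bytes 4, 6, 8 and 10 (the canonical 8-4-4-4-12 form). A standalone illustration of that layout, not viewer code:

    #include <cstdio>
    #include <string>

    std::string format_uuid_bytes(const unsigned char data[16])
    {
        std::string out;
        char hex[3];
        for (int i = 0; i < 16; ++i)
        {
            if (i == 4 || i == 6 || i == 8 || i == 10)
                out += '-';   // the same byte indices set()/validate() step over
            std::snprintf(hex, sizeof(hex), "%02x", static_cast<unsigned>(data[i]));
            out += hex;
        }
        return out;   // e.g. "00000000-0000-0000-0000-000000000000" for the null UUID
    }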
diff --git a/indra/llcommon/llworkerthread.cpp b/indra/llcommon/llworkerthread.cpp
index e5eda1eac7..06c74bdba0 100644
--- a/indra/llcommon/llworkerthread.cpp
+++ b/indra/llcommon/llworkerthread.cpp
@@ -73,6 +73,7 @@ void LLWorkerThread::clearDeleteList()
{
worker->mRequestHandle = LLWorkerThread::nullHandle();
worker->clearFlags(LLWorkerClass::WCF_HAVE_WORK);
+ worker->clearFlags(LLWorkerClass::WCF_WORKING);
delete worker;
}
mDeleteList.clear() ;
@@ -97,6 +98,7 @@ size_t LLWorkerThread::update(F32 max_time_ms)
{
if (worker->getFlags(LLWorkerClass::WCF_WORK_FINISHED))
{
+ worker->setFlags(LLWorkerClass::WCF_DELETE_REQUESTED);
delete_list.push_back(worker);
mDeleteList.erase(curiter);
}
@@ -130,11 +132,11 @@ size_t LLWorkerThread::update(F32 max_time_ms)
//----------------------------------------------------------------------------
-LLWorkerThread::handle_t LLWorkerThread::addWorkRequest(LLWorkerClass* workerclass, S32 param, U32 priority)
+LLWorkerThread::handle_t LLWorkerThread::addWorkRequest(LLWorkerClass* workerclass, S32 param)
{
handle_t handle = generateHandle();
- WorkRequest* req = new WorkRequest(handle, priority, workerclass, param);
+ WorkRequest* req = new WorkRequest(handle, workerclass, param);
bool res = addRequest(req);
if (!res)
@@ -157,8 +159,8 @@ void LLWorkerThread::deleteWorker(LLWorkerClass* workerclass)
//============================================================================
// Runs on its OWN thread
-LLWorkerThread::WorkRequest::WorkRequest(handle_t handle, U32 priority, LLWorkerClass* workerclass, S32 param) :
- LLQueuedThread::QueuedRequest(handle, priority),
+LLWorkerThread::WorkRequest::WorkRequest(handle_t handle, LLWorkerClass* workerclass, S32 param) :
+ LLQueuedThread::QueuedRequest(handle),
mWorkerClass(workerclass),
mParam(param)
{
@@ -177,6 +179,7 @@ void LLWorkerThread::WorkRequest::deleteRequest()
// virtual
bool LLWorkerThread::WorkRequest::processRequest()
{
+ LL_PROFILE_ZONE_SCOPED;
LLWorkerClass* workerclass = getWorkerClass();
workerclass->setWorking(true);
bool complete = workerclass->doWork(getParam());
@@ -187,6 +190,7 @@ bool LLWorkerThread::WorkRequest::processRequest()
// virtual
void LLWorkerThread::WorkRequest::finishRequest(bool completed)
{
+ LL_PROFILE_ZONE_SCOPED;
LLWorkerClass* workerclass = getWorkerClass();
workerclass->finishWork(getParam(), completed);
U32 flags = LLWorkerClass::WCF_WORK_FINISHED | (completed ? 0 : LLWorkerClass::WCF_WORK_ABORTED);
@@ -200,7 +204,6 @@ LLWorkerClass::LLWorkerClass(LLWorkerThread* workerthread, const std::string& na
: mWorkerThread(workerthread),
mWorkerClassName(name),
mRequestHandle(LLWorkerThread::nullHandle()),
- mRequestPriority(LLWorkerThread::PRIORITY_NORMAL),
mMutex(),
mWorkFlags(0)
{
@@ -289,7 +292,7 @@ bool LLWorkerClass::yield()
//----------------------------------------------------------------------------
// calls startWork, adds doWork() to queue
-void LLWorkerClass::addWork(S32 param, U32 priority)
+void LLWorkerClass::addWork(S32 param)
{
mMutex.lock();
llassert_always(!(mWorkFlags & (WCF_WORKING|WCF_HAVE_WORK)));
@@ -303,7 +306,7 @@ void LLWorkerClass::addWork(S32 param, U32 priority)
startWork(param);
clearFlags(WCF_WORK_FINISHED|WCF_WORK_ABORTED);
setFlags(WCF_HAVE_WORK);
- mRequestHandle = mWorkerThread->addWorkRequest(this, param, priority);
+ mRequestHandle = mWorkerThread->addWorkRequest(this, param);
mMutex.unlock();
}
@@ -318,7 +321,6 @@ void LLWorkerClass::abortWork(bool autocomplete)
if (mRequestHandle != LLWorkerThread::nullHandle())
{
mWorkerThread->abortRequest(mRequestHandle, autocomplete);
- mWorkerThread->setPriority(mRequestHandle, LLQueuedThread::PRIORITY_IMMEDIATE);
setFlags(WCF_ABORT_REQUESTED);
}
mMutex.unlock();
@@ -392,16 +394,5 @@ void LLWorkerClass::scheduleDelete()
}
}
-void LLWorkerClass::setPriority(U32 priority)
-{
- mMutex.lock();
- if (mRequestHandle != LLWorkerThread::nullHandle() && mRequestPriority != priority)
- {
- mRequestPriority = priority;
- mWorkerThread->setPriority(mRequestHandle, priority);
- }
- mMutex.unlock();
-}
-
//============================================================================
diff --git a/indra/llcommon/llworkerthread.h b/indra/llcommon/llworkerthread.h
index e3004d7242..bf94c84090 100644
--- a/indra/llcommon/llworkerthread.h
+++ b/indra/llcommon/llworkerthread.h
@@ -56,7 +56,7 @@ public:
virtual ~WorkRequest(); // use deleteRequest()
public:
- WorkRequest(handle_t handle, U32 priority, LLWorkerClass* workerclass, S32 param);
+ WorkRequest(handle_t handle, LLWorkerClass* workerclass, S32 param);
S32 getParam()
{
@@ -90,7 +90,7 @@ public:
/*virtual*/ size_t update(F32 max_time_ms);
- handle_t addWorkRequest(LLWorkerClass* workerclass, S32 param, U32 priority = PRIORITY_NORMAL);
+ handle_t addWorkRequest(LLWorkerClass* workerclass, S32 param);
S32 getNumDeletes() { return (S32)mDeleteList.size(); } // debug
@@ -151,10 +151,6 @@ public:
bool isWorking() { return getFlags(WCF_WORKING); }
bool wasAborted() { return getFlags(WCF_ABORT_REQUESTED); }
- // setPriority(): changes the priority of a request
- void setPriority(U32 priority);
- U32 getPriority() { return mRequestPriority; }
-
const std::string& getName() const { return mWorkerClassName; }
protected:
@@ -169,7 +165,7 @@ protected:
void setWorkerThread(LLWorkerThread* workerthread);
// addWork(): calls startWork, adds doWork() to queue
- void addWork(S32 param, U32 priority = LLWorkerThread::PRIORITY_NORMAL);
+ void addWork(S32 param);
// abortWork(): requests that work be aborted
void abortWork(bool autocomplete);
@@ -193,7 +189,6 @@ protected:
LLWorkerThread* mWorkerThread;
std::string mWorkerClassName;
handle_t mRequestHandle;
- U32 mRequestPriority; // last priority set
private:
LLMutex mMutex;
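
With the priority plumbing removed, a worker subclass now just hands addWork() a parameter; there is no per-request priority to set, and no priority bump when aborting. A hypothetical subclass sketch, with signatures inferred from the calls visible in this diff (doWork() returning bool, startWork(S32), the two-argument LLWorkerClass constructor) and any remaining LLWorkerClass overrides omitted:

    class MyDecoder : public LLWorkerClass
    {
    public:
        MyDecoder(LLWorkerThread* thread) : LLWorkerClass(thread, "MyDecoder") {}

        void requestDecode(S32 item)
        {
            addWork(item);          // was addWork(item, priority)
        }

        void cancelDecode()
        {
            abortWork(false);       // no PRIORITY_IMMEDIATE bump any more
        }

    private:
        void startWork(S32 param) override {}   // runs before the request is queued
        bool doWork(S32 param) override         // runs on the worker thread
        {
            // ... decode item 'param' ...
            return true;            // true == finished; finishRequest(true) follows
        }
        // (other LLWorkerClass overrides omitted for brevity)
    };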
diff --git a/indra/llcommon/tests/workqueue_test.cpp b/indra/llcommon/tests/workqueue_test.cpp
index 7655a7aa1f..df16f4a46e 100644
--- a/indra/llcommon/tests/workqueue_test.cpp
+++ b/indra/llcommon/tests/workqueue_test.cpp
@@ -38,7 +38,7 @@ namespace tut
{
struct workqueue_data
{
- WorkQueue queue{"queue"};
+ WorkSchedule queue{"queue"};
};
typedef test_group<workqueue_data> workqueue_group;
typedef workqueue_group::object object;
@@ -49,8 +49,8 @@ namespace tut
{
set_test_name("name");
ensure_equals("didn't capture name", queue.getKey(), "queue");
- ensure("not findable", WorkQueue::getInstance("queue") == queue.getWeak().lock());
- WorkQueue q2;
+ ensure("not findable", WorkSchedule::getInstance("queue") == queue.getWeak().lock());
+ WorkSchedule q2;
ensure("has no name", LLStringUtil::startsWith(q2.getKey(), "WorkQueue"));
}
@@ -73,16 +73,16 @@ namespace tut
{
set_test_name("postEvery");
// record of runs
- using Shared = std::deque<WorkQueue::TimePoint>;
+ using Shared = std::deque<WorkSchedule::TimePoint>;
// This is an example of how to share data between the originator of
- // postEvery(work) and the work item itself, since usually a WorkQueue
+ // postEvery(work) and the work item itself, since usually a WorkSchedule
// is used to dispatch work to a different thread. Neither of them
// should call any of LLCond's wait methods: you don't want to stall
// either the worker thread or the originating thread (conventionally
// main). Use LLCond or a subclass even if all you want to do is
// signal the work item that it can quit; consider LLOneShotCond.
LLCond<Shared> data;
- auto start = WorkQueue::TimePoint::clock::now();
+ auto start = WorkSchedule::TimePoint::clock::now();
// 2s seems like a long time to wait, since it directly impacts the
// duration of this test program. Unfortunately GitHub's Mac runners
// are pretty wimpy, and we're getting spurious "too late" errors just
@@ -97,7 +97,7 @@ namespace tut
data.update_one(
[](Shared& data)
{
- data.push_back(WorkQueue::TimePoint::clock::now());
+ data.push_back(WorkSchedule::TimePoint::clock::now());
});
// by the 3rd call, return false to stop
return (++count < 3);
@@ -106,7 +106,7 @@ namespace tut
// postEvery() running, so run until we have exhausted the iterations
// or we time out waiting
for (auto finish = start + 10*interval;
- WorkQueue::TimePoint::clock::now() < finish &&
+ WorkSchedule::TimePoint::clock::now() < finish &&
data.get([](const Shared& data){ return data.size(); }) < 3; )
{
queue.runPending();
@@ -143,8 +143,8 @@ namespace tut
void object::test<4>()
{
set_test_name("postTo");
- WorkQueue main("main");
- auto qptr = WorkQueue::getInstance("queue");
+ WorkSchedule main("main");
+ auto qptr = WorkSchedule::getInstance("queue");
int result = 0;
main.postTo(
qptr,
@@ -175,8 +175,8 @@ namespace tut
void object::test<5>()
{
set_test_name("postTo with void return");
- WorkQueue main("main");
- auto qptr = WorkQueue::getInstance("queue");
+ WorkSchedule main("main");
+ auto qptr = WorkSchedule::getInstance("queue");
std::string observe;
main.postTo(
qptr,
@@ -198,7 +198,7 @@ namespace tut
std::string stored;
// Try to call waitForResult() on this thread's main coroutine. It
// should throw because the main coroutine must service the queue.
- auto what{ catch_what<WorkQueue::Error>(
+ auto what{ catch_what<WorkSchedule::Error>(
[this, &stored](){ stored = queue.waitForResult(
[](){ return "should throw"; }); }) };
ensure("lambda should not have run", stored.empty());
diff --git a/indra/llcommon/threadpool.cpp b/indra/llcommon/threadpool.cpp
index d5adf11264..3a9a5a2062 100644
--- a/indra/llcommon/threadpool.cpp
+++ b/indra/llcommon/threadpool.cpp
@@ -17,18 +17,58 @@
// std headers
// external library headers
// other Linden headers
+#include "commoncontrol.h"
#include "llerror.h"
#include "llevents.h"
+#include "llsd.h"
#include "stringize.h"
-LL::ThreadPool::ThreadPool(const std::string& name, size_t threads, size_t capacity):
+#include <boost/fiber/algo/round_robin.hpp>
+
+/*****************************************************************************
+* Custom fiber scheduler for worker threads
+*****************************************************************************/
+// As of 2022-12-06, each of our worker threads only runs a single (default)
+// fiber: we don't launch explicit fibers within worker threads, nor do we
+// anticipate doing so. So a worker thread that's simply waiting for incoming
+// tasks should really sleep a little. Override the default fiber scheduler to
+// implement that.
+struct sleepy_robin: public boost::fibers::algo::round_robin
+{
+ virtual void suspend_until( std::chrono::steady_clock::time_point const&) noexcept
+ {
+#if LL_WINDOWS
+ // round_robin holds a std::condition_variable, and
+ // round_robin::suspend_until() calls
+ // std::condition_variable::wait_until(). On Windows, that call seems
+ // busier than it ought to be. Try just sleeping.
+ Sleep(1);
+#else
+ // Currently unused on platforms other than Windows, but might as well have something here.
+ // usleep() takes different units than Sleep(), but we just want to sleep for a de minimis duration.
+ usleep(1);
+#endif
+ }
+
+ virtual void notify() noexcept
+ {
+ // Since our Sleep() call above will wake up on its own, we need not
+ // take any special action to wake it.
+ }
+};
+
+/*****************************************************************************
+* ThreadPoolBase
+*****************************************************************************/
+LL::ThreadPoolBase::ThreadPoolBase(const std::string& name, size_t threads,
+ WorkQueueBase* queue):
super(name),
- mQueue(name, capacity),
mName("ThreadPool:" + name),
- mThreadCount(threads)
+ mThreadCount(getConfiguredWidth(name, threads)),
+ mQueue(queue)
{}
-void LL::ThreadPool::start()
+void LL::ThreadPoolBase::start()
{
for (size_t i = 0; i < mThreadCount; ++i)
{
@@ -56,17 +96,17 @@ void LL::ThreadPool::start()
});
}
-LL::ThreadPool::~ThreadPool()
+LL::ThreadPoolBase::~ThreadPoolBase()
{
close();
}
-void LL::ThreadPool::close()
+void LL::ThreadPoolBase::close()
{
- if (! mQueue.isClosed())
+ if (! mQueue->isClosed())
{
LL_DEBUGS("ThreadPool") << mName << " closing queue and joining threads" << LL_ENDL;
- mQueue.close();
+ mQueue->close();
for (auto& pair: mThreads)
{
LL_DEBUGS("ThreadPool") << mName << " waiting on thread " << pair.first << LL_ENDL;
@@ -76,14 +116,74 @@ void LL::ThreadPool::close()
}
}
-void LL::ThreadPool::run(const std::string& name)
+void LL::ThreadPoolBase::run(const std::string& name)
{
+#if LL_WINDOWS
+ // Try using sleepy_robin fiber scheduler.
+ boost::fibers::use_scheduling_algorithm<sleepy_robin>();
+#endif // LL_WINDOWS
+
LL_DEBUGS("ThreadPool") << name << " starting" << LL_ENDL;
run();
LL_DEBUGS("ThreadPool") << name << " stopping" << LL_ENDL;
}
-void LL::ThreadPool::run()
+void LL::ThreadPoolBase::run()
+{
+ mQueue->runUntilClose();
+}
+
+//static
+size_t LL::ThreadPoolBase::getConfiguredWidth(const std::string& name, size_t dft)
+{
+ LLSD poolSizes;
+ try
+ {
+ poolSizes = LL::CommonControl::get("Global", "ThreadPoolSizes");
+ // "ThreadPoolSizes" is actually a map containing the sizes of
+ // interest -- or should be, if this process has an
+ // LLViewerControlListener instance and its settings include
+ // "ThreadPoolSizes". If we failed to retrieve it, perhaps we're in a
+ // program that doesn't define that, or perhaps there's no such
+ // setting, or perhaps we're asking too early, before the LLEventAPI
+ // itself has been instantiated. In any of those cases, it seems worth
+ // warning.
+ if (! poolSizes.isDefined())
+ {
+ // Note: we don't warn about absence of an override key for a
+ // particular ThreadPool name, that's fine. This warning is about
+ // complete absence of a ThreadPoolSizes setting, which we expect
+ // in a normal viewer session.
+ LL_WARNS("ThreadPool") << "No 'ThreadPoolSizes' setting for ThreadPool '"
+ << name << "'" << LL_ENDL;
+ }
+ }
+ catch (const LL::CommonControl::Error& exc)
+ {
+ // We don't want ThreadPool to *require* LLViewerControlListener.
+ // Just log it and carry on.
+ LL_WARNS("ThreadPool") << "Can't check 'ThreadPoolSizes': " << exc.what() << LL_ENDL;
+ }
+
+ LL_DEBUGS("ThreadPool") << "ThreadPoolSizes = " << poolSizes << LL_ENDL;
+ // LLSD treats an undefined value as an empty map when asked to retrieve a
+ // key, so we don't need this to be conditional.
+ LLSD sizeSpec{ poolSizes[name] };
+ // We retrieve sizeSpec as LLSD, rather than immediately as LLSD::Integer,
+ // so we can distinguish the case when it's undefined.
+ return sizeSpec.isInteger() ? sizeSpec.asInteger() : dft;
+}
+
+//static
+size_t LL::ThreadPoolBase::getWidth(const std::string& name, size_t dft)
{
- mQueue.runUntilClose();
+ auto instance{ getInstance(name) };
+ if (instance)
+ {
+ return instance->getWidth();
+ }
+ else
+ {
+ return getConfiguredWidth(name, dft);
+ }
}
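
getConfiguredWidth() treats the "ThreadPoolSizes" setting as an LLSD map keyed by pool name, so an entry such as { "General": 8 } overrides the width the code would otherwise pass to the constructor, and getWidth() lets callers ask before the pool even exists. A small usage sketch; the pool name and default below are illustrative, not taken from this diff:

    // Size a companion resource to match whatever width the "General" pool
    // will eventually get: the live instance if one exists, the configured
    // override if any, otherwise the compiled-in default of 4.
    size_t planned_general_width()
    {
        return LL::ThreadPoolBase::getWidth("General", 4);
    }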
diff --git a/indra/llcommon/threadpool.h b/indra/llcommon/threadpool.h
index f8eec3b457..60f4a0ce1b 100644
--- a/indra/llcommon/threadpool.h
+++ b/indra/llcommon/threadpool.h
@@ -13,7 +13,9 @@
#if ! defined(LL_THREADPOOL_H)
#define LL_THREADPOOL_H
+#include "threadpool_fwd.h"
#include "workqueue.h"
+#include <memory> // std::unique_ptr
#include <string>
#include <thread>
#include <utility> // std::pair
@@ -22,17 +24,24 @@
namespace LL
{
- class ThreadPool: public LLInstanceTracker<ThreadPool, std::string>
+ class ThreadPoolBase: public LLInstanceTracker<ThreadPoolBase, std::string>
{
private:
- using super = LLInstanceTracker<ThreadPool, std::string>;
+ using super = LLInstanceTracker<ThreadPoolBase, std::string>;
+
public:
/**
- * Pass ThreadPool a string name. This can be used to look up the
+ * Pass ThreadPoolBase a string name. This can be used to look up the
* relevant WorkQueue.
+ *
+ * The number of threads you pass sets the compile-time default. But
+ * if the user has overridden the LLSD map in the "ThreadPoolSizes"
+ * setting with a key matching this ThreadPool name, that setting
+ * overrides this parameter.
*/
- ThreadPool(const std::string& name, size_t threads=1, size_t capacity=1024);
- virtual ~ThreadPool();
+ ThreadPoolBase(const std::string& name, size_t threads,
+ WorkQueueBase* queue);
+ virtual ~ThreadPoolBase();
/**
* Launch the ThreadPool. Until this call, a constructed ThreadPool
@@ -50,8 +59,6 @@ namespace LL
std::string getName() const { return mName; }
size_t getWidth() const { return mThreads.size(); }
- /// obtain a non-const reference to the WorkQueue to post work to it
- WorkQueue& getQueue() { return mQueue; }
/**
* Override run() if you need special processing. The default run()
@@ -59,15 +66,72 @@ namespace LL
*/
virtual void run();
+ /**
+ * getConfiguredWidth() returns the setting, if any, for the specified
+ * ThreadPool name. Returns dft if the "ThreadPoolSizes" map does not
+ * contain the specified name.
+ */
+ static
+ size_t getConfiguredWidth(const std::string& name, size_t dft=0);
+
+ /**
+ * This getWidth() returns the width of the instantiated ThreadPool
+ * with the specified name, if any. If no instance exists, returns its
+ * getConfiguredWidth() if any. If there's no instance and no relevant
+ * override, return dft. Presumably dft should match the threads
+ * parameter passed to the ThreadPool constructor call that will
+ * eventually instantiate the ThreadPool with that name.
+ */
+ static
+ size_t getWidth(const std::string& name, size_t dft);
+
+ protected:
+ std::unique_ptr<WorkQueueBase> mQueue;
+
private:
void run(const std::string& name);
- WorkQueue mQueue;
std::string mName;
size_t mThreadCount;
std::vector<std::pair<std::string, std::thread>> mThreads;
};
+ /**
+ * Specialize with WorkQueue or, for timestamped tasks, WorkSchedule
+ */
+ template <class QUEUE>
+ struct ThreadPoolUsing: public ThreadPoolBase
+ {
+ using queue_t = QUEUE;
+
+ /**
+ * Pass ThreadPoolUsing a string name. This can be used to look up the
+ * relevant WorkQueue.
+ *
+ * The number of threads you pass sets the compile-time default. But
+ * if the user has overridden the LLSD map in the "ThreadPoolSizes"
+ * setting with a key matching this ThreadPool name, that setting
+ * overrides this parameter.
+ *
+ * Pass an explicit capacity to limit the size of the queue.
+ * Constraining the queue can cause a submitter to block. Do not
+ * constrain any ThreadPool accepting work from the main thread.
+ */
+ ThreadPoolUsing(const std::string& name, size_t threads=1, size_t capacity=1024*1024):
+ ThreadPoolBase(name, threads, new queue_t(name, capacity))
+ {}
+ ~ThreadPoolUsing() override {}
+
+ /**
+ * obtain a non-const reference to the specific WorkQueue subclass to
+ * post work to it
+ */
+ queue_t& getQueue() { return static_cast<queue_t&>(*mQueue); }
+ };
+
+ /// ThreadPool is shorthand for using the simpler WorkQueue
+ using ThreadPool = ThreadPoolUsing<WorkQueue>;
+
} // namespace LL
#endif /* ! defined(LL_THREADPOOL_H) */
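
ThreadPoolUsing lets each pool pick its queue flavour at compile time, while the ThreadPool alias keeps existing call sites working. A usage sketch; the pool names are illustrative and WorkQueue::post() is assumed to have its usual callable-taking form:

    void start_illustrative_pools()
    {
        LL::ThreadPool pool("Illustrative", 2);   // width can still be overridden via "ThreadPoolSizes"
        pool.start();
        pool.getQueue().post([]{ /* runs on one of the pool threads */ });

        // For timestamped work, back the pool with WorkSchedule instead:
        LL::ThreadPoolUsing<LL::WorkSchedule> timed("IllustrativeTimed", 1);
        timed.start();
        // timed.getQueue() is a WorkSchedule&, so time-based posting is available.
    }   // both pools close() their queues and join their threads on destruction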
diff --git a/indra/llcommon/threadpool_fwd.h b/indra/llcommon/threadpool_fwd.h
new file mode 100644
index 0000000000..1aa3c4a0e2
--- /dev/null
+++ b/indra/llcommon/threadpool_fwd.h
@@ -0,0 +1,25 @@
+/**
+ * @file threadpool_fwd.h
+ * @author Nat Goodspeed
+ * @date 2022-12-09
+ * @brief Forward declarations for ThreadPool et al.
+ *
+ * $LicenseInfo:firstyear=2022&license=viewerlgpl$
+ * Copyright (c) 2022, Linden Research, Inc.
+ * $/LicenseInfo$
+ */
+
+#if ! defined(LL_THREADPOOL_FWD_H)
+#define LL_THREADPOOL_FWD_H
+
+#include "workqueue.h"
+
+namespace LL
+{
+ template <class QUEUE>
+ struct ThreadPoolUsing;
+
+ using ThreadPool = ThreadPoolUsing<WorkQueue>;
+} // namespace LL
+
+#endif /* ! defined(LL_THREADPOOL_FWD_H) */
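The point of the new forward-declaration header is to let code that merely names ThreadPool types avoid including threadpool.h. A hypothetical consumer header might look like this; the class and its members are purely illustrative.

// some_consumer.h -- hypothetical; only names LL::ThreadPool, never calls it
#include "threadpool_fwd.h"

class SomeConsumer
{
public:
    // The full threadpool.h is only needed by the .cpp that calls pool methods.
    void setPool(LL::ThreadPool* pool) { mPool = pool; }

private:
    LL::ThreadPool* mPool = nullptr;   // pointer to the incomplete type is fine
};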
diff --git a/indra/llcommon/workqueue.cpp b/indra/llcommon/workqueue.cpp
index eb06890468..cf80ce0656 100644
--- a/indra/llcommon/workqueue.cpp
+++ b/indra/llcommon/workqueue.cpp
@@ -26,83 +26,65 @@
using Mutex = LLCoros::Mutex;
using Lock = LLCoros::LockType;
-LL::WorkQueue::WorkQueue(const std::string& name, size_t capacity):
- super(makeName(name)),
- mQueue(capacity)
+/*****************************************************************************
+* WorkQueueBase
+*****************************************************************************/
+LL::WorkQueueBase::WorkQueueBase(const std::string& name):
+ super(makeName(name))
{
// TODO: register for "LLApp" events so we can implicitly close() on
// viewer shutdown.
}
-void LL::WorkQueue::close()
-{
- mQueue.close();
-}
-
-size_t LL::WorkQueue::size()
-{
- return mQueue.size();
-}
-
-bool LL::WorkQueue::isClosed()
-{
- return mQueue.isClosed();
-}
-
-bool LL::WorkQueue::done()
-{
- return mQueue.done();
-}
-
-void LL::WorkQueue::runUntilClose()
+void LL::WorkQueueBase::runUntilClose()
{
try
{
for (;;)
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
- callWork(mQueue.pop());
+ callWork(pop_());
}
}
- catch (const Queue::Closed&)
+ catch (const Closed&)
{
}
}
-bool LL::WorkQueue::runPending()
+bool LL::WorkQueueBase::runPending()
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
- for (Work work; mQueue.tryPop(work); )
+ for (Work work; tryPop_(work); )
{
callWork(work);
}
- return ! mQueue.done();
+ return ! done();
}
-bool LL::WorkQueue::runOne()
+bool LL::WorkQueueBase::runOne()
{
Work work;
- if (mQueue.tryPop(work))
+ if (tryPop_(work))
{
callWork(work);
}
- return ! mQueue.done();
+ return ! done();
}
-bool LL::WorkQueue::runUntil(const TimePoint& until)
+bool LL::WorkQueueBase::runUntil(const TimePoint& until)
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
// Should we subtract some slop to allow for typical Work execution time?
// How much slop?
// runUntil() is simply a time-bounded runPending().
- for (Work work; TimePoint::clock::now() < until && mQueue.tryPop(work); )
+ for (Work work; TimePoint::clock::now() < until && tryPop_(work); )
{
callWork(work);
}
- return ! mQueue.done();
+ return ! done();
}
-std::string LL::WorkQueue::makeName(const std::string& name)
+std::string LL::WorkQueueBase::makeName(const std::string& name)
{
if (! name.empty())
return name;
@@ -120,14 +102,7 @@ std::string LL::WorkQueue::makeName(const std::string& name)
return STRINGIZE("WorkQueue" << num);
}
-void LL::WorkQueue::callWork(const Queue::DataTuple& work)
-{
- // ThreadSafeSchedule::pop() always delivers a tuple, even when
- // there's only one data field per item, as for us.
- callWork(std::get<0>(work));
-}
-
-void LL::WorkQueue::callWork(const Work& work)
+void LL::WorkQueueBase::callWork(const Work& work)
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
try
@@ -142,12 +117,12 @@ void LL::WorkQueue::callWork(const Work& work)
}
}
-void LL::WorkQueue::error(const std::string& msg)
+void LL::WorkQueueBase::error(const std::string& msg)
{
LL_ERRS("WorkQueue") << msg << LL_ENDL;
}
-void LL::WorkQueue::checkCoroutine(const std::string& method)
+void LL::WorkQueueBase::checkCoroutine(const std::string& method)
{
// By convention, the default coroutine on each thread has an empty name
// string. See also LLCoros::logname().
@@ -156,3 +131,115 @@ void LL::WorkQueue::checkCoroutine(const std::string& method)
LLTHROW(Error("Do not call " + method + " from a thread's default coroutine"));
}
}
+
+/*****************************************************************************
+* WorkQueue
+*****************************************************************************/
+LL::WorkQueue::WorkQueue(const std::string& name, size_t capacity):
+ super(name),
+ mQueue(capacity)
+{
+}
+
+void LL::WorkQueue::close()
+{
+ mQueue.close();
+}
+
+size_t LL::WorkQueue::size()
+{
+ return mQueue.size();
+}
+
+bool LL::WorkQueue::isClosed()
+{
+ return mQueue.isClosed();
+}
+
+bool LL::WorkQueue::done()
+{
+ return mQueue.done();
+}
+
+bool LL::WorkQueue::post(const Work& callable)
+{
+ return mQueue.pushIfOpen(callable);
+}
+
+bool LL::WorkQueue::tryPost(const Work& callable)
+{
+ return mQueue.tryPush(callable);
+}
+
+LL::WorkQueue::Work LL::WorkQueue::pop_()
+{
+ return mQueue.pop();
+}
+
+bool LL::WorkQueue::tryPop_(Work& work)
+{
+ return mQueue.tryPop(work);
+}
+
+/*****************************************************************************
+* WorkSchedule
+*****************************************************************************/
+LL::WorkSchedule::WorkSchedule(const std::string& name, size_t capacity):
+ super(name),
+ mQueue(capacity)
+{
+}
+
+void LL::WorkSchedule::close()
+{
+ mQueue.close();
+}
+
+size_t LL::WorkSchedule::size()
+{
+ return mQueue.size();
+}
+
+bool LL::WorkSchedule::isClosed()
+{
+ return mQueue.isClosed();
+}
+
+bool LL::WorkSchedule::done()
+{
+ return mQueue.done();
+}
+
+bool LL::WorkSchedule::post(const Work& callable)
+{
+ // Use TimePoint::clock::now() instead of TimePoint's representation of
+ // the epoch because this WorkSchedule may contain a mix of past-due
+ // TimedWork items and TimedWork items scheduled for the future. Sift this
+ // new item into the correct place.
+ return post(callable, TimePoint::clock::now());
+}
+
+bool LL::WorkSchedule::post(const Work& callable, const TimePoint& time)
+{
+ return mQueue.pushIfOpen(TimedWork(time, callable));
+}
+
+bool LL::WorkSchedule::tryPost(const Work& callable)
+{
+ return tryPost(callable, TimePoint::clock::now());
+}
+
+bool LL::WorkSchedule::tryPost(const Work& callable, const TimePoint& time)
+{
+ return mQueue.tryPush(TimedWork(time, callable));
+}
+
+LL::WorkSchedule::Work LL::WorkSchedule::pop_()
+{
+ return std::get<0>(mQueue.pop());
+}
+
+bool LL::WorkSchedule::tryPop_(Work& work)
+{
+ return mQueue.tryPop(work);
+}
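A small producer/consumer sketch of the refactored API above: post() and tryPost() now report failure with a bool instead of relying on the queue's exception, and the run* loops live in WorkQueueBase. The queue name and the work items are illustrative.

#include "workqueue.h"
#include <thread>

void workqueue_sketch()
{
    LL::WorkQueue queue("demo");            // illustrative name

    // consumer: drain items until the producer calls close()
    std::thread consumer([&queue]{ queue.runUntilClose(); });

    // producer
    for (int i = 0; i < 10; ++i)
    {
        if (! queue.post([i]{ /* process item i */ }))
        {
            break;                          // queue was closed under us
        }
    }
    // tryPost() additionally returns false when the queue is full
    queue.tryPost([]{ /* optional extra work */ });

    queue.close();                          // lets runUntilClose() return
    consumer.join();
}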
diff --git a/indra/llcommon/workqueue.h b/indra/llcommon/workqueue.h
index 70fd65bd0c..ec0700a718 100644
--- a/indra/llcommon/workqueue.h
+++ b/indra/llcommon/workqueue.h
@@ -15,6 +15,7 @@
#include "llcoros.h"
#include "llexception.h"
#include "llinstancetracker.h"
+#include "llinstancetrackersubclass.h"
#include "threadsafeschedule.h"
#include <chrono>
#include <exception> // std::current_exception
@@ -23,27 +24,23 @@
namespace LL
{
+
+/*****************************************************************************
+* WorkQueueBase: API for WorkQueue and WorkSchedule
+*****************************************************************************/
/**
* A typical WorkQueue has a string name that can be used to find it.
*/
- class WorkQueue: public LLInstanceTracker<WorkQueue, std::string>
+ class WorkQueueBase: public LLInstanceTracker<WorkQueueBase, std::string>
{
private:
- using super = LLInstanceTracker<WorkQueue, std::string>;
+ using super = LLInstanceTracker<WorkQueueBase, std::string>;
public:
using Work = std::function<void()>;
-
- private:
- using Queue = ThreadSafeSchedule<Work>;
- // helper for postEvery()
- template <typename Rep, typename Period, typename CALLABLE>
- class BackJack;
-
- public:
- using TimePoint = Queue::TimePoint;
- using TimedWork = Queue::TimeTuple;
- using Closed = Queue::Closed;
+ using Closed = LLThreadSafeQueueInterrupt;
+ // for runFor()
+ using TimePoint = std::chrono::steady_clock::time_point;
struct Error: public LLException
{
@@ -51,18 +48,18 @@ namespace LL
};
/**
- * You may omit the WorkQueue name, in which case a unique name is
+ * You may omit the WorkQueueBase name, in which case a unique name is
* synthesized; for practical purposes that makes it anonymous.
*/
- WorkQueue(const std::string& name = std::string(), size_t capacity=1024);
+ WorkQueueBase(const std::string& name);
/**
* Since the point of WorkQueue is to pass work to some other worker
- * thread(s) asynchronously, it's important that the WorkQueue continue
- * to exist until the worker thread(s) have drained it. To communicate
- * that it's time for them to quit, close() the queue.
+ * thread(s) asynchronously, it's important that it continue to exist
+ * until the worker thread(s) have drained it. To communicate that
+ * it's time for them to quit, close() the queue.
*/
- void close();
+ virtual void close() = 0;
/**
* WorkQueue supports multiple producers and multiple consumers. In
@@ -78,152 +75,57 @@ namespace LL
* * If you're the only consumer, noticing that size() > 0 is
* meaningful.
*/
- size_t size();
+ virtual size_t size() = 0;
/// producer end: are we prevented from pushing any additional items?
- bool isClosed();
+ virtual bool isClosed() = 0;
/// consumer end: are we done, is the queue entirely drained?
- bool done();
+ virtual bool done() = 0;
/*---------------------- fire and forget API -----------------------*/
- /// fire-and-forget, but at a particular (future?) time
- template <typename CALLABLE>
- void post(const TimePoint& time, CALLABLE&& callable)
- {
- // Defer reifying an arbitrary CALLABLE until we hit this or
- // postIfOpen(). All other methods should accept CALLABLEs of
- // arbitrary type to avoid multiple levels of std::function
- // indirection.
- mQueue.push(TimedWork(time, std::move(callable)));
- }
-
- /// fire-and-forget
- template <typename CALLABLE>
- void post(CALLABLE&& callable)
- {
- // We use TimePoint::clock::now() instead of TimePoint's
- // representation of the epoch because this WorkQueue may contain
- // a mix of past-due TimedWork items and TimedWork items scheduled
- // for the future. Sift this new item into the correct place.
- post(TimePoint::clock::now(), std::move(callable));
- }
-
- /**
- * post work for a particular time, unless the queue is closed before
- * we can post
- */
- template <typename CALLABLE>
- bool postIfOpen(const TimePoint& time, CALLABLE&& callable)
- {
- // Defer reifying an arbitrary CALLABLE until we hit this or
- // post(). All other methods should accept CALLABLEs of arbitrary
- // type to avoid multiple levels of std::function indirection.
- return mQueue.pushIfOpen(TimedWork(time, std::move(callable)));
- }
-
/**
* post work, unless the queue is closed before we can post
*/
- template <typename CALLABLE>
- bool postIfOpen(CALLABLE&& callable)
- {
- return postIfOpen(TimePoint::clock::now(), std::move(callable));
- }
+ virtual bool post(const Work&) = 0;
/**
- * Post work to be run at a specified time to another WorkQueue, which
- * may or may not still exist and be open. Return true if we were able
- * to post.
+ * post work, unless the queue is full
*/
- template <typename CALLABLE>
- static bool postMaybe(weak_t target, const TimePoint& time, CALLABLE&& callable);
+ virtual bool tryPost(const Work&) = 0;
/**
* Post work to another WorkQueue, which may or may not still exist
- * and be open. Return true if we were able to post.
- */
- template <typename CALLABLE>
- static bool postMaybe(weak_t target, CALLABLE&& callable)
- {
- return postMaybe(target, TimePoint::clock::now(),
- std::forward<CALLABLE>(callable));
- }
-
- /**
- * Launch a callable returning bool that will trigger repeatedly at
- * specified interval, until the callable returns false.
- *
- * If you need to signal that callable from outside, DO NOT bind a
- * reference to a simple bool! That's not thread-safe. Instead, bind
- * an LLCond variant, e.g. LLOneShotCond or LLBoolCond.
+ * and be open. Support any post() overload. Return true if we were
+ * able to post.
*/
- template <typename Rep, typename Period, typename CALLABLE>
- void postEvery(const std::chrono::duration<Rep, Period>& interval,
- CALLABLE&& callable);
-
- template <typename CALLABLE>
- bool tryPost(CALLABLE&& callable)
- {
- return mQueue.tryPush(TimedWork(TimePoint::clock::now(), std::move(callable)));
- }
+ template <typename... ARGS>
+ static bool postMaybe(weak_t target, ARGS&&... args);
/*------------------------- handshake API --------------------------*/
/**
- * Post work to another WorkQueue to be run at a specified time,
- * requesting a specific callback to be run on this WorkQueue on
- * completion.
- *
- * Returns true if able to post, false if the other WorkQueue is
- * inaccessible.
- */
- // Apparently some Microsoft header file defines a macro CALLBACK? The
- // natural template argument name CALLBACK produces very weird Visual
- // Studio compile errors that seem utterly unrelated to this source
- // code.
- template <typename CALLABLE, typename FOLLOWUP>
- bool postTo(weak_t target,
- const TimePoint& time, CALLABLE&& callable, FOLLOWUP&& callback);
-
- /**
* Post work to another WorkQueue, requesting a specific callback to
- * be run on this WorkQueue on completion.
+ * be run on this WorkQueue on completion. Optional final argument is
+ * TimePoint for WorkSchedule.
*
* Returns true if able to post, false if the other WorkQueue is
* inaccessible.
*/
- template <typename CALLABLE, typename FOLLOWUP>
- bool postTo(weak_t target, CALLABLE&& callable, FOLLOWUP&& callback)
- {
- return postTo(target, TimePoint::clock::now(),
- std::move(callable), std::move(callback));
- }
-
- /**
- * Post work to another WorkQueue to be run at a specified time,
- * blocking the calling coroutine until then, returning the result to
- * caller on completion.
- *
- * In general, we assume that each thread's default coroutine is busy
- * servicing its WorkQueue or whatever. To try to prevent mistakes, we
- * forbid calling waitForResult() from a thread's default coroutine.
- */
- template <typename CALLABLE>
- auto waitForResult(const TimePoint& time, CALLABLE&& callable);
+ template <typename CALLABLE, typename FOLLOWUP, typename... ARGS>
+ bool postTo(weak_t target, CALLABLE&& callable, FOLLOWUP&& callback,
+ ARGS&&... args);
/**
* Post work to another WorkQueue, blocking the calling coroutine
- * until then, returning the result to caller on completion.
+ * until then, returning the result to caller on completion. Optional
+ * final argument is TimePoint for WorkSchedule.
*
* In general, we assume that each thread's default coroutine is busy
* servicing its WorkQueue or whatever. To try to prevent mistakes, we
* forbid calling waitForResult() from a thread's default coroutine.
*/
- template <typename CALLABLE>
- auto waitForResult(CALLABLE&& callable)
- {
- return waitForResult(TimePoint::clock::now(), std::move(callable));
- }
+ template <typename CALLABLE, typename... ARGS>
+ auto waitForResult(CALLABLE&& callable, ARGS&&... args);
/*--------------------------- worker API ---------------------------*/
@@ -270,7 +172,7 @@ namespace LL
*/
bool runUntil(const TimePoint& until);
- private:
+ protected:
template <typename CALLABLE, typename FOLLOWUP>
static auto makeReplyLambda(CALLABLE&& callable, FOLLOWUP&& callback);
/// general case: arbitrary C++ return type
@@ -290,13 +192,170 @@ namespace LL
static void checkCoroutine(const std::string& method);
static void error(const std::string& msg);
static std::string makeName(const std::string& name);
- void callWork(const Queue::DataTuple& work);
void callWork(const Work& work);
+
+ private:
+ virtual Work pop_() = 0;
+ virtual bool tryPop_(Work&) = 0;
+ };
+
+/*****************************************************************************
+* WorkQueue: no timestamped task support
+*****************************************************************************/
+ class WorkQueue: public LLInstanceTrackerSubclass<WorkQueue, WorkQueueBase>
+ {
+ private:
+ using super = LLInstanceTrackerSubclass<WorkQueue, WorkQueueBase>;
+
+ public:
+ /**
+ * You may omit the WorkQueue name, in which case a unique name is
+ * synthesized; for practical purposes that makes it anonymous.
+ */
+ WorkQueue(const std::string& name = std::string(), size_t capacity=1024);
+
+ /**
+ * Since the point of WorkQueue is to pass work to some other worker
+ * thread(s) asynchronously, it's important that it continue to exist
+ * until the worker thread(s) have drained it. To communicate that
+ * it's time for them to quit, close() the queue.
+ */
+ void close() override;
+
+ /**
+ * WorkQueue supports multiple producers and multiple consumers. In
+ * the general case it's misleading to test size(), since any other
+ * thread might change it the nanosecond the lock is released. On that
+ * basis, some might argue against publishing a size() method at all.
+ *
+ * But there are two specific cases in which a test based on size()
+ * might be reasonable:
+ *
+ * * If you're the only producer, noticing that size() == 0 is
+ * meaningful.
+ * * If you're the only consumer, noticing that size() > 0 is
+ * meaningful.
+ */
+ size_t size() override;
+ /// producer end: are we prevented from pushing any additional items?
+ bool isClosed() override;
+ /// consumer end: are we done, is the queue entirely drained?
+ bool done() override;
+
+ /*---------------------- fire and forget API -----------------------*/
+
+ /**
+ * post work, unless the queue is closed before we can post
+ */
+ bool post(const Work&) override;
+
+ /**
+ * post work, unless the queue is full
+ */
+ bool tryPost(const Work&) override;
+
+ private:
+ using Queue = LLThreadSafeQueue<Work>;
+ Queue mQueue;
+
+ Work pop_() override;
+ bool tryPop_(Work&) override;
+ };
+
+/*****************************************************************************
+* WorkSchedule: add support for timestamped tasks
+*****************************************************************************/
+ class WorkSchedule: public LLInstanceTrackerSubclass<WorkSchedule, WorkQueueBase>
+ {
+ private:
+ using super = LLInstanceTrackerSubclass<WorkSchedule, WorkQueueBase>;
+ using Queue = ThreadSafeSchedule<Work>;
+ // helper for postEvery()
+ template <typename Rep, typename Period, typename CALLABLE>
+ class BackJack;
+
+ public:
+ using TimePoint = Queue::TimePoint;
+ using TimedWork = Queue::TimeTuple;
+
+ /**
+ * You may omit the WorkSchedule name, in which case a unique name is
+ * synthesized; for practical purposes that makes it anonymous.
+ */
+ WorkSchedule(const std::string& name = std::string(), size_t capacity=1024);
+
+ /**
+ * Since the point of WorkSchedule is to pass work to some other worker
+ * thread(s) asynchronously, it's important that the WorkSchedule continue
+ * to exist until the worker thread(s) have drained it. To communicate
+ * that it's time for them to quit, close() the queue.
+ */
+ void close() override;
+
+ /**
+ * WorkSchedule supports multiple producers and multiple consumers. In
+ * the general case it's misleading to test size(), since any other
+ * thread might change it the nanosecond the lock is released. On that
+ * basis, some might argue against publishing a size() method at all.
+ *
+ * But there are two specific cases in which a test based on size()
+ * might be reasonable:
+ *
+ * * If you're the only producer, noticing that size() == 0 is
+ * meaningful.
+ * * If you're the only consumer, noticing that size() > 0 is
+ * meaningful.
+ */
+ size_t size() override;
+ /// producer end: are we prevented from pushing any additional items?
+ bool isClosed() override;
+ /// consumer end: are we done, is the queue entirely drained?
+ bool done() override;
+
+ /*---------------------- fire and forget API -----------------------*/
+
+ /**
+ * post work, unless the queue is closed before we can post
+ */
+ bool post(const Work& callable) override;
+
+ /**
+ * post work for a particular time, unless the queue is closed before
+ * we can post
+ */
+ bool post(const Work& callable, const TimePoint& time);
+
+ /**
+ * post work, unless the queue is full
+ */
+ bool tryPost(const Work& callable) override;
+
+ /**
+ * post work for a particular time, unless the queue is full
+ */
+ bool tryPost(const Work& callable, const TimePoint& time);
+
+ /**
+ * Launch a callable returning bool that will trigger repeatedly at
+ * specified interval, until the callable returns false.
+ *
+ * If you need to signal that callable from outside, DO NOT bind a
+ * reference to a simple bool! That's not thread-safe. Instead, bind
+ * an LLCond variant, e.g. LLOneShotCond or LLBoolCond.
+ */
+ template <typename Rep, typename Period, typename CALLABLE>
+ bool postEvery(const std::chrono::duration<Rep, Period>& interval,
+ CALLABLE&& callable);
+
+ private:
Queue mQueue;
+
+ Work pop_() override;
+ bool tryPop_(Work&) override;
};
/**
- * BackJack is, in effect, a hand-rolled lambda, binding a WorkQueue, a
+ * BackJack is, in effect, a hand-rolled lambda, binding a WorkSchedule, a
* CALLABLE that returns bool, a TimePoint and an interval at which to
* relaunch it. As long as the callable continues returning true, BackJack
* keeps resubmitting it to the target WorkQueue.
@@ -305,7 +364,7 @@ namespace LL
// class method gets its own 'this' pointer -- which we need to resubmit
// the whole BackJack callable.
template <typename Rep, typename Period, typename CALLABLE>
- class WorkQueue::BackJack
+ class WorkSchedule::BackJack
{
public:
// bind the desired data
@@ -319,9 +378,10 @@ namespace LL
mCallable(std::move(callable))
{}
- // Call by target WorkQueue -- note that although WE require a
- // callable returning bool, WorkQueue wants a void callable. We
- // consume the bool.
+ // This operator() method, called by the target WorkSchedule, is what
+ // makes this object a Work item. Although WE require a callable
+ // returning bool, WorkSchedule wants a void callable; we consume the
+ // bool.
void operator()()
{
// If mCallable() throws an exception, don't catch it here: if it
@@ -337,7 +397,7 @@ namespace LL
// register our intent to fire at exact mIntervals.
mStart += mInterval;
- // We're being called at this moment by the target WorkQueue.
+ // We're being called at this moment by the target WorkSchedule.
// Assume it still exists, rather than checking the result of
// lock().
// Resubmit the whole *this callable: that's why we're a class
@@ -345,14 +405,10 @@ namespace LL
// move-only callable; but naturally this statement must be
// the last time we reference this instance, which may become
// moved-from.
- try
- {
- mTarget.lock()->post(mStart, std::move(*this));
- }
- catch (const Closed&)
- {
- // Once this queue is closed, oh well, just stop
- }
+ auto target{ std::dynamic_pointer_cast<WorkSchedule>(mTarget.lock()) };
+ // Discard bool return: once this queue is closed, oh well,
+ // just stop
+ target->post(std::move(*this), mStart);
}
}
@@ -364,8 +420,8 @@ namespace LL
};
template <typename Rep, typename Period, typename CALLABLE>
- void WorkQueue::postEvery(const std::chrono::duration<Rep, Period>& interval,
- CALLABLE&& callable)
+ bool WorkSchedule::postEvery(const std::chrono::duration<Rep, Period>& interval,
+ CALLABLE&& callable)
{
if (interval.count() <= 0)
{
@@ -381,14 +437,14 @@ namespace LL
// Instantiate and post a suitable BackJack, binding a weak_ptr to
// self, the current time, the desired interval and the desired
// callable.
- post(
+ return post(
BackJack<Rep, Period, CALLABLE>(
getWeak(), TimePoint::clock::now(), interval, std::move(callable)));
}
/// general case: arbitrary C++ return type
template <typename CALLABLE, typename FOLLOWUP, typename RETURNTYPE>
- struct WorkQueue::MakeReplyLambda
+ struct WorkQueueBase::MakeReplyLambda
{
auto operator()(CALLABLE&& callable, FOLLOWUP&& callback)
{
@@ -409,7 +465,7 @@ namespace LL
/// specialize for CALLABLE returning void
template <typename CALLABLE, typename FOLLOWUP>
- struct WorkQueue::MakeReplyLambda<CALLABLE, FOLLOWUP, void>
+ struct WorkQueueBase::MakeReplyLambda<CALLABLE, FOLLOWUP, void>
{
auto operator()(CALLABLE&& callable, FOLLOWUP&& callback)
{
@@ -421,16 +477,16 @@ namespace LL
};
template <typename CALLABLE, typename FOLLOWUP>
- auto WorkQueue::makeReplyLambda(CALLABLE&& callable, FOLLOWUP&& callback)
+ auto WorkQueueBase::makeReplyLambda(CALLABLE&& callable, FOLLOWUP&& callback)
{
return MakeReplyLambda<CALLABLE, FOLLOWUP,
decltype(std::forward<CALLABLE>(callable)())>()
(std::move(callable), std::move(callback));
}
- template <typename CALLABLE, typename FOLLOWUP>
- bool WorkQueue::postTo(weak_t target,
- const TimePoint& time, CALLABLE&& callable, FOLLOWUP&& callback)
+ template <typename CALLABLE, typename FOLLOWUP, typename... ARGS>
+ bool WorkQueueBase::postTo(weak_t target, CALLABLE&& callable, FOLLOWUP&& callback,
+ ARGS&&... args)
{
LL_PROFILE_ZONE_SCOPED;
// We're being asked to post to the WorkQueue at target.
@@ -443,13 +499,12 @@ namespace LL
// Here we believe target WorkQueue still exists. Post to it a
// lambda that packages our callable, our callback and a weak_ptr
// to this originating WorkQueue.
- tptr->post(
- time,
+ return tptr->post(
[reply = super::getWeak(),
callable = std::move(callable),
callback = std::move(callback)]
- ()
- mutable {
+ () mutable
+ {
// Use postMaybe() below in case this originating WorkQueue
// has been closed or destroyed. Remember, the outer lambda is
// now running on a thread servicing the target WorkQueue, and
@@ -472,44 +527,34 @@ namespace LL
// originating WorkQueue. Once there, rethrow it.
[exc = std::current_exception()](){ std::rethrow_exception(exc); });
}
- });
-
- // looks like we were able to post()
- return true;
+ },
+ // if caller passed a TimePoint, pass it along to post()
+ std::forward<ARGS>(args)...);
}
- template <typename CALLABLE>
- bool WorkQueue::postMaybe(weak_t target, const TimePoint& time, CALLABLE&& callable)
+ template <typename... ARGS>
+ bool WorkQueueBase::postMaybe(weak_t target, ARGS&&... args)
{
LL_PROFILE_ZONE_SCOPED;
// target is a weak_ptr: have to lock it to check it
auto tptr = target.lock();
if (tptr)
{
- try
- {
- tptr->post(time, std::forward<CALLABLE>(callable));
- // we were able to post()
- return true;
- }
- catch (const Closed&)
- {
- // target WorkQueue still exists, but is Closed
- }
+ return tptr->post(std::forward<ARGS>(args)...);
}
- // either target no longer exists, or its WorkQueue is Closed
+ // target no longer exists
return false;
}
/// general case: arbitrary C++ return type
template <typename CALLABLE, typename RETURNTYPE>
- struct WorkQueue::WaitForResult
+ struct WorkQueueBase::WaitForResult
{
- auto operator()(WorkQueue* self, const TimePoint& time, CALLABLE&& callable)
+ template <typename... ARGS>
+ auto operator()(WorkQueueBase* self, CALLABLE&& callable, ARGS&&... args)
{
LLCoros::Promise<RETURNTYPE> promise;
- self->post(
- time,
+ bool posted = self->post(
// We dare to bind a reference to Promise because it's
// specifically designed for cross-thread communication.
[&promise, callable = std::move(callable)]()
@@ -523,7 +568,13 @@ namespace LL
{
promise.set_exception(std::current_exception());
}
- });
+ },
+ // if caller passed a TimePoint, pass it to post()
+ std::forward<ARGS>(args)...);
+ if (! posted)
+ {
+ LLTHROW(WorkQueueBase::Closed());
+ }
auto future{ LLCoros::getFuture(promise) };
// now, on the calling thread, wait for that result
LLCoros::TempStatus st("waiting for WorkQueue::waitForResult()");
@@ -533,13 +584,13 @@ namespace LL
/// specialize for CALLABLE returning void
template <typename CALLABLE>
- struct WorkQueue::WaitForResult<CALLABLE, void>
+ struct WorkQueueBase::WaitForResult<CALLABLE, void>
{
- void operator()(WorkQueue* self, const TimePoint& time, CALLABLE&& callable)
+ template <typename... ARGS>
+ void operator()(WorkQueueBase* self, CALLABLE&& callable, ARGS&&... args)
{
LLCoros::Promise<void> promise;
- self->post(
- time,
+ bool posted = self->post(
// &promise is designed for cross-thread access
[&promise, callable = std::move(callable)]()
mutable {
@@ -552,7 +603,13 @@ namespace LL
{
promise.set_exception(std::current_exception());
}
- });
+ },
+ // if caller passed a TimePoint, pass it to post()
+ std::forward<ARGS>(args)...);
+ if (! posted)
+ {
+ LLTHROW(WorkQueueBase::Closed());
+ }
auto future{ LLCoros::getFuture(promise) };
// block until set_value()
LLCoros::TempStatus st("waiting for void WorkQueue::waitForResult()");
@@ -560,13 +617,13 @@ namespace LL
}
};
- template <typename CALLABLE>
- auto WorkQueue::waitForResult(const TimePoint& time, CALLABLE&& callable)
+ template <typename CALLABLE, typename... ARGS>
+ auto WorkQueueBase::waitForResult(CALLABLE&& callable, ARGS&&... args)
{
checkCoroutine("waitForResult()");
// derive callable's return type so we can specialize for void
return WaitForResult<CALLABLE, decltype(std::forward<CALLABLE>(callable)())>()
- (this, time, std::forward<CALLABLE>(callable));
+ (this, std::forward<CALLABLE>(callable), std::forward<ARGS>(args)...);
}
} // namespace LL
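
Finally, a sketch of the reworked handshake calls: the optional trailing TimePoint is forwarded to post() and is therefore only meaningful when the target is a WorkSchedule. The queue names and the worker thread are illustrative, the getInstance() lookup is the LLInstanceTracker mechanism, and waitForResult() is assumed to run in a named (non-default) LLCoros coroutine.

#include "workqueue.h"
#include <thread>

void handshake_sketch()      // assumed to run in a named LLCoros coroutine
{
    LL::WorkQueue workQ("workerDemo"), replyQ("replyDemo");
    std::thread worker([&workQ]{ workQ.runUntilClose(); });

    // fire and forget, tolerating a closed or already-destroyed target
    LL::WorkQueue::postMaybe(LL::WorkQueue::getInstance("workerDemo"),
                             []{ /* background work */ });

    // run the callable on the worker thread, then run the callback back
    // here with its result (assuming something later drains replyQ)
    replyQ.postTo(
        LL::WorkQueue::getInstance("workerDemo"),
        []{ return 6 * 7; },                 // runs on the worker thread
        [](int result){ /* runs on replyQ */ });

    // suspend this coroutine until the worker delivers the value
    int answer = workQ.waitForResult([]{ return 6 * 7; });
    (void)answer;

    workQ.close();
    worker.join();
}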