author      Nat Goodspeed <nat@lindenlab.com>      2024-01-18 13:15:32 -0500
committer   Nat Goodspeed <nat@lindenlab.com>      2024-01-18 13:15:32 -0500
commit      fea1d9f4d21ceccb6ebb13270c40ebbae737e769 (patch)
tree        c6e4fa3a9c1f0ff39e3a65e4694b3bb891d2812e /indra/llcommon
parent      5fa7f69101a889009194eeddb927599d7536613f (diff)
parent      fe796dac711c7ecdc7d6d17e0b692abf468b754a (diff)
SL-20546: Merge branch 'DRTVWR-588-maint-W' into sl-20546.
Diffstat (limited to 'indra/llcommon')
-rw-r--r--  indra/llcommon/CMakeLists.txt | 12
-rw-r--r--  indra/llcommon/always_return.h | 124
-rw-r--r--  indra/llcommon/apply.cpp | 29
-rw-r--r--  indra/llcommon/apply.h | 144
-rw-r--r--  indra/llcommon/commoncontrol.cpp | 106
-rw-r--r--  indra/llcommon/commoncontrol.h | 75
-rw-r--r--  indra/llcommon/function_types.h | 49
-rw-r--r--  indra/llcommon/lazyeventapi.cpp | 72
-rw-r--r--  indra/llcommon/lazyeventapi.h | 205
-rw-r--r--  indra/llcommon/llapr.cpp | 2
-rw-r--r--  indra/llcommon/llassettype.cpp | 1
-rw-r--r--  indra/llcommon/llassettype.h | 5
-rw-r--r--  indra/llcommon/llcallstack.h | 12
-rw-r--r--  indra/llcommon/llcommon.cpp | 29
-rw-r--r--  indra/llcommon/llcoros.cpp | 1
-rw-r--r--  indra/llcommon/llerror.cpp | 14
-rw-r--r--  indra/llcommon/llerror.h | 27
-rw-r--r--  indra/llcommon/lleventapi.cpp | 8
-rw-r--r--  indra/llcommon/lleventapi.h | 32
-rw-r--r--  indra/llcommon/lleventdispatcher.cpp | 619
-rw-r--r--  indra/llcommon/lleventdispatcher.h | 841
-rw-r--r--  indra/llcommon/lleventfilter.h | 47
-rw-r--r--  indra/llcommon/llevents.cpp | 71
-rw-r--r--  indra/llcommon/llevents.h | 43
-rw-r--r--  indra/llcommon/llframetimer.cpp | 5
-rw-r--r--  indra/llcommon/llinstancetracker.h | 97
-rw-r--r--  indra/llcommon/llinstancetrackersubclass.h | 98
-rw-r--r--  indra/llcommon/llleap.cpp | 27
-rw-r--r--  indra/llcommon/llleaplistener.cpp | 70
-rw-r--r--  indra/llcommon/llmemory.cpp | 45
-rw-r--r--  indra/llcommon/llmutex.h | 5
-rw-r--r--  indra/llcommon/llpointer.h | 24
-rw-r--r--  indra/llcommon/llprofiler.h | 46
-rw-r--r--  indra/llcommon/llprofilercategories.h | 2
-rw-r--r--  indra/llcommon/llptrto.h | 88
-rw-r--r--  indra/llcommon/llqueuedthread.cpp | 303
-rw-r--r--  indra/llcommon/llqueuedthread.h | 51
-rw-r--r--  indra/llcommon/llsdserialize.cpp | 6
-rw-r--r--  indra/llcommon/llsdserialize_xml.cpp | 2
-rw-r--r--  indra/llcommon/llsdutil.cpp | 35
-rw-r--r--  indra/llcommon/llsdutil.h | 150
-rw-r--r--  indra/llcommon/llsys.cpp | 24
-rw-r--r--  indra/llcommon/llsys.h | 5
-rw-r--r--  indra/llcommon/llthread.cpp | 10
-rw-r--r--  indra/llcommon/lltimer.cpp | 43
-rw-r--r--  indra/llcommon/lluuid.cpp | 1423
-rw-r--r--  indra/llcommon/llworkerthread.cpp | 29
-rw-r--r--  indra/llcommon/llworkerthread.h | 11
-rw-r--r--  indra/llcommon/tests/apply_test.cpp | 240
-rw-r--r--  indra/llcommon/tests/lazyeventapi_test.cpp | 136
-rw-r--r--  indra/llcommon/tests/lleventdispatcher_test.cpp | 569
-rw-r--r--  indra/llcommon/tests/workqueue_test.cpp | 26
-rw-r--r--  indra/llcommon/tests/wrapllerrs.h | 5
-rw-r--r--  indra/llcommon/threadpool.cpp | 122
-rw-r--r--  indra/llcommon/threadpool.h | 80
-rw-r--r--  indra/llcommon/threadpool_fwd.h | 25
-rw-r--r--  indra/llcommon/workqueue.cpp | 179
-rw-r--r--  indra/llcommon/workqueue.h | 441
58 files changed, 5113 insertions, 1877 deletions
diff --git a/indra/llcommon/CMakeLists.txt b/indra/llcommon/CMakeLists.txt
index ef4899978e..5f4ed2fffa 100644
--- a/indra/llcommon/CMakeLists.txt
+++ b/indra/llcommon/CMakeLists.txt
@@ -16,7 +16,10 @@ include(Tracy)
set(llcommon_SOURCE_FILES
+ apply.cpp
+ commoncontrol.cpp
indra_constants.cpp
+ lazyeventapi.cpp
llallocator.cpp
llallocator_heap_profile.cpp
llapp.cpp
@@ -114,11 +117,16 @@ set(llcommon_SOURCE_FILES
set(llcommon_HEADER_FILES
CMakeLists.txt
+ always_return.h
+ apply.h
chrono.h
classic_callback.h
+ commoncontrol.h
ctype_workaround.h
fix_macros.h
+ function_types.h
indra_constants.h
+ lazyeventapi.h
linden_common.h
llalignedarray.h
llallocator.h
@@ -172,6 +180,7 @@ set(llcommon_HEADER_FILES
llinitdestroyclass.h
llinitparam.h
llinstancetracker.h
+ llinstancetrackersubclass.h
llkeybind.h
llkeythrottle.h
llleap.h
@@ -245,6 +254,7 @@ set(llcommon_HEADER_FILES
stdtypes.h
stringize.h
threadpool.h
+ threadpool_fwd.h
threadsafeschedule.h
timer.h
tuple.h
@@ -288,9 +298,11 @@ if (LL_TESTS)
#set(TEST_DEBUG on)
set(test_libs llcommon)
+ LL_ADD_INTEGRATION_TEST(apply "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(bitpack "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(classic_callback "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(commonmisc "" "${test_libs}")
+ LL_ADD_INTEGRATION_TEST(lazyeventapi "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(llbase64 "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(llcond "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(lldate "" "${test_libs}")
diff --git a/indra/llcommon/always_return.h b/indra/llcommon/always_return.h
new file mode 100644
index 0000000000..6b9f1fdeaf
--- /dev/null
+++ b/indra/llcommon/always_return.h
@@ -0,0 +1,124 @@
+/**
+ * @file always_return.h
+ * @author Nat Goodspeed
+ * @date 2023-01-20
+ * @brief Call specified callable with arbitrary arguments, but always return
+ * specified type.
+ *
+ * $LicenseInfo:firstyear=2023&license=viewerlgpl$
+ * Copyright (c) 2023, Linden Research, Inc.
+ * $/LicenseInfo$
+ */
+
+#if ! defined(LL_ALWAYS_RETURN_H)
+#define LL_ALWAYS_RETURN_H
+
+#include <type_traits> // std::enable_if, std::is_convertible
+
+namespace LL
+{
+
+#if __cpp_lib_is_invocable >= 201703L // C++17
+ template <typename CALLABLE, typename... ARGS>
+ using invoke_result = std::invoke_result<CALLABLE, ARGS...>;
+#else // C++14
+ template <typename CALLABLE, typename... ARGS>
+ using invoke_result = std::result_of<CALLABLE(ARGS...)>;
+#endif // C++14
+
+ /**
+ * AlwaysReturn<T>()(some_function, some_args...) calls
+ * some_function(some_args...). It is guaranteed to return a value of type
+ * T, regardless of the return type of some_function(). If some_function()
+ * returns a type convertible to T, it will convert and return that value.
+ * Otherwise (notably if some_function() is void), AlwaysReturn returns
+ * T().
+ *
+ * When some_function() returns a type not convertible to T, if
+ * you want AlwaysReturn to return some T value other than
+ * default-constructed T(), pass that value to AlwaysReturn's constructor.
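+ *
+ * A minimal usage sketch (hypothetical lambdas):
+ * @code
+ * // void-returning callable: get the explicit default, true
+ * bool ok = AlwaysReturn<bool>(true)([](int){}, 17);
+ * // int-returning callable: 0 converts to bool false
+ * bool no = AlwaysReturn<bool>()([]{ return 0; });
+ * @endcode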
+ */
+ template <typename DESIRED>
+ class AlwaysReturn
+ {
+ public:
+ /// pass explicit default value if other than default-constructed type
+ AlwaysReturn(const DESIRED& dft=DESIRED()): mDefault(dft) {}
+
+ // callable returns a type not convertible to DESIRED, return default
+ template <typename CALLABLE, typename... ARGS,
+ typename std::enable_if<
+ ! std::is_convertible<
+ typename invoke_result<CALLABLE, ARGS...>::type,
+ DESIRED
+ >::value,
+ bool
+ >::type=true>
+ DESIRED operator()(CALLABLE&& callable, ARGS&&... args)
+ {
+ // discard whatever callable(args) returns
+ std::forward<CALLABLE>(callable)(std::forward<ARGS>(args)...);
+ return mDefault;
+ }
+
+ // callable returns a type convertible to DESIRED
+ template <typename CALLABLE, typename... ARGS,
+ typename std::enable_if<
+ std::is_convertible<
+ typename invoke_result<CALLABLE, ARGS...>::type,
+ DESIRED
+ >::value,
+ bool
+ >::type=true>
+ DESIRED operator()(CALLABLE&& callable, ARGS&&... args)
+ {
+ return { std::forward<CALLABLE>(callable)(std::forward<ARGS>(args)...) };
+ }
+
+ private:
+ DESIRED mDefault;
+ };
+
+ /**
+ * always_return<T>(some_function, some_args...) calls
+ * some_function(some_args...). It is guaranteed to return a value of type
+ * T, regardless of the return type of some_function(). If some_function()
+ * returns a type convertible to T, it will convert and return that value.
+ * Otherwise (notably if some_function() is void), always_return() returns
+ * T().
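+ *
+ * For example (hypothetical lambda):
+ * @code
+ * // the lambda returns void, so n is default-constructed: 0
+ * int n = always_return<int>([]{});
+ * @endcode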
+ */
+ template <typename DESIRED, typename CALLABLE, typename... ARGS>
+ DESIRED always_return(CALLABLE&& callable, ARGS&&... args)
+ {
+ return AlwaysReturn<DESIRED>()(std::forward<CALLABLE>(callable),
+ std::forward<ARGS>(args)...);
+ }
+
+ /**
+ * make_always_return<T>(some_function) returns a callable which, when
+ * called with appropriate some_function() arguments, always returns a
+ * value of type T, regardless of the return type of some_function(). If
+ * some_function() returns a type convertible to T, the returned callable
+ * will convert and return that value. Otherwise (notably if
+ * some_function() is void), the returned callable returns T().
+ *
+ * When some_function() returns a type not convertible to T, if
+ * you want the returned callable to return some T value other than
+ * default-constructed T(), pass that value to make_always_return() as its
+ * optional second argument.
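+ *
+ * Sketch (hypothetical handler):
+ * @code
+ * // wrap a void handler so callers always get back a bool, default true
+ * auto wrapped = make_always_return<bool>([](const char*){}, true);
+ * bool ok = wrapped("hello");   // ok == true
+ * @endcode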
+ */
+ template <typename DESIRED, typename CALLABLE>
+ auto make_always_return(CALLABLE&& callable, const DESIRED& dft=DESIRED())
+ {
+ return
+ [dft, callable = std::forward<CALLABLE>(callable)]
+ (auto&&... args)
+ {
+ return AlwaysReturn<DESIRED>(dft)(callable,
+ std::forward<decltype(args)>(args)...);
+ };
+ }
+
+} // namespace LL
+
+#endif /* ! defined(LL_ALWAYS_RETURN_H) */
diff --git a/indra/llcommon/apply.cpp b/indra/llcommon/apply.cpp
new file mode 100644
index 0000000000..417e23d3b4
--- /dev/null
+++ b/indra/llcommon/apply.cpp
@@ -0,0 +1,29 @@
+/**
+ * @file apply.cpp
+ * @author Nat Goodspeed
+ * @date 2022-12-21
+ * @brief Implementation for apply.
+ *
+ * $LicenseInfo:firstyear=2022&license=viewerlgpl$
+ * Copyright (c) 2022, Linden Research, Inc.
+ * $/LicenseInfo$
+ */
+
+// Precompiled header
+#include "linden_common.h"
+// associated header
+#include "apply.h"
+// STL headers
+// std headers
+// external library headers
+// other Linden headers
+#include "stringize.h"
+
+void LL::apply_validate_size(size_t size, size_t arity)
+{
+ if (size != arity)
+ {
+ LLTHROW(apply_error(stringize("LL::apply(func(", arity, " args), "
+ "std::vector(", size, " elements))")));
+ }
+}
diff --git a/indra/llcommon/apply.h b/indra/llcommon/apply.h
index 7c58d63bc0..cf6161ed50 100644
--- a/indra/llcommon/apply.h
+++ b/indra/llcommon/apply.h
@@ -12,8 +12,11 @@
#if ! defined(LL_APPLY_H)
#define LL_APPLY_H
+#include "llexception.h"
#include <boost/type_traits/function_traits.hpp>
+#include <functional> // std::mem_fn()
#include <tuple>
+#include <type_traits> // std::is_member_pointer
namespace LL
{
@@ -54,20 +57,67 @@ namespace LL
}, \
(ARGS))
-#if __cplusplus >= 201703L
+/*****************************************************************************
+* invoke()
+*****************************************************************************/
+#if __cpp_lib_invoke >= 201411L
// C++17 implementation
-using std::apply;
+using std::invoke;
+
+#else // no std::invoke
+
+// Use invoke() to handle pointer-to-method:
+// derived from https://stackoverflow.com/a/38288251
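+//
+// For example (hypothetical names): LL::invoke(&Foo::method, &foo, arg)
+// routes through std::mem_fn() to call foo.method(arg), while
+// LL::invoke(free_function, arg) simply calls free_function(arg).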
+template<typename Fn, typename... Args,
+ typename std::enable_if<std::is_member_pointer<typename std::decay<Fn>::type>::value,
+ int>::type = 0 >
+auto invoke(Fn&& f, Args&&... args)
+{
+ return std::mem_fn(std::forward<Fn>(f))(std::forward<Args>(args)...);
+}
+
+template<typename Fn, typename... Args,
+ typename std::enable_if<!std::is_member_pointer<typename std::decay<Fn>::type>::value,
+ int>::type = 0 >
+auto invoke(Fn&& f, Args&&... args)
+{
+ return std::forward<Fn>(f)(std::forward<Args>(args)...);
+}
+
+#endif // no std::invoke
+
+/*****************************************************************************
+* apply(function, tuple); apply(function, array)
+*****************************************************************************/
+#if __cpp_lib_apply >= 201603L
+
+// C++17 implementation
+// We don't just say 'using std::apply;' because that template is too general:
+// it also picks up the apply(function, vector) case, which we want to handle
+// below.
+template <typename CALLABLE, typename... ARGS>
+auto apply(CALLABLE&& func, const std::tuple<ARGS...>& args)
+{
+ return std::apply(std::forward<CALLABLE>(func), args);
+}
#else // C++14
// Derived from https://stackoverflow.com/a/20441189
// and https://en.cppreference.com/w/cpp/utility/apply
-template <typename CALLABLE, typename TUPLE, std::size_t... I>
-auto apply_impl(CALLABLE&& func, TUPLE&& args, std::index_sequence<I...>)
+template <typename CALLABLE, typename... ARGS, std::size_t... I>
+auto apply_impl(CALLABLE&& func, const std::tuple<ARGS...>& args, std::index_sequence<I...>)
{
+ // We accept const std::tuple& so a caller can construct a tuple on the
+ // fly. But std::get<I>(const tuple) adds a const qualifier to everything
+ // it extracts. Get a non-const ref to this tuple so we can extract
+ // without the extraneous const.
+ auto& non_const_args{ const_cast<std::tuple<ARGS...>&>(args) };
+
// call func(unpacked args)
- return std::forward<CALLABLE>(func)(std::move(std::get<I>(args))...);
+ return invoke(std::forward<CALLABLE>(func),
+ std::forward<ARGS>(std::get<I>(non_const_args))...);
}
template <typename CALLABLE, typename... ARGS>
@@ -81,6 +131,8 @@ auto apply(CALLABLE&& func, const std::tuple<ARGS...>& args)
std::index_sequence_for<ARGS...>{});
}
+#endif // C++14
+
// per https://stackoverflow.com/a/57510428/5533635
template <typename CALLABLE, typename T, size_t SIZE>
auto apply(CALLABLE&& func, const std::array<T, SIZE>& args)
@@ -88,28 +140,92 @@ auto apply(CALLABLE&& func, const std::array<T, SIZE>& args)
return apply(std::forward<CALLABLE>(func), std::tuple_cat(args));
}
+/*****************************************************************************
+* bind_front()
+*****************************************************************************/
+// To invoke a non-static member function with a tuple, you need a callable
+// that binds your member function with an instance pointer or reference.
+// std::bind_front() is perfect: std::bind_front(&cls::method, instance).
+// Unfortunately bind_front() only enters the standard library in C++20.
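+//
+// Sketch (hypothetical class):
+//     struct Greeter { void greet(const std::string& name); };
+//     Greeter g;
+//     auto hello = LL::bind_front(&Greeter::greet, &g);
+//     hello("world");   // calls g.greet("world")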
+#if __cpp_lib_bind_front >= 201907L
+
+// C++20 implementation
+using std::bind_front;
+
+#else // no std::bind_front()
+
+template<typename Fn, typename... Args,
+ typename std::enable_if<!std::is_member_pointer<typename std::decay<Fn>::type>::value,
+ int>::type = 0 >
+auto bind_front(Fn&& f, Args&&... args)
+{
+ // Don't use perfect forwarding for f or args: we must bind them for later.
+ return [f, pfx_args=std::make_tuple(args...)]
+ (auto&&... sfx_args)
+ {
+ // Use perfect forwarding for sfx_args because we use them as soon as
+ // we receive them.
+ return apply(
+ f,
+ std::tuple_cat(pfx_args,
+ std::make_tuple(std::forward<decltype(sfx_args)>(sfx_args)...)));
+ };
+}
+
+template<typename Fn, typename... Args,
+ typename std::enable_if<std::is_member_pointer<typename std::decay<Fn>::type>::value,
+ int>::type = 0 >
+auto bind_front(Fn&& f, Args&&... args)
+{
+ return bind_front(std::mem_fn(std::forward<Fn>(f)), std::forward<Args>(args)...);
+}
+
+#endif // C++20 with std::bind_front()
+
+/*****************************************************************************
+* apply(function, std::vector)
+*****************************************************************************/
// per https://stackoverflow.com/a/28411055/5533635
template <typename CALLABLE, typename T, std::size_t... I>
auto apply_impl(CALLABLE&& func, const std::vector<T>& args, std::index_sequence<I...>)
{
+ return apply(std::forward<CALLABLE>(func),
+ std::make_tuple(args[I]...));
+}
+
+// produce suitable error if apply(func, vector) is the wrong size for func()
+void apply_validate_size(size_t size, size_t arity);
+
+/// possible exception from apply() validation
+struct apply_error: public LLException
+{
+ apply_error(const std::string& what): LLException(what) {}
+};
+
+template <size_t ARITY, typename CALLABLE, typename T>
+auto apply_n(CALLABLE&& func, const std::vector<T>& args)
+{
+ apply_validate_size(args.size(), ARITY);
return apply_impl(std::forward<CALLABLE>(func),
- std::make_tuple(std::forward<T>(args[I])...),
- I...);
+ args,
+ std::make_index_sequence<ARITY>());
}
-// this goes beyond C++17 std::apply()
+/**
+ * apply(function, std::vector) goes beyond C++17 std::apply(). For this case
+ * @a function @em cannot be variadic: the compiler must know at compile
+ * time how many arguments to pass. This isn't Python. (But see apply_n() to
+ * pass a specific number of args to a variadic function.)
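+ *
+ * Sketch (hypothetical function):
+ * @code
+ * int sum3(int a, int b, int c) { return a + b + c; }
+ * std::vector<int> args{ 1, 2, 3 };
+ * int total = apply(sum3, args);   // calls sum3(1, 2, 3)
+ * // a size mismatch between args and sum3's arity throws apply_error
+ * @endcode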
+ */
template <typename CALLABLE, typename T>
auto apply(CALLABLE&& func, const std::vector<T>& args)
{
+ // infer arity from the definition of func
constexpr auto arity = boost::function_traits<CALLABLE>::arity;
- assert(args.size() == arity);
- return apply_impl(std::forward<CALLABLE>(func),
- args,
- std::make_index_sequence<arity>());
+ // now that we have a compile-time arity, apply_n() works
+ return apply_n<arity>(std::forward<CALLABLE>(func), args);
}
-#endif // C++14
-
} // namespace LL
#endif /* ! defined(LL_APPLY_H) */
diff --git a/indra/llcommon/commoncontrol.cpp b/indra/llcommon/commoncontrol.cpp
new file mode 100644
index 0000000000..81e66baf8c
--- /dev/null
+++ b/indra/llcommon/commoncontrol.cpp
@@ -0,0 +1,106 @@
+/**
+ * @file commoncontrol.cpp
+ * @author Nat Goodspeed
+ * @date 2022-06-08
+ * @brief Implementation for commoncontrol.
+ *
+ * $LicenseInfo:firstyear=2022&license=viewerlgpl$
+ * Copyright (c) 2022, Linden Research, Inc.
+ * $/LicenseInfo$
+ */
+
+// Precompiled header
+#include "linden_common.h"
+// associated header
+#include "commoncontrol.h"
+// STL headers
+// std headers
+// external library headers
+// other Linden headers
+#include "llevents.h"
+#include "llsdutil.h"
+
+LLSD LL::CommonControl::access(const LLSD& params)
+{
+ // We can't actually introduce a link-time dependency on llxml, or on any
+ // global LLControlGroup (*koff* gSavedSettings *koff*) but we can issue a
+ // runtime query. If we're running as part of a viewer with
+ // LLViewerControlListener, we can use that to interact with any
+ // instantiated LLControlGroup.
+ LLSD response;
+ {
+ LLEventStream reply("reply");
+ LLTempBoundListener connection = reply.listen("listener",
+ [&response] (const LLSD& event)
+ {
+ response = event;
+ return false;
+ });
+ LLSD rparams{ params };
+ rparams["reply"] = reply.getName();
+ LLEventPumps::instance().obtain("LLViewerControl").post(rparams);
+ }
+ // LLViewerControlListener responds immediately. If it's listening at all,
+ // it will already have set response.
+ if (! response.isDefined())
+ {
+ LLTHROW(NoListener("No LLViewerControl listener instantiated"));
+ }
+ LLSD error{ response["error"] };
+ if (error.isDefined())
+ {
+ LLTHROW(ParamError(error));
+ }
+ response.erase("error");
+ response.erase("reqid");
+ return response;
+}
+
+/// set control group.key to defined default value
+LLSD LL::CommonControl::set_default(const std::string& group, const std::string& key)
+{
+ return access(llsd::map("op", "set",
+ "group", group, "key", key))["value"];
+}
+
+/// set control group.key to specified value
+LLSD LL::CommonControl::set(const std::string& group, const std::string& key, const LLSD& value)
+{
+ return access(llsd::map("op", "set",
+ "group", group, "key", key, "value", value))["value"];
+}
+
+/// toggle boolean control group.key
+LLSD LL::CommonControl::toggle(const std::string& group, const std::string& key)
+{
+ return access(llsd::map("op", "toggle",
+ "group", group, "key", key))["value"];
+}
+
+/// get the definition for control group.key, (! isDefined()) if bad
+/// ["name"], ["type"], ["value"], ["comment"]
+LLSD LL::CommonControl::get_def(const std::string& group, const std::string& key)
+{
+ return access(llsd::map("op", "get",
+ "group", group, "key", key));
+}
+
+/// get the value of control group.key
+LLSD LL::CommonControl::get(const std::string& group, const std::string& key)
+{
+ return access(llsd::map("op", "get",
+ "group", group, "key", key))["value"];
+}
+
+/// get defined groups
+std::vector<std::string> LL::CommonControl::get_groups()
+{
+ auto groups{ access(llsd::map("op", "groups"))["groups"] };
+ return { groups.beginArray(), groups.endArray() };
+}
+
+/// get definitions for all variables in group
+LLSD LL::CommonControl::get_vars(const std::string& group)
+{
+ return access(llsd::map("op", "vars", "group", group))["vars"];
+}
diff --git a/indra/llcommon/commoncontrol.h b/indra/llcommon/commoncontrol.h
new file mode 100644
index 0000000000..07d4a45ac5
--- /dev/null
+++ b/indra/llcommon/commoncontrol.h
@@ -0,0 +1,75 @@
+/**
+ * @file commoncontrol.h
+ * @author Nat Goodspeed
+ * @date 2022-06-08
+ * @brief Access LLViewerControl LLEventAPI, if process has one.
+ *
+ * $LicenseInfo:firstyear=2022&license=viewerlgpl$
+ * Copyright (c) 2022, Linden Research, Inc.
+ * $/LicenseInfo$
+ */
+
+#if ! defined(LL_COMMONCONTROL_H)
+#define LL_COMMONCONTROL_H
+
+#include <vector>
+#include "llexception.h"
+#include "llsd.h"
+
+namespace LL
+{
+ class CommonControl
+ {
+ public:
+ struct Error: public LLException
+ {
+ Error(const std::string& what): LLException(what) {}
+ };
+
+ /// Exception thrown if there's no LLViewerControl LLEventAPI
+ struct NoListener: public Error
+ {
+ NoListener(const std::string& what): Error(what) {}
+ };
+
+ struct ParamError: public Error
+ {
+ ParamError(const std::string& what): Error(what) {}
+ };
+
+ /// set control group.key to defined default value
+ static
+ LLSD set_default(const std::string& group, const std::string& key);
+
+ /// set control group.key to specified value
+ static
+ LLSD set(const std::string& group, const std::string& key, const LLSD& value);
+
+ /// toggle boolean control group.key
+ static
+ LLSD toggle(const std::string& group, const std::string& key);
+
+ /// get the definition for control group.key, (! isDefined()) if bad
+ /// ["name"], ["type"], ["value"], ["comment"]
+ static
+ LLSD get_def(const std::string& group, const std::string& key);
+
+ /// get the value of control group.key
+ static
+ LLSD get(const std::string& group, const std::string& key);
+
+ /// get defined groups
+ static
+ std::vector<std::string> get_groups();
+
+ /// get definitions for all variables in group
+ static
+ LLSD get_vars(const std::string& group);
+
+ private:
+ static
+ LLSD access(const LLSD& params);
+ };
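+
+ // Usage sketch (setting name is hypothetical; requires a process that
+ // instantiates LLViewerControlListener, e.g. the viewer):
+ //     bool shown = LL::CommonControl::get("Global", "ShowDebugInfo").asBoolean();
+ //     LL::CommonControl::set("Global", "ShowDebugInfo", true);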
+} // namespace LL
+
+#endif /* ! defined(LL_COMMONCONTROL_H) */
diff --git a/indra/llcommon/function_types.h b/indra/llcommon/function_types.h
new file mode 100644
index 0000000000..3f42f6d640
--- /dev/null
+++ b/indra/llcommon/function_types.h
@@ -0,0 +1,49 @@
+/**
+ * @file function_types.h
+ * @author Nat Goodspeed
+ * @date 2023-01-20
+ * @brief Extend boost::function_types to examine boost::function and
+ * std::function
+ *
+ * $LicenseInfo:firstyear=2023&license=viewerlgpl$
+ * Copyright (c) 2023, Linden Research, Inc.
+ * $/LicenseInfo$
+ */
+
+#if ! defined(LL_FUNCTION_TYPES_H)
+#define LL_FUNCTION_TYPES_H
+
+#include <boost/function.hpp>
+#include <boost/function_types/function_arity.hpp>
+#include <functional>
+
+namespace LL
+{
+
+ template <typename F>
+ struct function_arity_impl
+ {
+ static constexpr auto value = boost::function_types::function_arity<F>::value;
+ };
+
+ template <typename F>
+ struct function_arity_impl<std::function<F>>
+ {
+ static constexpr auto value = function_arity_impl<F>::value;
+ };
+
+ template <typename F>
+ struct function_arity_impl<boost::function<F>>
+ {
+ static constexpr auto value = function_arity_impl<F>::value;
+ };
+
+ template <typename F>
+ struct function_arity
+ {
+ static constexpr auto value = function_arity_impl<typename std::decay<F>::type>::value;
+ };
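+
+ // e.g. (illustrative): function_arity<void(int, int)>::value == 2,
+ // function_arity<std::function<bool(int)>>::value == 1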
+
+} // namespace LL
+
+#endif /* ! defined(LL_FUNCTION_TYPES_H) */
diff --git a/indra/llcommon/lazyeventapi.cpp b/indra/llcommon/lazyeventapi.cpp
new file mode 100644
index 0000000000..028af9f33f
--- /dev/null
+++ b/indra/llcommon/lazyeventapi.cpp
@@ -0,0 +1,72 @@
+/**
+ * @file lazyeventapi.cpp
+ * @author Nat Goodspeed
+ * @date 2022-06-17
+ * @brief Implementation for lazyeventapi.
+ *
+ * $LicenseInfo:firstyear=2022&license=viewerlgpl$
+ * Copyright (c) 2022, Linden Research, Inc.
+ * $/LicenseInfo$
+ */
+
+// Precompiled header
+#include "linden_common.h"
+// associated header
+#include "lazyeventapi.h"
+// STL headers
+// std headers
+#include <algorithm> // std::find_if
+// external library headers
+// other Linden headers
+#include "llevents.h"
+#include "llsdutil.h"
+
+LL::LazyEventAPIBase::LazyEventAPIBase(
+ const std::string& name, const std::string& desc, const std::string& field)
+{
+ // populate embedded LazyEventAPIParams instance
+ mParams.name = name;
+ mParams.desc = desc;
+ mParams.field = field;
+ // mParams.init and mOperations are populated by subsequent add() calls.
+
+ // Our raison d'etre: register as an LLEventPumps::PumpFactory
+ // so obtain() will notice any request for this name and call us.
+ // Of course, our subclass constructor must finish running (making add()
+ // calls) before mParams will be fully populated, but we expect that to
+ // happen well before the first LLEventPumps::obtain(name) call.
+ mRegistered = LLEventPumps::instance().registerPumpFactory(
+ name,
+ [this](const std::string& name){ return construct(name); });
+}
+
+LL::LazyEventAPIBase::~LazyEventAPIBase()
+{
+ // If our constructor's registerPumpFactory() call was unsuccessful, that
+ // probably means somebody else claimed the name first. If that's the
+ // case, do NOT unregister their name out from under them!
+ // If this is a static instance being destroyed at process shutdown,
+ // LLEventPumps will probably have been cleaned up already.
+ if (mRegistered && ! LLEventPumps::wasDeleted())
+ {
+ // unregister the callback to this doomed instance
+ LLEventPumps::instance().unregisterPumpFactory(mParams.name);
+ }
+}
+
+LLSD LL::LazyEventAPIBase::getMetadata(const std::string& name) const
+{
+ // Since mOperations is a vector rather than a map, just search.
+ auto found = std::find_if(mOperations.begin(), mOperations.end(),
+ [&name](const auto& namedesc)
+ { return (namedesc.first == name); });
+ if (found == mOperations.end())
+ return {};
+
+ // LLEventDispatcher() supplements the returned metadata in different
+ // ways, depending on metadata provided to the specific add() method.
+ // Don't try to emulate all that. At some point we might consider more
+ // closely unifying LLEventDispatcher machinery with LazyEventAPI, but for
+ // now this will have to do.
+ return llsd::map("name", found->first, "desc", found->second);
+}
diff --git a/indra/llcommon/lazyeventapi.h b/indra/llcommon/lazyeventapi.h
new file mode 100644
index 0000000000..e36831270b
--- /dev/null
+++ b/indra/llcommon/lazyeventapi.h
@@ -0,0 +1,205 @@
+/**
+ * @file lazyeventapi.h
+ * @author Nat Goodspeed
+ * @date 2022-06-16
+ * @brief Declaring a static module-scope LazyEventAPI registers a specific
+ * LLEventAPI for future on-demand instantiation.
+ *
+ * $LicenseInfo:firstyear=2022&license=viewerlgpl$
+ * Copyright (c) 2022, Linden Research, Inc.
+ * $/LicenseInfo$
+ */
+
+#if ! defined(LL_LAZYEVENTAPI_H)
+#define LL_LAZYEVENTAPI_H
+
+#include "apply.h"
+#include "lleventapi.h"
+#include "llinstancetracker.h"
+#include <boost/signals2/signal.hpp>
+#include <string>
+#include <tuple>
+#include <utility> // std::pair
+#include <vector>
+
+namespace LL
+{
+ /**
+ * Bundle params we want to pass to LLEventAPI's protected constructor. We
+ * package them this way so a subclass constructor can simply forward an
+ * opaque reference to the LLEventAPI constructor.
+ */
+ // This is a class instead of a plain struct mostly so when we forward-
+ // declare it we don't have to remember the distinction.
+ class LazyEventAPIParams
+ {
+ public:
+ // package the parameters used by the normal LLEventAPI constructor
+ std::string name, desc, field;
+ // bundle LLEventAPI::add() calls collected by LazyEventAPI::add(), so
+ // the special LLEventAPI constructor we engage can "play back" those
+ // add() calls
+ boost::signals2::signal<void(LLEventAPI*)> init;
+ };
+
+ /**
+ * LazyEventAPIBase implements most of the functionality of LazyEventAPI
+ * (q.v.), but we need the LazyEventAPI template subclass so we can accept
+ * the specific LLEventAPI subclass type.
+ */
+ // No LLInstanceTracker key: we don't need to find a specific instance,
+ // LLLeapListener just needs to be able to enumerate all instances.
+ class LazyEventAPIBase: public LLInstanceTracker<LazyEventAPIBase>
+ {
+ public:
+ LazyEventAPIBase(const std::string& name, const std::string& desc,
+ const std::string& field);
+ virtual ~LazyEventAPIBase();
+
+ // Do not copy or move: once constructed, LazyEventAPIBase must stay
+ // put: we bind its instance pointer into a callback.
+ LazyEventAPIBase(const LazyEventAPIBase&) = delete;
+ LazyEventAPIBase(LazyEventAPIBase&&) = delete;
+ LazyEventAPIBase& operator=(const LazyEventAPIBase&) = delete;
+ LazyEventAPIBase& operator=(LazyEventAPIBase&&) = delete;
+
+ // capture add() calls we want to play back on LLEventAPI construction
+ template <typename... ARGS>
+ void add(const std::string& name, const std::string& desc, ARGS&&... rest)
+ {
+ // capture the metadata separately
+ mOperations.push_back(std::make_pair(name, desc));
+ // Use connect_extended() so the lambda is passed its own
+ // connection.
+
+ // apply() can't accept a template per se; it needs a particular
+ // specialization. Specialize out here to work around a clang bug:
+ // https://github.com/llvm/llvm-project/issues/41999
+ auto func{ &LazyEventAPIBase::add_trampoline
+ <const std::string&, const std::string&, ARGS...> };
+ // We can't bind an unexpanded parameter pack into a lambda --
+ // shame really. Instead, capture all our args as a std::tuple and
+ // then, in the lambda, use apply() to pass to add_trampoline().
+ auto args{ std::make_tuple(name, desc, std::forward<ARGS>(rest)...) };
+
+ mParams.init.connect_extended(
+ [func, args]
+ (const boost::signals2::connection& conn, LLEventAPI* instance)
+ {
+ // we only need this connection once
+ conn.disconnect();
+ // apply() expects a tuple specifying ALL the arguments,
+ // so prepend instance.
+ apply(func, std::tuple_cat(std::make_tuple(instance), args));
+ });
+ }
+
+ // The following queries mimic the LLEventAPI / LLEventDispatcher
+ // query API.
+
+ // Get the string name of the subject LLEventAPI
+ std::string getName() const { return mParams.name; }
+ // Get the documentation string
+ std::string getDesc() const { return mParams.desc; }
+ // Retrieve the LLSD key we use for dispatching
+ std::string getDispatchKey() const { return mParams.field; }
+
+ // operations
+ using NameDesc = std::pair<std::string, std::string>;
+
+ private:
+ // metadata that might be queried by LLLeapListener
+ std::vector<NameDesc> mOperations;
+
+ public:
+ using const_iterator = decltype(mOperations)::const_iterator;
+ const_iterator begin() const { return mOperations.begin(); }
+ const_iterator end() const { return mOperations.end(); }
+ LLSD getMetadata(const std::string& name) const;
+
+ protected:
+ // Params with which to instantiate the companion LLEventAPI subclass
+ LazyEventAPIParams mParams;
+
+ private:
+ // true if we successfully registered our LLEventAPI on construction
+ bool mRegistered;
+
+ // actually instantiate the companion LLEventAPI subclass
+ virtual LLEventPump* construct(const std::string& name) = 0;
+
+ // Passing an overloaded function to any function that accepts an
+ // arbitrary callable is a PITB because you have to specify the
+ // correct overload. What we want is for the compiler to select the
+ // correct overload, based on the carefully-wrought enable_ifs in
+ // LLEventDispatcher. This (one and only) add_trampoline() method
+ // exists solely to pass to LL::apply(). Once add_trampoline() is
+ // called with the expanded arguments, we hope the compiler will Do
+ // The Right Thing in selecting the correct LLEventAPI::add()
+ // overload.
+ template <typename... ARGS>
+ static
+ void add_trampoline(LLEventAPI* instance, ARGS&&... args)
+ {
+ instance->add(std::forward<ARGS>(args)...);
+ }
+ };
+
+ /**
+ * LazyEventAPI provides a way to register a particular LLEventAPI to be
+ * instantiated on demand, that is, when its name is passed to
+ * LLEventPumps::obtain().
+ *
+ * Derive your listener from LLEventAPI as usual, with its various
+ * operation methods, but code your constructor to accept
+ * <tt>(const LL::LazyEventAPIParams& params)</tt>
+ * and forward that reference to (the protected)
+ * <tt>LLEventAPI(const LL::LazyEventAPIParams&)</tt> constructor.
+ *
+ * Then derive your listener registrar from
+ * <tt>LazyEventAPI<your LLEventAPI subclass></tt>. The constructor should
+ * look very like a traditional LLEventAPI constructor:
+ *
+ * * pass (name, desc [, field]) to LazyEventAPI's constructor
+ * * in the body, make a series of add() calls referencing your LLEventAPI
+ * subclass methods.
+ *
+ * You may use any LLEventAPI::add() methods, that is, any
+ * LLEventDispatcher::add() methods. But the target methods you pass to
+ * add() must belong to your LLEventAPI subclass, not the LazyEventAPI
+ * subclass.
+ *
+ * Declare a static instance of your LazyEventAPI listener registrar
+ * class. When it's constructed at static initialization time, it will
+ * register your LLEventAPI subclass with LLEventPumps. It will also
+ * collect metadata for the LLEventAPI and its operations to provide to
+ * LLLeapListener's introspection queries.
+ *
+ * When someone later calls LLEventPumps::obtain() to post an event to
+ * your LLEventAPI subclass, obtain() will instantiate it using
+ * LazyEventAPI's name, desc, field and add() calls.
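+ *
+ * A condensed sketch (class names are hypothetical):
+ * @code
+ * class MyAPI: public LLEventAPI
+ * {
+ * public:
+ *     MyAPI(const LL::LazyEventAPIParams& params): LLEventAPI(params) {}
+ *     void ping(const LLSD& event);
+ * };
+ *
+ * class MyRegistrar: public LL::LazyEventAPI<MyAPI>
+ * {
+ * public:
+ *     MyRegistrar(): LL::LazyEventAPI<MyAPI>("MyAPI", "example API")
+ *     {
+ *         add("ping", "reply to ping", &MyAPI::ping);
+ *     }
+ * };
+ * // a static instance registers MyAPI for on-demand instantiation
+ * static MyRegistrar registrar;
+ * @endcode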
+ */
+ template <class EVENTAPI>
+ class LazyEventAPI: public LazyEventAPIBase
+ {
+ public:
+ // for subclass constructor to reference handler methods
+ using listener = EVENTAPI;
+
+ LazyEventAPI(const std::string& name, const std::string& desc,
+ const std::string& field="op"):
+ // Forward ctor params to LazyEventAPIBase
+ LazyEventAPIBase(name, desc, field)
+ {}
+
+ private:
+ LLEventPump* construct(const std::string& /*name*/) override
+ {
+ // base class has carefully assembled LazyEventAPIParams embedded
+ // in this instance, just pass to LLEventAPI subclass constructor
+ return new EVENTAPI(mParams);
+ }
+ };
+} // namespace LL
+
+#endif /* ! defined(LL_LAZYEVENTAPI_H) */
diff --git a/indra/llcommon/llapr.cpp b/indra/llcommon/llapr.cpp
index 69466df2d1..575c524219 100644
--- a/indra/llcommon/llapr.cpp
+++ b/indra/llcommon/llapr.cpp
@@ -528,6 +528,7 @@ S32 LLAPRFile::seek(apr_file_t* file_handle, apr_seek_where_t where, S32 offset)
//static
S32 LLAPRFile::readEx(const std::string& filename, void *buf, S32 offset, S32 nbytes, LLVolatileAPRPool* pool)
{
+ LL_PROFILE_ZONE_SCOPED;
//*****************************************
LLAPRFilePoolScope scope(pool);
apr_file_t* file_handle = open(filename, scope.getVolatileAPRPool(), APR_READ|APR_BINARY);
@@ -572,6 +573,7 @@ S32 LLAPRFile::readEx(const std::string& filename, void *buf, S32 offset, S32 nb
//static
S32 LLAPRFile::writeEx(const std::string& filename, void *buf, S32 offset, S32 nbytes, LLVolatileAPRPool* pool)
{
+ LL_PROFILE_ZONE_SCOPED;
apr_int32_t flags = APR_CREATE|APR_WRITE|APR_BINARY;
if (offset < 0)
{
diff --git a/indra/llcommon/llassettype.cpp b/indra/llcommon/llassettype.cpp
index 4c84223dad..6492d888c1 100644
--- a/indra/llcommon/llassettype.cpp
+++ b/indra/llcommon/llassettype.cpp
@@ -96,6 +96,7 @@ LLAssetDictionary::LLAssetDictionary()
addEntry(LLAssetType::AT_WIDGET, new AssetEntry("WIDGET", "widget", "widget", false, false, false));
addEntry(LLAssetType::AT_PERSON, new AssetEntry("PERSON", "person", "person", false, false, false));
addEntry(LLAssetType::AT_SETTINGS, new AssetEntry("SETTINGS", "settings", "settings blob", true, true, true));
+ addEntry(LLAssetType::AT_MATERIAL, new AssetEntry("MATERIAL", "material", "render material", true, true, true));
addEntry(LLAssetType::AT_UNKNOWN, new AssetEntry("UNKNOWN", "invalid", NULL, false, false, false));
addEntry(LLAssetType::AT_NONE, new AssetEntry("NONE", "-1", NULL, FALSE, FALSE, FALSE));
diff --git a/indra/llcommon/llassettype.h b/indra/llcommon/llassettype.h
index 652c548d59..e8df8574f7 100644
--- a/indra/llcommon/llassettype.h
+++ b/indra/llcommon/llassettype.h
@@ -127,8 +127,9 @@ public:
AT_RESERVED_6 = 55,
AT_SETTINGS = 56, // Collection of settings
-
- AT_COUNT = 57,
+ AT_MATERIAL = 57, // Render Material
+
+ AT_COUNT = 58,
// +*********************************************************+
// | TO ADD AN ELEMENT TO THIS ENUM: |
diff --git a/indra/llcommon/llcallstack.h b/indra/llcommon/llcallstack.h
index 5acf04a49f..d5a2b7b157 100644
--- a/indra/llcommon/llcallstack.h
+++ b/indra/llcommon/llcallstack.h
@@ -79,9 +79,9 @@ struct LLContextStatus
LL_COMMON_API std::ostream& operator<<(std::ostream& s, const LLContextStatus& context_status);
-#define dumpStack(tag) \
- if (debugLoggingEnabled(tag)) \
- { \
- LLCallStack cs; \
- LL_DEBUGS(tag) << "STACK:\n" << "====================\n" << cs << "====================" << LL_ENDL; \
- }
+#define dumpStack(tag) \
+ LL_DEBUGS(tag) << "STACK:\n" \
+ << "====================\n" \
+ << LLCallStack() \
+ << "====================" \
+ << LL_ENDL;
diff --git a/indra/llcommon/llcommon.cpp b/indra/llcommon/llcommon.cpp
index d2c4e66160..6e988260a9 100644
--- a/indra/llcommon/llcommon.cpp
+++ b/indra/llcommon/llcommon.cpp
@@ -37,12 +37,13 @@ thread_local bool gProfilerEnabled = false;
#if (TRACY_ENABLE)
// Override new/delete for tracy memory profiling
-void *operator new(size_t size)
+
+void* ll_tracy_new(size_t size)
{
void* ptr;
if (gProfilerEnabled)
{
- LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
+ //LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
ptr = (malloc)(size);
}
else
@@ -57,12 +58,22 @@ void *operator new(size_t size)
return ptr;
}
-void operator delete(void *ptr) noexcept
+void* operator new(size_t size)
+{
+ return ll_tracy_new(size);
+}
+
+void* operator new[](std::size_t count)
+{
+ return ll_tracy_new(count);
+}
+
+void ll_tracy_delete(void* ptr)
{
TracyFree(ptr);
if (gProfilerEnabled)
{
- LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
+ //LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
(free)(ptr);
}
else
@@ -71,6 +82,16 @@ void operator delete(void *ptr) noexcept
}
}
+void operator delete(void *ptr) noexcept
+{
+ ll_tracy_delete(ptr);
+}
+
+void operator delete[](void* ptr) noexcept
+{
+ ll_tracy_delete(ptr);
+}
+
// C-style malloc/free can't be so easily overridden, so we define tracy versions and use
// a pre-processor #define in linden_common.h to redirect to them. The parens around the native
// functions below prevents recursive substitution by the preprocessor.
diff --git a/indra/llcommon/llcoros.cpp b/indra/llcommon/llcoros.cpp
index 191c1a9037..3deed2e199 100644
--- a/indra/llcommon/llcoros.cpp
+++ b/indra/llcommon/llcoros.cpp
@@ -278,6 +278,7 @@ std::string LLCoros::launch(const std::string& prefix, const callable_t& callabl
catch (std::bad_alloc&)
{
// Out of memory on stack allocation?
+ printActiveCoroutines();
LL_ERRS("LLCoros") << "Bad memory allocation in LLCoros::launch(" << prefix << ")!" << LL_ENDL;
}
diff --git a/indra/llcommon/llerror.cpp b/indra/llcommon/llerror.cpp
index 05e719b494..414515854a 100644
--- a/indra/llcommon/llerror.cpp
+++ b/indra/llcommon/llerror.cpp
@@ -1603,20 +1603,6 @@ namespace LLError
}
}
-bool debugLoggingEnabled(const std::string& tag)
-{
- LLMutexTrylock lock(getMutex<LOG_MUTEX>(), 5);
- if (!lock.isLocked())
- {
- return false;
- }
-
- SettingsConfigPtr s = Globals::getInstance()->getSettingsConfig();
- LLError::ELevel level = LLError::LEVEL_DEBUG;
- bool res = checkLevelMap(s->mTagLevelMap, tag, level);
- return res;
-}
-
void crashdriver(void (*callback)(int*))
{
// The LLERROR_CRASH macro used to have inline code of the form:
diff --git a/indra/llcommon/llerror.h b/indra/llcommon/llerror.h
index 624a5fb37a..05dd88ee51 100644
--- a/indra/llcommon/llerror.h
+++ b/indra/llcommon/llerror.h
@@ -82,9 +82,11 @@ const int LL_ERR_NOERR = 0;
#ifdef SHOW_ASSERT
#define llassert(func) llassert_always_msg(func, #func)
+#define llassert_msg(func, msg) llassert_always_msg(func, msg)
#define llverify(func) llassert_always_msg(func, #func)
#else
#define llassert(func)
+#define llassert_msg(func, msg)
#define llverify(func) do {if (func) {}} while(0)
#endif
@@ -462,8 +464,31 @@ typedef LLError::NoClassInfo _LL_CLASS_TO_LOG;
LLError::CallSite& _site(_sites[which]); \
lllog_test_()
-// Check at run-time whether logging is enabled, without generating output
+/*
+// Check at run-time whether logging is enabled, without generating output.
+Resist the temptation to add a function like this because it incurs the
+expense of locking and map-searching every time control reaches it.
bool debugLoggingEnabled(const std::string& tag);
+
+Instead of:
+
+if (debugLoggingEnabled("SomeTag"))
+{
+ // ... presumably expensive operation ...
+ LL_DEBUGS("SomeTag") << ... << LL_ENDL;
+}
+
+Use this:
+
+LL_DEBUGS("SomeTag");
+// ... presumably expensive operation ...
+LL_CONT << ...;
+LL_ENDL;
+
+LL_DEBUGS("SomeTag") performs the locking and map-searching ONCE, then caches
+the result in a static variable.
+*/
+
// used by LLERROR_CRASH
void crashdriver(void (*)(int*));
diff --git a/indra/llcommon/lleventapi.cpp b/indra/llcommon/lleventapi.cpp
index ff5459c1eb..3d46ef1034 100644
--- a/indra/llcommon/lleventapi.cpp
+++ b/indra/llcommon/lleventapi.cpp
@@ -35,6 +35,7 @@
// external library headers
// other Linden headers
#include "llerror.h"
+#include "lazyeventapi.h"
LLEventAPI::LLEventAPI(const std::string& name, const std::string& desc, const std::string& field):
lbase(name, field),
@@ -43,6 +44,13 @@ LLEventAPI::LLEventAPI(const std::string& name, const std::string& desc, const s
{
}
+LLEventAPI::LLEventAPI(const LL::LazyEventAPIParams& params):
+ LLEventAPI(params.name, params.desc, params.field)
+{
+ // call initialization functions with our brand-new instance pointer
+ params.init(this);
+}
+
LLEventAPI::~LLEventAPI()
{
}
diff --git a/indra/llcommon/lleventapi.h b/indra/llcommon/lleventapi.h
index 5991fe8fd5..25f6becd8b 100644
--- a/indra/llcommon/lleventapi.h
+++ b/indra/llcommon/lleventapi.h
@@ -35,6 +35,11 @@
#include "llinstancetracker.h"
#include <string>
+namespace LL
+{
+ class LazyEventAPIParams;
+}
+
/**
* LLEventAPI not only provides operation dispatch functionality, inherited
* from LLDispatchListener -- it also gives us event API introspection.
@@ -65,19 +70,6 @@ public:
std::string getDesc() const { return mDesc; }
/**
- * Publish only selected add() methods from LLEventDispatcher.
- * Every LLEventAPI add() @em must have a description string.
- */
- template <typename CALLABLE>
- void add(const std::string& name,
- const std::string& desc,
- CALLABLE callable,
- const LLSD& required=LLSD())
- {
- LLEventDispatcher::add(name, desc, callable, required);
- }
-
- /**
* Instantiate a Response object in any LLEventAPI subclass method that
* wants to guarantee a reply (if requested) will be sent on exit from the
* method. The reply will be sent if request.has(@a replyKey), default
@@ -150,16 +142,20 @@ public:
* @endcode
*/
LLSD& operator[](const LLSD::String& key) { return mResp[key]; }
-
- /**
- * set the response to the given data
- */
- void setResponse(LLSD const & response){ mResp = response; }
+
+ /**
+ * set the response to the given data
+ */
+ void setResponse(LLSD const & response){ mResp = response; }
LLSD mResp, mReq;
LLSD::String mKey;
};
+protected:
+ // constructor used only by subclasses registered by LazyEventAPI
+ LLEventAPI(const LL::LazyEventAPIParams&);
+
private:
std::string mDesc;
};
diff --git a/indra/llcommon/lleventdispatcher.cpp b/indra/llcommon/lleventdispatcher.cpp
index cd0ab6bc29..da96de18f7 100644
--- a/indra/llcommon/lleventdispatcher.cpp
+++ b/indra/llcommon/lleventdispatcher.cpp
@@ -40,71 +40,13 @@
// other Linden headers
#include "llevents.h"
#include "llerror.h"
+#include "llexception.h"
#include "llsdutil.h"
#include "stringize.h"
+#include <iomanip> // std::quoted()
#include <memory> // std::auto_ptr
/*****************************************************************************
-* LLSDArgsSource
-*****************************************************************************/
-/**
- * Store an LLSD array, producing its elements one at a time. Die with LL_ERRS
- * if the consumer requests more elements than the array contains.
- */
-class LL_COMMON_API LLSDArgsSource
-{
-public:
- LLSDArgsSource(const std::string function, const LLSD& args);
- ~LLSDArgsSource();
-
- LLSD next();
-
- void done() const;
-
-private:
- std::string _function;
- LLSD _args;
- LLSD::Integer _index;
-};
-
-LLSDArgsSource::LLSDArgsSource(const std::string function, const LLSD& args):
- _function(function),
- _args(args),
- _index(0)
-{
- if (! (_args.isUndefined() || _args.isArray()))
- {
- LL_ERRS("LLSDArgsSource") << _function << " needs an args array instead of "
- << _args << LL_ENDL;
- }
-}
-
-LLSDArgsSource::~LLSDArgsSource()
-{
- done();
-}
-
-LLSD LLSDArgsSource::next()
-{
- if (_index >= _args.size())
- {
- LL_ERRS("LLSDArgsSource") << _function << " requires more arguments than the "
- << _args.size() << " provided: " << _args << LL_ENDL;
- }
- return _args[_index++];
-}
-
-void LLSDArgsSource::done() const
-{
- if (_index < _args.size())
- {
- LL_WARNS("LLSDArgsSource") << _function << " only consumed " << _index
- << " of the " << _args.size() << " arguments provided: "
- << _args << LL_ENDL;
- }
-}
-
-/*****************************************************************************
* LLSDArgsMapper
*****************************************************************************/
/**
@@ -156,19 +98,26 @@ void LLSDArgsSource::done() const
* - Holes are filled with the default values.
* - Any remaining holes constitute an error.
*/
-class LL_COMMON_API LLSDArgsMapper
+class LL_COMMON_API LLEventDispatcher::LLSDArgsMapper
{
public:
/// Accept description of function: function name, param names, param
/// default values
- LLSDArgsMapper(const std::string& function, const LLSD& names, const LLSD& defaults);
+ LLSDArgsMapper(LLEventDispatcher* parent, const std::string& function,
+ const LLSD& names, const LLSD& defaults);
- /// Given arguments map, return LLSD::Array of parameter values, or LL_ERRS.
+ /// Given arguments map, return LLSD::Array of parameter values, or
+ /// trigger error.
LLSD map(const LLSD& argsmap) const;
private:
static std::string formatlist(const LLSD&);
+ template <typename... ARGS>
+ [[noreturn]] void callFail(ARGS&&... args) const;
+ // store a plain dumb back-pointer because we don't have to manage the
+ // parent LLEventDispatcher's lifespan
+ LLEventDispatcher* _parent;
// The function-name string is purely descriptive. We want error messages
// to be able to indicate which function's LLSDArgsMapper has the problem.
std::string _function;
@@ -187,15 +136,18 @@ private:
FilledVector _has_dft;
};
-LLSDArgsMapper::LLSDArgsMapper(const std::string& function,
- const LLSD& names, const LLSD& defaults):
+LLEventDispatcher::LLSDArgsMapper::LLSDArgsMapper(LLEventDispatcher* parent,
+ const std::string& function,
+ const LLSD& names,
+ const LLSD& defaults):
+ _parent(parent),
_function(function),
_names(names),
_has_dft(names.size())
{
if (! (_names.isUndefined() || _names.isArray()))
{
- LL_ERRS("LLSDArgsMapper") << function << " names must be an array, not " << names << LL_ENDL;
+ callFail(" names must be an array, not ", names);
}
auto nparams(_names.size());
// From _names generate _indexes.
@@ -218,8 +170,7 @@ LLSDArgsMapper::LLSDArgsMapper(const std::string& function,
// defaults is a (possibly empty) array. Right-align it with names.
if (ndefaults > nparams)
{
- LL_ERRS("LLSDArgsMapper") << function << " names array " << names
- << " shorter than defaults array " << defaults << LL_ENDL;
+ callFail(" names array ", names, " shorter than defaults array ", defaults);
}
// Offset by which we slide defaults array right to right-align with
@@ -256,23 +207,20 @@ LLSDArgsMapper::LLSDArgsMapper(const std::string& function,
}
if (bogus.size())
{
- LL_ERRS("LLSDArgsMapper") << function << " defaults specified for nonexistent params "
- << formatlist(bogus) << LL_ENDL;
+ callFail(" defaults specified for nonexistent params ", formatlist(bogus));
}
}
else
{
- LL_ERRS("LLSDArgsMapper") << function << " defaults must be a map or an array, not "
- << defaults << LL_ENDL;
+ callFail(" defaults must be a map or an array, not ", defaults);
}
}
-LLSD LLSDArgsMapper::map(const LLSD& argsmap) const
+LLSD LLEventDispatcher::LLSDArgsMapper::map(const LLSD& argsmap) const
{
if (! (argsmap.isUndefined() || argsmap.isMap() || argsmap.isArray()))
{
- LL_ERRS("LLSDArgsMapper") << _function << " map() needs a map or array, not "
- << argsmap << LL_ENDL;
+ callFail(" map() needs a map or array, not ", argsmap);
}
// Initialize the args array. Indexing a non-const LLSD array grows it
// to appropriate size, but we don't want to resize this one on each
@@ -369,15 +317,14 @@ LLSD LLSDArgsMapper::map(const LLSD& argsmap) const
// by argsmap, that's a problem.
if (unfilled.size())
{
- LL_ERRS("LLSDArgsMapper") << _function << " missing required arguments "
- << formatlist(unfilled) << " from " << argsmap << LL_ENDL;
+ callFail(" missing required arguments ", formatlist(unfilled), " from ", argsmap);
}
// done
return args;
}
-std::string LLSDArgsMapper::formatlist(const LLSD& list)
+std::string LLEventDispatcher::LLSDArgsMapper::formatlist(const LLSD& list)
{
std::ostringstream out;
const char* delim = "";
@@ -390,23 +337,44 @@ std::string LLSDArgsMapper::formatlist(const LLSD& list)
return out.str();
}
-LLEventDispatcher::LLEventDispatcher(const std::string& desc, const std::string& key):
- mDesc(desc),
- mKey(key)
+template <typename... ARGS>
+[[noreturn]] void LLEventDispatcher::LLSDArgsMapper::callFail(ARGS&&... args) const
{
+ _parent->callFail<LLEventDispatcher::DispatchError>
+ (_function, std::forward<ARGS>(args)...);
}
+/*****************************************************************************
+* LLEventDispatcher
+*****************************************************************************/
+LLEventDispatcher::LLEventDispatcher(const std::string& desc, const std::string& key):
+ LLEventDispatcher(desc, key, "args")
+{}
+
+LLEventDispatcher::LLEventDispatcher(const std::string& desc, const std::string& key,
+ const std::string& argskey):
+ mDesc(desc),
+ mKey(key),
+ mArgskey(argskey)
+{}
+
LLEventDispatcher::~LLEventDispatcher()
{
}
+LLEventDispatcher::DispatchEntry::DispatchEntry(LLEventDispatcher* parent, const std::string& desc):
+ mParent(parent),
+ mDesc(desc)
+{}
+
/**
* DispatchEntry subclass used for callables accepting(const LLSD&)
*/
struct LLEventDispatcher::LLSDDispatchEntry: public LLEventDispatcher::DispatchEntry
{
- LLSDDispatchEntry(const std::string& desc, const Callable& func, const LLSD& required):
- DispatchEntry(desc),
+ LLSDDispatchEntry(LLEventDispatcher* parent, const std::string& desc,
+ const Callable& func, const LLSD& required):
+ DispatchEntry(parent, desc),
mFunc(func),
mRequired(required)
{}
@@ -414,22 +382,21 @@ struct LLEventDispatcher::LLSDDispatchEntry: public LLEventDispatcher::DispatchE
Callable mFunc;
LLSD mRequired;
- virtual void call(const std::string& desc, const LLSD& event) const
+ LLSD call(const std::string& desc, const LLSD& event, bool, const std::string&) const override
{
// Validate the syntax of the event itself.
std::string mismatch(llsd_matches(mRequired, event));
if (! mismatch.empty())
{
- LL_ERRS("LLEventDispatcher") << desc << ": bad request: " << mismatch << LL_ENDL;
+ callFail(desc, ": bad request: ", mismatch);
}
// Event syntax looks good, go for it!
- mFunc(event);
+ return mFunc(event);
}
- virtual LLSD addMetadata(LLSD meta) const
+ LLSD getMetadata() const override
{
- meta["required"] = mRequired;
- return meta;
+ return llsd::map("required", mRequired);
}
};
@@ -439,17 +406,27 @@ struct LLEventDispatcher::LLSDDispatchEntry: public LLEventDispatcher::DispatchE
*/
struct LLEventDispatcher::ParamsDispatchEntry: public LLEventDispatcher::DispatchEntry
{
- ParamsDispatchEntry(const std::string& desc, const invoker_function& func):
- DispatchEntry(desc),
+ ParamsDispatchEntry(LLEventDispatcher* parent, const std::string& name,
+ const std::string& desc, const invoker_function& func):
+ DispatchEntry(parent, desc),
+ mName(name),
mInvoker(func)
{}
+ std::string mName;
invoker_function mInvoker;
- virtual void call(const std::string& desc, const LLSD& event) const
+ LLSD call(const std::string&, const LLSD& event, bool, const std::string&) const override
{
- LLSDArgsSource src(desc, event);
- mInvoker(boost::bind(&LLSDArgsSource::next, boost::ref(src)));
+ try
+ {
+ return mInvoker(event);
+ }
+ catch (const LL::apply_error& err)
+ {
+ // could hit runtime errors with LL::apply()
+ callFail(err.what());
+ }
}
};
@@ -459,23 +436,62 @@ struct LLEventDispatcher::ParamsDispatchEntry: public LLEventDispatcher::Dispatc
*/
struct LLEventDispatcher::ArrayParamsDispatchEntry: public LLEventDispatcher::ParamsDispatchEntry
{
- ArrayParamsDispatchEntry(const std::string& desc, const invoker_function& func,
+ ArrayParamsDispatchEntry(LLEventDispatcher* parent, const std::string& name,
+ const std::string& desc, const invoker_function& func,
LLSD::Integer arity):
- ParamsDispatchEntry(desc, func),
+ ParamsDispatchEntry(parent, name, desc, func),
mArity(arity)
{}
LLSD::Integer mArity;
- virtual LLSD addMetadata(LLSD meta) const
+ LLSD call(const std::string& desc, const LLSD& event, bool fromMap, const std::string& argskey) const override
+ {
+// std::string context { stringize(desc, "(", event, ") with argskey ", std::quoted(argskey), ": ") };
+ // Whether we try to extract arguments from 'event' depends on whether
+ // the LLEventDispatcher consumer called one of the (name, event)
+ // methods (! fromMap) or one of the (event) methods (fromMap). If we
+ // were called with (name, event), the passed event must itself be
+ // suitable to pass to the registered callable, no args extraction
+ // required or even attempted. Only if called with plain (event) do we
+ // consider extracting args from that event. Initially assume 'event'
+ // itself contains the arguments.
+ LLSD args{ event };
+ if (fromMap)
+ {
+ if (! mArity)
+ {
+ // When the target function is nullary, and we're called from
+ // an (event) method, just ignore the rest of the map entries.
+ args.clear();
+ }
+ else
+ {
+ // We only require/retrieve argskey if the target function
+ // isn't nullary. For all others, since we require an LLSD
+ // array, we must have an argskey.
+ if (argskey.empty())
+ {
+ callFail("LLEventDispatcher has no args key");
+ }
+ if ((! event.has(argskey)))
+ {
+ callFail("missing required key ", std::quoted(argskey));
+ }
+ args = event[argskey];
+ }
+ }
+ return ParamsDispatchEntry::call(desc, args, fromMap, argskey);
+ }
+
+ LLSD getMetadata() const override
{
LLSD array(LLSD::emptyArray());
// Resize to number of arguments required
if (mArity)
array[mArity - 1] = LLSD();
llassert_always(array.size() == mArity);
- meta["required"] = array;
- return meta;
+ return llsd::map("required", array);
}
};
@@ -485,11 +501,11 @@ struct LLEventDispatcher::ArrayParamsDispatchEntry: public LLEventDispatcher::Pa
*/
struct LLEventDispatcher::MapParamsDispatchEntry: public LLEventDispatcher::ParamsDispatchEntry
{
- MapParamsDispatchEntry(const std::string& name, const std::string& desc,
- const invoker_function& func,
+ MapParamsDispatchEntry(LLEventDispatcher* parent, const std::string& name,
+ const std::string& desc, const invoker_function& func,
const LLSD& params, const LLSD& defaults):
- ParamsDispatchEntry(desc, func),
- mMapper(name, params, defaults),
+ ParamsDispatchEntry(parent, name, desc, func),
+ mMapper(parent, name, params, defaults),
mRequired(LLSD::emptyMap())
{
// Build the set of all param keys, then delete the ones that are
@@ -532,18 +548,27 @@ struct LLEventDispatcher::MapParamsDispatchEntry: public LLEventDispatcher::Para
LLSD mRequired;
LLSD mOptional;
- virtual void call(const std::string& desc, const LLSD& event) const
+ LLSD call(const std::string& desc, const LLSD& event, bool fromMap, const std::string& argskey) const override
{
- // Just convert from LLSD::Map to LLSD::Array using mMapper, then pass
- // to base-class call() method.
- ParamsDispatchEntry::call(desc, mMapper.map(event));
+ // by default, pass the whole event as the arguments map
+ LLSD args{ event };
+ // Were we called by one of the (event) methods (instead of the (name,
+ // event) methods), do we have an argskey, and does the incoming event
+ // have that key?
+ if (fromMap && (! argskey.empty()) && event.has(argskey))
+ {
+ // if so, extract the value of argskey from the incoming event,
+ // and use that as the arguments map
+ args = event[argskey];
+ }
+ // Now convert args from LLSD map to LLSD array using mMapper, then
+ // pass to base-class call() method.
+ return ParamsDispatchEntry::call(desc, mMapper.map(args), fromMap, argskey);
}
- virtual LLSD addMetadata(LLSD meta) const
+ LLSD getMetadata() const override
{
- meta["required"] = mRequired;
- meta["optional"] = mOptional;
- return meta;
+ return llsd::map("required", mRequired, "optional", mOptional);
}
};
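For comparison, a similar sketch (same headers as the sketch above; "join" and the parameter names are illustrative) of a map-params registration, showing where the arguments map may come from in each invocation form.

    static std::string join(const std::string& a, const std::string& b,
                            const std::string& sep) { return a + sep + b; }

    void demo_map_params()
    {
        LLEventDispatcher disp("demo", "op", "args");
        // a params array (optionally plus defaults) registers a map-params entry
        disp.add("join", "join two strings", &join, llsd::array("a", "b", "sep"));

        // (name, event) form: the event itself is the arguments map
        LLSD r1 = disp("join", llsd::map("a", "L", "b", "R", "sep", "-"));

        // (event) form: arguments are taken from the argskey if present,
        // otherwise from the whole event map
        LLSD r2 = disp(llsd::map("op", "join",
                                 "args", llsd::map("a", "L", "b", "R", "sep", "-")));
    }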
@@ -552,9 +577,9 @@ void LLEventDispatcher::addArrayParamsDispatchEntry(const std::string& name,
const invoker_function& invoker,
LLSD::Integer arity)
{
- mDispatch.insert(
- DispatchMap::value_type(name, DispatchMap::mapped_type(
- new ArrayParamsDispatchEntry(desc, invoker, arity))));
+ mDispatch.emplace(
+ name,
+ new ArrayParamsDispatchEntry(this, "", desc, invoker, arity));
}
void LLEventDispatcher::addMapParamsDispatchEntry(const std::string& name,
@@ -563,25 +588,25 @@ void LLEventDispatcher::addMapParamsDispatchEntry(const std::string& name,
const LLSD& params,
const LLSD& defaults)
{
- mDispatch.insert(
- DispatchMap::value_type(name, DispatchMap::mapped_type(
- new MapParamsDispatchEntry(name, desc, invoker, params, defaults))));
+ // Pass instance info as well as this entry name for error messages.
+ mDispatch.emplace(
+ name,
+ new MapParamsDispatchEntry(this, "", desc, invoker, params, defaults));
}
/// Register a callable by name
-void LLEventDispatcher::add(const std::string& name, const std::string& desc,
- const Callable& callable, const LLSD& required)
+void LLEventDispatcher::addLLSD(const std::string& name, const std::string& desc,
+ const Callable& callable, const LLSD& required)
{
- mDispatch.insert(
- DispatchMap::value_type(name, DispatchMap::mapped_type(
- new LLSDDispatchEntry(desc, callable, required))));
+ mDispatch.emplace(name, new LLSDDispatchEntry(this, desc, callable, required));
}
-void LLEventDispatcher::addFail(const std::string& name, const std::string& classname) const
+void LLEventDispatcher::addFail(const std::string& name, const char* classname) const
{
LL_ERRS("LLEventDispatcher") << "LLEventDispatcher(" << mDesc << ")::add(" << name
- << "): " << classname << " is not a subclass "
- << "of LLEventDispatcher" << LL_ENDL;
+ << "): " << LLError::Log::demangle(classname)
+ << " is not a subclass of LLEventDispatcher"
+ << LL_ENDL;
}
/// Unregister a callable
@@ -596,48 +621,105 @@ bool LLEventDispatcher::remove(const std::string& name)
return true;
}
-/// Call a registered callable with an explicitly-specified name. If no
-/// such callable exists, die with LL_ERRS.
-void LLEventDispatcher::operator()(const std::string& name, const LLSD& event) const
+/// Call a registered callable with an explicitly-specified name. It is an
+/// error if no such callable exists.
+LLSD LLEventDispatcher::operator()(const std::string& name, const LLSD& event) const
{
- if (! try_call(name, event))
- {
- LL_ERRS("LLEventDispatcher") << "LLEventDispatcher(" << mDesc << "): '" << name
- << "' not found" << LL_ENDL;
- }
+ return try_call(std::string(), name, event);
}
-/// Extract the @a key value from the incoming @a event, and call the
-/// callable whose name is specified by that map @a key. If no such
-/// callable exists, die with LL_ERRS.
-void LLEventDispatcher::operator()(const LLSD& event) const
+bool LLEventDispatcher::try_call(const std::string& name, const LLSD& event) const
{
- // This could/should be implemented in terms of the two-arg overload.
- // However -- we can produce a more informative error message.
- std::string name(event[mKey]);
- if (! try_call(name, event))
+ try
{
- LL_ERRS("LLEventDispatcher") << "LLEventDispatcher(" << mDesc << "): bad " << mKey
- << " value '" << name << "'" << LL_ENDL;
+ try_call(std::string(), name, event);
+ return true;
+ }
+ // Note that we don't catch the generic DispatchError, only the specific
+ // DispatchMissing. try_call() only promises to return false if the
+ // specified callable name isn't found -- not for general errors.
+ catch (const DispatchMissing&)
+ {
+ return false;
}
}
+/// Extract the @a key value from the incoming @a event, and call the callable
+/// whose name is specified by that map @a key. It is an error if no such
+/// callable exists.
+LLSD LLEventDispatcher::operator()(const LLSD& event) const
+{
+ return try_call(mKey, event[mKey], event);
+}
+
bool LLEventDispatcher::try_call(const LLSD& event) const
{
- return try_call(event[mKey], event);
+ try
+ {
+ try_call(mKey, event[mKey], event);
+ return true;
+ }
+ catch (const DispatchMissing&)
+ {
+ return false;
+ }
}
-bool LLEventDispatcher::try_call(const std::string& name, const LLSD& event) const
+LLSD LLEventDispatcher::try_call(const std::string& key, const std::string& name,
+ const LLSD& event) const
{
+ if (name.empty())
+ {
+ if (key.empty())
+ {
+ callFail<DispatchError>("attempting to call with no name");
+ }
+ else
+ {
+ callFail<DispatchError>("no ", key);
+ }
+ }
+
DispatchMap::const_iterator found = mDispatch.find(name);
if (found == mDispatch.end())
{
- return false;
+ // Here we were passed a non-empty name, but there's no registered
+ // callable with that name. This is the one case in which we throw
+ // DispatchMissing instead of the generic DispatchError.
+ // Distinguish the public method by which our caller reached here:
+ // key.empty() means the name was passed explicitly, non-empty means
+ // we extracted the name from the incoming event using that key.
+ if (key.empty())
+ {
+ callFail<DispatchMissing>(std::quoted(name), " not found");
+ }
+ else
+ {
+ callFail<DispatchMissing>("bad ", key, " value ", std::quoted(name));
+ }
}
+
// Found the name, so it's plausible to even attempt the call.
- found->second->call(STRINGIZE("LLEventDispatcher(" << mDesc << ") calling '" << name << "'"),
- event);
- return true; // tell caller we were able to call
+ const char* delim = (key.empty()? "" : "=");
+ // append either "[key=name]" or just "[name]"
+ SetState transient(this, '[', key, delim, name, ']');
+ return found->second->call("", event, (! key.empty()), mArgskey);
+}
+
+template <typename EXCEPTION, typename... ARGS>
+//static
+[[noreturn]] void LLEventDispatcher::sCallFail(ARGS&&... args)
+{
+ auto error = stringize(std::forward<ARGS>(args)...);
+ LL_WARNS("LLEventDispatcher") << error << LL_ENDL;
+ LLTHROW(EXCEPTION(error));
+}
+
+template <typename EXCEPTION, typename... ARGS>
+[[noreturn]] void LLEventDispatcher::callFail(ARGS&&... args) const
+{
+ // Describe this instance in addition to the error itself.
+ sCallFail<EXCEPTION>(*this, ": ", std::forward<ARGS>(args)...);
}
LLSD LLEventDispatcher::getMetadata(const std::string& name) const
@@ -647,26 +729,243 @@ LLSD LLEventDispatcher::getMetadata(const std::string& name) const
{
return LLSD();
}
- LLSD meta;
+ LLSD meta{ found->second->getMetadata() };
meta["name"] = name;
meta["desc"] = found->second->mDesc;
- return found->second->addMetadata(meta);
+ return meta;
+}
+
+std::ostream& operator<<(std::ostream& out, const LLEventDispatcher& self)
+{
+ // If we're a subclass of LLEventDispatcher, e.g. LLEventAPI, report that.
+ // Also report whatever transient state is active.
+ return out << LLError::Log::classname(self) << '(' << self.mDesc << ')'
+ << self.getState();
}
-LLDispatchListener::LLDispatchListener(const std::string& pumpname, const std::string& key):
- LLEventDispatcher(pumpname, key),
- mPump(pumpname, true), // allow tweaking for uniqueness
- mBoundListener(mPump.listen("self", boost::bind(&LLDispatchListener::process, this, _1)))
+std::string LLEventDispatcher::getState() const
{
+ // fiber_specific_ptr defaults to nullptr, and ~SetState() resets it to
+ // nullptr; report that case as an empty string
+ if (! mState.get())
+ return {};
+ else
+ return *mState;
}
-bool LLDispatchListener::process(const LLSD& event)
+bool LLEventDispatcher::setState(SetState&, const std::string& state) const
{
- (*this)(event);
+ // If SetState is instantiated at multiple levels of function call, ignore
+ // the lower-level call because the outer call presumably provides more
+ // context.
+ if (mState.get())
+ return false;
+
+ // Pass us empty string (a la ~SetState()) to reset to nullptr, else take
+ // a heap copy of the passed state string so we can delete it on
+ // subsequent reset().
+ mState.reset(state.empty()? nullptr : new std::string(state));
+ return true;
+}
+
+/*****************************************************************************
+* LLDispatchListener
+*****************************************************************************/
+std::string LLDispatchListener::mReplyKey{ "reply" };
+
+bool LLDispatchListener::process(const LLSD& event) const
+{
+ // Decide what to do based on the incoming value of the specified dispatch
+ // key.
+ LLSD name{ event[getDispatchKey()] };
+ if (name.isMap())
+ {
+ call_map(name, event);
+ }
+ else if (name.isArray())
+ {
+ call_array(name, event);
+ }
+ else
+ {
+ call_one(name, event);
+ }
return false;
}
-LLEventDispatcher::DispatchEntry::DispatchEntry(const std::string& desc):
- mDesc(desc)
-{}
+void LLDispatchListener::call_one(const LLSD& name, const LLSD& event) const
+{
+ LLSD result;
+ try
+ {
+ result = (*this)(event);
+ }
+ catch (const DispatchError& err)
+ {
+ if (! event.has(mReplyKey))
+ {
+ // Without a reply key, let the exception propagate.
+ throw;
+ }
+
+ // Here there was an error and the incoming event has mReplyKey. Reply
+ // with a map containing an "error" key explaining the problem.
+ return reply(llsd::map("error", err.what()), event);
+ }
+ // We seem to have gotten a valid result. But we don't know whether the
+ // registered callable is void or non-void. If it's void,
+ // LLEventDispatcher returned isUndefined(). Otherwise, try to send it
+ // back to our invoker.
+ if (result.isDefined())
+ {
+ if (! result.isMap())
+ {
+ // wrap the result in a map as the "data" key
+ result = llsd::map("data", result);
+ }
+ reply(result, event);
+ }
+}
+
+void LLDispatchListener::call_map(const LLSD& reqmap, const LLSD& event) const
+{
+ // LLSD map containing returned values
+ LLSD result;
+ // cache dispatch key
+ std::string key{ getDispatchKey() };
+ // collect any error messages here
+ std::ostringstream errors;
+ const char* delim = "";
+
+ for (const auto& pair : llsd::inMap(reqmap))
+ {
+ const LLSD::String& name{ pair.first };
+ const LLSD& args{ pair.second };
+ try
+ {
+ // in case of errors, tell user the dispatch key, the fact that
+ // we're processing a request map and the current key in that map
+ SetState transient(this, '[', key, '[', name, "]]");
+ // With this form, capture return value even if undefined:
+ // presence of the key in the response map can be used to detect
+ // which request keys succeeded.
+ result[name] = (*this)(name, args);
+ }
+ catch (const std::exception& err)
+ {
+ // Catch not only DispatchError, but any C++ exception thrown by
+ // the target callable. Collect exception name and message in
+ // 'errors'.
+ errors << delim << LLError::Log::classname(err) << ": " << err.what();
+ delim = "\n";
+ }
+ }
+
+ // so, were there any errors?
+ std::string error = errors.str();
+ if (! error.empty())
+ {
+ if (! event.has(mReplyKey))
+ {
+ // can't send reply, throw
+ sCallFail<DispatchError>(error);
+ }
+ else
+ {
+ // reply key present
+ result["error"] = error;
+ }
+ }
+
+ reply(result, event);
+}
+
+void LLDispatchListener::call_array(const LLSD& reqarray, const LLSD& event) const
+{
+ // LLSD array containing returned values
+ LLSD results;
+ // cache the dispatch key
+ std::string key{ getDispatchKey() };
+ // arguments array, if present -- const because, if it's shorter than
+ // reqarray, we don't want to grow it
+ const LLSD argsarray{ event[getArgsKey()] };
+ // error message, if any
+ std::string error;
+
+ // classic index loop because we need the index
+ for (size_t i = 0, size = reqarray.size(); i < size; ++i)
+ {
+ const auto& reqentry{ reqarray[i] };
+ std::string name;
+ LLSD args;
+ if (reqentry.isString())
+ {
+ name = reqentry.asString();
+ args = argsarray[i];
+ }
+ else if (reqentry.isArray() && reqentry.size() == 2 && reqentry[0].isString())
+ {
+ name = reqentry[0].asString();
+ args = reqentry[1];
+ }
+ else
+ {
+ // reqentry isn't in either of the documented forms
+ error = stringize(*this, ": ", getDispatchKey(), '[', i, "] ",
+ reqentry, " unsupported");
+ break;
+ }
+
+ // reqentry is one of the valid forms, got name and args
+ try
+ {
+ // in case of errors, tell user the dispatch key, the fact that
+ // we're processing a request array, the current entry in that
+ // array and the corresponding callable name
+ SetState transient(this, '[', key, '[', i, "]=", name, ']');
+ // With this form, capture return value even if undefined
+ results.append((*this)(name, args));
+ }
+ catch (const std::exception& err)
+ {
+ // Catch not only DispatchError, but any C++ exception thrown by
+ // the target callable. Report the exception class as well as the
+ // error string.
+ error = stringize(LLError::Log::classname(err), ": ", err.what());
+ break;
+ }
+ }
+
+ LLSD result;
+ // was there an error?
+ if (! error.empty())
+ {
+ if (! event.has(mReplyKey))
+ {
+ // can't send reply, throw
+ sCallFail<DispatchError>(error);
+ }
+ else
+ {
+ // reply key present
+ result["error"] = error;
+ }
+ }
+
+ // wrap the results array as response map "data" key, as promised
+ if (results.isDefined())
+ {
+ result["data"] = results;
+ }
+
+ reply(result, event);
+}
+
+void LLDispatchListener::reply(const LLSD& reply, const LLSD& request) const
+{
+ // Call sendReply() unconditionally: sendReply() itself tests whether the
+ // specified reply key is present in the incoming request, and does
+ // nothing if there's no such key.
+ sendReply(reply, request, mReplyKey);
+}
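From a caller's point of view, the reply conventions implemented by call_one()/reply() above look roughly like this sketch (same headers as the earlier sketches; the pump names "cmd"/"reply-pump" and the callable "greet" are illustrative).

    static std::string greet(const std::string& name) { return "Hello, " + name; }

    void demo_reply()
    {
        // an LLDispatchListener is itself the LLEventPump named "cmd"
        LLDispatchListener api("cmd", "op", "args");
        api.add("greet", "return a greeting", &greet);

        // a request carrying a "reply" key names the pump to respond on
        LLEventPumps::instance().obtain("cmd").post(
            llsd::map("op", "greet",
                      "args", llsd::array("world"),
                      "reply", "reply-pump"));
        // on success, "reply-pump" receives {"data": "Hello, world"} (plus an
        // echoed "reqid" if the request had one); on failure -- unknown "op",
        // bad arguments -- it receives a map whose "error" key carries the
        // message instead of a propagated DispatchError
    }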
diff --git a/indra/llcommon/lleventdispatcher.h b/indra/llcommon/lleventdispatcher.h
index 9e1244ef5b..a82bc7a69b 100644
--- a/indra/llcommon/lleventdispatcher.h
+++ b/indra/llcommon/lleventdispatcher.h
@@ -27,55 +27,26 @@
*
* Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
* $/LicenseInfo$
- *
- * The invoker machinery that constructs a boost::fusion argument list for use
- * with boost::fusion::invoke() is derived from
- * http://www.boost.org/doc/libs/1_45_0/libs/function_types/example/interpreter.hpp
- * whose license information is copied below:
- *
- * "(C) Copyright Tobias Schwinger
- *
- * Use modification and distribution are subject to the boost Software License,
- * Version 1.0. (See http://www.boost.org/LICENSE_1_0.txt)."
*/
#if ! defined(LL_LLEVENTDISPATCHER_H)
#define LL_LLEVENTDISPATCHER_H
-// nil is too generic a term to be allowed to be a global macro. In
-// particular, boost::fusion defines a 'class nil' (properly encapsulated in a
-// namespace) that a global 'nil' macro breaks badly.
-#if defined(nil)
-// Capture the value of the macro 'nil', hoping int is an appropriate type.
-static const auto nil_(nil);
-// Now forget the macro.
-#undef nil
-// Finally, reintroduce 'nil' as a properly-scoped alias for the previously-
-// defined const 'nil_'. Make it static since otherwise it produces duplicate-
-// symbol link errors later.
-static const auto& nil(nil_);
-#endif
-
-#include <string>
-#include <boost/shared_ptr.hpp>
-#include <boost/function.hpp>
-#include <boost/bind.hpp>
-#include <boost/iterator/transform_iterator.hpp>
-#include <boost/utility/enable_if.hpp>
+#include <boost/fiber/fss.hpp>
+#include <boost/function_types/is_member_function_pointer.hpp>
#include <boost/function_types/is_nonmember_callable_builtin.hpp>
-#include <boost/function_types/parameter_types.hpp>
-#include <boost/function_types/function_arity.hpp>
-#include <boost/type_traits/remove_cv.hpp>
-#include <boost/type_traits/remove_reference.hpp>
-#include <boost/fusion/include/push_back.hpp>
-#include <boost/fusion/include/cons.hpp>
-#include <boost/fusion/include/invoke.hpp>
-#include <boost/mpl/begin.hpp>
-#include <boost/mpl/end.hpp>
-#include <boost/mpl/next.hpp>
-#include <boost/mpl/deref.hpp>
+#include <boost/hof/is_invocable.hpp> // until C++17, when we get std::is_invocable
+#include <boost/iterator/transform_iterator.hpp>
+#include <functional> // std::function
+#include <memory> // std::unique_ptr
+#include <string>
#include <typeinfo>
+#include <type_traits>
+#include <utility> // std::pair
+#include "always_return.h"
+#include "function_types.h" // LL::function_arity
#include "llevents.h"
+#include "llptrto.h"
#include "llsdutil.h"
class LLSD;
@@ -89,15 +60,27 @@ class LLSD;
class LL_COMMON_API LLEventDispatcher
{
public:
+ /**
+ * Pass description and the LLSD key used by try_call(const LLSD&) and
+ * operator()(const LLSD&) to extract the name of the registered callable
+ * to invoke.
+ */
LLEventDispatcher(const std::string& desc, const std::string& key);
+ /**
+ * Pass description, the LLSD key used by try_call(const LLSD&) and
+ * operator()(const LLSD&) to extract the name of the registered callable
+ * to invoke, and the LLSD key used by try_call(const LLSD&) and
+ * operator()(const LLSD&) to extract arguments LLSD.
+ */
+ LLEventDispatcher(const std::string& desc, const std::string& key,
+ const std::string& argskey);
virtual ~LLEventDispatcher();
/// @name Register functions accepting(const LLSD&)
//@{
- /// Accept any C++ callable with the right signature, typically a
- /// boost::bind() expression
- typedef boost::function<void(const LLSD&)> Callable;
+ /// Accept any C++ callable with the right signature
+ typedef std::function<LLSD(const LLSD&)> Callable;
/**
* Register a @a callable by @a name. The passed @a callable accepts a
@@ -109,27 +92,54 @@ public:
void add(const std::string& name,
const std::string& desc,
const Callable& callable,
- const LLSD& required=LLSD());
+ const LLSD& required=LLSD())
+ {
+ addLLSD(name, desc, callable, required);
+ }
- /**
- * The case of a free function (or static method) accepting(const LLSD&)
- * could also be intercepted by the arbitrary-args overload below. Ensure
- * that it's directed to the Callable overload above instead.
- */
+ template <typename CALLABLE,
+ typename=typename std::enable_if<
+ boost::hof::is_invocable<CALLABLE, LLSD>::value
+ >::type>
void add(const std::string& name,
const std::string& desc,
- void (*f)(const LLSD&),
+ CALLABLE&& callable,
const LLSD& required=LLSD())
{
- add(name, desc, Callable(f), required);
+ addLLSD(
+ name,
+ desc,
+ Callable(LL::make_always_return<LLSD>(std::forward<CALLABLE>(callable))),
+ required);
}
/**
* Special case: a subclass of this class can pass an unbound member
* function pointer (of an LLEventDispatcher subclass) without explicitly
- * specifying the <tt>boost::bind()</tt> expression. The passed @a method
+ * specifying a <tt>std::bind()</tt> expression. The passed @a method
* accepts a single LLSD value, presumably containing other parameters.
*/
+ template <typename R, class CLASS>
+ void add(const std::string& name,
+ const std::string& desc,
+ R (CLASS::*method)(const LLSD&),
+ const LLSD& required=LLSD())
+ {
+ addMethod<CLASS>(name, desc, method, required);
+ }
+
+ /// Const overload of the above, so that both const and non-const methods
+ /// are accepted. The passed @a method accepts a single LLSD value,
+ /// presumably containing other parameters.
+ template <typename R, class CLASS>
+ void add(const std::string& name,
+ const std::string& desc,
+ R (CLASS::*method)(const LLSD&) const,
+ const LLSD& required=LLSD())
+ {
+ addMethod<CLASS>(name, desc, method, required);
+ }
+
+ // because the compiler can't match a method returning void to the above
template <class CLASS>
void add(const std::string& name,
const std::string& desc,
@@ -150,6 +160,128 @@ public:
addMethod<CLASS>(name, desc, method, required);
}
+ // non-const nullary method
+ template <typename R, class CLASS>
+ void add(const std::string& name,
+ const std::string& desc,
+ R (CLASS::*method)())
+ {
+ addVMethod<CLASS>(name, desc, method);
+ }
+
+ // const nullary method
+ template <typename R, class CLASS>
+ void add(const std::string& name,
+ const std::string& desc,
+ R (CLASS::*method)() const)
+ {
+ addVMethod<CLASS>(name, desc, method);
+ }
+
+ // non-const nullary method returning void
+ template <class CLASS>
+ void add(const std::string& name,
+ const std::string& desc,
+ void (CLASS::*method)())
+ {
+ addVMethod<CLASS>(name, desc, method);
+ }
+
+ // const nullary method returning void
+ template <class CLASS>
+ void add(const std::string& name,
+ const std::string& desc,
+ void (CLASS::*method)() const)
+ {
+ addVMethod<CLASS>(name, desc, method);
+ }
+
+ // non-const unary method (but method accepting LLSD should use the other add())
+ // enable_if usage per https://stackoverflow.com/a/39913395/5533635
+ template <typename R, class CLASS, typename ARG,
+ typename = typename std::enable_if<
+ ! std::is_same<typename std::decay<ARG>::type, LLSD>::value
+ >::type>
+ void add(const std::string& name,
+ const std::string& desc,
+ R (CLASS::*method)(ARG))
+ {
+ addVMethod<CLASS>(name, desc, method);
+ }
+
+ // const unary method (but method accepting LLSD should use the other add())
+ template <typename R, class CLASS, typename ARG,
+ typename = typename std::enable_if<
+ ! std::is_same<typename std::decay<ARG>::type, LLSD>::value
+ >::type>
+ void add(const std::string& name,
+ const std::string& desc,
+ R (CLASS::*method)(ARG) const)
+ {
+ addVMethod<CLASS>(name, desc, method);
+ }
+
+ // non-const unary method returning void
+ // enable_if usage per https://stackoverflow.com/a/39913395/5533635
+ template <class CLASS, typename ARG,
+ typename = typename std::enable_if<
+ ! std::is_same<typename std::decay<ARG>::type, LLSD>::value
+ >::type>
+ void add(const std::string& name,
+ const std::string& desc,
+ void (CLASS::*method)(ARG))
+ {
+ addVMethod<CLASS>(name, desc, method);
+ }
+
+ // const unary method returning void
+ template <class CLASS, typename ARG,
+ typename = typename std::enable_if<
+ ! std::is_same<typename std::decay<ARG>::type, LLSD>::value
+ >::type>
+ void add(const std::string& name,
+ const std::string& desc,
+ void (CLASS::*method)(ARG) const)
+ {
+ addVMethod<CLASS>(name, desc, method);
+ }
+
+ // non-const binary (or more) method
+ template <typename R, class CLASS, typename ARG0, typename ARG1, typename... ARGS>
+ void add(const std::string& name,
+ const std::string& desc,
+ R (CLASS::*method)(ARG0, ARG1, ARGS...))
+ {
+ addVMethod<CLASS>(name, desc, method);
+ }
+
+ // const binary (or more) method
+ template <typename R, class CLASS, typename ARG0, typename ARG1, typename... ARGS>
+ void add(const std::string& name,
+ const std::string& desc,
+ R (CLASS::*method)(ARG0, ARG1, ARGS...) const)
+ {
+ addVMethod<CLASS>(name, desc, method);
+ }
+
+ // non-const binary (or more) method returning void
+ template <class CLASS, typename ARG0, typename ARG1, typename... ARGS>
+ void add(const std::string& name,
+ const std::string& desc,
+ void (CLASS::*method)(ARG0, ARG1, ARGS...))
+ {
+ addVMethod<CLASS>(name, desc, method);
+ }
+
+ // const binary (or more) method returning void
+ template <class CLASS, typename ARG0, typename ARG1, typename... ARGS>
+ void add(const std::string& name,
+ const std::string& desc,
+ void (CLASS::*method)(ARG0, ARG1, ARGS...) const)
+ {
+ addVMethod<CLASS>(name, desc, method);
+ }
+
//@}
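As a quick illustration of how a subclass typically uses this family of overloads (a sketch only; MyAPI and its method names are illustrative, not part of this changeset):

    class MyAPI: public LLEventDispatcher
    {
    public:
        MyAPI(): LLEventDispatcher("MyAPI", "op", "args")
        {
            // method accepting LLSD: wrapped directly as a Callable
            add("status",   "report status", &MyAPI::status);
            // nullary method: called with no arguments; any supplied args are ignored
            add("shutdown", "stop the service", &MyAPI::shutdown);
            // multi-parameter method: arguments arrive as an LLSD array,
            // e.g. {"op": "move", "args": [2.5, 3.5]}
            add("move",     "move to (x, y)", &MyAPI::move);
        }

    private:
        LLSD status(const LLSD&) const { return llsd::map("up", true); }
        void shutdown() {}
        void move(double x, double y) {}
    };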
/// @name Register functions with arbitrary param lists
@@ -159,51 +291,43 @@ public:
* Register a free function with arbitrary parameters. (This also works
* for static class methods.)
*
- * @note This supports functions with up to about 6 parameters -- after
- * that you start getting dismaying compile errors in which
- * boost::fusion::joint_view is mentioned a surprising number of times.
- *
* When calling this name, pass an LLSD::Array. Each entry in turn will be
* converted to the corresponding parameter type using LLSDParam.
*/
- template<typename Function>
- typename boost::enable_if< boost::function_types::is_nonmember_callable_builtin<Function>
- >::type add(const std::string& name,
- const std::string& desc,
- Function f);
+ template <typename CALLABLE,
+ typename=typename std::enable_if<
+ ! boost::hof::is_invocable<CALLABLE, LLSD>()
+ >::type>
+ void add(const std::string& name,
+ const std::string& desc,
+ CALLABLE&& f)
+ {
+ addV(name, desc, f);
+ }
/**
* Register a nonstatic class method with arbitrary parameters.
*
- * @note This supports functions with up to about 6 parameters -- after
- * that you start getting dismaying compile errors in which
- * boost::fusion::joint_view is mentioned a surprising number of times.
- *
* To cover cases such as a method on an LLSingleton we don't yet want to
* instantiate, instead of directly storing an instance pointer, accept a
* nullary callable returning a pointer/reference to the desired class
- * instance. If you already have an instance in hand,
- * boost::lambda::var(instance) or boost::lambda::constant(instance_ptr)
- * produce suitable callables.
+ * instance.
*
* When calling this name, pass an LLSD::Array. Each entry in turn will be
* converted to the corresponding parameter type using LLSDParam.
*/
- template<typename Method, typename InstanceGetter>
- typename boost::enable_if< boost::function_types::is_member_function_pointer<Method>
- >::type add(const std::string& name,
- const std::string& desc,
- Method f,
- const InstanceGetter& getter);
+ template<typename Method, typename InstanceGetter,
+ typename = typename std::enable_if<
+ boost::function_types::is_member_function_pointer<Method>::value &&
+ ! std::is_convertible<InstanceGetter, LLSD>::value
+ >::type>
+ void add(const std::string& name, const std::string& desc, Method f,
+ const InstanceGetter& getter);
/**
* Register a free function with arbitrary parameters. (This also works
* for static class methods.)
*
- * @note This supports functions with up to about 6 parameters -- after
- * that you start getting dismaying compile errors in which
- * boost::fusion::joint_view is mentioned a surprising number of times.
- *
* Pass an LLSD::Array of parameter names, and optionally another
* LLSD::Array of default parameter values, a la LLSDArgsMapper.
*
@@ -211,21 +335,17 @@ public:
* an LLSD::Array using LLSDArgsMapper and then convert each entry in turn
* to the corresponding parameter type using LLSDParam.
*/
- template<typename Function>
- typename boost::enable_if< boost::function_types::is_nonmember_callable_builtin<Function>
- >::type add(const std::string& name,
- const std::string& desc,
- Function f,
- const LLSD& params,
- const LLSD& defaults=LLSD());
+ template<typename Function,
+ typename = typename std::enable_if<
+ boost::function_types::is_nonmember_callable_builtin<Function>::value &&
+ ! boost::hof::is_invocable<Function, LLSD>::value
+ >::type>
+ void add(const std::string& name, const std::string& desc, Function f,
+ const LLSD& params, const LLSD& defaults=LLSD());
/**
* Register a nonstatic class method with arbitrary parameters.
*
- * @note This supports functions with up to about 6 parameters -- after
- * that you start getting dismaying compile errors in which
- * boost::fusion::joint_view is mentioned a surprising number of times.
- *
* To cover cases such as a method on an LLSingleton we don't yet want to
* instantiate, instead of directly storing an instance pointer, accept a
* nullary callable returning a pointer/reference to the desired class
@@ -233,6 +353,8 @@ public:
* boost::lambda::var(instance) or boost::lambda::constant(instance_ptr)
* produce suitable callables.
*
+ * TODO: variant accepting a method of the containing class, no getter.
+ *
* Pass an LLSD::Array of parameter names, and optionally another
* LLSD::Array of default parameter values, a la LLSDArgsMapper.
*
@@ -240,42 +362,96 @@ public:
* an LLSD::Array using LLSDArgsMapper and then convert each entry in turn
* to the corresponding parameter type using LLSDParam.
*/
- template<typename Method, typename InstanceGetter>
- typename boost::enable_if< boost::function_types::is_member_function_pointer<Method>
- >::type add(const std::string& name,
- const std::string& desc,
- Method f,
- const InstanceGetter& getter,
- const LLSD& params,
- const LLSD& defaults=LLSD());
+ template<typename Method, typename InstanceGetter,
+ typename = typename std::enable_if<
+ boost::function_types::is_member_function_pointer<Method>::value &&
+ ! std::is_convertible<InstanceGetter, LLSD>::value
+ >::type>
+ void add(const std::string& name, const std::string& desc, Method f,
+ const InstanceGetter& getter, const LLSD& params,
+ const LLSD& defaults=LLSD());
//@}
/// Unregister a callable
bool remove(const std::string& name);
- /// Call a registered callable with an explicitly-specified name. If no
- /// such callable exists, die with LL_ERRS. If the @a event fails to match
- /// the @a required prototype specified at add() time, die with LL_ERRS.
- void operator()(const std::string& name, const LLSD& event) const;
+ /// Exception if an attempted call fails for any reason
+ struct DispatchError: public LLException
+ {
+ DispatchError(const std::string& what): LLException(what) {}
+ };
+
+ /// Specific exception for an attempt to call a nonexistent name
+ struct DispatchMissing: public DispatchError
+ {
+ DispatchMissing(const std::string& what): DispatchError(what) {}
+ };
+
+ /**
+ * Call a registered callable with an explicitly-specified name,
+ * converting its return value to LLSD (undefined for a void callable).
+ * It is an error if no such callable exists. It is an error if the @a
+ * event fails to match the @a required prototype specified at add()
+ * time.
+ *
+ * @a event must be an LLSD array for a callable registered to accept its
+ * arguments from such an array. It must be an LLSD map for a callable
+ * registered to accept its arguments from such a map.
+ */
+ LLSD operator()(const std::string& name, const LLSD& event) const;
- /// Call a registered callable with an explicitly-specified name and
- /// return <tt>true</tt>. If no such callable exists, return
- /// <tt>false</tt>. If the @a event fails to match the @a required
- /// prototype specified at add() time, die with LL_ERRS.
+ /**
+ * Call a registered callable with an explicitly-specified name and
+ * return <tt>true</tt>. If no such callable exists, return
+ * <tt>false</tt>. It is an error if the @a event fails to match the @a
+ * required prototype specified at add() time.
+ *
+ * @a event must be an LLSD array for a callable registered to accept its
+ * arguments from such an array. It must be an LLSD map for a callable
+ * registered to accept its arguments from such a map.
+ */
bool try_call(const std::string& name, const LLSD& event) const;
- /// Extract the @a key value from the incoming @a event, and call the
- /// callable whose name is specified by that map @a key. If no such
- /// callable exists, die with LL_ERRS. If the @a event fails to match the
- /// @a required prototype specified at add() time, die with LL_ERRS.
- void operator()(const LLSD& event) const;
-
- /// Extract the @a key value from the incoming @a event, call the callable
- /// whose name is specified by that map @a key and return <tt>true</tt>.
- /// If no such callable exists, return <tt>false</tt>. If the @a event
- /// fails to match the @a required prototype specified at add() time, die
- /// with LL_ERRS.
+ /**
+ * Extract the @a key specified to our constructor from the incoming LLSD
+ * map @a event, and call the callable whose name is specified by that @a
+ * key's value, converting its return value to LLSD (undefined for a void
+ * callable). It is an error if no such callable exists. It is an error if
+ * the @a event fails to match the @a required prototype specified at
+ * add() time.
+ *
+ * For a (non-nullary) callable registered to accept its arguments from an
+ * LLSD array, the @a event map must contain the key @a argskey specified to
+ * our constructor. The value of the @a argskey key must be an LLSD array
+ * containing the arguments to pass to the callable named by @a key.
+ *
+ * For a callable registered to accept its arguments from an LLSD map, if
+ * the @a event map contains the key @a argskey specified to our constructor,
+ * extract the value of the @a argskey key and use it as the arguments map.
+ * If @a event contains no @a argskey key, use the whole @a event as the
+ * arguments map.
+ */
+ LLSD operator()(const LLSD& event) const;
+
+ /**
+ * Extract the @a key specified to our constructor from the incoming LLSD
+ * map @a event, call the callable whose name is specified by that @a
+ * key's value and return <tt>true</tt>. If no such callable exists,
+ * return <tt>false</tt>. It is an error if the @a event fails to match
+ * the @a required prototype specified at add() time.
+ *
+ * For a (non-nullary) callable registered to accept its arguments from an
+ * LLSD array, the @a event map must contain the key @a argskey specified to
+ * our constructor. The value of the @a argskey key must be an LLSD array
+ * containing the arguments to pass to the callable named by @a key.
+ *
+ * For a callable registered to accept its arguments from an LLSD map, if
+ * the @a event map contains the key @a argskey specified to our constructor,
+ * extract the value of the @a argskey key and use it as the arguments map.
+ * If @a event contains no @a argskey key, use the whole @a event as the
+ * arguments map.
+ */
bool try_call(const LLSD& event) const;
/// @name Iterate over defined names
@@ -285,22 +461,26 @@ public:
private:
struct DispatchEntry
{
- DispatchEntry(const std::string& desc);
+ DispatchEntry(LLEventDispatcher* parent, const std::string& desc);
virtual ~DispatchEntry() {} // suppress MSVC warning, sigh
+ // store a plain dumb back-pointer because the parent
+ // LLEventDispatcher manages the lifespan of each DispatchEntry
+ // subclass instance -- not the other way around
+ LLEventDispatcher* mParent;
std::string mDesc;
- virtual void call(const std::string& desc, const LLSD& event) const = 0;
- virtual LLSD addMetadata(LLSD) const = 0;
+ virtual LLSD call(const std::string& desc, const LLSD& event,
+ bool fromMap, const std::string& argskey) const = 0;
+ virtual LLSD getMetadata() const = 0;
+
+ template <typename... ARGS>
+ [[noreturn]] void callFail(ARGS&&... args) const
+ {
+ mParent->callFail<LLEventDispatcher::DispatchError>(std::forward<ARGS>(args)...);
+ }
};
- // Tried using boost::ptr_map<std::string, DispatchEntry>, but ptr_map<>
- // wants its value type to be "clonable," even just to dereference an
- // iterator. I don't want to clone entries -- if I have to copy an entry
- // around, I want it to continue pointing to the same DispatchEntry
- // subclass object. However, I definitely want DispatchMap to destroy
- // DispatchEntry if no references are outstanding at the time an entry is
- // removed. This looks like a job for boost::shared_ptr.
- typedef std::map<std::string, boost::shared_ptr<DispatchEntry> > DispatchMap;
+ typedef std::map<std::string, std::unique_ptr<DispatchEntry> > DispatchMap;
public:
/// We want the flexibility to redefine what data we store per name,
@@ -323,12 +503,58 @@ public:
/// Retrieve the LLSD key we use for one-arg <tt>operator()</tt> method
std::string getDispatchKey() const { return mKey; }
+ /// Retrieve the LLSD key we use for non-map arguments
+ std::string getArgsKey() const { return mArgskey; }
+
+ /// stream this instance's leaf class name and description
+ friend std::ostream& operator<<(std::ostream&, const LLEventDispatcher&);
private:
- template <class CLASS, typename METHOD>
+ void addLLSD(const std::string& name,
+ const std::string& desc,
+ const Callable& callable,
+ const LLSD& required);
+
+ template <class CLASS, typename METHOD,
+ typename std::enable_if<
+ std::is_base_of<LLEventDispatcher, CLASS>::value,
+ bool
+ >::type=true>
void addMethod(const std::string& name, const std::string& desc,
const METHOD& method, const LLSD& required)
{
+ // Why two overloaded addMethod() methods, discriminated with
+ // std::is_base_of? It might seem simpler to use dynamic_cast and test
+ // for nullptr. The trouble is that it doesn't work for LazyEventAPI
+ // deferred registration: we get nullptr even for a method of an
+ // LLEventAPI subclass.
+ CLASS* downcast = static_cast<CLASS*>(this);
+ add(name,
+ desc,
+ Callable(LL::make_always_return<LLSD>(
+ [downcast, method]
+ (const LLSD& args)
+ {
+ return (downcast->*method)(args);
+ })),
+ required);
+ }
+
+ template <class CLASS, typename METHOD,
+ typename std::enable_if<
+ ! std::is_base_of<LLEventDispatcher, CLASS>::value,
+ bool
+ >::type=true>
+ void addMethod(const std::string& name, const std::string& desc,
+ const METHOD&, const LLSD&)
+ {
+ addFail(name, typeid(CLASS).name());
+ }
+
+ template <class CLASS, typename METHOD>
+ void addVMethod(const std::string& name, const std::string& desc,
+ const METHOD& method)
+ {
CLASS* downcast = dynamic_cast<CLASS*>(this);
if (! downcast)
{
@@ -336,38 +562,85 @@ private:
}
else
{
- add(name, desc, boost::bind(method, downcast, _1), required);
+ // add() arbitrary method plus InstanceGetter, where the
+ // InstanceGetter in this case returns 'this'. We don't need to
+ // worry about binding 'this' because, once this LLEventDispatcher
+ // is destroyed, the DispatchEntry goes away too.
+ add(name, desc, method, [downcast](){ return downcast; });
}
}
- void addFail(const std::string& name, const std::string& classname) const;
- std::string mDesc, mKey;
+ template <typename Function>
+ void addV(const std::string& name, const std::string& desc, Function f);
+
+ void addFail(const std::string& name, const char* classname) const;
+ LLSD try_call(const std::string& key, const std::string& name,
+ const LLSD& event) const;
+
+protected:
+ // raise specified EXCEPTION with specified stringize(ARGS)
+ template <typename EXCEPTION, typename... ARGS>
+ [[noreturn]] void callFail(ARGS&&... args) const;
+ template <typename EXCEPTION, typename... ARGS>
+ [[noreturn]] static
+ void sCallFail(ARGS&&... args);
+
+ // Manage transient state, e.g. which registered callable we're attempting
+ // to call, for error reporting
+ class SetState
+ {
+ public:
+ template <typename... ARGS>
+ SetState(const LLEventDispatcher* self, ARGS&&... args):
+ mSelf(self)
+ {
+ mSet = mSelf->setState(*this, stringize(std::forward<ARGS>(args)...));
+ }
+ // RAII class: forbid both copy and move
+ SetState(const SetState&) = delete;
+ SetState(SetState&&) = delete;
+ SetState& operator=(const SetState&) = delete;
+ SetState& operator=(SetState&&) = delete;
+ virtual ~SetState()
+ {
+ // if we're the ones who succeeded in setting state, clear it
+ if (mSet)
+ {
+ mSelf->setState(*this, {});
+ }
+ }
+
+ private:
+ const LLEventDispatcher* mSelf;
+ bool mSet;
+ };
+
+private:
+ std::string mDesc, mKey, mArgskey;
DispatchMap mDispatch;
+ // transient state: must be fiber_specific since multiple threads and/or
+ // multiple fibers may be calling concurrently. Make it mutable so we can
+ // use SetState even within const methods.
+ mutable boost::fibers::fiber_specific_ptr<std::string> mState;
+
+ std::string getState() const;
+ // setState() requires SetState& because only the SetState class should
+ // call it. Make it const so we can use SetState even within const methods.
+ bool setState(SetState&, const std::string& state) const;
static NameDesc makeNameDesc(const DispatchMap::value_type& item)
{
return NameDesc(item.first, item.second->mDesc);
}
+ class LLSDArgsMapper;
struct LLSDDispatchEntry;
struct ParamsDispatchEntry;
struct ArrayParamsDispatchEntry;
struct MapParamsDispatchEntry;
- // Step 2 of parameter analysis. Instantiating invoker<some_function_type>
- // implicitly sets its From and To parameters to the (compile time) begin
- // and end iterators over that function's parameter types.
- template< typename Function
- , class From = typename boost::mpl::begin< boost::function_types::parameter_types<Function> >::type
- , class To = typename boost::mpl::end< boost::function_types::parameter_types<Function> >::type
- >
- struct invoker;
-
- // deliver LLSD arguments one at a time
- typedef boost::function<LLSD()> args_source;
- // obtain args from an args_source to build param list and call target
- // function
- typedef boost::function<void(const args_source&)> invoker_function;
+ // call target function with args from LLSD array
+ typedef std::function<LLSD(const LLSD&)> invoker_function;
template <typename Function>
invoker_function make_invoker(Function f);
@@ -387,101 +660,38 @@ private:
/*****************************************************************************
* LLEventDispatcher template implementation details
*****************************************************************************/
-// Step 3 of parameter analysis, the recursive case.
-template<typename Function, class From, class To>
-struct LLEventDispatcher::invoker
-{
- template<typename T>
- struct remove_cv_ref
- : boost::remove_cv< typename boost::remove_reference<T>::type >
- { };
-
- // apply() accepts an arbitrary boost::fusion sequence as args. It
- // examines the next parameter type in the parameter-types sequence
- // bounded by From and To, obtains the next LLSD object from the passed
- // args_source and constructs an LLSDParam of appropriate type to try
- // to convert the value. It then recurs with the next parameter-types
- // iterator, passing the args sequence thus far.
- template<typename Args>
- static inline
- void apply(Function func, const args_source& argsrc, Args const & args)
- {
- typedef typename boost::mpl::deref<From>::type arg_type;
- typedef typename boost::mpl::next<From>::type next_iter_type;
- typedef typename remove_cv_ref<arg_type>::type plain_arg_type;
-
- invoker<Function, next_iter_type, To>::apply
- ( func, argsrc, boost::fusion::push_back(args, LLSDParam<plain_arg_type>(argsrc())));
- }
-
- // Special treatment for instance (first) parameter of a non-static member
- // function. Accept the instance-getter callable, calling that to produce
- // the first args value. Since we know we're at the top of the recursion
- // chain, we need not also require a partial args sequence from our caller.
- template <typename InstanceGetter>
- static inline
- void method_apply(Function func, const args_source& argsrc, const InstanceGetter& getter)
- {
- typedef typename boost::mpl::next<From>::type next_iter_type;
-
- // Instead of grabbing the first item from argsrc and making an
- // LLSDParam of it, call getter() and pass that as the instance param.
- invoker<Function, next_iter_type, To>::apply
- ( func, argsrc, boost::fusion::push_back(boost::fusion::nil(), boost::ref(getter())));
- }
-};
-
-// Step 4 of parameter analysis, the leaf case. When the general
-// invoker<Function, From, To> logic has advanced From until it matches To,
-// the compiler will pick this template specialization.
-template<typename Function, class To>
-struct LLEventDispatcher::invoker<Function,To,To>
-{
- // the argument list is complete, now call the function
- template<typename Args>
- static inline
- void apply(Function func, const args_source&, Args const & args)
- {
- boost::fusion::invoke(func, args);
- }
-};
-
-template<typename Function>
-typename boost::enable_if< boost::function_types::is_nonmember_callable_builtin<Function> >::type
-LLEventDispatcher::add(const std::string& name, const std::string& desc, Function f)
+template <typename Function>
+void LLEventDispatcher::addV(const std::string& name, const std::string& desc, Function f)
{
- // Construct an invoker_function, a callable accepting const args_source&.
+ // Construct an invoker_function, a callable accepting const LLSD&.
// Add to DispatchMap an ArrayParamsDispatchEntry that will handle the
// caller's LLSD::Array.
addArrayParamsDispatchEntry(name, desc, make_invoker(f),
- boost::function_types::function_arity<Function>::value);
+ LL::function_arity<Function>::value);
}
-template<typename Method, typename InstanceGetter>
-typename boost::enable_if< boost::function_types::is_member_function_pointer<Method> >::type
-LLEventDispatcher::add(const std::string& name, const std::string& desc, Method f,
- const InstanceGetter& getter)
+template<typename Method, typename InstanceGetter, typename>
+void LLEventDispatcher::add(const std::string& name, const std::string& desc, Method f,
+ const InstanceGetter& getter)
{
// Subtract 1 from the compile-time arity because the getter takes care of
// the first parameter. We only need (arity - 1) additional arguments.
addArrayParamsDispatchEntry(name, desc, make_invoker(f, getter),
- boost::function_types::function_arity<Method>::value - 1);
+ LL::function_arity<Method>::value - 1);
}
-template<typename Function>
-typename boost::enable_if< boost::function_types::is_nonmember_callable_builtin<Function> >::type
-LLEventDispatcher::add(const std::string& name, const std::string& desc, Function f,
- const LLSD& params, const LLSD& defaults)
+template<typename Function, typename>
+void LLEventDispatcher::add(const std::string& name, const std::string& desc, Function f,
+ const LLSD& params, const LLSD& defaults)
{
// See comments for previous is_nonmember_callable_builtin add().
addMapParamsDispatchEntry(name, desc, make_invoker(f), params, defaults);
}
-template<typename Method, typename InstanceGetter>
-typename boost::enable_if< boost::function_types::is_member_function_pointer<Method> >::type
-LLEventDispatcher::add(const std::string& name, const std::string& desc, Method f,
- const InstanceGetter& getter,
- const LLSD& params, const LLSD& defaults)
+template<typename Method, typename InstanceGetter, typename>
+void LLEventDispatcher::add(const std::string& name, const std::string& desc, Method f,
+ const InstanceGetter& getter,
+ const LLSD& params, const LLSD& defaults)
{
addMapParamsDispatchEntry(name, desc, make_invoker(f, getter), params, defaults);
}
@@ -490,29 +700,45 @@ template <typename Function>
LLEventDispatcher::invoker_function
LLEventDispatcher::make_invoker(Function f)
{
- // Step 1 of parameter analysis, the top of the recursion. Passing a
- // suitable f (see add()'s enable_if condition) to this method causes it
- // to infer the function type; specifying that function type to invoker<>
- // causes it to fill in the begin/end MPL iterators over the function's
- // list of parameter types.
- // While normally invoker::apply() could infer its template type from the
- // boost::fusion::nil parameter value, here we must be explicit since
- // we're boost::bind()ing it rather than calling it directly.
- return boost::bind(&invoker<Function>::template apply<boost::fusion::nil>,
- f,
- _1,
- boost::fusion::nil());
+ // Return an invoker_function that accepts (const LLSD& args).
+ return [f](const LLSD& args)
+ {
+ // When called, call always_return<LLSD>, directing it to call
+ // f(expanded args). always_return<LLSD> guarantees we'll get an LLSD
+ // value back, even if it's undefined because 'f' doesn't return a
+ // type convertible to LLSD.
+ return LL::always_return<LLSD>(
+ [f, args]
+ ()
+ {
+ return LL::apply(f, args);
+ });
+ };
}
template <typename Method, typename InstanceGetter>
LLEventDispatcher::invoker_function
LLEventDispatcher::make_invoker(Method f, const InstanceGetter& getter)
{
- // Use invoker::method_apply() to treat the instance (first) arg specially.
- return boost::bind(&invoker<Method>::template method_apply<InstanceGetter>,
- f,
- _1,
- getter);
+ return [f, getter](const LLSD& args)
+ {
+ // always_return<LLSD>() immediately calls the lambda we pass, and
+ // returns LLSD whether our passed lambda returns void or non-void.
+ return LL::always_return<LLSD>(
+ [f, getter, args]
+ ()
+ {
+ // function_arity<member function> includes its implicit 'this' pointer
+ constexpr auto arity = LL::function_arity<
+ typename std::remove_reference<Method>::type>::value - 1;
+
+ // Use bind_front() to bind the method to (a pointer to) the object
+ // returned by getter(). It's okay to capture and bind a pointer
+ // because this bind_front() object will last only as long as this
+ // lambda call.
+ return LL::apply_n<arity>(LL::bind_front(f, LL::get_ptr(getter())), args);
+ });
+ };
}
/*****************************************************************************
@@ -521,21 +747,138 @@ LLEventDispatcher::make_invoker(Method f, const InstanceGetter& getter)
/**
* Bundle an LLEventPump and a listener with an LLEventDispatcher. A class
* that contains (or derives from) LLDispatchListener need only specify the
- * LLEventPump name and dispatch key, and add() its methods. Incoming events
- * will automatically be dispatched.
+ * LLEventPump name and dispatch key, and add() its methods. Each incoming
+ * event ("request") will automatically be dispatched.
+ *
+ * If the request contains a "reply" key specifying the LLSD::String name of
+ * an LLEventPump to which to respond, LLDispatchListener will attempt to send
+ * a response to that LLEventPump.
+ *
+ * If some error occurs (e.g. nonexistent callable name, wrong params) and
+ * "reply" is present, LLDispatchListener will send a response map to the
+ * specified LLEventPump containing an "error" key whose value is the relevant
+ * error message. If "reply" is not present, the DispatchError exception will
+ * propagate. Since LLDispatchListener bundles an LLEventStream, which
+ * attempts the call immediately on receiving the post() call, there's a
+ * reasonable chance that the exception will highlight the post() call that
+ * triggered the error.
+ *
+ * If LLDispatchListener successfully calls the target callable, but no
+ * "reply" key is present, any value returned by that callable is discarded.
+ * If a "reply" key is present, but the target callable is void -- or it
+ * returns LLSD::isUndefined() -- no response is sent. If a void callable
+ * wants to send a response, it must do so explicitly.
+ *
+ * If the target callable returns a type convertible to LLSD (and, if it
+ * directly returns LLSD, the return value isDefined()), and if a "reply" key
+ * is present in the request, LLDispatchListener will post the returned value
+ * to the "reply" LLEventPump. If the returned value is an LLSD map, it will
+ * merge the echoed "reqid" key into the map and send that. Otherwise, it will
+ * send an LLSD map containing "reqid" and a "data" key whose value is the
+ * value returned by the target callable.
+ *
+ * (It is inadvisable for a target callable to return an LLSD map containing
+ * keys "data", "reqid" or "error", as that will confuse the invoker.)
+ *
+ * Normally the request will specify the value of the dispatch key as an
+ * LLSD::String naming the target callable. Alternatively, several such calls
+ * may be "batched" as described below.
+ *
+ * If the value of the dispatch key is itself an LLSD map (a "request map"),
+ * each map key must name a target callable, and the value of that key must
+ * contain the parameters to pass to that callable. If a "reply" key is
+ * present in the request, the response map will contain a key for each of the
+ * keys in the request map. The value of every such key is the value returned
+ * by the target callable.
+ *
+ * (Avoid naming any target callable in the LLDispatchListener "data", "reqid"
+ * or "error" to avoid confusion.)
+ *
+ * Since LLDispatchListener calls the target callables specified by a request
+ * map in arbitrary order, this form assumes that the batched operations are
+ * independent of each other. LLDispatchListener will attempt every call, even
+ * if some attempts produce errors. If any keys in the request map produce
+ * errors, LLDispatchListener builds a composite error message string
+ * collecting the relevant messages. The corresponding keys will be missing
+ * from the response map. As in the single-callable case, absent a "reply" key
+ * in the request, this error message will be thrown as a DispatchError. With
+ * a "reply" key, it will be returned as the value of the "error" key. This
+ * form can indicate partial success: some request keys might have
+ * return-value keys in the response, others might have message text in the
+ * "error" key.
+ *
+ * If a specific call sequence is required, the value of the dispatch key may
+ * instead be an LLSD array (a "request array"). Each entry in the request
+ * array ("request entry") names a target callable, to be called in
+ * array-index sequence. Arguments for that callable may be specified in
+ * either of two ways.
+ *
+ * The request entry may itself be a two-element array, whose [0] is an
+ * LLSD::String naming the target callable and whose [1] contains the
+ * arguments to pass to that callable.
+ *
+ * Alternatively, the request entry may be an LLSD::String naming the target
+ * callable, in which case the request must contain an arguments key (optional
+ * third constructor argument) whose value is an array matching the request
+ * array. The arguments for the request entry's target callable are found at
+ * the same index in the arguments key array.
+ *
+ * If a "reply" key is present in the request, the response map will contain a
+ * "data" key whose value is an array. Each entry in that response array will
+ * contain the result from the corresponding request entry.
+ *
+ * This form assumes that any of the batched operations might depend on the
+ * success of a previous operation in the same batch. The @em first error
+ * encountered will terminate the sequence. The error message might either be
+ * thrown as DispatchError or, given a "reply" key, returned as the "error"
+ * key in the response map. This form can indicate partial success: the first
+ * few request entries might have return-value entries in the "data" response
+ * array, along with an "error" key whose value is the error message that
+ * stopped the sequence.
*/
-class LL_COMMON_API LLDispatchListener: public LLEventDispatcher
+// Instead of containing an LLEventStream, LLDispatchListener derives from it.
+// This allows an LLEventPumps::PumpFactory to return a pointer to an
+// LLDispatchListener (subclass) instance, and still have ~LLEventPumps()
+// properly clean it up.
+class LL_COMMON_API LLDispatchListener:
+ public LLEventDispatcher,
+ public LLEventStream
{
public:
- LLDispatchListener(const std::string& pumpname, const std::string& key);
-
- std::string getPumpName() const { return mPump.getName(); }
+ /// LLEventPump name, dispatch key [, arguments key (see LLEventDispatcher)]
+ template <typename... ARGS>
+ LLDispatchListener(const std::string& pumpname, const std::string& key,
+ ARGS&&... args);
+ virtual ~LLDispatchListener() {}
private:
- bool process(const LLSD& event);
+ bool process(const LLSD& event) const;
+ void call_one(const LLSD& name, const LLSD& event) const;
+ void call_map(const LLSD& reqmap, const LLSD& event) const;
+ void call_array(const LLSD& reqarray, const LLSD& event) const;
+ void reply(const LLSD& reply, const LLSD& request) const;
- LLEventStream mPump;
LLTempBoundListener mBoundListener;
+ static std::string mReplyKey;
};
+template <typename... ARGS>
+LLDispatchListener::LLDispatchListener(const std::string& pumpname, const std::string& key,
+ ARGS&&... args):
+ // pass through any additional arguments to LLEventDispatcher ctor
+ LLEventDispatcher(pumpname, key, std::forward<ARGS>(args)...),
+ // Do NOT tweak the passed pumpname. In practice, when someone
+ // instantiates a subclass of our LLEventAPI subclass, they intend to
+ // claim that LLEventPump name in the global LLEventPumps namespace. It
+ // would be mysterious and distressing if we allowed name tweaking, and
+ // someone else claimed pumpname first for a completely unrelated
+ // LLEventPump. Posted events would never reach our subclass listener
+ // because we would have silently changed its name; meanwhile listeners
+ // (if any) on that other LLEventPump would be confused by the events
+ // intended for our subclass.
+ LLEventStream(pumpname, false),
+ mBoundListener(listen("self", [this](const LLSD& event){ return process(event); }))
+{
+}
+
#endif /* ! defined(LL_LLEVENTDISPATCHER_H) */
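To round out the class comment above, here is roughly what the batched request forms look like on the wire (a sketch; it assumes an LLDispatchListener on pump "cmd" with callables "one" (array params) and "two" (map params) already registered, and all names are illustrative).

    void demo_batched()
    {
        LLEventPump& cmd = LLEventPumps::instance().obtain("cmd");

        // request map: independent calls in arbitrary order; each key's return
        // value comes back under the same key in the response map
        cmd.post(llsd::map("op", llsd::map("one", llsd::array(1, 2),
                                           "two", llsd::map("x", 3)),
                           "reply", "reply-pump"));

        // request array: calls run in sequence and stop at the first error;
        // return values come back as the response map's "data" array
        cmd.post(llsd::map("op", llsd::array(llsd::array("one", llsd::array(1, 2)),
                                             llsd::array("two", llsd::map("x", 3))),
                           "reply", "reply-pump"));
        // alternatively, request-array entries may be plain names, with each
        // entry's arguments supplied at the same index of an array under "args"
    }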
diff --git a/indra/llcommon/lleventfilter.h b/indra/llcommon/lleventfilter.h
index 7613850fb2..1fb41e0297 100644
--- a/indra/llcommon/lleventfilter.h
+++ b/indra/llcommon/lleventfilter.h
@@ -435,16 +435,61 @@ public:
// generic type-appropriate store through mTarget, construct an
// LLSDParam<T> and store that, thus engaging LLSDParam's custom
// conversions.
- mTarget = LLSDParam<T>(llsd::drill(event, mPath));
+ storeTarget(LLSDParam<T>(llsd::drill(event, mPath)));
return mConsume;
}
private:
+ // This method disambiguates LLStoreListener<LLSD>. Directly assigning
+ // some_LLSD_var = LLSDParam<LLSD>(some_LLSD_value);
+ // is problematic because the compiler has too many choices: LLSD has
+ // multiple assignment operator overloads, and LLSDParam<LLSD> has a
+ // templated conversion operator. But LLSDParam<LLSD> can convert to a
+ // (const LLSD&) parameter, and LLSD::operator=(const LLSD&) works.
+ void storeTarget(const T& value)
+ {
+ mTarget = value;
+ }
+
T& mTarget;
const LLSD mPath;
const bool mConsume;
};
+/**
+ * LLVarHolder bundles a target variable of the specified type. We use it as a
+ * base class so the target variable will be fully constructed by the time a
+ * subclass constructor tries to pass a reference to some other base class.
+ */
+template <typename T>
+struct LLVarHolder
+{
+ T mVar;
+};
+
+/**
+ * LLCaptureListener isa LLStoreListener that bundles the target variable of
+ * interest.
+ */
+template <typename T>
+class LLCaptureListener: public LLVarHolder<T>,
+ public LLStoreListener<T>
+{
+private:
+ using holder = LLVarHolder<T>;
+ using super = LLStoreListener<T>;
+
+public:
+ LLCaptureListener(const LLSD& path=LLSD(), bool consume=false):
+ super(*this, holder::mVar, path, consume)
+ {}
+
+ void set(T&& newval=T()) { holder::mVar = std::forward<T>(newval); }
+
+ const T& get() const { return holder::mVar; }
+ operator const T&() { return holder::mVar; }
+};
+
/*****************************************************************************
* LLEventLogProxy
*****************************************************************************/
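
A minimal test-style sketch of LLCaptureListener, assuming llsd::map() from llsdutil.h. Posting to the listener exercises the inherited LLStoreListener::post() shown above, which drills to the "value" element and stores it in the bundled variable.

// Hypothetical usage of LLCaptureListener (names and values are illustrative).
#include "lleventfilter.h"
#include "llsdutil.h"

void capture_sketch()
{
    LLCaptureListener<int> captured("value");   // path "value", consume=false
    captured.post(llsd::map("value", 42));      // stored via LLSDParam<int>
    int seen = captured.get();                  // 42
    captured.set(0);                            // reset the bundled variable
    (void)seen;
}
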
diff --git a/indra/llcommon/llevents.cpp b/indra/llcommon/llevents.cpp
index 0a213bddef..1a305ec3dc 100644
--- a/indra/llcommon/llevents.cpp
+++ b/indra/llcommon/llevents.cpp
@@ -68,19 +68,78 @@
LLEventPumps::LLEventPumps():
mFactories
{
- { "LLEventStream", [](const std::string& name, bool tweak)
+ { "LLEventStream", [](const std::string& name, bool tweak, const std::string& /*type*/)
{ return new LLEventStream(name, tweak); } },
- { "LLEventMailDrop", [](const std::string& name, bool tweak)
+ { "LLEventMailDrop", [](const std::string& name, bool tweak, const std::string& /*type*/)
{ return new LLEventMailDrop(name, tweak); } }
},
mTypes
{
- // LLEventStream is the default for obtain(), so even if somebody DOES
- // call obtain("placeholder"), this sample entry won't break anything.
- { "placeholder", "LLEventStream" }
+// { "placeholder", "LLEventStream" }
}
{}
+bool LLEventPumps::registerTypeFactory(const std::string& type, const TypeFactory& factory)
+{
+ auto found = mFactories.find(type);
+ // can't re-register a TypeFactory for a type name that's already registered
+ if (found != mFactories.end())
+ return false;
+ // doesn't already exist, go ahead and register
+ mFactories[type] = factory;
+ return true;
+}
+
+void LLEventPumps::unregisterTypeFactory(const std::string& type)
+{
+ auto found = mFactories.find(type);
+ if (found != mFactories.end())
+ mFactories.erase(found);
+}
+
+bool LLEventPumps::registerPumpFactory(const std::string& name, const PumpFactory& factory)
+{
+ // Do we already have a pump by this name?
+ if (mPumpMap.find(name) != mPumpMap.end())
+ return false;
+ // Do we already have an override for this pump name?
+ if (mTypes.find(name) != mTypes.end())
+ return false;
+ // Leverage the two-level lookup implemented by mTypes (pump name -> type
+ // name) and mFactories (type name -> factory). We could instead create a
+ // whole separate (pump name -> factory) map, and look in both; or we
+ // could change mTypes to (pump name -> factory) and, for typical type-
+ // based lookups, use a "factory" that looks up the real factory in
+ // mFactories. But this works, and we don't expect many calls to make() -
+ // either explicit or implicit via obtain().
+ // Create a bogus type name extremely unlikely to collide with an actual type.
+ static std::string nul(1, '\0');
+ std::string type_name{ nul + name };
+ mTypes[name] = type_name;
+ // TypeFactory is called with (name, tweak, type), whereas PumpFactory
+ // accepts only name. We could adapt with std::bind(), but this lambda
+ // does the trick.
+ mFactories[type_name] =
+ [factory]
+ (const std::string& name, bool /*tweak*/, const std::string& /*type*/)
+ { return factory(name); };
+ return true;
+}
+
+void LLEventPumps::unregisterPumpFactory(const std::string& name)
+{
+ auto tfound = mTypes.find(name);
+ if (tfound != mTypes.end())
+ {
+ auto ffound = mFactories.find(tfound->second);
+ if (ffound != mFactories.end())
+ {
+ mFactories.erase(ffound);
+ }
+ mTypes.erase(tfound);
+ }
+}
+
LLEventPump& LLEventPumps::obtain(const std::string& name)
{
PumpMap::iterator found = mPumpMap.find(name);
@@ -114,7 +173,7 @@ LLEventPump& LLEventPumps::make(const std::string& name, bool tweak,
// Passing an unrecognized type name is a no-no
LLTHROW(BadType(type));
}
- auto newInstance = (found->second)(name, tweak);
+ auto newInstance = (found->second)(name, tweak, type);
// LLEventPump's constructor implicitly registers each new instance in
// mPumpMap. But remember that we instantiated it (in mOurPumps) so we'll
// delete it later.
diff --git a/indra/llcommon/llevents.h b/indra/llcommon/llevents.h
index ae6e5aabc9..c1dbf4392f 100644
--- a/indra/llcommon/llevents.h
+++ b/indra/llcommon/llevents.h
@@ -268,6 +268,45 @@ public:
LLEventPump& make(const std::string& name, bool tweak=false,
const std::string& type=std::string());
+ /// function passed to registerTypeFactory()
+ typedef std::function<LLEventPump*(const std::string& name, bool tweak, const std::string& type)> TypeFactory;
+
+ /**
+ * Register a TypeFactory for use with make(). When make() is called with
+ * the specified @a type string, call @a factory(name, tweak, type) to
+ * instantiate it.
+ *
+ * Returns true if successfully registered, false if there already exists
+ * a TypeFactory for the specified @a type name.
+ */
+ bool registerTypeFactory(const std::string& type, const TypeFactory& factory);
+ void unregisterTypeFactory(const std::string& type);
+
+ /// function passed to registerPumpFactory()
+ typedef std::function<LLEventPump*(const std::string&)> PumpFactory;
+
+ /**
+ * Register a PumpFactory for use with obtain(). When obtain() is called
+ * with the specified @a name string, if an LLEventPump with the specified
+ * @a name doesn't already exist, call @a factory(name) to instantiate it.
+ *
+ * Returns true if successfully registered, false if there already exists
+ * a factory override for the specified @a name.
+ *
+ * PumpFactory does not support @a tweak because it's only called when
+ * <i>that particular</i> @a name is passed to obtain(). Bear in mind that
+ * <tt>obtain(name)</tt> might still bypass the caller's PumpFactory for a
+ * couple different reasons:
+ *
+ * * registerPumpFactory() returns false because there's already a factory
+ * override for the specified @a name
+ * * between a successful <tt>registerPumpFactory(name)</tt> call (returns
+ * true) and a call to <tt>obtain(name)</tt>, someone explicitly
+ * instantiated an LLEventPump(name), so obtain(name) returned that.
+ */
+ bool registerPumpFactory(const std::string& name, const PumpFactory& factory);
+ void unregisterPumpFactory(const std::string& name);
+
/**
* Find the named LLEventPump instance. If it exists post the message to it.
* If the pump does not exist, do nothing.
@@ -325,13 +364,13 @@ testable:
typedef std::set<LLEventPump*> PumpSet;
PumpSet mOurPumps;
// for make(), map string type name to LLEventPump subclass factory function
- typedef std::map<std::string, std::function<LLEventPump*(const std::string&, bool)>> PumpFactories;
+ typedef std::map<std::string, TypeFactory> TypeFactories;
// Data used by make().
// One might think mFactories and mTypes could reasonably be static. So
// they could -- if not for the fact that make() or obtain() might be
// called before this module's static variables have been initialized.
// This is why we use singletons in the first place.
- PumpFactories mFactories;
+ TypeFactories mFactories;
// for obtain(), map desired string instance name to string type when
// obtain() must create the instance
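
A hedged sketch of registerPumpFactory(): claim a pump name so that a later obtain() builds a specific LLEventPump subclass instead of the default LLEventStream. The "my.listener" name is illustrative.

// Hypothetical registration of a PumpFactory for a single pump name.
void pump_factory_sketch()
{
    bool claimed = LLEventPumps::instance().registerPumpFactory(
        "my.listener",
        [](const std::string& name) -> LLEventPump*
        { return new LLEventMailDrop(name, false); });

    if (claimed)
    {
        // obtain() finds no existing pump, follows mTypes -> mFactories, and
        // calls the lambda; LLEventPumps owns and later deletes the new pump.
        LLEventPump& drop = LLEventPumps::instance().obtain("my.listener");
        drop.post("queued until somebody listens");
    }
}
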
diff --git a/indra/llcommon/llframetimer.cpp b/indra/llcommon/llframetimer.cpp
index c54029e8b4..1e9920746b 100644
--- a/indra/llcommon/llframetimer.cpp
+++ b/indra/llcommon/llframetimer.cpp
@@ -29,11 +29,6 @@
#include "llframetimer.h"
-// We don't bother building a stand alone lib; we just need to include the one source file for Tracy support
-#if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY || LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY_FAST_TIMER
- #include "TracyClient.cpp"
-#endif // LL_PROFILER_CONFIGURATION
-
// Static members
//LLTimer LLFrameTimer::sInternalTimer;
U64 LLFrameTimer::sStartTotalTime = totalTime();
diff --git a/indra/llcommon/llinstancetracker.h b/indra/llcommon/llinstancetracker.h
index 34f2a5985a..27422e1266 100644
--- a/indra/llcommon/llinstancetracker.h
+++ b/indra/llcommon/llinstancetracker.h
@@ -104,22 +104,26 @@ public:
return LockStatic()->mMap.size();
}
- // snapshot of std::pair<const KEY, std::shared_ptr<T>> pairs
- class snapshot
+ // snapshot of std::pair<const KEY, std::shared_ptr<SUBCLASS>> pairs, for
+ // some SUBCLASS derived from T
+ template <typename SUBCLASS>
+ class snapshot_of
{
// It's very important that what we store in this snapshot are
// weak_ptrs, NOT shared_ptrs. That's how we discover whether any
// instance has been deleted during the lifespan of a snapshot.
typedef std::vector<std::pair<const KEY, weak_t>> VectorType;
- // Dereferencing our iterator produces a std::shared_ptr for each
- // instance that still exists. Since we store weak_ptrs, that involves
- // two chained transformations:
+ // Dereferencing the iterator we publish produces a
+ // std::shared_ptr<SUBCLASS> for each instance that still exists.
+ // Since we store weak_ptr<T>, that involves two chained
+ // transformations:
// - a transform_iterator to lock the weak_ptr and return a shared_ptr
- // - a filter_iterator to skip any shared_ptr that has become invalid.
+ // - a filter_iterator to skip any shared_ptr<T> that has become
+ // invalid or references any T instance that isn't SUBCLASS.
// It is very important that we filter lazily, that is, during
// traversal. Any one of our stored weak_ptrs might expire during
// traversal.
- typedef std::pair<const KEY, ptr_t> strong_pair;
+ typedef std::pair<const KEY, std::shared_ptr<SUBCLASS>> strong_pair;
// Note for future reference: nat has not yet had any luck (up to
// Boost 1.67) trying to use boost::transform_iterator with a hand-
// coded functor, only with actual functions. In my experience, an
@@ -127,7 +131,7 @@ public:
// result_type typedef. But this works.
static strong_pair strengthen(typename VectorType::value_type& pair)
{
- return { pair.first, pair.second.lock() };
+ return { pair.first, std::dynamic_pointer_cast<SUBCLASS>(pair.second.lock()) };
}
static bool dead_skipper(const strong_pair& pair)
{
@@ -135,7 +139,7 @@ public:
}
public:
- snapshot():
+ snapshot_of():
// populate our vector with a snapshot of (locked!) InstanceMap
// note, this assigns pair<KEY, shared_ptr> to pair<KEY, weak_ptr>
mData(mLock->mMap.begin(), mLock->mMap.end())
@@ -184,44 +188,51 @@ public:
#endif // LL_WINDOWS
VectorType mData;
};
+ using snapshot = snapshot_of<T>;
- // iterate over this for references to each instance
- class instance_snapshot: public snapshot
+ // iterate over this for references to each SUBCLASS instance
+ template <typename SUBCLASS>
+ class instance_snapshot_of: public snapshot_of<SUBCLASS>
{
private:
- static T& instance_getter(typename snapshot::iterator::reference pair)
+ using super = snapshot_of<SUBCLASS>;
+ static T& instance_getter(typename super::iterator::reference pair)
{
return *pair.second;
}
public:
typedef boost::transform_iterator<decltype(instance_getter)*,
- typename snapshot::iterator> iterator;
- iterator begin() { return iterator(snapshot::begin(), instance_getter); }
- iterator end() { return iterator(snapshot::end(), instance_getter); }
+ typename super::iterator> iterator;
+ iterator begin() { return iterator(super::begin(), instance_getter); }
+ iterator end() { return iterator(super::end(), instance_getter); }
void deleteAll()
{
- for (auto it(snapshot::begin()), end(snapshot::end()); it != end; ++it)
+ for (auto it(super::begin()), end(super::end()); it != end; ++it)
{
delete it->second.get();
}
}
- };
+ };
+ using instance_snapshot = instance_snapshot_of<T>;
// iterate over this for each key
- class key_snapshot: public snapshot
+ template <typename SUBCLASS>
+ class key_snapshot_of: public snapshot_of<SUBCLASS>
{
private:
- static KEY key_getter(typename snapshot::iterator::reference pair)
+ using super = snapshot_of<SUBCLASS>;
+ static KEY key_getter(typename super::iterator::reference pair)
{
return pair.first;
}
public:
typedef boost::transform_iterator<decltype(key_getter)*,
- typename snapshot::iterator> iterator;
- iterator begin() { return iterator(snapshot::begin(), key_getter); }
- iterator end() { return iterator(snapshot::end(), key_getter); }
+ typename super::iterator> iterator;
+ iterator begin() { return iterator(super::begin(), key_getter); }
+ iterator end() { return iterator(super::end(), key_getter); }
};
+ using key_snapshot = key_snapshot_of<T>;
static ptr_t getInstance(const KEY& k)
{
@@ -368,22 +379,25 @@ public:
return LockStatic()->mSet.size();
}
- // snapshot of std::shared_ptr<T> pointers
- class snapshot
+ // snapshot of std::shared_ptr<SUBCLASS> pointers
+ template <typename SUBCLASS>
+ class snapshot_of
{
// It's very important that what we store in this snapshot are
// weak_ptrs, NOT shared_ptrs. That's how we discover whether any
// instance has been deleted during the lifespan of a snapshot.
typedef std::vector<weak_t> VectorType;
- // Dereferencing our iterator produces a std::shared_ptr for each
- // instance that still exists. Since we store weak_ptrs, that involves
- // two chained transformations:
+ // Dereferencing the iterator we publish produces a
+ // std::shared_ptr<SUBCLASS> for each instance that still exists.
+ // Since we store weak_ptrs, that involves two chained
+ // transformations:
// - a transform_iterator to lock the weak_ptr and return a shared_ptr
- // - a filter_iterator to skip any shared_ptr that has become invalid.
- typedef std::shared_ptr<T> strong_ptr;
+ // - a filter_iterator to skip any shared_ptr that has become invalid
+ // or references any T instance that isn't SUBCLASS.
+ typedef std::shared_ptr<SUBCLASS> strong_ptr;
static strong_ptr strengthen(typename VectorType::value_type& ptr)
{
- return ptr.lock();
+ return std::dynamic_pointer_cast<SUBCLASS>(ptr.lock());
}
static bool dead_skipper(const strong_ptr& ptr)
{
@@ -391,7 +405,7 @@ public:
}
public:
- snapshot():
+ snapshot_of():
// populate our vector with a snapshot of (locked!) InstanceSet
// note, this assigns stored shared_ptrs to weak_ptrs for snapshot
mData(mLock->mSet.begin(), mLock->mSet.end())
@@ -437,22 +451,33 @@ public:
#endif // LL_WINDOWS
VectorType mData;
};
+ using snapshot = snapshot_of<T>;
// iterate over this for references to each instance
- struct instance_snapshot: public snapshot
+ template <typename SUBCLASS>
+ class instance_snapshot_of: public snapshot_of<SUBCLASS>
{
- typedef boost::indirect_iterator<typename snapshot::iterator> iterator;
- iterator begin() { return iterator(snapshot::begin()); }
- iterator end() { return iterator(snapshot::end()); }
+ private:
+ using super = snapshot_of<SUBCLASS>;
+
+ public:
+ typedef boost::indirect_iterator<typename super::iterator> iterator;
+ iterator begin() { return iterator(super::begin()); }
+ iterator end() { return iterator(super::end()); }
void deleteAll()
{
- for (auto it(snapshot::begin()), end(snapshot::end()); it != end; ++it)
+ for (auto it(super::begin()), end(super::end()); it != end; ++it)
{
delete it->get();
}
}
};
+ using instance_snapshot = instance_snapshot_of<T>;
+ // key_snapshot_of isn't really meaningful, but define it anyway to avoid
+ // requiring two different LLInstanceTrackerSubclass implementations.
+ template <typename SUBCLASS>
+ using key_snapshot_of = instance_snapshot_of<SUBCLASS>;
protected:
LLInstanceTracker()
diff --git a/indra/llcommon/llinstancetrackersubclass.h b/indra/llcommon/llinstancetrackersubclass.h
new file mode 100644
index 0000000000..ea9a38200f
--- /dev/null
+++ b/indra/llcommon/llinstancetrackersubclass.h
@@ -0,0 +1,98 @@
+/**
+ * @file llinstancetrackersubclass.h
+ * @author Nat Goodspeed
+ * @date 2022-12-09
+ * @brief Intermediate class to get subclass-specific types from
+ * LLInstanceTracker instance-retrieval methods.
+ *
+ * $LicenseInfo:firstyear=2022&license=viewerlgpl$
+ * Copyright (c) 2022, Linden Research, Inc.
+ * $/LicenseInfo$
+ */
+
+#if ! defined(LL_LLINSTANCETRACKERSUBCLASS_H)
+#define LL_LLINSTANCETRACKERSUBCLASS_H
+
+#include <memory> // std::shared_ptr, std::weak_ptr
+
+/**
+ * Derive your subclass S of a subclass T of LLInstanceTracker<T> from
+ * LLInstanceTrackerSubclass<S, T> to perform appropriate downcasting and
+ * filtering for LLInstanceTracker access methods.
+ *
+ * LLInstanceTracker<T> uses CRTP, so that getWeak(), getInstance(), snapshot
+ * and instance_snapshot return pointers and references to T. The trouble is
+ * that subclasses T0 and T1 derived from T also get pointers and references
+ * to their base class T, requiring explicit downcasting. Moreover,
+ * T0::getInstance() shouldn't find an instance of any T subclass other than
+ * T0. Nor should T0::snapshot.
+ *
+ * @code
+ * class Tracked: public LLInstanceTracker<Tracked, std::string>
+ * {
+ * private:
+ * using super = LLInstanceTracker<Tracked, std::string>;
+ * public:
+ * Tracked(const std::string& name): super(name) {}
+ * // All references to Tracked::ptr_t, Tracked::getInstance() etc.
+ * // appropriately use Tracked.
+ * // ...
+ * };
+ *
+ * // But now we derive SubTracked from Tracked. We need SubTracked::ptr_t,
+ * // SubTracked::getInstance() etc. to use SubTracked, not Tracked.
+ * // This LLInstanceTrackerSubclass specialization is itself derived from
+ * // Tracked.
+ * class SubTracked: public LLInstanceTrackerSubclass<SubTracked, Tracked>
+ * {
+ * private:
+ * using super = LLInstanceTrackerSubclass<SubTracked, Tracked>;
+ * public:
+ * // LLInstanceTrackerSubclass's constructor forwards to Tracked's.
+ * SubTracked(const std::string& name): super(name) {}
+ * // SubTracked::getInstance() returns std::shared_ptr<SubTracked>, etc.
+ * // ...
+ * @endcode
+ */
+template <typename SUBCLASS, typename T>
+class LLInstanceTrackerSubclass: public T
+{
+public:
+ using ptr_t = std::shared_ptr<SUBCLASS>;
+ using weak_t = std::weak_ptr<SUBCLASS>;
+
+ // forward any constructor call to the corresponding T ctor
+ template <typename... ARGS>
+ LLInstanceTrackerSubclass(ARGS&&... args):
+ T(std::forward<ARGS>(args)...)
+ {}
+
+ weak_t getWeak()
+ {
+ // call base-class getWeak(), try to lock, downcast to SUBCLASS
+ return std::dynamic_pointer_cast<SUBCLASS>(T::getWeak().lock());
+ }
+
+ template <typename KEY>
+ static ptr_t getInstance(const KEY& k)
+ {
+ return std::dynamic_pointer_cast<SUBCLASS>(T::getInstance(k));
+ }
+
+ using snapshot = typename T::template snapshot_of<SUBCLASS>;
+ using instance_snapshot = typename T::template instance_snapshot_of<SUBCLASS>;
+ using key_snapshot = typename T::template key_snapshot_of<SUBCLASS>;
+
+ static size_t instanceCount()
+ {
+ // T::instanceCount() lies because our snapshot, et al., won't
+ // necessarily return all the T instances -- only those that are also
+ // SUBCLASS instances. Count those.
+ size_t count = 0;
+ for (const auto& pair : snapshot())
+ ++count;
+ return count;
+ }
+};
+
+#endif /* ! defined(LL_LLINSTANCETRACKERSUBCLASS_H) */
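
A hypothetical continuation of the Tracked/SubTracked example in the header comment above, showing the effect of the SUBCLASS-filtered snapshots.

// Illustrative only; instance names are made up.
void subclass_tracker_sketch()
{
    Tracked    plain("plain");
    SubTracked subA("subA"), subB("subB");

    // getInstance() through the subclass yields the subclass pointer type.
    SubTracked::ptr_t found = SubTracked::getInstance("subA");

    // Snapshots taken through SubTracked skip "plain": its weak_ptr locks,
    // but dynamic_pointer_cast<SubTracked> yields null and the
    // filter_iterator drops the entry.
    for (auto& tracked : SubTracked::instance_snapshot())
    {
        (void)tracked;   // visits subA and subB only
    }

    // SubTracked::instanceCount() == 2, while Tracked::instanceCount() == 3
    (void)found;
}
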
diff --git a/indra/llcommon/llleap.cpp b/indra/llcommon/llleap.cpp
index 5b5bf97cef..8f88e728ce 100644
--- a/indra/llcommon/llleap.cpp
+++ b/indra/llcommon/llleap.cpp
@@ -340,11 +340,28 @@ public:
}
else
{
- // The LLSD object we got from our stream contains the keys we
- // need.
- LLEventPumps::instance().obtain(data["pump"]).post(data["data"]);
- // Block calls to this method; resetting mBlocker unblocks calls
- // to the other method.
+ try
+ {
+ // The LLSD object we got from our stream contains the
+ // keys we need.
+ LLEventPumps::instance().obtain(data["pump"]).post(data["data"]);
+ }
+ catch (const std::exception& err)
+ {
+ // No plugin should be allowed to crash the viewer by
+ // driving an exception -- intentionally or not.
+ LOG_UNHANDLED_EXCEPTION(stringize("handling request ", data));
+ // Whether or not the plugin added a "reply" key to the
+ // request, send a reply. We happen to know who originated
+ // this request, and the reply LLEventPump of interest.
+ // Not our problem if the plugin ignores the reply event.
+ data["reply"] = mReplyPump.getName();
+ sendReply(llsd::map("error",
+ stringize(LLError::Log::classname(err), ": ", err.what())),
+ data);
+ }
+ // Block calls to this method; resetting mBlocker unblocks
+ // calls to the other method.
mBlocker.reset(new LLEventPump::Blocker(mStdoutDataConnection));
// Go check for any more pending events in the buffer.
if (childout.size())
diff --git a/indra/llcommon/llleaplistener.cpp b/indra/llcommon/llleaplistener.cpp
index 11bfec1b31..471f52e91c 100644
--- a/indra/llcommon/llleaplistener.cpp
+++ b/indra/llcommon/llleaplistener.cpp
@@ -14,14 +14,16 @@
// associated header
#include "llleaplistener.h"
// STL headers
-#include <map>
+#include <algorithm> // std::find_if
#include <functional>
+#include <map>
+#include <set>
// std headers
// external library headers
-#include <boost/foreach.hpp>
// other Linden headers
-#include "lluuid.h"
+#include "lazyeventapi.h"
#include "llsdutil.h"
+#include "lluuid.h"
#include "stringize.h"
/*****************************************************************************
@@ -110,7 +112,7 @@ LLLeapListener::~LLLeapListener()
// value_type, and Bad Things would happen if you copied an
// LLTempBoundListener. (Destruction of the original would disconnect the
// listener, invalidating every stored connection.)
- BOOST_FOREACH(ListenersMap::value_type& pair, mListeners)
+ for (ListenersMap::value_type& pair : mListeners)
{
pair.second.disconnect();
}
@@ -208,31 +210,65 @@ void LLLeapListener::getAPIs(const LLSD& request) const
{
Response reply(LLSD(), request);
+ // first, traverse existing LLEventAPI instances
+ std::set<std::string> instances;
for (auto& ea : LLEventAPI::instance_snapshot())
{
- LLSD info;
- info["desc"] = ea.getDesc();
- reply[ea.getName()] = info;
+ // remember which APIs are actually instantiated
+ instances.insert(ea.getName());
+ reply[ea.getName()] = llsd::map("desc", ea.getDesc());
+ }
+ // supplement that with *potential* instances: that is, instances of
+ // LazyEventAPI that can each instantiate an LLEventAPI on demand
+ for (const auto& lea : LL::LazyEventAPIBase::instance_snapshot())
+ {
+ // skip any LazyEventAPI that's already instantiated its LLEventAPI
+ if (instances.find(lea.getName()) == instances.end())
+ {
+ reply[lea.getName()] = llsd::map("desc", lea.getDesc());
+ }
}
}
+// Because LazyEventAPI deliberately mimics LLEventAPI's query API, this
+// function can be passed either -- even though they're unrelated types.
+template <typename API>
+void reportAPI(LLEventAPI::Response& reply, const API& api)
+{
+ reply["name"] = api.getName();
+ reply["desc"] = api.getDesc();
+ reply["key"] = api.getDispatchKey();
+ LLSD ops;
+ for (const auto& namedesc : api)
+ {
+ ops.append(api.getMetadata(namedesc.first));
+ }
+ reply["ops"] = ops;
+}
+
void LLLeapListener::getAPI(const LLSD& request) const
{
Response reply(LLSD(), request);
- auto found = LLEventAPI::getInstance(request["api"]);
- if (found)
+ // check first among existing LLEventAPI instances
+ auto foundea = LLEventAPI::getInstance(request["api"]);
+ if (foundea)
+ {
+ reportAPI(reply, *foundea);
+ }
+ else
{
- reply["name"] = found->getName();
- reply["desc"] = found->getDesc();
- reply["key"] = found->getDispatchKey();
- LLSD ops;
- for (LLEventAPI::const_iterator oi(found->begin()), oend(found->end());
- oi != oend; ++oi)
+ // Here the requested LLEventAPI doesn't yet exist, but do we have a
+ // registered LazyEventAPI for it?
+ LL::LazyEventAPIBase::instance_snapshot snap;
+ auto foundlea = std::find_if(snap.begin(), snap.end(),
+ [api = request["api"].asString()]
+ (const auto& lea)
+ { return (lea.getName() == api); });
+ if (foundlea != snap.end())
{
- ops.append(found->getMetadata(oi->first));
+ reportAPI(reply, *foundlea);
}
- reply["ops"] = ops;
}
}
diff --git a/indra/llcommon/llmemory.cpp b/indra/llcommon/llmemory.cpp
index d6ae1284d3..7cdf7254ff 100644
--- a/indra/llcommon/llmemory.cpp
+++ b/indra/llcommon/llmemory.cpp
@@ -35,6 +35,7 @@
# include <sys/types.h>
# include <mach/task.h>
# include <mach/mach_init.h>
+#include <mach/mach_host.h>
#elif LL_LINUX
# include <unistd.h>
#endif
@@ -109,6 +110,50 @@ void LLMemory::updateMemoryInfo()
{
sAvailPhysicalMemInKB = U32Kilobytes(0);
}
+
+#elif defined(LL_DARWIN)
+ task_vm_info info;
+ mach_msg_type_number_t infoCount = TASK_VM_INFO_COUNT;
+ // MACH_TASK_BASIC_INFO reports the same resident_size, but does not tell us the reusable bytes or phys_footprint.
+ if (task_info(mach_task_self(), TASK_VM_INFO, reinterpret_cast<task_info_t>(&info), &infoCount) == KERN_SUCCESS)
+ {
+ // Our Windows definition of PagefileUsage is documented by Microsoft as "the total amount of
+ // memory that the memory manager has committed for a running process", which is rss.
+ sAllocatedPageSizeInKB = U32Bytes(info.resident_size);
+
+ // Activity Monitor => Inspect Process => Real Memory Size appears to report resident_size
+ // Activity monitor => main window memory column appears to report phys_footprint, which spot checks as at least 30% less.
+ // I think that is because of compression, which isn't going to give us a consistent measurement. We want uncompressed totals.
+ //
+ // In between is resident_size - reusable. This is what Chrome source code uses, with source comments saying it is 'the "Real Memory" value
+ // reported for the app by the Memory Monitor in Instruments.' It is still about 8% bigger than phys_footprint.
+ //
+ // (On Windows, we use WorkingSetSize.)
+ sAllocatedMemInKB = U32Bytes(info.resident_size - info.reusable);
+ }
+ else
+ {
+ LL_WARNS() << "task_info failed" << LL_ENDL;
+ }
+
+ // Total installed and available physical memory are properties of the host, not just our process.
+ vm_statistics64_data_t vmstat;
+ mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
+ mach_port_t host = mach_host_self();
+ vm_size_t page_size;
+ host_page_size(host, &page_size);
+ kern_return_t result = host_statistics64(host, HOST_VM_INFO64, reinterpret_cast<host_info_t>(&vmstat), &count);
+ if (result == KERN_SUCCESS) {
+ // This is what Chrome reports as 'the "Physical Memory Free" value reported by the Memory Monitor in Instruments.'
+ // Note though that inactive pages are not included here and not yet free, but could become so under memory pressure.
+ sAvailPhysicalMemInKB = U32Bytes(vmstat.free_count * page_size);
+ sMaxPhysicalMemInKB = LLMemoryInfo::getHardwareMemSize();
+ }
+ else
+ {
+ LL_WARNS() << "host_statistics64 failed" << LL_ENDL;
+ }
+
#else
//not valid for other systems for now.
sAllocatedMemInKB = U64Bytes(LLMemory::getCurrentRSS());
diff --git a/indra/llcommon/llmutex.h b/indra/llcommon/llmutex.h
index 838d7d34c0..0d70da6178 100644
--- a/indra/llcommon/llmutex.h
+++ b/indra/llcommon/llmutex.h
@@ -36,7 +36,8 @@
//============================================================================
-#define MUTEX_DEBUG (LL_DEBUG || LL_RELEASE_WITH_DEBUG_INFO)
+//#define MUTEX_DEBUG (LL_DEBUG || LL_RELEASE_WITH_DEBUG_INFO)
+#define MUTEX_DEBUG 0 //disable mutex debugging as it's interfering with profiles
#if MUTEX_DEBUG
#include <map>
@@ -61,7 +62,7 @@ protected:
mutable LLThread::id_t mLockingThread;
#if MUTEX_DEBUG
- std::map<LLThread::id_t, BOOL> mIsLocked;
+ std::unordered_map<LLThread::id_t, BOOL> mIsLocked;
#endif
};
diff --git a/indra/llcommon/llpointer.h b/indra/llcommon/llpointer.h
index 96ccfb481e..64aceddf32 100644
--- a/indra/llcommon/llpointer.h
+++ b/indra/llcommon/llpointer.h
@@ -341,4 +341,28 @@ private:
bool mStayUnique;
};
+
+// boost hash adapter
+template <class Type>
+struct boost::hash<LLPointer<Type>>
+{
+ typedef LLPointer<Type> argument_type;
+ typedef std::size_t result_type;
+ result_type operator()(argument_type const& s) const
+ {
+ return (std::size_t) s.get();
+ }
+};
+
+// Adapt boost hash to std hash
+namespace std
+{
+ template<class Type> struct hash<LLPointer<Type>>
+ {
+ std::size_t operator()(LLPointer<Type> const& s) const noexcept
+ {
+ return boost::hash<LLPointer<Type>>()(s);
+ }
+ };
+}
#endif
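
A minimal sketch of the new hash adapters; LLNamedObject is hypothetical. With these specializations an LLPointer can key a std::unordered_map (or a boost hashed container) directly, hashing on the raw pointer value.

#include "llpointer.h"
#include "llrefcount.h"
#include <string>
#include <unordered_map>

class LLNamedObject: public LLRefCount   // hypothetical refcounted type
{
public:
    std::string mName;
};

void hash_sketch()
{
    std::unordered_map<LLPointer<LLNamedObject>, int> useCounts;
    LLPointer<LLNamedObject> obj = new LLNamedObject();
    ++useCounts[obj];   // std::hash forwards to boost::hash: (std::size_t) ptr.get()
}
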
diff --git a/indra/llcommon/llprofiler.h b/indra/llcommon/llprofiler.h
index f9d7ae7ce4..af5e5777bf 100644
--- a/indra/llcommon/llprofiler.h
+++ b/indra/llcommon/llprofiler.h
@@ -86,8 +86,12 @@ extern thread_local bool gProfilerEnabled;
#define TRACY_ONLY_IPV4 1
#include "Tracy.hpp"
- // Mutually exclusive with detailed memory tracing
+ // Enable OpenGL profiling
#define LL_PROFILER_ENABLE_TRACY_OPENGL 0
+
+ // Enable RenderDoc labeling
+ #define LL_PROFILER_ENABLE_RENDER_DOC 0
+
#endif
#if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY
@@ -104,14 +108,13 @@ extern thread_local bool gProfilerEnabled;
    #define LL_PROFILE_ZONE_ERR(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0XFF0000 ) // RGB red
#define LL_PROFILE_ZONE_INFO(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0X00FFFF ) // RGB cyan
    #define LL_PROFILE_ZONE_WARN(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0x0FFFF00 ) // RGB yellow
- #define LL_PROFILE_ALLOC(ptr, size) TracyAlloc(ptr, size)
- #define LL_PROFILE_FREE(ptr) TracyFree(ptr)
#endif
#if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_FAST_TIMER
#define LL_PROFILER_FRAME_END
#define LL_PROFILER_SET_THREAD_NAME( name ) (void)(name)
#define LL_RECORD_BLOCK_TIME(name) const LLTrace::BlockTimer& LL_GLUE_TOKENS(block_time_recorder, __LINE__)(LLTrace::timeThisBlock(name)); (void)LL_GLUE_TOKENS(block_time_recorder, __LINE__);
#define LL_PROFILE_ZONE_NAMED(name) // LL_PROFILE_ZONE_NAMED is a no-op when Tracy is disabled
+ #define LL_PROFILE_ZONE_NAMED_COLOR(name,color) // LL_PROFILE_ZONE_NAMED_COLOR is a no-op when Tracy is disabled
#define LL_PROFILE_ZONE_SCOPED // LL_PROFILE_ZONE_SCOPED is a no-op when Tracy is disabled
#define LL_PROFILE_ZONE_COLOR(name,color) // LL_RECORD_BLOCK_TIME(name)
@@ -121,8 +124,6 @@ extern thread_local bool gProfilerEnabled;
#define LL_PROFILE_ZONE_ERR(name) (void)(name); // Not supported
#define LL_PROFILE_ZONE_INFO(name) (void)(name); // Not supported
#define LL_PROFILE_ZONE_WARN(name) (void)(name); // Not supported
- #define LL_PROFILE_ALLOC(ptr, size) (void)(ptr); (void)(size);
- #define LL_PROFILE_FREE(ptr) (void)(ptr);
#endif
#if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY_FAST_TIMER
#define LL_PROFILER_FRAME_END FrameMark
@@ -138,14 +139,45 @@ extern thread_local bool gProfilerEnabled;
    #define LL_PROFILE_ZONE_ERR(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0XFF0000 ) // RGB red
#define LL_PROFILE_ZONE_INFO(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0X00FFFF ) // RGB cyan
    #define LL_PROFILE_ZONE_WARN(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0x0FFFF00 ) // RGB yellow
- #define LL_PROFILE_ALLOC(ptr, size) TracyAlloc(ptr, size)
- #define LL_PROFILE_FREE(ptr) TracyFree(ptr)
#endif
#else
#define LL_PROFILER_FRAME_END
#define LL_PROFILER_SET_THREAD_NAME( name ) (void)(name)
#endif // LL_PROFILER
+#if LL_PROFILER_ENABLE_TRACY_OPENGL
+#define LL_PROFILE_GPU_ZONE(name) TracyGpuZone(name)
+#define LL_PROFILE_GPU_ZONEC(name,color) TracyGpuZoneC(name,color)
+#define LL_PROFILER_GPU_COLLECT TracyGpuCollect
+#define LL_PROFILER_GPU_CONTEXT TracyGpuContext
+
+// disable memory tracking (incompatible with GPU tracing)
+#define LL_PROFILE_ALLOC(ptr, size) (void)(ptr); (void)(size);
+#define LL_PROFILE_FREE(ptr) (void)(ptr);
+#else
+#define LL_PROFILE_GPU_ZONE(name) (void)name;
+#define LL_PROFILE_GPU_ZONEC(name,color) (void)name;(void)color;
+#define LL_PROFILER_GPU_COLLECT
+#define LL_PROFILER_GPU_CONTEXT
+
+#define LL_LABEL_OBJECT_GL(type, name, length, label)
+
+#if LL_PROFILER_CONFIGURATION > 1
+#define LL_PROFILE_ALLOC(ptr, size) TracyAlloc(ptr, size)
+#define LL_PROFILE_FREE(ptr) TracyFree(ptr)
+#else
+#define LL_PROFILE_ALLOC(ptr, size) (void)(ptr); (void)(size);
+#define LL_PROFILE_FREE(ptr) (void)(ptr);
+#endif
+
+#endif
+
+#if LL_PROFILER_ENABLE_RENDER_DOC
+#define LL_LABEL_OBJECT_GL(type, name, length, label) glObjectLabel(type, name, length, label)
+#else
+#define LL_LABEL_OBJECT_GL(type, name, length, label)
+#endif
+
#include "llprofilercategories.h"
#endif // LL_PROFILER_H
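
A hedged sketch of the new RenderDoc hook; 'tex' and the label text are illustrative, and the macro compiles away unless LL_PROFILER_ENABLE_RENDER_DOC is set to 1. Assumes the usual GL headers are already in scope.

void label_texture_sketch(GLuint tex)
{
    // Expands to glObjectLabel(GL_TEXTURE, tex, -1, "diffuse atlas") only when
    // LL_PROFILER_ENABLE_RENDER_DOC is 1; a negative length means the label is
    // a null-terminated string.
    LL_LABEL_OBJECT_GL(GL_TEXTURE, tex, -1, "diffuse atlas");
}
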
diff --git a/indra/llcommon/llprofilercategories.h b/indra/llcommon/llprofilercategories.h
index 8db29468cc..617431f629 100644
--- a/indra/llcommon/llprofilercategories.h
+++ b/indra/llcommon/llprofilercategories.h
@@ -52,7 +52,7 @@
#define LL_PROFILER_CATEGORY_ENABLE_LOGGING 1
#define LL_PROFILER_CATEGORY_ENABLE_MATERIAL 1
#define LL_PROFILER_CATEGORY_ENABLE_MEDIA 1
-#define LL_PROFILER_CATEGORY_ENABLE_MEMORY 1
+#define LL_PROFILER_CATEGORY_ENABLE_MEMORY 0
#define LL_PROFILER_CATEGORY_ENABLE_NETWORK 1
#define LL_PROFILER_CATEGORY_ENABLE_OCTREE 1
#define LL_PROFILER_CATEGORY_ENABLE_PIPELINE 1
diff --git a/indra/llcommon/llptrto.h b/indra/llcommon/llptrto.h
index 4082e30de6..9ef279fdbf 100644
--- a/indra/llcommon/llptrto.h
+++ b/indra/llcommon/llptrto.h
@@ -33,9 +33,12 @@
#include "llpointer.h"
#include "llrefcount.h" // LLRefCount
+#include <boost/intrusive_ptr.hpp>
+#include <boost/shared_ptr.hpp>
#include <boost/type_traits/is_base_of.hpp>
#include <boost/type_traits/remove_pointer.hpp>
-#include <boost/utility/enable_if.hpp>
+#include <memory> // std::shared_ptr, std::unique_ptr
+#include <type_traits>
/**
* LLPtrTo<TARGET>::type is either of two things:
@@ -55,14 +58,14 @@ struct LLPtrTo
/// specialize for subclasses of LLRefCount
template <class T>
-struct LLPtrTo<T, typename boost::enable_if< boost::is_base_of<LLRefCount, T> >::type>
+struct LLPtrTo<T, typename std::enable_if< boost::is_base_of<LLRefCount, T>::value >::type>
{
typedef LLPointer<T> type;
};
/// specialize for subclasses of LLThreadSafeRefCount
template <class T>
-struct LLPtrTo<T, typename boost::enable_if< boost::is_base_of<LLThreadSafeRefCount, T> >::type>
+struct LLPtrTo<T, typename std::enable_if< boost::is_base_of<LLThreadSafeRefCount, T>::value >::type>
{
typedef LLPointer<T> type;
};
@@ -83,4 +86,83 @@ struct LLRemovePointer< LLPointer<SOMECLASS> >
typedef SOMECLASS type;
};
+namespace LL
+{
+
+/*****************************************************************************
+* get_ref()
+*****************************************************************************/
+ template <typename T>
+ struct GetRef
+ {
+ // return const ref or non-const ref, depending on whether we can bind
+ // a non-const lvalue ref to the argument
+ const auto& operator()(const T& obj) const { return obj; }
+ auto& operator()(T& obj) const { return obj; }
+ };
+
+ template <typename T>
+ struct GetRef<const T*>
+ {
+ const auto& operator()(const T* ptr) const { return *ptr; }
+ };
+
+ template <typename T>
+ struct GetRef<T*>
+ {
+ auto& operator()(T* ptr) const { return *ptr; }
+ };
+
+ template <typename T>
+ struct GetRef< LLPointer<T> >
+ {
+ auto& operator()(LLPointer<T> ptr) const { return *ptr; }
+ };
+
+ /// whether we're passed a pointer or a reference, return a reference
+ template <typename T>
+ auto& get_ref(T& ptr_or_ref)
+ {
+ return GetRef<typename std::decay<T>::type>()(ptr_or_ref);
+ }
+
+ template <typename T>
+ const auto& get_ref(const T& ptr_or_ref)
+ {
+ return GetRef<typename std::decay<T>::type>()(ptr_or_ref);
+ }
+
+/*****************************************************************************
+* get_ptr()
+*****************************************************************************/
+ // if T is any pointer type we recognize, return it unchanged
+ template <typename T>
+ const T* get_ptr(const T* ptr) { return ptr; }
+
+ template <typename T>
+ T* get_ptr(T* ptr) { return ptr; }
+
+ template <typename T>
+ const std::shared_ptr<T>& get_ptr(const std::shared_ptr<T>& ptr) { return ptr; }
+
+ template <typename T>
+ const std::unique_ptr<T>& get_ptr(const std::unique_ptr<T>& ptr) { return ptr; }
+
+ template <typename T>
+ const boost::shared_ptr<T>& get_ptr(const boost::shared_ptr<T>& ptr) { return ptr; }
+
+ template <typename T>
+ const boost::intrusive_ptr<T>& get_ptr(const boost::intrusive_ptr<T>& ptr) { return ptr; }
+
+ template <typename T>
+ const LLPointer<T>& get_ptr(const LLPointer<T>& ptr) { return ptr; }
+
+ // T is not any pointer type we recognize, take a pointer to the parameter
+ template <typename T>
+ const T* get_ptr(const T& obj) { return &obj; }
+
+ template <typename T>
+ T* get_ptr(T& obj) { return &obj; }
+} // namespace LL
+
#endif /* ! defined(LL_LLPTRTO_H) */
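
A minimal sketch of get_ref()/get_ptr(); Thing is hypothetical and derives LLRefCount only so an LLPointer can own one.

#include "llptrto.h"
#include "llrefcount.h"

struct Thing: public LLRefCount   // hypothetical
{
    void poke() const {}
};

void ptr_or_ref_sketch()
{
    Thing direct;
    Thing* raw = &direct;
    LLPointer<Thing> smart = new Thing;

    // get_ref(): reach a Thing& no matter how the Thing arrived.
    LL::get_ref(direct).poke();   // plain reference, returned as-is
    LL::get_ref(raw).poke();      // raw pointer, dereferenced
    LL::get_ref(smart).poke();    // LLPointer, dereferenced

    // get_ptr(): recognized smart/raw pointers pass through unchanged, while
    // a plain object yields its address.
    LL::get_ptr(direct)->poke();
}
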
diff --git a/indra/llcommon/llqueuedthread.cpp b/indra/llcommon/llqueuedthread.cpp
index b06fee0ec2..394212ee0d 100644
--- a/indra/llcommon/llqueuedthread.cpp
+++ b/indra/llcommon/llqueuedthread.cpp
@@ -26,20 +26,26 @@
#include "linden_common.h"
#include "llqueuedthread.h"
+#include <chrono>
+
#include "llstl.h"
#include "lltimer.h" // ms_sleep()
-#include "lltracethreadrecorder.h"
+#include "llmutex.h"
//============================================================================
// MAIN THREAD
LLQueuedThread::LLQueuedThread(const std::string& name, bool threaded, bool should_pause) :
- LLThread(name),
- mThreaded(threaded),
- mIdleThread(TRUE),
- mNextHandle(0),
- mStarted(FALSE)
+ LLThread(name),
+ mIdleThread(TRUE),
+ mNextHandle(0),
+ mStarted(FALSE),
+ mThreaded(threaded),
+ mRequestQueue(name, 1024 * 1024)
{
+ llassert(threaded); // the non-threaded implementation is deprecated
+ mMainQueue = LL::WorkQueue::getInstance("mainloop");
+
if (mThreaded)
{
if(should_pause)
@@ -69,6 +75,11 @@ void LLQueuedThread::shutdown()
unpause(); // MAIN THREAD
if (mThreaded)
{
+ if (mRequestQueue.size() == 0)
+ {
+ mRequestQueue.close();
+ }
+
S32 timeout = 100;
for ( ; timeout>0; timeout--)
{
@@ -104,6 +115,8 @@ void LLQueuedThread::shutdown()
{
LL_WARNS() << "~LLQueuedThread() called with active requests: " << active_count << LL_ENDL;
}
+
+ mRequestQueue.close();
}
//----------------------------------------------------------------------------
@@ -112,6 +125,7 @@ void LLQueuedThread::shutdown()
// virtual
size_t LLQueuedThread::update(F32 max_time_ms)
{
+ LL_PROFILE_ZONE_SCOPED;
if (!mStarted)
{
if (!mThreaded)
@@ -125,29 +139,34 @@ size_t LLQueuedThread::update(F32 max_time_ms)
size_t LLQueuedThread::updateQueue(F32 max_time_ms)
{
- F64 max_time = (F64)max_time_ms * .001;
- LLTimer timer;
- size_t pending = 1;
-
+ LL_PROFILE_ZONE_SCOPED;
// Frame Update
if (mThreaded)
{
- pending = getPending();
- if(pending > 0)
+ // schedule a call to threadedUpdate for every call to updateQueue
+ if (!isQuitting())
+ {
+ mRequestQueue.post([=]()
+ {
+ LL_PROFILE_ZONE_NAMED_CATEGORY_THREAD("qt - update");
+ mIdleThread = FALSE;
+ threadedUpdate();
+ mIdleThread = TRUE;
+ }
+ );
+ }
+
+ if(getPending() > 0)
{
- unpause();
- }
+ unpause();
+ }
}
else
{
- while (pending > 0)
- {
- pending = processNextRequest();
- if (max_time && timer.getElapsedTimeF64() > max_time)
- break;
- }
+ mRequestQueue.runFor(std::chrono::microseconds((int) (max_time_ms*1000.f)));
+ threadedUpdate();
}
- return pending;
+ return getPending();
}
void LLQueuedThread::incQueue()
@@ -166,11 +185,7 @@ void LLQueuedThread::incQueue()
// May be called from any thread
size_t LLQueuedThread::getPending()
{
- size_t res;
- lockData();
- res = mRequestQueue.size();
- unlockData();
- return res;
+ return mRequestQueue.size();
}
// MAIN thread
@@ -195,35 +210,28 @@ void LLQueuedThread::waitOnPending()
// MAIN thread
void LLQueuedThread::printQueueStats()
{
- lockData();
- if (!mRequestQueue.empty())
+ U32 size = mRequestQueue.size();
+ if (size > 0)
{
- QueuedRequest *req = *mRequestQueue.begin();
- LL_INFOS() << llformat("Pending Requests:%d Current status:%d", mRequestQueue.size(), req->getStatus()) << LL_ENDL;
+ LL_INFOS() << llformat("Pending Requests:%d ", mRequestQueue.size()) << LL_ENDL;
}
else
{
LL_INFOS() << "Queued Thread Idle" << LL_ENDL;
}
- unlockData();
}
// MAIN thread
LLQueuedThread::handle_t LLQueuedThread::generateHandle()
{
- lockData();
- while ((mNextHandle == nullHandle()) || (mRequestHash.find(mNextHandle)))
- {
- mNextHandle++;
- }
- const LLQueuedThread::handle_t res = mNextHandle++;
- unlockData();
+ U32 res = ++mNextHandle;
return res;
}
// MAIN thread
bool LLQueuedThread::addRequest(QueuedRequest* req)
{
+ LL_PROFILE_ZONE_SCOPED;
if (mStatus == QUITTING)
{
return false;
@@ -231,14 +239,14 @@ bool LLQueuedThread::addRequest(QueuedRequest* req)
lockData();
req->setStatus(STATUS_QUEUED);
- mRequestQueue.insert(req);
- mRequestHash.insert(req);
+ mRequestHash.insert(req);
#if _DEBUG
// LL_INFOS() << llformat("LLQueuedThread::Added req [%08d]",handle) << LL_ENDL;
#endif
unlockData();
- incQueue();
+ llassert(!mDataLock->isSelfLocked());
+ mRequestQueue.post([this, req]() { processRequest(req); });
return true;
}
@@ -246,6 +254,7 @@ bool LLQueuedThread::addRequest(QueuedRequest* req)
// MAIN thread
bool LLQueuedThread::waitForResult(LLQueuedThread::handle_t handle, bool auto_complete)
{
+ LL_PROFILE_ZONE_SCOPED;
llassert (handle != nullHandle());
bool res = false;
bool waspaused = isPaused();
@@ -312,6 +321,7 @@ LLQueuedThread::status_t LLQueuedThread::getRequestStatus(handle_t handle)
void LLQueuedThread::abortRequest(handle_t handle, bool autocomplete)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
lockData();
QueuedRequest* req = (QueuedRequest*)mRequestHash.find(handle);
if (req)
@@ -333,30 +343,9 @@ void LLQueuedThread::setFlags(handle_t handle, U32 flags)
unlockData();
}
-void LLQueuedThread::setPriority(handle_t handle, U32 priority)
-{
- lockData();
- QueuedRequest* req = (QueuedRequest*)mRequestHash.find(handle);
- if (req)
- {
- if(req->getStatus() == STATUS_INPROGRESS)
- {
- // not in list
- req->setPriority(priority);
- }
- else if(req->getStatus() == STATUS_QUEUED)
- {
- // remove from list then re-insert
- llverify(mRequestQueue.erase(req) == 1);
- req->setPriority(priority);
- mRequestQueue.insert(req);
- }
- }
- unlockData();
-}
-
bool LLQueuedThread::completeRequest(handle_t handle)
{
+ LL_PROFILE_ZONE_SCOPED;
bool res = false;
lockData();
QueuedRequest* req = (QueuedRequest*)mRequestHash.find(handle);
@@ -399,88 +388,120 @@ bool LLQueuedThread::check()
//============================================================================
// Runs on its OWN thread
-size_t LLQueuedThread::processNextRequest()
+void LLQueuedThread::processRequest(LLQueuedThread::QueuedRequest* req)
{
- QueuedRequest *req;
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
+
+ mIdleThread = FALSE;
+ //threadedUpdate();
+
// Get next request from pool
lockData();
- while(1)
+ if ((req->getFlags() & FLAG_ABORT) || (mStatus == QUITTING))
{
- req = NULL;
- if (mRequestQueue.empty())
+ LL_PROFILE_ZONE_NAMED_CATEGORY_THREAD("qtpr - abort");
+ req->setStatus(STATUS_ABORTED);
+ req->finishRequest(false);
+ if (req->getFlags() & FLAG_AUTO_COMPLETE)
{
- break;
- }
- req = *mRequestQueue.begin();
- mRequestQueue.erase(mRequestQueue.begin());
- if ((req->getFlags() & FLAG_ABORT) || (mStatus == QUITTING))
- {
- req->setStatus(STATUS_ABORTED);
- req->finishRequest(false);
- if (req->getFlags() & FLAG_AUTO_COMPLETE)
- {
- mRequestHash.erase(req);
- req->deleteRequest();
+ mRequestHash.erase(req);
+ req->deleteRequest();
// check();
- }
- continue;
}
- llassert_always(req->getStatus() == STATUS_QUEUED);
- break;
- }
- U32 start_priority = 0 ;
- if (req)
- {
- req->setStatus(STATUS_INPROGRESS);
- start_priority = req->getPriority();
- }
- unlockData();
-
- // This is the only place we will call req->setStatus() after
- // it has initially been seet to STATUS_QUEUED, so it is
- // safe to access req.
- if (req)
- {
- // process request
- bool complete = req->processRequest();
-
- if (complete)
- {
- lockData();
- req->setStatus(STATUS_COMPLETE);
- req->finishRequest(true);
- if (req->getFlags() & FLAG_AUTO_COMPLETE)
- {
- mRequestHash.erase(req);
- req->deleteRequest();
-// check();
- }
- unlockData();
- }
- else
- {
- lockData();
- req->setStatus(STATUS_QUEUED);
- mRequestQueue.insert(req);
- unlockData();
- if (mThreaded && start_priority < PRIORITY_NORMAL)
- {
- ms_sleep(1); // sleep the thread a little
- }
- }
-
- LLTrace::get_thread_recorder()->pushToParent();
+ unlockData();
}
+ else
+ {
+ llassert_always(req->getStatus() == STATUS_QUEUED);
+
+ if (req)
+ {
+ req->setStatus(STATUS_INPROGRESS);
+ }
+ unlockData();
+
+ // This is the only place we will call req->setStatus() after
+ // it has initially been set to STATUS_QUEUED, so it is
+ // safe to access req.
+ if (req)
+ {
+ // process request
+ bool complete = req->processRequest();
+
+ if (complete)
+ {
+ LL_PROFILE_ZONE_NAMED_CATEGORY_THREAD("qtpr - complete");
+ lockData();
+ req->setStatus(STATUS_COMPLETE);
+ req->finishRequest(true);
+ if (req->getFlags() & FLAG_AUTO_COMPLETE)
+ {
+ mRequestHash.erase(req);
+ req->deleteRequest();
+ // check();
+ }
+ unlockData();
+ }
+ else
+ {
+ LL_PROFILE_ZONE_NAMED_CATEGORY_THREAD("qtpr - retry");
+ //put back on queue and try again in 0.1ms
+ lockData();
+ req->setStatus(STATUS_QUEUED);
+
+ unlockData();
+
+ llassert(!mDataLock->isSelfLocked());
+
+#if 0
+ // try again on next frame
+ // NOTE: tried using "post" with a time in the future, but this
+ // would invariably cause this thread to wait for a long time (10+ ms)
+ // while work is pending
+ bool ret = LL::WorkQueue::postMaybe(
+ mMainQueue,
+ [=]()
+ {
+ LL_PROFILE_ZONE_NAMED("processRequest - retry");
+ mRequestQueue.post([=]()
+ {
+ LL_PROFILE_ZONE_NAMED("processRequest - retry"); // <-- not redundant, track retry on both queues
+ processRequest(req);
+ });
+ });
+ llassert(ret);
+#else
+ using namespace std::chrono_literals;
+ auto retry_time = LL::WorkQueue::TimePoint::clock::now() + 16ms;
+ mRequestQueue.post([=]
+ {
+ LL_PROFILE_ZONE_NAMED("processRequest - retry");
+ if (LL::WorkQueue::TimePoint::clock::now() < retry_time)
+ {
+ auto sleep_time = std::chrono::duration_cast<std::chrono::milliseconds>(retry_time - LL::WorkQueue::TimePoint::clock::now());
+
+ if (sleep_time.count() > 0)
+ {
+ ms_sleep(sleep_time.count());
+ }
+ }
+ processRequest(req);
+ });
+#endif
+
+ }
+ }
+ }
- return getPending();
+ mIdleThread = TRUE;
}
// virtual
bool LLQueuedThread::runCondition()
{
// mRunCondition must be locked here
- if (mRequestQueue.empty() && mIdleThread)
+ if (mRequestQueue.size() == 0 && mIdleThread)
return false;
else
return true;
@@ -494,18 +515,13 @@ void LLQueuedThread::run()
startThread();
mStarted = TRUE;
- while (1)
+
+ /*while (1)
{
+ LL_PROFILE_ZONE_SCOPED;
// this will block on the condition until runCondition() returns true, the thread is unpaused, or the thread leaves the RUNNING state.
checkPause();
- if (isQuitting())
- {
- LLTrace::get_thread_recorder()->pushToParent();
- endThread();
- break;
- }
-
mIdleThread = FALSE;
threadedUpdate();
@@ -514,12 +530,18 @@ void LLQueuedThread::run()
if (pending_work == 0)
{
+ //LL_PROFILE_ZONE_NAMED("LLQueuedThread - sleep");
mIdleThread = TRUE;
- ms_sleep(1);
+ //ms_sleep(1);
}
//LLThread::yield(); // thread should yield after each request
- }
+ }*/
+ mRequestQueue.runUntilClose();
+
+ endThread();
LL_INFOS() << "LLQueuedThread " << mName << " EXITING." << LL_ENDL;
+
+
}
// virtual
@@ -539,10 +561,9 @@ void LLQueuedThread::threadedUpdate()
//============================================================================
-LLQueuedThread::QueuedRequest::QueuedRequest(LLQueuedThread::handle_t handle, U32 priority, U32 flags) :
+LLQueuedThread::QueuedRequest::QueuedRequest(LLQueuedThread::handle_t handle, U32 flags) :
LLSimpleHashEntry<LLQueuedThread::handle_t>(handle),
mStatus(STATUS_UNKNOWN),
- mPriority(priority),
mFlags(flags)
{
}
diff --git a/indra/llcommon/llqueuedthread.h b/indra/llcommon/llqueuedthread.h
index 90fce3dc5d..814dbc4c38 100644
--- a/indra/llcommon/llqueuedthread.h
+++ b/indra/llcommon/llqueuedthread.h
@@ -36,6 +36,7 @@
#include "llthread.h"
#include "llsimplehash.h"
+#include "workqueue.h"
//============================================================================
// Note: ~LLQueuedThread is O(N) N=# of queued threads, assumed to be small
@@ -45,15 +46,6 @@ class LL_COMMON_API LLQueuedThread : public LLThread
{
//------------------------------------------------------------------------
public:
- enum priority_t {
- PRIORITY_IMMEDIATE = 0x7FFFFFFF,
- PRIORITY_URGENT = 0x40000000,
- PRIORITY_HIGH = 0x30000000,
- PRIORITY_NORMAL = 0x20000000,
- PRIORITY_LOW = 0x10000000,
- PRIORITY_LOWBITS = 0x0FFFFFFF,
- PRIORITY_HIGHBITS = 0x70000000
- };
enum status_t {
STATUS_EXPIRED = -1,
STATUS_UNKNOWN = 0,
@@ -82,28 +74,17 @@ public:
virtual ~QueuedRequest(); // use deleteRequest()
public:
- QueuedRequest(handle_t handle, U32 priority, U32 flags = 0);
+ QueuedRequest(handle_t handle, U32 flags = 0);
status_t getStatus()
{
return mStatus;
}
- U32 getPriority() const
- {
- return mPriority;
- }
U32 getFlags() const
{
return mFlags;
}
- bool higherPriority(const QueuedRequest& second) const
- {
- if ( mPriority == second.mPriority)
- return mHashKey < second.mHashKey;
- else
- return mPriority > second.mPriority;
- }
-
+
protected:
status_t setStatus(status_t newstatus)
{
@@ -121,28 +102,11 @@ public:
virtual void finishRequest(bool completed); // Always called from thread after request has completed or aborted
virtual void deleteRequest(); // Only method to delete a request
- void setPriority(U32 pri)
- {
- // Only do this on a request that is not in a queued list!
- mPriority = pri;
- };
-
protected:
LLAtomicBase<status_t> mStatus;
- U32 mPriority;
U32 mFlags;
};
-protected:
- struct queued_request_less
- {
- bool operator()(const QueuedRequest* lhs, const QueuedRequest* rhs) const
- {
- return lhs->higherPriority(*rhs); // higher priority in front of queue (set)
- }
- };
-
-
//------------------------------------------------------------------------
public:
@@ -167,7 +131,7 @@ private:
protected:
handle_t generateHandle();
bool addRequest(QueuedRequest* req);
- size_t processNextRequest(void);
+ void processRequest(QueuedRequest* req);
void incQueue();
public:
@@ -186,7 +150,6 @@ public:
status_t getRequestStatus(handle_t handle);
void abortRequest(handle_t handle, bool autocomplete);
void setFlags(handle_t handle, U32 flags);
- void setPriority(handle_t handle, U32 priority);
bool completeRequest(handle_t handle);
// This is public for support classes like LLWorkerThread,
// but generally the methods above should be used.
@@ -200,8 +163,10 @@ protected:
BOOL mStarted; // required when mThreaded is false to call startThread() from update()
LLAtomicBool mIdleThread; // request queue is empty (or we are quitting) and the thread is idle
- typedef std::set<QueuedRequest*, queued_request_less> request_queue_t;
- request_queue_t mRequestQueue;
+ //typedef std::set<QueuedRequest*, queued_request_less> request_queue_t;
+ //request_queue_t mRequestQueue;
+ LL::WorkQueue mRequestQueue;
+ LL::WorkQueue::weak_t mMainQueue;
enum { REQUEST_HASH_SIZE = 512 }; // must be power of 2
typedef LLSimpleHash<handle_t, REQUEST_HASH_SIZE> request_hash_t;
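
A standalone sketch of the LL::WorkQueue pattern LLQueuedThread now builds on; post(), runUntilClose(), close(), and the (name, capacity) constructor are the calls used in this diff, while the queue name, capacity, and work item are illustrative.

#include "workqueue.h"
#include <thread>

void workqueue_sketch()
{
    LL::WorkQueue requests("sketch", 1024);   // name and capacity illustrative

    std::thread worker([&requests]
    {
        requests.runUntilClose();             // service callables until close()
    });

    requests.post([]{ /* one unit of work, cf. processRequest() */ });
    requests.close();                         // drained queue lets the worker return
    worker.join();
}
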
diff --git a/indra/llcommon/llsdserialize.cpp b/indra/llcommon/llsdserialize.cpp
index 3db456ddb3..a475be6293 100644
--- a/indra/llcommon/llsdserialize.cpp
+++ b/indra/llcommon/llsdserialize.cpp
@@ -475,6 +475,7 @@ LLSDNotationParser::~LLSDNotationParser()
// virtual
S32 LLSDNotationParser::doParse(std::istream& istr, LLSD& data, S32 max_depth) const
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
// map: { string:object, string:object }
// array: [ object, object, object ]
// undef: !
@@ -734,6 +735,7 @@ S32 LLSDNotationParser::doParse(std::istream& istr, LLSD& data, S32 max_depth) c
S32 LLSDNotationParser::parseMap(std::istream& istr, LLSD& map, S32 max_depth) const
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
// map: { string:object, string:object }
map = LLSD::emptyMap();
S32 parse_count = 0;
@@ -794,6 +796,7 @@ S32 LLSDNotationParser::parseMap(std::istream& istr, LLSD& map, S32 max_depth) c
S32 LLSDNotationParser::parseArray(std::istream& istr, LLSD& array, S32 max_depth) const
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
// array: [ object, object, object ]
array = LLSD::emptyArray();
S32 parse_count = 0;
@@ -833,6 +836,7 @@ S32 LLSDNotationParser::parseArray(std::istream& istr, LLSD& array, S32 max_dept
bool LLSDNotationParser::parseString(std::istream& istr, LLSD& data) const
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
std::string value;
auto count = deserialize_string(istr, value, mMaxBytesLeft);
if(PARSE_FAILURE == count) return false;
@@ -843,6 +847,7 @@ bool LLSDNotationParser::parseString(std::istream& istr, LLSD& data) const
bool LLSDNotationParser::parseBinary(std::istream& istr, LLSD& data) const
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
// binary: b##"ff3120ab1"
// or: b(len)"..."
@@ -945,6 +950,7 @@ LLSDBinaryParser::~LLSDBinaryParser()
// virtual
S32 LLSDBinaryParser::doParse(std::istream& istr, LLSD& data, S32 max_depth) const
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
/**
* Undefined: '!'<br>
* Boolean: '1' for true '0' for false<br>
diff --git a/indra/llcommon/llsdserialize_xml.cpp b/indra/llcommon/llsdserialize_xml.cpp
index ac128c9f86..38b11eb32b 100644
--- a/indra/llcommon/llsdserialize_xml.cpp
+++ b/indra/llcommon/llsdserialize_xml.cpp
@@ -923,6 +923,8 @@ void LLSDXMLParser::parsePart(const char *buf, llssize len)
// virtual
S32 LLSDXMLParser::doParse(std::istream& input, LLSD& data, S32 max_depth) const
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
+
#ifdef XML_PARSER_PERFORMANCE_TESTS
XML_Timer timer( &parseTime );
#endif // XML_PARSER_PERFORMANCE_TESTS
diff --git a/indra/llcommon/llsdutil.cpp b/indra/llcommon/llsdutil.cpp
index f70bee9903..e98fc0285a 100644
--- a/indra/llcommon/llsdutil.cpp
+++ b/indra/llcommon/llsdutil.cpp
@@ -1046,3 +1046,38 @@ LLSD llsd_shallow(LLSD value, LLSD filter)
return shallow;
}
+
+LLSD LL::apply_llsd_fix(size_t arity, const LLSD& args)
+{
+ // LLSD supports a number of types, two of which are aggregates: Map and
+ // Array. We don't try to support Map: supporting Map would seem to
+ // promise that we could somehow match the string key to 'func's parameter
+ // names. Uh sorry, maybe in some future version of C++ with reflection.
+ if (args.isMap())
+ {
+ LLTHROW(LL::apply_error("LL::apply(function, Map LLSD) unsupported"));
+ }
+ // We expect an LLSD array, but what the heck, treat isUndefined() as a
+ // zero-length array for calling a nullary 'func'.
+ if (args.isUndefined() || args.isArray())
+ {
+ // this works because LLSD().size() == 0
+ if (args.size() != arity)
+ {
+ LLTHROW(LL::apply_error(stringize("LL::apply(function(", arity, " args), ",
+ args.size(), "-entry LLSD array)")));
+ }
+ return args;
+ }
+
+ // args is one of the scalar types
+ // scalar_LLSD.size() == 0, so don't test that here.
+ // You can pass a scalar LLSD only to a unary 'func'.
+ if (arity != 1)
+ {
+ LLTHROW(LL::apply_error(stringize("LL::apply(function(", arity, " args), "
+ "LLSD ", LLSD::typeString(args.type()), ")")));
+ }
+ // make an array of it
+ return llsd::array(args);
+}
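
A hedged illustration of the normalization rules above; the arities and values are made up.

#include "llsdutil.h"
#include "llerror.h"

void apply_fix_sketch()
{
    // unary target + scalar argument: the scalar is wrapped into [42]
    LLSD wrapped = LL::apply_llsd_fix(1, LLSD(42));

    // nullary target + undefined argument: treated as a zero-length array
    LLSD empty = LL::apply_llsd_fix(0, LLSD());

    // arity mismatch (or a Map argument) throws LL::apply_error
    try
    {
        LL::apply_llsd_fix(2, llsd::array(1));   // 2 params, 1 array entry
    }
    catch (const LL::apply_error&)
    {
        LL_WARNS() << "argument shape rejected" << LL_ENDL;
    }
    (void)wrapped; (void)empty;
}
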
diff --git a/indra/llcommon/llsdutil.h b/indra/llcommon/llsdutil.h
index 372278c51a..ad54d1b0be 100644
--- a/indra/llcommon/llsdutil.h
+++ b/indra/llcommon/llsdutil.h
@@ -29,8 +29,14 @@
#ifndef LL_LLSDUTIL_H
#define LL_LLSDUTIL_H
+#include "apply.h" // LL::invoke()
+#include "function_types.h" // LL::function_arity
#include "llsd.h"
#include <boost/functional/hash.hpp>
+#include <cassert>
+#include <memory> // std::shared_ptr
+#include <type_traits>
+#include <vector>
// U32
LL_COMMON_API LLSD ll_sd_from_U32(const U32);
@@ -298,6 +304,11 @@ LLSD map(Ts&&... vs)
/*****************************************************************************
* LLSDParam
*****************************************************************************/
+struct LLSDParamBase
+{
+ virtual ~LLSDParamBase() {}
+};
+
/**
* LLSDParam is a customization point for passing LLSD values to function
* parameters of more or less arbitrary type. LLSD provides a small set of
@@ -315,7 +326,7 @@ LLSD map(Ts&&... vs)
* @endcode
*/
template <typename T>
-class LLSDParam
+class LLSDParam: public LLSDParamBase
{
public:
/**
@@ -323,13 +334,66 @@ public:
* value for later retrieval
*/
LLSDParam(const LLSD& value):
- _value(value)
+ value_(value)
{}
- operator T() const { return _value; }
+ operator T() const { return value_; }
private:
- T _value;
+ T value_;
+};
+
+/**
+ * LLSDParam<LLSD> is for when you don't already have the target parameter
+ * type in hand. Instantiate LLSDParam<LLSD>(your LLSD object), and the
+ * templated conversion operator will try to select a more specific LLSDParam
+ * specialization.
+ */
+template <>
+class LLSDParam<LLSD>: public LLSDParamBase
+{
+private:
+ LLSD value_;
+ // LLSDParam<LLSD>::operator T() works by instantiating an LLSDParam<T> on
+ // demand. Returning that engages LLSDParam<T>::operator T(), producing
+ // the desired result. But LLSDParam<const char*> owns a std::string whose
+ // c_str() is returned by its operator const char*(). If we return a temp
+ // LLSDParam<const char*>, the compiler can destroy it right away, as soon
+ // as we've called operator const char*(). That's a problem! That
+ // invalidates the const char* we've just passed to the subject function.
+ // This LLSDParam<LLSD> is presumably guaranteed to survive until the
+ // subject function has returned, so we must ensure that any constructed
+ // LLSDParam<T> lives just as long as this LLSDParam<LLSD> does. Putting
+ // each LLSDParam<T> on the heap and capturing a smart pointer in a vector
+ // works. We would have liked to use std::unique_ptr, but vector entries
+ // must be copyable.
+ // (Alternatively we could assume that every instance of LLSDParam<LLSD>
+ // will be asked for at most ONE conversion. We could store a scalar
+ // std::unique_ptr and, when constructing a new LLSDParam<T>, assert that
+ // the unique_ptr is empty. But some future change in usage patterns, and
+ // consequent failure of that assertion, would be very mysterious. Instead
+ // of explaining how to fix it, just fix it now.)
+ mutable std::vector<std::shared_ptr<LLSDParamBase>> converters_;
+
+public:
+ LLSDParam(const LLSD& value): value_(value) {}
+
+ /// if we're literally being asked for an LLSD parameter, avoid infinite
+ /// recursion
+ operator LLSD() const { return value_; }
+
+ /// otherwise, instantiate a more specific LLSDParam<T> to convert; that
+ /// preserves the existing customization mechanism
+ template <typename T>
+ operator T() const
+ {
+ // capture 'ptr' with the specific subclass type because converters_
+ // only stores LLSDParamBase pointers
+ auto ptr{ std::make_shared<LLSDParam<std::decay_t<T>>>(value_) };
+ // keep the new converter alive until we ourselves are destroyed
+ converters_.push_back(ptr);
+ return *ptr;
+ }
};
/**
@@ -346,17 +410,17 @@ private:
*/
#define LLSDParam_for(T, AS) \
template <> \
-class LLSDParam<T> \
+class LLSDParam<T>: public LLSDParamBase \
{ \
public: \
LLSDParam(const LLSD& value): \
- _value((T)value.AS()) \
+ value_((T)value.AS()) \
{} \
\
- operator T() const { return _value; } \
+ operator T() const { return value_; } \
\
private: \
- T _value; \
+ T value_; \
}
LLSDParam_for(float, asReal);
@@ -372,31 +436,31 @@ LLSDParam_for(LLSD::Binary, asBinary);
* safely pass an LLSDParam<const char*>(yourLLSD).
*/
template <>
-class LLSDParam<const char*>
+class LLSDParam<const char*>: public LLSDParamBase
{
private:
// The difference here is that we store a std::string rather than a const
// char*. It's important that the LLSDParam object own the std::string.
- std::string _value;
+ std::string value_;
// We don't bother storing the incoming LLSD object, but we do have to
- // distinguish whether _value is an empty string because the LLSD object
+ // distinguish whether value_ is an empty string because the LLSD object
// contains an empty string or because it's isUndefined().
- bool _undefined;
+ bool undefined_;
public:
LLSDParam(const LLSD& value):
- _value(value),
- _undefined(value.isUndefined())
+ value_(value),
+ undefined_(value.isUndefined())
{}
- // The const char* we retrieve is for storage owned by our _value member.
+ // The const char* we retrieve is for storage owned by our value_ member.
// That's how we guarantee that the const char* is valid for the lifetime
// of this LLSDParam object. Constructing your LLSDParam in the argument
// list should ensure that the LLSDParam object will persist for the
// duration of the function call.
operator const char*() const
{
- if (_undefined)
+ if (undefined_)
{
// By default, an isUndefined() LLSD object's asString() method
// will produce an empty string. But for a function accepting
@@ -406,7 +470,7 @@ public:
// case, though, no LLSD value could pass NULL.
return NULL;
}
- return _value.c_str();
+ return value_.c_str();
}
};
@@ -555,4 +619,56 @@ struct hash<LLSD>
}
};
}
+
+namespace LL
+{
+
+/*****************************************************************************
+* apply(function, LLSD array)
+*****************************************************************************/
+// validate incoming LLSD blob, and return an LLSD array suitable to pass to
+// the function of interest
+LLSD apply_llsd_fix(size_t arity, const LLSD& args);
+
+// Derived from https://stackoverflow.com/a/20441189
+// and https://en.cppreference.com/w/cpp/utility/apply .
+// We can't simply make a tuple from the LLSD array and then apply() that
+// tuple to the function -- how would make_tuple() deduce the correct
+// parameter type for each entry? We must go directly to the target function.
+template <typename CALLABLE, std::size_t... I>
+auto apply_impl(CALLABLE&& func, const LLSD& array, std::index_sequence<I...>)
+{
+ // call func(unpacked args), using generic LLSDParam<LLSD> to convert each
+ // entry in 'array' to the target parameter type
+ return std::forward<CALLABLE>(func)(LLSDParam<LLSD>(array[I])...);
+}
+
+// use apply_n<ARITY>(function, LLSD) to call a specific arity of a variadic
+// function with (that many) items from the passed LLSD array
+template <size_t ARITY, typename CALLABLE>
+auto apply_n(CALLABLE&& func, const LLSD& args)
+{
+ return apply_impl(std::forward<CALLABLE>(func),
+ apply_llsd_fix(ARITY, args),
+ std::make_index_sequence<ARITY>());
+}
+
+/**
+ * apply(function, LLSD) goes beyond C++17 std::apply(). For this case
+ * @a function @emph cannot be variadic: the compiler must know at compile
+ * time how many arguments to pass. This isn't Python. (But see apply_n() to
+ * pass a specific number of args to a variadic function.)
+ */
+template <typename CALLABLE>
+auto apply(CALLABLE&& func, const LLSD& args)
+{
+ // infer arity from the definition of func
+ constexpr auto arity = function_arity<
+ typename std::remove_reference<CALLABLE>::type>::value;
+ // now that we have a compile-time arity, apply_n() works
+ return apply_n<arity>(std::forward<CALLABLE>(func), args);
+}
+
+} // namespace LL
+
#endif // LL_LLSDUTIL_H
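Putting the new header pieces together, a hedged usage sketch (illustration only; the callee and values below are hypothetical): LLSDParam<LLSD> converts each array entry to whatever the parameter list demands, and LL::apply() deduces the arity from the callee, so a mismatched array throws LL::apply_error instead of silently mis-calling.

    // illustration only -- hypothetical callee and values
    #include "llsdutil.h"

    static int volume(int w, int h, int d) { return w * h * d; }

    void apply_demo()
    {
        LLSD args = llsd::array(2, 3, 4);
        int v  = LL::apply(volume, args);        // arity deduced as 3 -> 24
        int v2 = LL::apply_n<3>(volume, args);   // same call with the arity stated
        // LL::apply(volume, llsd::array(2, 3)) would throw LL::apply_error
        // LLSDParam<LLSD>(args[0]) is what converts each entry at the call site
        (void)v; (void)v2;
    }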
diff --git a/indra/llcommon/llsys.cpp b/indra/llcommon/llsys.cpp
index 91cb65b815..938685bae6 100644
--- a/indra/llcommon/llsys.cpp
+++ b/indra/llcommon/llsys.cpp
@@ -771,20 +771,28 @@ static U32Kilobytes LLMemoryAdjustKBResult(U32Kilobytes inKB)
}
#endif
+#if LL_DARWIN
+// static
+U32Kilobytes LLMemoryInfo::getHardwareMemSize()
+{
+ // This might work on Linux as well. Someone check...
+ uint64_t phys = 0;
+ int mib[2] = { CTL_HW, HW_MEMSIZE };
+
+ size_t len = sizeof(phys);
+ sysctl(mib, 2, &phys, &len, NULL, 0);
+
+ return U64Bytes(phys);
+}
+#endif
+
U32Kilobytes LLMemoryInfo::getPhysicalMemoryKB() const
{
#if LL_WINDOWS
return LLMemoryAdjustKBResult(U32Kilobytes(mStatsMap["Total Physical KB"].asInteger()));
#elif LL_DARWIN
- // This might work on Linux as well. Someone check...
- uint64_t phys = 0;
- int mib[2] = { CTL_HW, HW_MEMSIZE };
-
- size_t len = sizeof(phys);
- sysctl(mib, 2, &phys, &len, NULL, 0);
-
- return U64Bytes(phys);
+ return getHardwareMemSize();
#elif LL_LINUX
U64 phys = 0;
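For reference (editorial, not part of the commit), the new method wraps a plain sysctl(CTL_HW, HW_MEMSIZE) query; a standalone sketch of the same call with the error case made explicit:

    // illustration only -- Darwin/BSD physical-memory query
    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <cstdint>

    static uint64_t physical_memory_bytes()
    {
        uint64_t phys = 0;
        size_t len = sizeof(phys);
        int mib[2] = { CTL_HW, HW_MEMSIZE };
        if (sysctl(mib, 2, &phys, &len, NULL, 0) != 0)
        {
            return 0;   // query failed; the code above ignores this case
        }
        return phys;
    }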
diff --git a/indra/llcommon/llsys.h b/indra/llcommon/llsys.h
index 70a03810c5..08d4abffa2 100644
--- a/indra/llcommon/llsys.h
+++ b/indra/llcommon/llsys.h
@@ -129,7 +129,10 @@ public:
LLMemoryInfo(); ///< Default constructor
void stream(std::ostream& s) const; ///< output text info to s
- U32Kilobytes getPhysicalMemoryKB() const;
+ U32Kilobytes getPhysicalMemoryKB() const;
+#if LL_DARWIN
+ static U32Kilobytes getHardwareMemSize(); // Because some Mac linkers won't let us reference extern gSysMemory from a different lib.
+#endif
 //get the available memory information in KiloBytes.
static void getAvailableMemoryKB(U32Kilobytes& avail_physical_mem_kb, U32Kilobytes& avail_virtual_mem_kb);
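A hypothetical call site (not part of this diff) for the new static accessor; making it static is the point of the change, since code in another library can then ask for the number without touching the extern gSysMemory instance:

    // illustration only
    #if LL_DARWIN
        U32Kilobytes total_mem = LLMemoryInfo::getHardwareMemSize();
    #endif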
diff --git a/indra/llcommon/llthread.cpp b/indra/llcommon/llthread.cpp
index a051c7f575..cd4975d9d3 100644
--- a/indra/llcommon/llthread.cpp
+++ b/indra/llcommon/llthread.cpp
@@ -42,6 +42,7 @@
#ifdef LL_WINDOWS
+
const DWORD MS_VC_EXCEPTION=0x406D1388;
#pragma pack(push,8)
@@ -134,6 +135,15 @@ void LLThread::threadRun()
{
#ifdef LL_WINDOWS
set_thread_name(-1, mName.c_str());
+
+#if 0 // probably a bad idea, see usage of SetThreadIdealProcessor in LLWindowWin32
+ HANDLE hThread = GetCurrentThread();
+ if (hThread)
+ {
+ SetThreadAffinityMask(hThread, (DWORD_PTR) 0xFFFFFFFFFFFFFFFE);
+ }
+#endif
+
#endif
LL_PROFILER_SET_THREAD_NAME( mName.c_str() );
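For the record (editorial note): in the disabled block above, each bit of the affinity mask selects one logical processor, so 0xFFFFFFFFFFFFFFFE would allow every core except core 0. A minimal sketch of the same idea, should it ever be revisited:

    // illustration only -- keep the current thread off core 0 (Windows)
    #ifdef LL_WINDOWS
        DWORD_PTR mask = ~static_cast<DWORD_PTR>(1);   // every core except core 0
        SetThreadAffinityMask(GetCurrentThread(), mask);
    #endif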
diff --git a/indra/llcommon/lltimer.cpp b/indra/llcommon/lltimer.cpp
index 58bedacf43..1a99ac2886 100644
--- a/indra/llcommon/lltimer.cpp
+++ b/indra/llcommon/lltimer.cpp
@@ -30,6 +30,9 @@
#include "u64.h"
+#include <chrono>
+#include <thread>
+
#if LL_WINDOWS
# include "llwin32headerslean.h"
#elif LL_LINUX || LL_DARWIN
@@ -62,9 +65,18 @@ LLTimer* LLTimer::sTimer = NULL;
//---------------------------------------------------------------------------
#if LL_WINDOWS
+
+
+#if 0
void ms_sleep(U32 ms)
{
- Sleep(ms);
+ LL_PROFILE_ZONE_SCOPED;
+ using TimePoint = std::chrono::steady_clock::time_point;
+ auto resume_time = TimePoint::clock::now() + std::chrono::milliseconds(ms);
+ while (TimePoint::clock::now() < resume_time)
+ {
+ std::this_thread::yield(); //note: don't use LLThread::yield here to avoid yielding for too long
+ }
}
U32 micro_sleep(U64 us, U32 max_yields)
@@ -74,6 +86,35 @@ U32 micro_sleep(U64 us, U32 max_yields)
ms_sleep((U32)(us / 1000));
return 0;
}
+
+#else
+
+U32 micro_sleep(U64 us, U32 max_yields)
+{
+ LL_PROFILE_ZONE_SCOPED
+#if 0
+ LARGE_INTEGER ft;
+ ft.QuadPart = -static_cast<S64>(us * 10); // '-' using relative time
+
+ HANDLE timer = CreateWaitableTimer(NULL, TRUE, NULL);
+ SetWaitableTimer(timer, &ft, 0, NULL, NULL, 0);
+ WaitForSingleObject(timer, INFINITE);
+ CloseHandle(timer);
+#else
+ Sleep(us / 1000);
+#endif
+
+ return 0;
+}
+
+void ms_sleep(U32 ms)
+{
+ LL_PROFILE_ZONE_SCOPED
+ micro_sleep(ms * 1000, 0);
+}
+
+#endif
+
#elif LL_LINUX || LL_DARWIN
static void _sleep_loop(struct timespec& thiswait)
{
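For comparison (editorial, not part of the commit), the #if 0 branch above is the usual yield-until-deadline pattern; a portable standalone sketch of it:

    // illustration only -- busy-yield until a deadline in portable C++
    #include <chrono>
    #include <thread>

    static void yield_sleep_ms(unsigned ms)
    {
        using clock = std::chrono::steady_clock;
        const auto resume_time = clock::now() + std::chrono::milliseconds(ms);
        while (clock::now() < resume_time)
        {
            // cheaper than a pure spin, but still trades CPU for latency;
            // the branch actually compiled in just calls Sleep(us / 1000)
            std::this_thread::yield();
        }
    }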
diff --git a/indra/llcommon/lluuid.cpp b/indra/llcommon/lluuid.cpp
index 5655e8e2f2..200add404f 100644
--- a/indra/llcommon/lluuid.cpp
+++ b/indra/llcommon/lluuid.cpp
@@ -1,31 +1,31 @@
-/**
+/**
* @file lluuid.cpp
*
* $LicenseInfo:firstyear=2000&license=viewerlgpl$
* Second Life Viewer Source Code
* Copyright (C) 2010, Linden Research, Inc.
- *
+ *
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation;
* version 2.1 of the License only.
- *
+ *
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
- *
+ *
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
+ *
* Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
* $/LicenseInfo$
*/
#include "linden_common.h"
-// We can't use WIN32_LEAN_AND_MEAN here, needs lots of includes.
+ // We can't use WIN32_LEAN_AND_MEAN here, needs lots of includes.
#if LL_WINDOWS
#include "llwin32headers.h"
// ugh, this is ugly. We need to straighten out our linking for this library
@@ -51,7 +51,7 @@ const LLUUID LLUUID::null;
const LLTransactionID LLTransactionID::tnull;
// static
-LLMutex * LLUUID::mMutex = NULL;
+LLMutex* LLUUID::mMutex = NULL;
@@ -60,15 +60,15 @@ LLMutex * LLUUID::mMutex = NULL;
NOT DONE YET!!!
static char BASE85_TABLE[] = {
- '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
- 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
- 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
- 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd',
- 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
- 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x',
- 'y', 'z', '!', '#', '$', '%', '&', '(', ')', '*',
- '+', '-', ';', '[', '=', '>', '?', '@', '^', '_',
- '`', '{', '|', '}', '~', '\0'
+ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
+ 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
+ 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd',
+ 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
+ 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x',
+ 'y', 'z', '!', '#', '$', '%', '&', '(', ')', '*',
+ '+', '-', ';', '[', '=', '>', '?', '@', '^', '_',
+ '`', '{', '|', '}', '~', '\0'
};
@@ -94,28 +94,28 @@ return ret;
void LLUUID::toBase85(char* out)
{
- U32* me = (U32*)&(mData[0]);
- for(S32 i = 0; i < 4; ++i)
- {
- char* o = &out[i*i];
- for(S32 j = 0; j < 5; ++j)
- {
- o[4-j] = BASE85_TABLE[ me[i] % 85];
- word /= 85;
- }
- }
+ U32* me = (U32*)&(mData[0]);
+ for(S32 i = 0; i < 4; ++i)
+ {
+ char* o = &out[i*i];
+ for(S32 j = 0; j < 5; ++j)
+ {
+ o[4-j] = BASE85_TABLE[ me[i] % 85];
+ word /= 85;
+ }
+ }
}
unsigned int decode( char const * fiveChars ) throw( bad_input_data )
{
- unsigned int ret = 0;
- for( S32 ix = 0; ix < 5; ++ix )
- {
- char * s = strchr( encodeTable, fiveChars[ ix ] );
- ret = ret * 85 + (s-encodeTable);
- }
- return ret;
-}
+ unsigned int ret = 0;
+ for( S32 ix = 0; ix < 5; ++ix )
+ {
+ char * s = strchr( encodeTable, fiveChars[ ix ] );
+ ret = ret * 85 + (s-encodeTable);
+ }
+ return ret;
+}
*/
#define LL_USE_JANKY_RANDOM_NUMBER_GENERATOR 0
@@ -130,8 +130,8 @@ static U64 sJankyRandomSeed(LLUUID::getRandomSeed());
*/
U32 janky_fast_random_bytes()
{
- sJankyRandomSeed = U64L(1664525) * sJankyRandomSeed + U64L(1013904223);
- return (U32)sJankyRandomSeed;
+ sJankyRandomSeed = U64L(1664525) * sJankyRandomSeed + U64L(1013904223);
+ return (U32)sJankyRandomSeed;
}
/**
@@ -139,8 +139,8 @@ U32 janky_fast_random_bytes()
*/
U32 janky_fast_random_byes_range(U32 val)
{
- sJankyRandomSeed = U64L(1664525) * sJankyRandomSeed + U64L(1013904223);
- return (U32)(sJankyRandomSeed) % val;
+ sJankyRandomSeed = U64L(1664525) * sJankyRandomSeed + U64L(1013904223);
+ return (U32)(sJankyRandomSeed) % val;
}
/**
@@ -148,257 +148,257 @@ U32 janky_fast_random_byes_range(U32 val)
*/
U32 janky_fast_random_seeded_bytes(U32 seed, U32 val)
{
- seed = U64L(1664525) * (U64)(seed) + U64L(1013904223);
- return (U32)(seed) % val;
+ seed = U64L(1664525) * (U64)(seed)+U64L(1013904223);
+ return (U32)(seed) % val;
}
#endif
// Common to all UUID implementations
void LLUUID::toString(std::string& out) const
{
- out = llformat(
- "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
- (U8)(mData[0]),
- (U8)(mData[1]),
- (U8)(mData[2]),
- (U8)(mData[3]),
- (U8)(mData[4]),
- (U8)(mData[5]),
- (U8)(mData[6]),
- (U8)(mData[7]),
- (U8)(mData[8]),
- (U8)(mData[9]),
- (U8)(mData[10]),
- (U8)(mData[11]),
- (U8)(mData[12]),
- (U8)(mData[13]),
- (U8)(mData[14]),
- (U8)(mData[15]));
+ out = llformat(
+ "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
+ (U8)(mData[0]),
+ (U8)(mData[1]),
+ (U8)(mData[2]),
+ (U8)(mData[3]),
+ (U8)(mData[4]),
+ (U8)(mData[5]),
+ (U8)(mData[6]),
+ (U8)(mData[7]),
+ (U8)(mData[8]),
+ (U8)(mData[9]),
+ (U8)(mData[10]),
+ (U8)(mData[11]),
+ (U8)(mData[12]),
+ (U8)(mData[13]),
+ (U8)(mData[14]),
+ (U8)(mData[15]));
}
// *TODO: deprecate
-void LLUUID::toString(char *out) const
+void LLUUID::toString(char* out) const
{
- std::string buffer;
- toString(buffer);
- strcpy(out,buffer.c_str()); /* Flawfinder: ignore */
+ std::string buffer;
+ toString(buffer);
+ strcpy(out, buffer.c_str()); /* Flawfinder: ignore */
}
void LLUUID::toCompressedString(std::string& out) const
{
- char bytes[UUID_BYTES+1];
- memcpy(bytes, mData, UUID_BYTES); /* Flawfinder: ignore */
- bytes[UUID_BYTES] = '\0';
- out.assign(bytes, UUID_BYTES);
+ char bytes[UUID_BYTES + 1];
+ memcpy(bytes, mData, UUID_BYTES); /* Flawfinder: ignore */
+ bytes[UUID_BYTES] = '\0';
+ out.assign(bytes, UUID_BYTES);
}
// *TODO: deprecate
-void LLUUID::toCompressedString(char *out) const
+void LLUUID::toCompressedString(char* out) const
{
- memcpy(out, mData, UUID_BYTES); /* Flawfinder: ignore */
- out[UUID_BYTES] = '\0';
+ memcpy(out, mData, UUID_BYTES); /* Flawfinder: ignore */
+ out[UUID_BYTES] = '\0';
}
std::string LLUUID::getString() const
{
- return asString();
+ return asString();
}
std::string LLUUID::asString() const
{
- std::string str;
- toString(str);
- return str;
+ std::string str;
+ toString(str);
+ return str;
}
BOOL LLUUID::set(const char* in_string, BOOL emit)
{
- return set(ll_safe_string(in_string),emit);
+ return set(ll_safe_string(in_string), emit);
}
BOOL LLUUID::set(const std::string& in_string, BOOL emit)
{
- BOOL broken_format = FALSE;
-
- // empty strings should make NULL uuid
- if (in_string.empty())
- {
- setNull();
- return TRUE;
- }
-
- if (in_string.length() != (UUID_STR_LENGTH - 1)) /* Flawfinder: ignore */
- {
- // I'm a moron. First implementation didn't have the right UUID format.
- // Shouldn't see any of these any more
- if (in_string.length() == (UUID_STR_LENGTH - 2)) /* Flawfinder: ignore */
- {
- if(emit)
- {
- LL_WARNS() << "Warning! Using broken UUID string format" << LL_ENDL;
- }
- broken_format = TRUE;
- }
- else
- {
- // Bad UUID string. Spam as INFO, as most cases we don't care.
- if(emit)
- {
- //don't spam the logs because a resident can't spell.
- LL_WARNS() << "Bad UUID string: " << in_string << LL_ENDL;
- }
- setNull();
- return FALSE;
- }
- }
-
- U8 cur_pos = 0;
- S32 i;
- for (i = 0; i < UUID_BYTES; i++)
- {
- if ((i == 4) || (i == 6) || (i == 8) || (i == 10))
- {
- cur_pos++;
- if (broken_format && (i==10))
- {
- // Missing - in the broken format
- cur_pos--;
- }
- }
-
- mData[i] = 0;
-
- if ((in_string[cur_pos] >= '0') && (in_string[cur_pos] <= '9'))
- {
- mData[i] += (U8)(in_string[cur_pos] - '0');
- }
- else if ((in_string[cur_pos] >= 'a') && (in_string[cur_pos] <='f'))
- {
- mData[i] += (U8)(10 + in_string[cur_pos] - 'a');
- }
- else if ((in_string[cur_pos] >= 'A') && (in_string[cur_pos] <='F'))
- {
- mData[i] += (U8)(10 + in_string[cur_pos] - 'A');
- }
- else
- {
- if(emit)
- {
- LL_WARNS() << "Invalid UUID string character" << LL_ENDL;
- }
- setNull();
- return FALSE;
- }
-
- mData[i] = mData[i] << 4;
- cur_pos++;
-
- if ((in_string[cur_pos] >= '0') && (in_string[cur_pos] <= '9'))
- {
- mData[i] += (U8)(in_string[cur_pos] - '0');
- }
- else if ((in_string[cur_pos] >= 'a') && (in_string[cur_pos] <='f'))
- {
- mData[i] += (U8)(10 + in_string[cur_pos] - 'a');
- }
- else if ((in_string[cur_pos] >= 'A') && (in_string[cur_pos] <='F'))
- {
- mData[i] += (U8)(10 + in_string[cur_pos] - 'A');
- }
- else
- {
- if(emit)
- {
- LL_WARNS() << "Invalid UUID string character" << LL_ENDL;
- }
- setNull();
- return FALSE;
- }
- cur_pos++;
- }
-
- return TRUE;
+ BOOL broken_format = FALSE;
+
+ // empty strings should make NULL uuid
+ if (in_string.empty())
+ {
+ setNull();
+ return TRUE;
+ }
+
+ if (in_string.length() != (UUID_STR_LENGTH - 1)) /* Flawfinder: ignore */
+ {
+ // I'm a moron. First implementation didn't have the right UUID format.
+ // Shouldn't see any of these any more
+ if (in_string.length() == (UUID_STR_LENGTH - 2)) /* Flawfinder: ignore */
+ {
+ if (emit)
+ {
+ LL_WARNS() << "Warning! Using broken UUID string format" << LL_ENDL;
+ }
+ broken_format = TRUE;
+ }
+ else
+ {
+ // Bad UUID string. Spam as INFO, as most cases we don't care.
+ if (emit)
+ {
+ //don't spam the logs because a resident can't spell.
+ LL_WARNS() << "Bad UUID string: " << in_string << LL_ENDL;
+ }
+ setNull();
+ return FALSE;
+ }
+ }
+
+ U8 cur_pos = 0;
+ S32 i;
+ for (i = 0; i < UUID_BYTES; i++)
+ {
+ if ((i == 4) || (i == 6) || (i == 8) || (i == 10))
+ {
+ cur_pos++;
+ if (broken_format && (i == 10))
+ {
+ // Missing - in the broken format
+ cur_pos--;
+ }
+ }
+
+ mData[i] = 0;
+
+ if ((in_string[cur_pos] >= '0') && (in_string[cur_pos] <= '9'))
+ {
+ mData[i] += (U8)(in_string[cur_pos] - '0');
+ }
+ else if ((in_string[cur_pos] >= 'a') && (in_string[cur_pos] <= 'f'))
+ {
+ mData[i] += (U8)(10 + in_string[cur_pos] - 'a');
+ }
+ else if ((in_string[cur_pos] >= 'A') && (in_string[cur_pos] <= 'F'))
+ {
+ mData[i] += (U8)(10 + in_string[cur_pos] - 'A');
+ }
+ else
+ {
+ if (emit)
+ {
+ LL_WARNS() << "Invalid UUID string character" << LL_ENDL;
+ }
+ setNull();
+ return FALSE;
+ }
+
+ mData[i] = mData[i] << 4;
+ cur_pos++;
+
+ if ((in_string[cur_pos] >= '0') && (in_string[cur_pos] <= '9'))
+ {
+ mData[i] += (U8)(in_string[cur_pos] - '0');
+ }
+ else if ((in_string[cur_pos] >= 'a') && (in_string[cur_pos] <= 'f'))
+ {
+ mData[i] += (U8)(10 + in_string[cur_pos] - 'a');
+ }
+ else if ((in_string[cur_pos] >= 'A') && (in_string[cur_pos] <= 'F'))
+ {
+ mData[i] += (U8)(10 + in_string[cur_pos] - 'A');
+ }
+ else
+ {
+ if (emit)
+ {
+ LL_WARNS() << "Invalid UUID string character" << LL_ENDL;
+ }
+ setNull();
+ return FALSE;
+ }
+ cur_pos++;
+ }
+
+ return TRUE;
}
BOOL LLUUID::validate(const std::string& in_string)
{
- BOOL broken_format = FALSE;
- if (in_string.length() != (UUID_STR_LENGTH - 1)) /* Flawfinder: ignore */
- {
- // I'm a moron. First implementation didn't have the right UUID format.
- if (in_string.length() == (UUID_STR_LENGTH - 2)) /* Flawfinder: ignore */
- {
- broken_format = TRUE;
- }
- else
- {
- return FALSE;
- }
- }
-
- U8 cur_pos = 0;
- for (U32 i = 0; i < 16; i++)
- {
- if ((i == 4) || (i == 6) || (i == 8) || (i == 10))
- {
- cur_pos++;
- if (broken_format && (i==10))
- {
- // Missing - in the broken format
- cur_pos--;
- }
- }
-
- if ((in_string[cur_pos] >= '0') && (in_string[cur_pos] <= '9'))
- {
- }
- else if ((in_string[cur_pos] >= 'a') && (in_string[cur_pos] <='f'))
- {
- }
- else if ((in_string[cur_pos] >= 'A') && (in_string[cur_pos] <='F'))
- {
- }
- else
- {
- return FALSE;
- }
-
- cur_pos++;
-
- if ((in_string[cur_pos] >= '0') && (in_string[cur_pos] <= '9'))
- {
- }
- else if ((in_string[cur_pos] >= 'a') && (in_string[cur_pos] <='f'))
- {
- }
- else if ((in_string[cur_pos] >= 'A') && (in_string[cur_pos] <='F'))
- {
- }
- else
- {
- return FALSE;
- }
- cur_pos++;
- }
- return TRUE;
+ BOOL broken_format = FALSE;
+ if (in_string.length() != (UUID_STR_LENGTH - 1)) /* Flawfinder: ignore */
+ {
+ // I'm a moron. First implementation didn't have the right UUID format.
+ if (in_string.length() == (UUID_STR_LENGTH - 2)) /* Flawfinder: ignore */
+ {
+ broken_format = TRUE;
+ }
+ else
+ {
+ return FALSE;
+ }
+ }
+
+ U8 cur_pos = 0;
+ for (U32 i = 0; i < 16; i++)
+ {
+ if ((i == 4) || (i == 6) || (i == 8) || (i == 10))
+ {
+ cur_pos++;
+ if (broken_format && (i == 10))
+ {
+ // Missing - in the broken format
+ cur_pos--;
+ }
+ }
+
+ if ((in_string[cur_pos] >= '0') && (in_string[cur_pos] <= '9'))
+ {
+ }
+ else if ((in_string[cur_pos] >= 'a') && (in_string[cur_pos] <= 'f'))
+ {
+ }
+ else if ((in_string[cur_pos] >= 'A') && (in_string[cur_pos] <= 'F'))
+ {
+ }
+ else
+ {
+ return FALSE;
+ }
+
+ cur_pos++;
+
+ if ((in_string[cur_pos] >= '0') && (in_string[cur_pos] <= '9'))
+ {
+ }
+ else if ((in_string[cur_pos] >= 'a') && (in_string[cur_pos] <= 'f'))
+ {
+ }
+ else if ((in_string[cur_pos] >= 'A') && (in_string[cur_pos] <= 'F'))
+ {
+ }
+ else
+ {
+ return FALSE;
+ }
+ cur_pos++;
+ }
+ return TRUE;
}
const LLUUID& LLUUID::operator^=(const LLUUID& rhs)
{
- U32* me = (U32*)&(mData[0]);
- const U32* other = (U32*)&(rhs.mData[0]);
- for(S32 i = 0; i < 4; ++i)
- {
- me[i] = me[i] ^ other[i];
- }
- return *this;
+ U32* me = (U32*)&(mData[0]);
+ const U32* other = (U32*)&(rhs.mData[0]);
+ for (S32 i = 0; i < 4; ++i)
+ {
+ me[i] = me[i] ^ other[i];
+ }
+ return *this;
}
LLUUID LLUUID::operator^(const LLUUID& rhs) const
{
- LLUUID id(*this);
- id ^= rhs;
- return id;
+ LLUUID id(*this);
+ id ^= rhs;
+ return id;
}
// WARNING: this algorithm SHALL NOT be changed. It is also used by the server
@@ -406,107 +406,107 @@ LLUUID LLUUID::operator^(const LLUUID& rhs) const
// it would cause invalid assets.
void LLUUID::combine(const LLUUID& other, LLUUID& result) const
{
- LLMD5 md5_uuid;
- md5_uuid.update((unsigned char*)mData, 16);
- md5_uuid.update((unsigned char*)other.mData, 16);
- md5_uuid.finalize();
- md5_uuid.raw_digest(result.mData);
+ LLMD5 md5_uuid;
+ md5_uuid.update((unsigned char*)mData, 16);
+ md5_uuid.update((unsigned char*)other.mData, 16);
+ md5_uuid.finalize();
+ md5_uuid.raw_digest(result.mData);
}
-LLUUID LLUUID::combine(const LLUUID &other) const
+LLUUID LLUUID::combine(const LLUUID& other) const
{
- LLUUID combination;
- combine(other, combination);
- return combination;
+ LLUUID combination;
+ combine(other, combination);
+ return combination;
}
-std::ostream& operator<<(std::ostream& s, const LLUUID &uuid)
+std::ostream& operator<<(std::ostream& s, const LLUUID& uuid)
{
- std::string uuid_str;
- uuid.toString(uuid_str);
- s << uuid_str;
- return s;
+ std::string uuid_str;
+ uuid.toString(uuid_str);
+ s << uuid_str;
+ return s;
}
-std::istream& operator>>(std::istream &s, LLUUID &uuid)
+std::istream& operator>>(std::istream& s, LLUUID& uuid)
{
- U32 i;
- char uuid_str[UUID_STR_LENGTH]; /* Flawfinder: ignore */
- for (i = 0; i < UUID_STR_LENGTH-1; i++)
- {
- s >> uuid_str[i];
- }
- uuid_str[i] = '\0';
- uuid.set(std::string(uuid_str));
- return s;
+ U32 i;
+ char uuid_str[UUID_STR_LENGTH]; /* Flawfinder: ignore */
+ for (i = 0; i < UUID_STR_LENGTH - 1; i++)
+ {
+ s >> uuid_str[i];
+ }
+ uuid_str[i] = '\0';
+ uuid.set(std::string(uuid_str));
+ return s;
}
-static void get_random_bytes(void *buf, int nbytes)
+static void get_random_bytes(void* buf, int nbytes)
{
- int i;
- char *cp = (char *) buf;
-
- // *NOTE: If we are not using the janky generator ll_rand()
- // generates at least 3 good bytes of data since it is 0 to
- // RAND_MAX. This could be made more efficient by copying all the
- // bytes.
- for (i=0; i < nbytes; i++)
+ int i;
+ char* cp = (char*)buf;
+
+ // *NOTE: If we are not using the janky generator ll_rand()
+ // generates at least 3 good bytes of data since it is 0 to
+ // RAND_MAX. This could be made more efficient by copying all the
+ // bytes.
+ for (i = 0; i < nbytes; i++)
#if LL_USE_JANKY_RANDOM_NUMBER_GENERATOR
- *cp++ = janky_fast_random_bytes() & 0xFF;
+ * cp++ = janky_fast_random_bytes() & 0xFF;
#else
- *cp++ = ll_rand() & 0xFF;
+ * cp++ = ll_rand() & 0xFF;
#endif
- return;
+ return;
}
#if LL_WINDOWS
typedef struct _ASTAT_
{
- ADAPTER_STATUS adapt;
- NAME_BUFFER NameBuff [30];
+ ADAPTER_STATUS adapt;
+ NAME_BUFFER NameBuff[30];
}ASTAT, * PASTAT;
// static
-S32 LLUUID::getNodeID(unsigned char *node_id)
+S32 LLUUID::getNodeID(unsigned char* node_id)
{
- ASTAT Adapter;
- NCB Ncb;
- UCHAR uRetCode;
- LANA_ENUM lenum;
- int i;
- int retval = 0;
-
- memset( &Ncb, 0, sizeof(Ncb) );
- Ncb.ncb_command = NCBENUM;
- Ncb.ncb_buffer = (UCHAR *)&lenum;
- Ncb.ncb_length = sizeof(lenum);
- uRetCode = Netbios( &Ncb );
-
- for(i=0; i < lenum.length ;i++)
- {
- memset( &Ncb, 0, sizeof(Ncb) );
- Ncb.ncb_command = NCBRESET;
- Ncb.ncb_lana_num = lenum.lana[i];
-
- uRetCode = Netbios( &Ncb );
-
- memset( &Ncb, 0, sizeof (Ncb) );
- Ncb.ncb_command = NCBASTAT;
- Ncb.ncb_lana_num = lenum.lana[i];
-
- strcpy( (char *)Ncb.ncb_callname, "* " ); /* Flawfinder: ignore */
- Ncb.ncb_buffer = (unsigned char *)&Adapter;
- Ncb.ncb_length = sizeof(Adapter);
-
- uRetCode = Netbios( &Ncb );
- if ( uRetCode == 0 )
- {
- memcpy(node_id,Adapter.adapt.adapter_address,6); /* Flawfinder: ignore */
- retval = 1;
- }
- }
- return retval;
+ ASTAT Adapter;
+ NCB Ncb;
+ UCHAR uRetCode;
+ LANA_ENUM lenum;
+ int i;
+ int retval = 0;
+
+ memset(&Ncb, 0, sizeof(Ncb));
+ Ncb.ncb_command = NCBENUM;
+ Ncb.ncb_buffer = (UCHAR*)&lenum;
+ Ncb.ncb_length = sizeof(lenum);
+ uRetCode = Netbios(&Ncb);
+
+ for (i = 0; i < lenum.length; i++)
+ {
+ memset(&Ncb, 0, sizeof(Ncb));
+ Ncb.ncb_command = NCBRESET;
+ Ncb.ncb_lana_num = lenum.lana[i];
+
+ uRetCode = Netbios(&Ncb);
+
+ memset(&Ncb, 0, sizeof(Ncb));
+ Ncb.ncb_command = NCBASTAT;
+ Ncb.ncb_lana_num = lenum.lana[i];
+
+ strcpy((char*)Ncb.ncb_callname, "* "); /* Flawfinder: ignore */
+ Ncb.ncb_buffer = (unsigned char*)&Adapter;
+ Ncb.ncb_length = sizeof(Adapter);
+
+ uRetCode = Netbios(&Ncb);
+ if (uRetCode == 0)
+ {
+ memcpy(node_id, Adapter.adapt.adapter_address, 6); /* Flawfinder: ignore */
+ retval = 1;
+ }
+ }
+ return retval;
}
#elif LL_DARWIN
@@ -525,66 +525,66 @@ S32 LLUUID::getNodeID(unsigned char *node_id)
#include <net/route.h>
#include <ifaddrs.h>
-// static
-S32 LLUUID::getNodeID(unsigned char *node_id)
+ // static
+S32 LLUUID::getNodeID(unsigned char* node_id)
{
- int i;
- unsigned char *a = NULL;
- struct ifaddrs *ifap, *ifa;
- int rv;
- S32 result = 0;
-
- if ((rv=getifaddrs(&ifap))==-1)
- {
- return -1;
- }
- if (ifap == NULL)
- {
- return -1;
- }
-
- for (ifa = ifap; ifa != NULL; ifa = ifa->ifa_next)
- {
-// printf("Interface %s, address family %d, ", ifa->ifa_name, ifa->ifa_addr->sa_family);
- for(i=0; i< ifa->ifa_addr->sa_len; i++)
- {
-// printf("%02X ", (unsigned char)ifa->ifa_addr->sa_data[i]);
- }
-// printf("\n");
-
- if(ifa->ifa_addr->sa_family == AF_LINK)
- {
- // This is a link-level address
- struct sockaddr_dl *lla = (struct sockaddr_dl *)ifa->ifa_addr;
-
-// printf("\tLink level address, type %02X\n", lla->sdl_type);
-
- if(lla->sdl_type == IFT_ETHER)
- {
- // Use the first ethernet MAC in the list.
- // For some reason, the macro LLADDR() defined in net/if_dl.h doesn't expand correctly. This is what it would do.
- a = (unsigned char *)&((lla)->sdl_data);
- a += (lla)->sdl_nlen;
-
- if (!a[0] && !a[1] && !a[2] && !a[3] && !a[4] && !a[5])
- {
- continue;
- }
-
- if (node_id)
- {
- memcpy(node_id, a, 6);
- result = 1;
- }
-
- // We found one.
- break;
- }
- }
- }
- freeifaddrs(ifap);
-
- return result;
+ int i;
+ unsigned char* a = NULL;
+ struct ifaddrs* ifap, * ifa;
+ int rv;
+ S32 result = 0;
+
+ if ((rv = getifaddrs(&ifap)) == -1)
+ {
+ return -1;
+ }
+ if (ifap == NULL)
+ {
+ return -1;
+ }
+
+ for (ifa = ifap; ifa != NULL; ifa = ifa->ifa_next)
+ {
+ // printf("Interface %s, address family %d, ", ifa->ifa_name, ifa->ifa_addr->sa_family);
+ for (i = 0; i < ifa->ifa_addr->sa_len; i++)
+ {
+ // printf("%02X ", (unsigned char)ifa->ifa_addr->sa_data[i]);
+ }
+ // printf("\n");
+
+ if (ifa->ifa_addr->sa_family == AF_LINK)
+ {
+ // This is a link-level address
+ struct sockaddr_dl* lla = (struct sockaddr_dl*)ifa->ifa_addr;
+
+ // printf("\tLink level address, type %02X\n", lla->sdl_type);
+
+ if (lla->sdl_type == IFT_ETHER)
+ {
+ // Use the first ethernet MAC in the list.
+ // For some reason, the macro LLADDR() defined in net/if_dl.h doesn't expand correctly. This is what it would do.
+ a = (unsigned char*)&((lla)->sdl_data);
+ a += (lla)->sdl_nlen;
+
+ if (!a[0] && !a[1] && !a[2] && !a[3] && !a[4] && !a[5])
+ {
+ continue;
+ }
+
+ if (node_id)
+ {
+ memcpy(node_id, a, 6);
+ result = 1;
+ }
+
+ // We found one.
+ break;
+ }
+ }
+ }
+ freeifaddrs(ifap);
+
+ return result;
}
#else
@@ -611,22 +611,22 @@ S32 LLUUID::getNodeID(unsigned char *node_id)
#endif
#endif
-// static
-S32 LLUUID::getNodeID(unsigned char *node_id)
+ // static
+S32 LLUUID::getNodeID(unsigned char* node_id)
{
- int sd;
- struct ifreq ifr, *ifrp;
- struct ifconf ifc;
- char buf[1024];
- int n, i;
- unsigned char *a;
-
-/*
- * BSD 4.4 defines the size of an ifreq to be
- * max(sizeof(ifreq), sizeof(ifreq.ifr_name)+ifreq.ifr_addr.sa_len
- * However, under earlier systems, sa_len isn't present, so the size is
- * just sizeof(struct ifreq)
- */
+ int sd;
+ struct ifreq ifr, * ifrp;
+ struct ifconf ifc;
+ char buf[1024];
+ int n, i;
+ unsigned char* a;
+
+ /*
+ * BSD 4.4 defines the size of an ifreq to be
+ * max(sizeof(ifreq), sizeof(ifreq.ifr_name)+ifreq.ifr_addr.sa_len
+ * However, under earlier systems, sa_len isn't present, so the size is
+ * just sizeof(struct ifreq)
+ */
#ifdef HAVE_SA_LEN
#ifndef max
#define max(a,b) ((a) > (b) ? (a) : (b))
@@ -637,252 +637,253 @@ S32 LLUUID::getNodeID(unsigned char *node_id)
#define ifreq_size(i) sizeof(struct ifreq)
#endif /* HAVE_SA_LEN*/
- sd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
- if (sd < 0) {
- return -1;
- }
- memset(buf, 0, sizeof(buf));
- ifc.ifc_len = sizeof(buf);
- ifc.ifc_buf = buf;
- if (ioctl (sd, SIOCGIFCONF, (char *)&ifc) < 0) {
- close(sd);
- return -1;
- }
- n = ifc.ifc_len;
- for (i = 0; i < n; i+= ifreq_size(*ifr) ) {
- ifrp = (struct ifreq *)((char *) ifc.ifc_buf+i);
- strncpy(ifr.ifr_name, ifrp->ifr_name, IFNAMSIZ); /* Flawfinder: ignore */
+ sd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
+ if (sd < 0) {
+ return -1;
+ }
+ memset(buf, 0, sizeof(buf));
+ ifc.ifc_len = sizeof(buf);
+ ifc.ifc_buf = buf;
+ if (ioctl(sd, SIOCGIFCONF, (char*)&ifc) < 0) {
+ close(sd);
+ return -1;
+ }
+ n = ifc.ifc_len;
+ for (i = 0; i < n; i += ifreq_size(*ifr)) {
+ ifrp = (struct ifreq*)((char*)ifc.ifc_buf + i);
+ strncpy(ifr.ifr_name, ifrp->ifr_name, IFNAMSIZ); /* Flawfinder: ignore */
#ifdef SIOCGIFHWADDR
- if (ioctl(sd, SIOCGIFHWADDR, &ifr) < 0)
- continue;
- a = (unsigned char *) &ifr.ifr_hwaddr.sa_data;
+ if (ioctl(sd, SIOCGIFHWADDR, &ifr) < 0)
+ continue;
+ a = (unsigned char*)&ifr.ifr_hwaddr.sa_data;
#else
#ifdef SIOCGENADDR
- if (ioctl(sd, SIOCGENADDR, &ifr) < 0)
- continue;
- a = (unsigned char *) ifr.ifr_enaddr;
+ if (ioctl(sd, SIOCGENADDR, &ifr) < 0)
+ continue;
+ a = (unsigned char*)ifr.ifr_enaddr;
#else
- /*
- * XXX we don't have a way of getting the hardware
- * address
- */
- close(sd);
- return 0;
+ /*
+ * XXX we don't have a way of getting the hardware
+ * address
+ */
+ close(sd);
+ return 0;
#endif /* SIOCGENADDR */
#endif /* SIOCGIFHWADDR */
- if (!a[0] && !a[1] && !a[2] && !a[3] && !a[4] && !a[5])
- continue;
- if (node_id) {
- memcpy(node_id, a, 6); /* Flawfinder: ignore */
- close(sd);
- return 1;
- }
- }
- close(sd);
- return 0;
+ if (!a[0] && !a[1] && !a[2] && !a[3] && !a[4] && !a[5])
+ continue;
+ if (node_id) {
+ memcpy(node_id, a, 6); /* Flawfinder: ignore */
+ close(sd);
+ return 1;
+ }
+ }
+ close(sd);
+ return 0;
}
#endif
-S32 LLUUID::cmpTime(uuid_time_t *t1, uuid_time_t *t2)
+S32 LLUUID::cmpTime(uuid_time_t* t1, uuid_time_t* t2)
{
- // Compare two time values.
+ // Compare two time values.
- if (t1->high < t2->high) return -1;
- if (t1->high > t2->high) return 1;
- if (t1->low < t2->low) return -1;
- if (t1->low > t2->low) return 1;
- return 0;
+ if (t1->high < t2->high) return -1;
+ if (t1->high > t2->high) return 1;
+ if (t1->low < t2->low) return -1;
+ if (t1->low > t2->low) return 1;
+ return 0;
}
-void LLUUID::getSystemTime(uuid_time_t *timestamp)
+void LLUUID::getSystemTime(uuid_time_t* timestamp)
{
- // Get system time with 100ns precision. Time is since Oct 15, 1582.
+ // Get system time with 100ns precision. Time is since Oct 15, 1582.
#if LL_WINDOWS
- ULARGE_INTEGER time;
- GetSystemTimeAsFileTime((FILETIME *)&time);
- // NT keeps time in FILETIME format which is 100ns ticks since
- // Jan 1, 1601. UUIDs use time in 100ns ticks since Oct 15, 1582.
- // The difference is 17 Days in Oct + 30 (Nov) + 31 (Dec)
- // + 18 years and 5 leap days.
- time.QuadPart +=
- (unsigned __int64) (1000*1000*10) // seconds
- * (unsigned __int64) (60 * 60 * 24) // days
- * (unsigned __int64) (17+30+31+365*18+5); // # of days
-
- timestamp->high = time.HighPart;
- timestamp->low = time.LowPart;
+ ULARGE_INTEGER time;
+ GetSystemTimeAsFileTime((FILETIME*)&time);
+ // NT keeps time in FILETIME format which is 100ns ticks since
+ // Jan 1, 1601. UUIDs use time in 100ns ticks since Oct 15, 1582.
+ // The difference is 17 Days in Oct + 30 (Nov) + 31 (Dec)
+ // + 18 years and 5 leap days.
+ time.QuadPart +=
+ (unsigned __int64)(1000 * 1000 * 10) // seconds
+ * (unsigned __int64)(60 * 60 * 24) // days
+ * (unsigned __int64)(17 + 30 + 31 + 365 * 18 + 5); // # of days
+
+ timestamp->high = time.HighPart;
+ timestamp->low = time.LowPart;
#else
- struct timeval tp;
- gettimeofday(&tp, 0);
-
- // Offset between UUID formatted times and Unix formatted times.
- // UUID UTC base time is October 15, 1582.
- // Unix base time is January 1, 1970.
- U64 uuid_time = ((U64)tp.tv_sec * 10000000) + (tp.tv_usec * 10) +
- U64L(0x01B21DD213814000);
- timestamp->high = (U32) (uuid_time >> 32);
- timestamp->low = (U32) (uuid_time & 0xFFFFFFFF);
+ struct timeval tp;
+ gettimeofday(&tp, 0);
+
+ // Offset between UUID formatted times and Unix formatted times.
+ // UUID UTC base time is October 15, 1582.
+ // Unix base time is January 1, 1970.
+ U64 uuid_time = ((U64)tp.tv_sec * 10000000) + (tp.tv_usec * 10) +
+ U64L(0x01B21DD213814000);
+ timestamp->high = (U32)(uuid_time >> 32);
+ timestamp->low = (U32)(uuid_time & 0xFFFFFFFF);
#endif
}
-void LLUUID::getCurrentTime(uuid_time_t *timestamp)
+void LLUUID::getCurrentTime(uuid_time_t* timestamp)
{
- // Get current time as 60 bit 100ns ticks since whenever.
- // Compensate for the fact that real clock resolution is less
- // than 100ns.
-
- const U32 uuids_per_tick = 1024;
-
- static uuid_time_t time_last;
- static U32 uuids_this_tick;
- static BOOL init = FALSE;
-
- if (!init) {
- getSystemTime(&time_last);
- uuids_this_tick = uuids_per_tick;
- init = TRUE;
- mMutex = new LLMutex();
- }
-
- uuid_time_t time_now = {0,0};
-
- while (1) {
- getSystemTime(&time_now);
-
- // if clock reading changed since last UUID generated
- if (cmpTime(&time_last, &time_now)) {
- // reset count of uuid's generated with this clock reading
- uuids_this_tick = 0;
- break;
- }
- if (uuids_this_tick < uuids_per_tick) {
- uuids_this_tick++;
- break;
- }
- // going too fast for our clock; spin
- }
-
- time_last = time_now;
-
- if (uuids_this_tick != 0) {
- if (time_now.low & 0x80000000) {
- time_now.low += uuids_this_tick;
- if (!(time_now.low & 0x80000000))
- time_now.high++;
- } else
- time_now.low += uuids_this_tick;
- }
-
- timestamp->high = time_now.high;
- timestamp->low = time_now.low;
+ // Get current time as 60 bit 100ns ticks since whenever.
+ // Compensate for the fact that real clock resolution is less
+ // than 100ns.
+
+ const U32 uuids_per_tick = 1024;
+
+ static uuid_time_t time_last;
+ static U32 uuids_this_tick;
+ static BOOL init = FALSE;
+
+ if (!init) {
+ getSystemTime(&time_last);
+ uuids_this_tick = uuids_per_tick;
+ init = TRUE;
+ mMutex = new LLMutex();
+ }
+
+ uuid_time_t time_now = { 0,0 };
+
+ while (1) {
+ getSystemTime(&time_now);
+
+ // if clock reading changed since last UUID generated
+ if (cmpTime(&time_last, &time_now)) {
+ // reset count of uuid's generated with this clock reading
+ uuids_this_tick = 0;
+ break;
+ }
+ if (uuids_this_tick < uuids_per_tick) {
+ uuids_this_tick++;
+ break;
+ }
+ // going too fast for our clock; spin
+ }
+
+ time_last = time_now;
+
+ if (uuids_this_tick != 0) {
+ if (time_now.low & 0x80000000) {
+ time_now.low += uuids_this_tick;
+ if (!(time_now.low & 0x80000000))
+ time_now.high++;
+ }
+ else
+ time_now.low += uuids_this_tick;
+ }
+
+ timestamp->high = time_now.high;
+ timestamp->low = time_now.low;
}
void LLUUID::generate()
{
- // Create a UUID.
- uuid_time_t timestamp;
-
- static unsigned char node_id[6]; /* Flawfinder: ignore */
- static int has_init = 0;
-
- // Create a UUID.
- static uuid_time_t time_last = {0,0};
- static U16 clock_seq = 0;
+ // Create a UUID.
+ uuid_time_t timestamp;
+
+ static unsigned char node_id[6]; /* Flawfinder: ignore */
+ static int has_init = 0;
+
+ // Create a UUID.
+ static uuid_time_t time_last = { 0,0 };
+ static U16 clock_seq = 0;
#if LL_USE_JANKY_RANDOM_NUMBER_GENERATOR
- static U32 seed = 0L; // dummy seed. reset it below
+ static U32 seed = 0L; // dummy seed. reset it below
#endif
- if (!has_init)
- {
- has_init = 1;
- if (getNodeID(node_id) <= 0)
- {
- get_random_bytes(node_id, 6);
- /*
- * Set multicast bit, to prevent conflicts
- * with IEEE 802 addresses obtained from
- * network cards
- */
- node_id[0] |= 0x80;
- }
-
- getCurrentTime(&time_last);
+ if (!has_init)
+ {
+ has_init = 1;
+ if (getNodeID(node_id) <= 0)
+ {
+ get_random_bytes(node_id, 6);
+ /*
+ * Set multicast bit, to prevent conflicts
+ * with IEEE 802 addresses obtained from
+ * network cards
+ */
+ node_id[0] |= 0x80;
+ }
+
+ getCurrentTime(&time_last);
#if LL_USE_JANKY_RANDOM_NUMBER_GENERATOR
- seed = time_last.low;
+ seed = time_last.low;
#endif
#if LL_USE_JANKY_RANDOM_NUMBER_GENERATOR
- clock_seq = (U16)janky_fast_random_seeded_bytes(seed, 65536);
+ clock_seq = (U16)janky_fast_random_seeded_bytes(seed, 65536);
#else
- clock_seq = (U16)ll_rand(65536);
+ clock_seq = (U16)ll_rand(65536);
#endif
- }
-
- // get current time
- getCurrentTime(&timestamp);
- U16 our_clock_seq = clock_seq;
-
- // if clock hasn't changed or went backward, change clockseq
- if (cmpTime(&timestamp, &time_last) != 1)
- {
- LLMutexLock lock(mMutex);
- clock_seq = (clock_seq + 1) & 0x3FFF;
- if (clock_seq == 0)
- clock_seq++;
- our_clock_seq = clock_seq; // Ensure we're using a different clock_seq value from previous time
- }
+ }
+
+ // get current time
+ getCurrentTime(&timestamp);
+ U16 our_clock_seq = clock_seq;
+
+ // if clock hasn't changed or went backward, change clockseq
+ if (cmpTime(&timestamp, &time_last) != 1)
+ {
+ LLMutexLock lock(mMutex);
+ clock_seq = (clock_seq + 1) & 0x3FFF;
+ if (clock_seq == 0)
+ clock_seq++;
+ our_clock_seq = clock_seq; // Ensure we're using a different clock_seq value from previous time
+ }
time_last = timestamp;
- memcpy(mData+10, node_id, 6); /* Flawfinder: ignore */
- U32 tmp;
- tmp = timestamp.low;
- mData[3] = (unsigned char) tmp;
- tmp >>= 8;
- mData[2] = (unsigned char) tmp;
- tmp >>= 8;
- mData[1] = (unsigned char) tmp;
- tmp >>= 8;
- mData[0] = (unsigned char) tmp;
-
- tmp = (U16) timestamp.high;
- mData[5] = (unsigned char) tmp;
- tmp >>= 8;
- mData[4] = (unsigned char) tmp;
-
- tmp = (timestamp.high >> 16) | 0x1000;
- mData[7] = (unsigned char) tmp;
- tmp >>= 8;
- mData[6] = (unsigned char) tmp;
-
- tmp = our_clock_seq;
-
- mData[9] = (unsigned char) tmp;
- tmp >>= 8;
- mData[8] = (unsigned char) tmp;
-
- HBXXH128::digest(*this, (const void*)mData, 16);
+ memcpy(mData + 10, node_id, 6); /* Flawfinder: ignore */
+ U32 tmp;
+ tmp = timestamp.low;
+ mData[3] = (unsigned char)tmp;
+ tmp >>= 8;
+ mData[2] = (unsigned char)tmp;
+ tmp >>= 8;
+ mData[1] = (unsigned char)tmp;
+ tmp >>= 8;
+ mData[0] = (unsigned char)tmp;
+
+ tmp = (U16)timestamp.high;
+ mData[5] = (unsigned char)tmp;
+ tmp >>= 8;
+ mData[4] = (unsigned char)tmp;
+
+ tmp = (timestamp.high >> 16) | 0x1000;
+ mData[7] = (unsigned char)tmp;
+ tmp >>= 8;
+ mData[6] = (unsigned char)tmp;
+
+ tmp = our_clock_seq;
+
+ mData[9] = (unsigned char)tmp;
+ tmp >>= 8;
+ mData[8] = (unsigned char)tmp;
+
+ HBXXH128::digest(*this, (const void*)mData, 16);
}
void LLUUID::generate(const std::string& hash_string)
{
- HBXXH128::digest(*this, hash_string);
+ HBXXH128::digest(*this, hash_string);
}
U32 LLUUID::getRandomSeed()
{
- static unsigned char seed[16]; /* Flawfinder: ignore */
-
- getNodeID(&seed[0]);
+ static unsigned char seed[16]; /* Flawfinder: ignore */
+
+ getNodeID(&seed[0]);
- // Incorporate the pid into the seed to prevent
- // processes that start on the same host at the same
- // time from generating the same seed.
- pid_t pid = LLApp::getPid();
+ // Incorporate the pid into the seed to prevent
+ // processes that start on the same host at the same
+ // time from generating the same seed.
+ pid_t pid = LLApp::getPid();
- seed[6]=(unsigned char)(pid >> 8);
- seed[7]=(unsigned char)(pid);
- getSystemTime((uuid_time_t *)(&seed[8]));
+ seed[6] = (unsigned char)(pid >> 8);
+ seed[7] = (unsigned char)(pid);
+ getSystemTime((uuid_time_t*)(&seed[8]));
U64 seed64 = HBXXH64::digest((const void*)seed, 16);
return U32(seed64) ^ U32(seed64 >> 32);
@@ -890,92 +891,92 @@ U32 LLUUID::getRandomSeed()
BOOL LLUUID::parseUUID(const std::string& buf, LLUUID* value)
{
- if( buf.empty() || value == NULL)
- {
- return FALSE;
- }
-
- std::string temp( buf );
- LLStringUtil::trim(temp);
- if( LLUUID::validate( temp ) )
- {
- value->set( temp );
- return TRUE;
- }
- return FALSE;
+ if (buf.empty() || value == NULL)
+ {
+ return FALSE;
+ }
+
+ std::string temp(buf);
+ LLStringUtil::trim(temp);
+ if (LLUUID::validate(temp))
+ {
+ value->set(temp);
+ return TRUE;
+ }
+ return FALSE;
}
//static
LLUUID LLUUID::generateNewID(std::string hash_string)
{
- LLUUID new_id;
- if (hash_string.empty())
- {
- new_id.generate();
- }
- else
- {
- new_id.generate(hash_string);
- }
- return new_id;
+ LLUUID new_id;
+ if (hash_string.empty())
+ {
+ new_id.generate();
+ }
+ else
+ {
+ new_id.generate(hash_string);
+ }
+ return new_id;
}
LLAssetID LLTransactionID::makeAssetID(const LLUUID& session) const
{
- LLAssetID result;
- if (isNull())
- {
- result.setNull();
- }
- else
- {
- combine(session, result);
- }
- return result;
+ LLAssetID result;
+ if (isNull())
+ {
+ result.setNull();
+ }
+ else
+ {
+ combine(session, result);
+ }
+ return result;
}
// Construct
LLUUID::LLUUID()
{
- setNull();
+ setNull();
}
// Faster than copying from memory
- void LLUUID::setNull()
+void LLUUID::setNull()
{
- U32 *word = (U32 *)mData;
- word[0] = 0;
- word[1] = 0;
- word[2] = 0;
- word[3] = 0;
+ U32* word = (U32*)mData;
+ word[0] = 0;
+ word[1] = 0;
+ word[2] = 0;
+ word[3] = 0;
}
// Compare
- bool LLUUID::operator==(const LLUUID& rhs) const
+bool LLUUID::operator==(const LLUUID& rhs) const
{
- U32 *tmp = (U32 *)mData;
- U32 *rhstmp = (U32 *)rhs.mData;
- // Note: binary & to avoid branching
- return
- (tmp[0] == rhstmp[0]) &
- (tmp[1] == rhstmp[1]) &
- (tmp[2] == rhstmp[2]) &
- (tmp[3] == rhstmp[3]);
+ U32* tmp = (U32*)mData;
+ U32* rhstmp = (U32*)rhs.mData;
+ // Note: binary & to avoid branching
+ return
+ (tmp[0] == rhstmp[0]) &
+ (tmp[1] == rhstmp[1]) &
+ (tmp[2] == rhstmp[2]) &
+ (tmp[3] == rhstmp[3]);
}
- bool LLUUID::operator!=(const LLUUID& rhs) const
+bool LLUUID::operator!=(const LLUUID& rhs) const
{
- U32 *tmp = (U32 *)mData;
- U32 *rhstmp = (U32 *)rhs.mData;
- // Note: binary | to avoid branching
- return
- (tmp[0] != rhstmp[0]) |
- (tmp[1] != rhstmp[1]) |
- (tmp[2] != rhstmp[2]) |
- (tmp[3] != rhstmp[3]);
+ U32* tmp = (U32*)mData;
+ U32* rhstmp = (U32*)rhs.mData;
+ // Note: binary | to avoid branching
+ return
+ (tmp[0] != rhstmp[0]) |
+ (tmp[1] != rhstmp[1]) |
+ (tmp[2] != rhstmp[2]) |
+ (tmp[3] != rhstmp[3]);
}
/*
@@ -983,94 +984,94 @@ LLUUID::LLUUID()
// to integers, among other things. Use isNull() or notNull().
LLUUID::operator bool() const
{
- U32 *word = (U32 *)mData;
- return (word[0] | word[1] | word[2] | word[3]) > 0;
+ U32 *word = (U32 *)mData;
+ return (word[0] | word[1] | word[2] | word[3]) > 0;
}
*/
- BOOL LLUUID::notNull() const
+BOOL LLUUID::notNull() const
{
- U32 *word = (U32 *)mData;
- return (word[0] | word[1] | word[2] | word[3]) > 0;
+ U32* word = (U32*)mData;
+ return (word[0] | word[1] | word[2] | word[3]) > 0;
}
// Faster than == LLUUID::null because doesn't require
// as much memory access.
- BOOL LLUUID::isNull() const
+BOOL LLUUID::isNull() const
{
- U32 *word = (U32 *)mData;
- // If all bits are zero, return !0 == TRUE
- return !(word[0] | word[1] | word[2] | word[3]);
+ U32* word = (U32*)mData;
+ // If all bits are zero, return !0 == TRUE
+ return !(word[0] | word[1] | word[2] | word[3]);
}
- LLUUID::LLUUID(const char *in_string)
+LLUUID::LLUUID(const char* in_string)
{
- if (!in_string || in_string[0] == 0)
- {
- setNull();
- return;
- }
-
- set(in_string);
+ if (!in_string || in_string[0] == 0)
+ {
+ setNull();
+ return;
+ }
+
+ set(in_string);
}
- LLUUID::LLUUID(const std::string& in_string)
+LLUUID::LLUUID(const std::string& in_string)
{
- if (in_string.empty())
- {
- setNull();
- return;
- }
+ if (in_string.empty())
+ {
+ setNull();
+ return;
+ }
- set(in_string);
+ set(in_string);
}
// IW: DON'T "optimize" these w/ U32s or you'll scoogie the sort order
// IW: this will make me very sad
- bool LLUUID::operator<(const LLUUID &rhs) const
+bool LLUUID::operator<(const LLUUID& rhs) const
{
- U32 i;
- for( i = 0; i < (UUID_BYTES - 1); i++ )
- {
- if( mData[i] != rhs.mData[i] )
- {
- return (mData[i] < rhs.mData[i]);
- }
- }
- return (mData[UUID_BYTES - 1] < rhs.mData[UUID_BYTES - 1]);
+ U32 i;
+ for (i = 0; i < (UUID_BYTES - 1); i++)
+ {
+ if (mData[i] != rhs.mData[i])
+ {
+ return (mData[i] < rhs.mData[i]);
+ }
+ }
+ return (mData[UUID_BYTES - 1] < rhs.mData[UUID_BYTES - 1]);
}
- bool LLUUID::operator>(const LLUUID &rhs) const
+bool LLUUID::operator>(const LLUUID& rhs) const
{
- U32 i;
- for( i = 0; i < (UUID_BYTES - 1); i++ )
- {
- if( mData[i] != rhs.mData[i] )
- {
- return (mData[i] > rhs.mData[i]);
- }
- }
- return (mData[UUID_BYTES - 1] > rhs.mData[UUID_BYTES - 1]);
+ U32 i;
+ for (i = 0; i < (UUID_BYTES - 1); i++)
+ {
+ if (mData[i] != rhs.mData[i])
+ {
+ return (mData[i] > rhs.mData[i]);
+ }
+ }
+ return (mData[UUID_BYTES - 1] > rhs.mData[UUID_BYTES - 1]);
}
- U16 LLUUID::getCRC16() const
+U16 LLUUID::getCRC16() const
{
- // A UUID is 16 bytes, or 8 shorts.
- U16 *short_data = (U16*)mData;
- U16 out = 0;
- out += short_data[0];
- out += short_data[1];
- out += short_data[2];
- out += short_data[3];
- out += short_data[4];
- out += short_data[5];
- out += short_data[6];
- out += short_data[7];
- return out;
+ // A UUID is 16 bytes, or 8 shorts.
+ U16* short_data = (U16*)mData;
+ U16 out = 0;
+ out += short_data[0];
+ out += short_data[1];
+ out += short_data[2];
+ out += short_data[3];
+ out += short_data[4];
+ out += short_data[5];
+ out += short_data[6];
+ out += short_data[7];
+ return out;
}
- U32 LLUUID::getCRC32() const
+U32 LLUUID::getCRC32() const
{
- U32 *tmp = (U32*)mData;
- return tmp[0] + tmp[1] + tmp[2] + tmp[3];
+ U32* tmp = (U32*)mData;
+ return tmp[0] + tmp[1] + tmp[2] + tmp[3];
}
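The lluuid.cpp hunks above are almost entirely re-indentation and pointer-spacing cleanup; behavior is unchanged. As a quick reminder of the surface being touched (illustration only, not part of the diff):

    // illustration only
    LLUUID id;
    id.generate();                          // time/node based, then hashed
    std::string text = id.asString();       // "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"

    LLUUID parsed;
    if (LLUUID::parseUUID(text, &parsed))   // trims, validates, then set()s
    {
        // parsed now round-trips back to the same value as id
    }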
diff --git a/indra/llcommon/llworkerthread.cpp b/indra/llcommon/llworkerthread.cpp
index e5eda1eac7..06c74bdba0 100644
--- a/indra/llcommon/llworkerthread.cpp
+++ b/indra/llcommon/llworkerthread.cpp
@@ -73,6 +73,7 @@ void LLWorkerThread::clearDeleteList()
{
worker->mRequestHandle = LLWorkerThread::nullHandle();
worker->clearFlags(LLWorkerClass::WCF_HAVE_WORK);
+ worker->clearFlags(LLWorkerClass::WCF_WORKING);
delete worker;
}
mDeleteList.clear() ;
@@ -97,6 +98,7 @@ size_t LLWorkerThread::update(F32 max_time_ms)
{
if (worker->getFlags(LLWorkerClass::WCF_WORK_FINISHED))
{
+ worker->setFlags(LLWorkerClass::WCF_DELETE_REQUESTED);
delete_list.push_back(worker);
mDeleteList.erase(curiter);
}
@@ -130,11 +132,11 @@ size_t LLWorkerThread::update(F32 max_time_ms)
//----------------------------------------------------------------------------
-LLWorkerThread::handle_t LLWorkerThread::addWorkRequest(LLWorkerClass* workerclass, S32 param, U32 priority)
+LLWorkerThread::handle_t LLWorkerThread::addWorkRequest(LLWorkerClass* workerclass, S32 param)
{
handle_t handle = generateHandle();
- WorkRequest* req = new WorkRequest(handle, priority, workerclass, param);
+ WorkRequest* req = new WorkRequest(handle, workerclass, param);
bool res = addRequest(req);
if (!res)
@@ -157,8 +159,8 @@ void LLWorkerThread::deleteWorker(LLWorkerClass* workerclass)
//============================================================================
// Runs on its OWN thread
-LLWorkerThread::WorkRequest::WorkRequest(handle_t handle, U32 priority, LLWorkerClass* workerclass, S32 param) :
- LLQueuedThread::QueuedRequest(handle, priority),
+LLWorkerThread::WorkRequest::WorkRequest(handle_t handle, LLWorkerClass* workerclass, S32 param) :
+ LLQueuedThread::QueuedRequest(handle),
mWorkerClass(workerclass),
mParam(param)
{
@@ -177,6 +179,7 @@ void LLWorkerThread::WorkRequest::deleteRequest()
// virtual
bool LLWorkerThread::WorkRequest::processRequest()
{
+ LL_PROFILE_ZONE_SCOPED;
LLWorkerClass* workerclass = getWorkerClass();
workerclass->setWorking(true);
bool complete = workerclass->doWork(getParam());
@@ -187,6 +190,7 @@ bool LLWorkerThread::WorkRequest::processRequest()
// virtual
void LLWorkerThread::WorkRequest::finishRequest(bool completed)
{
+ LL_PROFILE_ZONE_SCOPED;
LLWorkerClass* workerclass = getWorkerClass();
workerclass->finishWork(getParam(), completed);
U32 flags = LLWorkerClass::WCF_WORK_FINISHED | (completed ? 0 : LLWorkerClass::WCF_WORK_ABORTED);
@@ -200,7 +204,6 @@ LLWorkerClass::LLWorkerClass(LLWorkerThread* workerthread, const std::string& na
: mWorkerThread(workerthread),
mWorkerClassName(name),
mRequestHandle(LLWorkerThread::nullHandle()),
- mRequestPriority(LLWorkerThread::PRIORITY_NORMAL),
mMutex(),
mWorkFlags(0)
{
@@ -289,7 +292,7 @@ bool LLWorkerClass::yield()
//----------------------------------------------------------------------------
// calls startWork, adds doWork() to queue
-void LLWorkerClass::addWork(S32 param, U32 priority)
+void LLWorkerClass::addWork(S32 param)
{
mMutex.lock();
llassert_always(!(mWorkFlags & (WCF_WORKING|WCF_HAVE_WORK)));
@@ -303,7 +306,7 @@ void LLWorkerClass::addWork(S32 param, U32 priority)
startWork(param);
clearFlags(WCF_WORK_FINISHED|WCF_WORK_ABORTED);
setFlags(WCF_HAVE_WORK);
- mRequestHandle = mWorkerThread->addWorkRequest(this, param, priority);
+ mRequestHandle = mWorkerThread->addWorkRequest(this, param);
mMutex.unlock();
}
@@ -318,7 +321,6 @@ void LLWorkerClass::abortWork(bool autocomplete)
if (mRequestHandle != LLWorkerThread::nullHandle())
{
mWorkerThread->abortRequest(mRequestHandle, autocomplete);
- mWorkerThread->setPriority(mRequestHandle, LLQueuedThread::PRIORITY_IMMEDIATE);
setFlags(WCF_ABORT_REQUESTED);
}
mMutex.unlock();
@@ -392,16 +394,5 @@ void LLWorkerClass::scheduleDelete()
}
}
-void LLWorkerClass::setPriority(U32 priority)
-{
- mMutex.lock();
- if (mRequestHandle != LLWorkerThread::nullHandle() && mRequestPriority != priority)
- {
- mRequestPriority = priority;
- mWorkerThread->setPriority(mRequestHandle, priority);
- }
- mMutex.unlock();
-}
-
//============================================================================
diff --git a/indra/llcommon/llworkerthread.h b/indra/llcommon/llworkerthread.h
index e3004d7242..bf94c84090 100644
--- a/indra/llcommon/llworkerthread.h
+++ b/indra/llcommon/llworkerthread.h
@@ -56,7 +56,7 @@ public:
virtual ~WorkRequest(); // use deleteRequest()
public:
- WorkRequest(handle_t handle, U32 priority, LLWorkerClass* workerclass, S32 param);
+ WorkRequest(handle_t handle, LLWorkerClass* workerclass, S32 param);
S32 getParam()
{
@@ -90,7 +90,7 @@ public:
/*virtual*/ size_t update(F32 max_time_ms);
- handle_t addWorkRequest(LLWorkerClass* workerclass, S32 param, U32 priority = PRIORITY_NORMAL);
+ handle_t addWorkRequest(LLWorkerClass* workerclass, S32 param);
S32 getNumDeletes() { return (S32)mDeleteList.size(); } // debug
@@ -151,10 +151,6 @@ public:
bool isWorking() { return getFlags(WCF_WORKING); }
bool wasAborted() { return getFlags(WCF_ABORT_REQUESTED); }
- // setPriority(): changes the priority of a request
- void setPriority(U32 priority);
- U32 getPriority() { return mRequestPriority; }
-
const std::string& getName() const { return mWorkerClassName; }
protected:
@@ -169,7 +165,7 @@ protected:
void setWorkerThread(LLWorkerThread* workerthread);
// addWork(): calls startWork, adds doWork() to queue
- void addWork(S32 param, U32 priority = LLWorkerThread::PRIORITY_NORMAL);
+ void addWork(S32 param);
// abortWork(): requests that work be aborted
void abortWork(bool autocomplete);
@@ -193,7 +189,6 @@ protected:
LLWorkerThread* mWorkerThread;
std::string mWorkerClassName;
handle_t mRequestHandle;
- U32 mRequestPriority; // last priority set
private:
LLMutex mMutex;
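Since request priorities are gone from this API, the caller-side delta for worker subclasses is small (hypothetical sketch, not part of this diff):

    // illustration only -- inside an LLWorkerClass subclass
    // before: addWork(param, LLWorkerThread::PRIORITY_NORMAL);
    addWork(param);
    // setPriority()/getPriority() no longer exist, and abortWork() no longer
    // bumps the pending request to PRIORITY_IMMEDIATE.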
diff --git a/indra/llcommon/tests/apply_test.cpp b/indra/llcommon/tests/apply_test.cpp
new file mode 100644
index 0000000000..56b497e0c8
--- /dev/null
+++ b/indra/llcommon/tests/apply_test.cpp
@@ -0,0 +1,240 @@
+/**
+ * @file apply_test.cpp
+ * @author Nat Goodspeed
+ * @date 2022-12-19
+ * @brief Test for apply.
+ *
+ * $LicenseInfo:firstyear=2022&license=viewerlgpl$
+ * Copyright (c) 2022, Linden Research, Inc.
+ * $/LicenseInfo$
+ */
+
+// Precompiled header
+#include "linden_common.h"
+// associated header
+#include "apply.h"
+// STL headers
+// std headers
+#include <iomanip>
+// external library headers
+// other Linden headers
+#include "llsd.h"
+#include "llsdutil.h"
+#include <array>
+#include <string>
+#include <vector>
+
+// for ensure_equals
+std::ostream& operator<<(std::ostream& out, const std::vector<std::string>& stringvec)
+{
+ const char* delim = "[";
+ for (const auto& str : stringvec)
+ {
+ out << delim << std::quoted(str);
+ delim = ", ";
+ }
+ return out << ']';
+}
+
+// the above must be declared BEFORE ensure_equals(std::vector<std::string>)
+#include "../test/lltut.h"
+
+/*****************************************************************************
+* TUT
+*****************************************************************************/
+namespace tut
+{
+ namespace statics
+ {
+ /*------------------------------ data ------------------------------*/
+ // Although we're using types from the LLSD namespace, we're not
+ // constructing LLSD values, but rather instances of the C++ types
+ // supported by LLSD.
+ static LLSD::Boolean b{true};
+ static LLSD::Integer i{17};
+ static LLSD::Real f{3.14};
+ static LLSD::String s{ "hello" };
+ static LLSD::UUID uu{ "baadf00d-dead-beef-baad-feedb0ef" };
+ static LLSD::Date dt{ "2022-12-19" };
+ static LLSD::URI uri{ "http://secondlife.com" };
+ static LLSD::Binary bin{ 0x01, 0x02, 0x03, 0x04, 0x05 };
+
+ static std::vector<LLSD::String> quick
+ {
+ "The", "quick", "brown", "fox", "etc."
+ };
+
+ static std::array<int, 5> fibs
+ {
+ 0, 1, 1, 2, 3
+ };
+
+ // ensure that apply() actually reaches the target method --
+ // lack of ensure_equals() failure could be due to no-op apply()
+ bool called{ false };
+ // capture calls from collect()
+ std::vector<std::string> collected;
+
+ /*------------------------- test functions -------------------------*/
+ void various(LLSD::Boolean b, LLSD::Integer i, LLSD::Real f, const LLSD::String& s,
+ const LLSD::UUID& uu, const LLSD::Date& dt,
+ const LLSD::URI& uri, const LLSD::Binary& bin)
+ {
+ called = true;
+ ensure_equals( "b mismatch", b, statics::b);
+ ensure_equals( "i mismatch", i, statics::i);
+ ensure_equals( "f mismatch", f, statics::f);
+ ensure_equals( "s mismatch", s, statics::s);
+ ensure_equals( "uu mismatch", uu, statics::uu);
+ ensure_equals( "dt mismatch", dt, statics::dt);
+ ensure_equals("uri mismatch", uri, statics::uri);
+ ensure_equals("bin mismatch", bin, statics::bin);
+ }
+
+ void strings(std::string s0, std::string s1, std::string s2, std::string s3, std::string s4)
+ {
+ called = true;
+ ensure_equals("s0 mismatch", s0, statics::quick[0]);
+ ensure_equals("s1 mismatch", s1, statics::quick[1]);
+ ensure_equals("s2 mismatch", s2, statics::quick[2]);
+ ensure_equals("s3 mismatch", s3, statics::quick[3]);
+ ensure_equals("s4 mismatch", s4, statics::quick[4]);
+ }
+
+ void ints(int i0, int i1, int i2, int i3, int i4)
+ {
+ called = true;
+ ensure_equals("i0 mismatch", i0, statics::fibs[0]);
+ ensure_equals("i1 mismatch", i1, statics::fibs[1]);
+ ensure_equals("i2 mismatch", i2, statics::fibs[2]);
+ ensure_equals("i3 mismatch", i3, statics::fibs[3]);
+ ensure_equals("i4 mismatch", i4, statics::fibs[4]);
+ }
+
+ void sdfunc(const LLSD& sd)
+ {
+ called = true;
+ ensure_equals("sd mismatch", sd.asInteger(), statics::i);
+ }
+
+ void intfunc(int i)
+ {
+ called = true;
+ ensure_equals("i mismatch", i, statics::i);
+ }
+
+ void voidfunc()
+ {
+ called = true;
+ }
+
+ // recursion tail
+ void collect()
+ {
+ called = true;
+ }
+
+ // collect(arbitrary)
+ template <typename... ARGS>
+ void collect(const std::string& first, ARGS&&... rest)
+ {
+ statics::collected.push_back(first);
+ collect(std::forward<ARGS>(rest)...);
+ }
+ } // namespace statics
+
+ struct apply_data
+ {
+ apply_data()
+ {
+            // reset 'called' and 'collected' before each test
+ statics::called = false;
+ statics::collected.clear();
+ }
+ };
+ typedef test_group<apply_data> apply_group;
+ typedef apply_group::object object;
+ apply_group applygrp("apply");
+
+ template<> template<>
+ void object::test<1>()
+ {
+ set_test_name("apply(tuple)");
+ LL::apply(statics::various,
+ std::make_tuple(statics::b, statics::i, statics::f, statics::s,
+ statics::uu, statics::dt, statics::uri, statics::bin));
+ ensure("apply(tuple) failed", statics::called);
+ }
+
+ template<> template<>
+ void object::test<2>()
+ {
+ set_test_name("apply(array)");
+ LL::apply(statics::ints, statics::fibs);
+ ensure("apply(array) failed", statics::called);
+ }
+
+ template<> template<>
+ void object::test<3>()
+ {
+ set_test_name("apply(vector)");
+ LL::apply(statics::strings, statics::quick);
+ ensure("apply(vector) failed", statics::called);
+ }
+
+    // The various apply(LLSD) tests exercise only the success cases, because
+    // the failure cases trigger an assert() failure, which is hard to catch.
+ template<> template<>
+ void object::test<4>()
+ {
+ set_test_name("apply(LLSD())");
+ LL::apply(statics::voidfunc, LLSD());
+ ensure("apply(LLSD()) failed", statics::called);
+ }
+
+ template<> template<>
+ void object::test<5>()
+ {
+ set_test_name("apply(fn(int), LLSD scalar)");
+ LL::apply(statics::intfunc, LLSD(statics::i));
+ ensure("apply(fn(int), LLSD scalar) failed", statics::called);
+ }
+
+ template<> template<>
+ void object::test<6>()
+ {
+ set_test_name("apply(fn(LLSD), LLSD scalar)");
+ // This test verifies that LLSDParam<LLSD> doesn't send the compiler
+ // into infinite recursion when the target is itself LLSD.
+ LL::apply(statics::sdfunc, LLSD(statics::i));
+ ensure("apply(fn(LLSD), LLSD scalar) failed", statics::called);
+ }
+
+ template<> template<>
+ void object::test<7>()
+ {
+ set_test_name("apply(LLSD array)");
+ LL::apply(statics::various,
+ llsd::array(statics::b, statics::i, statics::f, statics::s,
+ statics::uu, statics::dt, statics::uri, statics::bin));
+ ensure("apply(LLSD array) failed", statics::called);
+ }
+
+ template<> template<>
+ void object::test<8>()
+ {
+ set_test_name("VAPPLY()");
+ // Make a std::array<std::string> from statics::quick. We can't call a
+ // variadic function with a data structure of dynamic length.
+ std::array<std::string, 5> strray;
+ for (size_t i = 0; i < strray.size(); ++i)
+ strray[i] = statics::quick[i];
+ // This doesn't work: the compiler doesn't know which overload of
+ // collect() to pass to LL::apply().
+ // LL::apply(statics::collect, strray);
+ // That's what VAPPLY() is for.
+ VAPPLY(statics::collect, strray);
+ ensure("VAPPLY() failed", statics::called);
+ ensure_equals("collected mismatch", statics::collected, statics::quick);
+ }
+} // namespace tut
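For reference, a minimal sketch of the call forms exercised by the tests above; free0 and sum3 are hypothetical illustration-only functions, and the LLSD overloads mirror the test usage rather than documenting the full API:

// Hypothetical targets, for illustration only.
static void free0() {}
static int sum3(int a, int b, int c) { return a + b + c; }

static void apply_sketch()
{
    LL::apply(sum3, std::make_tuple(1, 2, 3));      // tuple form
    LL::apply(sum3, std::array<int, 3>{ 1, 2, 3 }); // container form
    LL::apply(free0, LLSD());                       // undefined LLSD -> nullary call
    LL::apply(sum3, llsd::array(1, 2, 3));          // LLSD array -> positional args
}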
diff --git a/indra/llcommon/tests/lazyeventapi_test.cpp b/indra/llcommon/tests/lazyeventapi_test.cpp
new file mode 100644
index 0000000000..31b2d6d17f
--- /dev/null
+++ b/indra/llcommon/tests/lazyeventapi_test.cpp
@@ -0,0 +1,136 @@
+/**
+ * @file lazyeventapi_test.cpp
+ * @author Nat Goodspeed
+ * @date 2022-06-18
+ * @brief Test for lazyeventapi.
+ *
+ * $LicenseInfo:firstyear=2022&license=viewerlgpl$
+ * Copyright (c) 2022, Linden Research, Inc.
+ * $/LicenseInfo$
+ */
+
+// Precompiled header
+#include "linden_common.h"
+// associated header
+#include "lazyeventapi.h"
+// STL headers
+// std headers
+// external library headers
+// other Linden headers
+#include "../test/lltut.h"
+#include "llevents.h"
+#include "llsdutil.h"
+
+// observable side effect, solely for testing
+static LLSD data;
+
+// LLEventAPI listener subclass
+class MyListener: public LLEventAPI
+{
+public:
+ // need this trivial forwarding constructor
+ // (of course do any other initialization your subclass requires)
+ MyListener(const LL::LazyEventAPIParams& params):
+ LLEventAPI(params)
+ {}
+
+ // example operation, registered by LazyEventAPI subclass below
+ void set_data(const LLSD& event)
+ {
+ data = event["data"];
+ }
+};
+
+// LazyEventAPI registrar subclass
+class MyRegistrar: public LL::LazyEventAPI<MyListener>
+{
+ using super = LL::LazyEventAPI<MyListener>;
+ using super::listener;
+public:
+ // LazyEventAPI subclass initializes like a classic LLEventAPI subclass
+ // constructor, with API name and desc plus add() calls for the defined
+ // operations
+ MyRegistrar():
+ super("Test", "This is a test LLEventAPI")
+ {
+ add("set", "This is a set operation", &listener::set_data);
+ }
+};
+// Normally we'd declare a static instance of MyRegistrar -- but because we
+// want to test both with and without, defer declaration to individual test
+// methods.
+
+/*****************************************************************************
+* TUT
+*****************************************************************************/
+namespace tut
+{
+ struct lazyeventapi_data
+ {
+ lazyeventapi_data()
+ {
+ // before every test, reset 'data'
+ data.clear();
+ }
+ ~lazyeventapi_data()
+ {
+ // after every test, reset LLEventPumps
+ LLEventPumps::deleteSingleton();
+ }
+ };
+ typedef test_group<lazyeventapi_data> lazyeventapi_group;
+ typedef lazyeventapi_group::object object;
+ lazyeventapi_group lazyeventapigrp("lazyeventapi");
+
+ template<> template<>
+ void object::test<1>()
+ {
+ set_test_name("LazyEventAPI");
+        // this is where the magic should happen
+        // 'register' is a reserved keyword, hence the variable name 'regster'
+ MyRegistrar regster;
+ LLEventPumps::instance().obtain("Test").post(llsd::map("op", "set", "data", "hey"));
+ ensure_equals("failed to set data", data.asString(), "hey");
+ }
+
+ template<> template<>
+ void object::test<2>()
+ {
+ set_test_name("No LazyEventAPI");
+        // Because the MyRegistrar declaration in test<1>() is local and has
+        // since been destroyed, we fully expect NOT to reach a MyListener
+        // instance with this post.
+ LLEventPumps::instance().obtain("Test").post(llsd::map("op", "set", "data", "moot"));
+ ensure("accidentally set data", ! data.isDefined());
+ }
+
+ template<> template<>
+ void object::test<3>()
+ {
+ set_test_name("LazyEventAPI metadata");
+ MyRegistrar regster;
+ // Of course we have 'regster' in hand; we don't need to search for
+ // it. But this next test verifies that we can find (all) LazyEventAPI
+ // instances using LazyEventAPIBase::instance_snapshot. Normally we
+ // wouldn't search; normally we'd just look at each instance in the
+ // loop body.
+ const MyRegistrar* found = nullptr;
+ for (const auto& registrar : LL::LazyEventAPIBase::instance_snapshot())
+ if ((found = dynamic_cast<const MyRegistrar*>(&registrar)))
+ break;
+ ensure("Failed to find MyRegistrar via LLInstanceTracker", found);
+
+ ensure_equals("wrong API name", found->getName(), "Test");
+ ensure_contains("wrong API desc", found->getDesc(), "test LLEventAPI");
+ ensure_equals("wrong API field", found->getDispatchKey(), "op");
+ // Normally we'd just iterate over *found. But for test purposes,
+ // actually capture the range of NameDesc pairs in a vector.
+ std::vector<LL::LazyEventAPIBase::NameDesc> ops{ found->begin(), found->end() };
+ ensure_equals("failed to find operations", ops.size(), 1);
+ ensure_equals("wrong operation name", ops[0].first, "set");
+ ensure_contains("wrong operation desc", ops[0].second, "set operation");
+ LLSD metadata{ found->getMetadata(ops[0].first) };
+ ensure_equals("bad metadata name", metadata["name"].asString(), ops[0].first);
+ ensure_equals("bad metadata desc", metadata["desc"].asString(), ops[0].second);
+ }
+} // namespace tut
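Outside of tests, the comment above suggests the normal pattern is a static registrar instance, so registration happens once at startup; a sketch reusing the MyRegistrar type defined above:

// Sketch only: in production code the registrar would live at namespace scope.
static MyRegistrar sTestRegistrar;

static void poke()
{
    // Instantiates MyListener on demand and dispatches to set_data().
    LLEventPumps::instance().obtain("Test").post(
        llsd::map("op", "set", "data", "some value"));
}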
diff --git a/indra/llcommon/tests/lleventdispatcher_test.cpp b/indra/llcommon/tests/lleventdispatcher_test.cpp
index 466f11f52a..b0c532887c 100644
--- a/indra/llcommon/tests/lleventdispatcher_test.cpp
+++ b/indra/llcommon/tests/lleventdispatcher_test.cpp
@@ -18,9 +18,12 @@
// external library headers
// other Linden headers
#include "../test/lltut.h"
+#include "lleventfilter.h"
#include "llsd.h"
#include "llsdutil.h"
+#include "llevents.h"
#include "stringize.h"
+#include "StringVec.h"
#include "tests/wrapllerrs.h"
#include "../test/catch_and_store_what_in.h"
#include "../test/debug.h"
@@ -32,8 +35,6 @@
#include <boost/bind.hpp>
#include <boost/function.hpp>
#include <boost/range.hpp>
-#include <boost/foreach.hpp>
-#define foreach BOOST_FOREACH
#include <boost/lambda/lambda.hpp>
@@ -177,6 +178,7 @@ struct Vars
/*-------- Arbitrary-params (non-const, const, static) methods ---------*/
void methodna(NPARAMSa)
{
+ DEBUG;
// Because our const char* param cp might be NULL, and because we
// intend to capture the value in a std::string, have to distinguish
// between the NULL value and any non-NULL value. Use a convention
@@ -188,7 +190,7 @@ struct Vars
else
vcp = std::string("'") + cp + "'";
- debug()("methodna(", b,
+ this->debug()("methodna(", b,
", ", i,
", ", f,
", ", d,
@@ -205,7 +207,7 @@ struct Vars
void methodnb(NPARAMSb)
{
std::ostringstream vbin;
- foreach(U8 byte, bin)
+ for (U8 byte: bin)
{
vbin << std::hex << std::setfill('0') << std::setw(2) << unsigned(byte);
}
@@ -226,7 +228,8 @@ struct Vars
void cmethodna(NPARAMSa) const
{
- debug()('c', NONL);
+ DEBUG;
+ this->debug()('c', NONL);
const_cast<Vars*>(this)->methodna(NARGSa);
}
@@ -315,6 +318,31 @@ void freenb(NPARAMSb)
*****************************************************************************/
namespace tut
{
+ void ensure_has(const std::string& outer, const std::string& inner)
+ {
+ ensure(stringize("'", outer, "' does not contain '", inner, "'"),
+ outer.find(inner) != std::string::npos);
+ }
+
+ template <typename CALLABLE>
+ std::string call_exc(CALLABLE&& func, const std::string& exc_frag)
+ {
+ std::string what =
+ catch_what<LLEventDispatcher::DispatchError>(std::forward<CALLABLE>(func));
+ ensure_has(what, exc_frag);
+ return what;
+ }
+
+ template <typename CALLABLE>
+ void call_logerr(CALLABLE&& func, const std::string& frag)
+ {
+ CaptureLog capture;
+ // the error should be logged; we just need to stop the exception
+ // propagating
+ catch_what<LLEventDispatcher::DispatchError>(std::forward<CALLABLE>(func));
+ capture.messageWith(frag);
+ }
+
struct lleventdispatcher_data
{
Debug debug{"test"};
@@ -397,9 +425,9 @@ namespace tut
work.add(name, desc, &Dispatcher::cmethod1, required);
// Non-subclass method with/out required params
addf("method1", "method1", &v);
- work.add(name, desc, boost::bind(&Vars::method1, boost::ref(v), _1));
+ work.add(name, desc, [this](const LLSD& args){ return v.method1(args); });
addf("method1_req", "method1", &v);
- work.add(name, desc, boost::bind(&Vars::method1, boost::ref(v), _1), required);
+ work.add(name, desc, [this](const LLSD& args){ return v.method1(args); }, required);
/*--------------- Arbitrary params, array style ----------------*/
@@ -461,7 +489,7 @@ namespace tut
debug("dft_array_full:\n",
dft_array_full);
// Partial defaults arrays.
- foreach(LLSD::String a, ab)
+ for (LLSD::String a: ab)
{
LLSD::Integer partition(std::min(partial_offset, dft_array_full[a].size()));
dft_array_partial[a] =
@@ -471,7 +499,7 @@ namespace tut
debug("dft_array_partial:\n",
dft_array_partial);
- foreach(LLSD::String a, ab)
+ for(LLSD::String a: ab)
{
// Generate full defaults maps by zipping (params, dft_array_full).
dft_map_full[a] = zipmap(params[a], dft_array_full[a]);
@@ -583,6 +611,7 @@ namespace tut
void addf(const std::string& n, const std::string& d, Vars* v)
{
+ debug("addf('", n, "', '", d, "')");
// This method is to capture in our own DescMap the name and
// description of every registered function, for metadata query
// testing.
@@ -598,19 +627,14 @@ namespace tut
{
// Copy descs to a temp map of same type.
DescMap forgotten(descs.begin(), descs.end());
- // LLEventDispatcher intentionally provides only const_iterator:
- // since dereferencing that iterator generates values on the fly,
- // it's meaningless to have a modifiable iterator. But since our
- // 'work' object isn't const, by default BOOST_FOREACH() wants to
- // use non-const iterators. Persuade it to use the const_iterator.
- foreach(LLEventDispatcher::NameDesc nd, const_cast<const Dispatcher&>(work))
+ for (LLEventDispatcher::NameDesc nd: work)
{
DescMap::iterator found = forgotten.find(nd.first);
- ensure(STRINGIZE("LLEventDispatcher records function '" << nd.first
- << "' we didn't enter"),
+ ensure(stringize("LLEventDispatcher records function '", nd.first,
+ "' we didn't enter"),
found != forgotten.end());
- ensure_equals(STRINGIZE("LLEventDispatcher desc '" << nd.second <<
- "' doesn't match what we entered: '" << found->second << "'"),
+ ensure_equals(stringize("LLEventDispatcher desc '", nd.second,
+ "' doesn't match what we entered: '", found->second, "'"),
nd.second, found->second);
// found in our map the name from LLEventDispatcher, good, erase
// our map entry
@@ -621,41 +645,49 @@ namespace tut
std::ostringstream out;
out << "LLEventDispatcher failed to report";
const char* delim = ": ";
- foreach(const DescMap::value_type& fme, forgotten)
+ for (const DescMap::value_type& fme: forgotten)
{
out << delim << fme.first;
delim = ", ";
}
- ensure(out.str(), false);
+ throw failure(out.str());
}
}
Vars* varsfor(const std::string& name)
{
VarsMap::const_iterator found = funcvars.find(name);
- ensure(STRINGIZE("No Vars* for " << name), found != funcvars.end());
- ensure(STRINGIZE("NULL Vars* for " << name), found->second);
+ ensure(stringize("No Vars* for ", name), found != funcvars.end());
+ ensure(stringize("NULL Vars* for ", name), found->second);
return found->second;
}
- void ensure_has(const std::string& outer, const std::string& inner)
+ std::string call_exc(const std::string& func, const LLSD& args, const std::string& exc_frag)
{
- ensure(STRINGIZE("'" << outer << "' does not contain '" << inner << "'").c_str(),
- outer.find(inner) != std::string::npos);
+ return tut::call_exc(
+ [this, func, args]()
+ {
+ if (func.empty())
+ {
+ work(args);
+ }
+ else
+ {
+ work(func, args);
+ }
+ },
+ exc_frag);
}
- void call_exc(const std::string& func, const LLSD& args, const std::string& exc_frag)
+ void call_logerr(const std::string& func, const LLSD& args, const std::string& frag)
{
- std::string threw = catch_what<std::runtime_error>([this, &func, &args](){
- work(func, args);
- });
- ensure_has(threw, exc_frag);
+ tut::call_logerr([this, func, args](){ work(func, args); }, frag);
}
LLSD getMetadata(const std::string& name)
{
LLSD meta(work.getMetadata(name));
- ensure(STRINGIZE("No metadata for " << name), meta.isDefined());
+ ensure(stringize("No metadata for ", name), meta.isDefined());
return meta;
}
@@ -724,7 +756,7 @@ namespace tut
set_test_name("map-style registration with non-array params");
// Pass "param names" as scalar or as map
LLSD attempts(llsd::array(17, LLSDMap("pi", 3.14)("two", 2)));
- foreach(LLSD ae, inArray(attempts))
+ for (LLSD ae: inArray(attempts))
{
std::string threw = catch_what<std::exception>([this, &ae](){
work.add("freena_err", "freena", freena, ae);
@@ -799,7 +831,7 @@ namespace tut
{
set_test_name("query Callables with/out required params");
LLSD names(llsd::array("free1", "Dmethod1", "Dcmethod1", "method1"));
- foreach(LLSD nm, inArray(names))
+ for (LLSD nm: inArray(names))
{
LLSD metadata(getMetadata(nm));
ensure_equals("name mismatch", metadata["name"], nm);
@@ -828,19 +860,19 @@ namespace tut
(5, llsd::array("freena_array", "smethodna_array", "methodna_array")),
llsd::array
(5, llsd::array("freenb_array", "smethodnb_array", "methodnb_array"))));
- foreach(LLSD ae, inArray(expected))
+ for (LLSD ae: inArray(expected))
{
LLSD::Integer arity(ae[0].asInteger());
LLSD names(ae[1]);
LLSD req(LLSD::emptyArray());
if (arity)
req[arity - 1] = LLSD();
- foreach(LLSD nm, inArray(names))
+ for (LLSD nm: inArray(names))
{
LLSD metadata(getMetadata(nm));
ensure_equals("name mismatch", metadata["name"], nm);
ensure_equals(metadata["desc"].asString(), descs[nm]);
- ensure_equals(STRINGIZE("mismatched required for " << nm.asString()),
+ ensure_equals(stringize("mismatched required for ", nm.asString()),
metadata["required"], req);
ensure("should not have optional", metadata["optional"].isUndefined());
}
@@ -854,7 +886,7 @@ namespace tut
// - (Free function | non-static method), map style, no params (ergo
// no defaults)
LLSD names(llsd::array("free0_map", "smethod0_map", "method0_map"));
- foreach(LLSD nm, inArray(names))
+ for (LLSD nm: inArray(names))
{
LLSD metadata(getMetadata(nm));
ensure_equals("name mismatch", metadata["name"], nm);
@@ -884,7 +916,7 @@ namespace tut
llsd::array("smethodnb_map_adft", "smethodnb_map_mdft"),
llsd::array("methodna_map_adft", "methodna_map_mdft"),
llsd::array("methodnb_map_adft", "methodnb_map_mdft")));
- foreach(LLSD eq, inArray(equivalences))
+ for (LLSD eq: inArray(equivalences))
{
LLSD adft(eq[0]);
LLSD mdft(eq[1]);
@@ -898,8 +930,8 @@ namespace tut
ensure_equals("mdft name", mdft, mmeta["name"]);
ameta.erase("name");
mmeta.erase("name");
- ensure_equals(STRINGIZE("metadata for " << adft.asString()
- << " vs. " << mdft.asString()),
+ ensure_equals(stringize("metadata for ", adft.asString(),
+ " vs. ", mdft.asString()),
ameta, mmeta);
}
}
@@ -915,7 +947,7 @@ namespace tut
// params are required. Also maps containing left requirements for
// partial defaults arrays. Also defaults maps from defaults arrays.
LLSD allreq, leftreq, rightdft;
- foreach(LLSD::String a, ab)
+ for (LLSD::String a: ab)
{
// The map in which all params are required uses params[a] as
// keys, with all isUndefined() as values. We can accomplish that
@@ -943,9 +975,9 @@ namespace tut
// Generate maps containing parameter names not provided by the
// dft_map_partial maps.
LLSD skipreq(allreq);
- foreach(LLSD::String a, ab)
+ for (LLSD::String a: ab)
{
- foreach(const MapEntry& me, inMap(dft_map_partial[a]))
+ for (const MapEntry& me: inMap(dft_map_partial[a]))
{
skipreq[a].erase(me.first);
}
@@ -990,7 +1022,7 @@ namespace tut
(llsd::array("freenb_map_mdft", "smethodnb_map_mdft", "methodnb_map_mdft"),
llsd::array(LLSD::emptyMap(), dft_map_full["b"])))); // required, optional
- foreach(LLSD grp, inArray(groups))
+ for (LLSD grp: inArray(groups))
{
// Internal structure of each group in 'groups':
LLSD names(grp[0]);
@@ -1003,14 +1035,14 @@ namespace tut
optional);
// Loop through 'names'
- foreach(LLSD nm, inArray(names))
+ for (LLSD nm: inArray(names))
{
LLSD metadata(getMetadata(nm));
ensure_equals("name mismatch", metadata["name"], nm);
ensure_equals(nm.asString(), metadata["desc"].asString(), descs[nm]);
- ensure_equals(STRINGIZE(nm << " required mismatch"),
+ ensure_equals(stringize(nm, " required mismatch"),
metadata["required"], required);
- ensure_equals(STRINGIZE(nm << " optional mismatch"),
+ ensure_equals(stringize(nm, " optional mismatch"),
metadata["optional"], optional);
}
}
@@ -1031,13 +1063,7 @@ namespace tut
{
set_test_name("call with bad name");
call_exc("freek", LLSD(), "not found");
- // We don't have a comparable helper function for the one-arg
- // operator() method, and it's not worth building one just for this
- // case. Write it out.
- std::string threw = catch_what<std::runtime_error>([this](){
- work(LLSDMap("op", "freek"));
- });
- ensure_has(threw, "bad");
+ std::string threw = call_exc("", LLSDMap("op", "freek"), "bad");
ensure_has(threw, "op");
ensure_has(threw, "freek");
}
@@ -1079,7 +1105,7 @@ namespace tut
// LLSD value matching 'required' according to llsd_matches() rules.
LLSD matching(LLSDMap("d", 3.14)("array", llsd::array("answer", true, answer)));
// Okay, walk through 'tests'.
- foreach(const CallablesTriple& tr, tests)
+ for (const CallablesTriple& tr: tests)
{
// Should be able to pass 'answer' to Callables registered
// without 'required'.
@@ -1087,7 +1113,7 @@ namespace tut
ensure_equals("answer mismatch", tr.llsd, answer);
// Should NOT be able to pass 'answer' to Callables registered
// with 'required'.
- call_exc(tr.name_req, answer, "bad request");
+ call_logerr(tr.name_req, answer, "bad request");
// But SHOULD be able to pass 'matching' to Callables registered
// with 'required'.
work(tr.name_req, matching);
@@ -1101,17 +1127,20 @@ namespace tut
set_test_name("passing wrong args to (map | array)-style registrations");
// Pass scalar/map to array-style functions, scalar/array to map-style
- // functions. As that validation happens well before we engage the
- // argument magic, it seems pointless to repeat this with every
- // variation: (free function | non-static method), (no | arbitrary)
- // args. We should only need to engage it for one map-style
- // registration and one array-style registration.
- std::string array_exc("needs an args array");
- call_exc("free0_array", 17, array_exc);
- call_exc("free0_array", LLSDMap("pi", 3.14), array_exc);
+ // functions. It seems pointless to repeat this with every variation:
+ // (free function | non-static method), (no | arbitrary) args. We
+ // should only need to engage it for one map-style registration and
+ // one array-style registration.
+ // Now that LLEventDispatcher has been extended to treat an LLSD
+ // scalar as a single-entry array, the error we expect in this case is
+ // that apply() is trying to pass that non-empty array to a nullary
+ // function.
+ call_logerr("free0_array", 17, "LL::apply");
+ // similarly, apply() doesn't accept an LLSD Map
+ call_logerr("free0_array", LLSDMap("pi", 3.14), "unsupported");
std::string map_exc("needs a map");
- call_exc("free0_map", 17, map_exc);
+ call_logerr("free0_map", 17, map_exc);
// Passing an array to a map-style function works now! No longer an
// error case!
// call_exc("free0_map", llsd::array("a", "b"), map_exc);
@@ -1125,7 +1154,7 @@ namespace tut
("free0_array", "free0_map",
"smethod0_array", "smethod0_map",
"method0_array", "method0_map"));
- foreach(LLSD name, inArray(names))
+ for (LLSD name: inArray(names))
{
// Look up the Vars instance for this function.
Vars* vars(varsfor(name));
@@ -1150,15 +1179,21 @@ namespace tut
template<> template<>
void object::test<19>()
{
- set_test_name("call array-style functions with too-short arrays");
- // Could have two different too-short arrays, one for *na and one for
- // *nb, but since they both take 5 params...
+ set_test_name("call array-style functions with wrong-length arrays");
+ // Could have different wrong-length arrays for *na and for *nb, but
+ // since they both take 5 params...
LLSD tooshort(llsd::array("this", "array", "too", "short"));
- foreach(const LLSD& funcsab, inArray(array_funcs))
+ LLSD toolong (llsd::array("this", "array", "is", "one", "too", "long"));
+ LLSD badargs (llsd::array(tooshort, toolong));
+ for (const LLSD& toosomething: inArray(badargs))
{
- foreach(const llsd::MapEntry& e, inMap(funcsab))
+ for (const LLSD& funcsab: inArray(array_funcs))
{
- call_exc(e.second, tooshort, "requires more arguments");
+ for (const llsd::MapEntry& e: inMap(funcsab))
+ {
+ // apply() complains about wrong number of array entries
+ call_logerr(e.second, toosomething, "LL::apply");
+ }
}
}
}
@@ -1166,7 +1201,7 @@ namespace tut
template<> template<>
void object::test<20>()
{
- set_test_name("call array-style functions with (just right | too long) arrays");
+ set_test_name("call array-style functions with right-size arrays");
std::vector<U8> binary;
for (size_t h(0x01), i(0); i < 5; h+= 0x22, ++i)
{
@@ -1178,40 +1213,25 @@ namespace tut
LLDate("2011-02-03T15:07:00Z"),
LLURI("http://secondlife.com"),
binary)));
- LLSD argsplus(args);
- argsplus["a"].append("bogus");
- argsplus["b"].append("bogus");
LLSD expect;
- foreach(LLSD::String a, ab)
+ for (LLSD::String a: ab)
{
expect[a] = zipmap(params[a], args[a]);
}
// Adjust expect["a"]["cp"] for special Vars::cp treatment.
- expect["a"]["cp"] = std::string("'") + expect["a"]["cp"].asString() + "'";
+ expect["a"]["cp"] = stringize("'", expect["a"]["cp"].asString(), "'");
debug("expect: ", expect);
- // Use substantially the same logic for args and argsplus
- LLSD argsarrays(llsd::array(args, argsplus));
- // So i==0 selects 'args', i==1 selects argsplus
- for (LLSD::Integer i(0), iend(argsarrays.size()); i < iend; ++i)
+ for (const LLSD& funcsab: inArray(array_funcs))
{
- foreach(const LLSD& funcsab, inArray(array_funcs))
+ for (LLSD::String a: ab)
{
- foreach(LLSD::String a, ab)
- {
- // Reset the Vars instance before each call
- Vars* vars(varsfor(funcsab[a]));
- *vars = Vars();
- work(funcsab[a], argsarrays[i][a]);
- ensure_llsd(STRINGIZE(funcsab[a].asString() <<
- ": expect[\"" << a << "\"] mismatch"),
- vars->inspect(), expect[a], 7); // 7 bits ~= 2 decimal digits
-
- // TODO: in the i==1 or argsplus case, intercept LL_WARNS
- // output? Even without that, using argsplus verifies that
- // passing too many args isn't fatal; it works -- but
- // would be nice to notice the warning too.
- }
+ // Reset the Vars instance before each call
+ Vars* vars(varsfor(funcsab[a]));
+ *vars = Vars();
+ work(funcsab[a], args[a]);
+ ensure_llsd(stringize(funcsab[a].asString(), ": expect[\"", a, "\"] mismatch"),
+ vars->inspect(), expect[a], 7); // 7 bits ~= 2 decimal digits
}
}
}
@@ -1239,7 +1259,7 @@ namespace tut
("a", llsd::array(false, 255, 98.6, 1024.5, "pointer"))
("b", llsd::array("object", LLUUID::generateNewID(), LLDate::now(), LLURI("http://wiki.lindenlab.com/wiki"), LLSD::Binary(boost::begin(binary), boost::end(binary)))));
LLSD array_overfull(array_full);
- foreach(LLSD::String a, ab)
+ for (LLSD::String a: ab)
{
array_overfull[a].append("bogus");
}
@@ -1253,7 +1273,7 @@ namespace tut
ensure_not_equals("UUID collision",
array_full["b"][1].asUUID(), dft_array_full["b"][1].asUUID());
LLSD map_full, map_overfull;
- foreach(LLSD::String a, ab)
+ for (LLSD::String a: ab)
{
map_full[a] = zipmap(params[a], array_full[a]);
map_overfull[a] = map_full[a];
@@ -1294,21 +1314,360 @@ namespace tut
"freenb_map_mdft", "smethodnb_map_mdft", "methodnb_map_mdft")));
// Treat (full | overfull) (array | map) the same.
LLSD argssets(llsd::array(array_full, array_overfull, map_full, map_overfull));
- foreach(const LLSD& args, inArray(argssets))
+ for (const LLSD& args: inArray(argssets))
{
- foreach(LLSD::String a, ab)
+ for (LLSD::String a: ab)
{
- foreach(LLSD::String name, inArray(names[a]))
+ for (LLSD::String name: inArray(names[a]))
{
// Reset the Vars instance
Vars* vars(varsfor(name));
*vars = Vars();
work(name, args[a]);
- ensure_llsd(STRINGIZE(name << ": expect[\"" << a << "\"] mismatch"),
+ ensure_llsd(stringize(name, ": expect[\"", a, "\"] mismatch"),
vars->inspect(), expect[a], 7); // 7 bits, 2 decimal digits
// intercept LL_WARNS for the two overfull cases?
}
}
}
}
+
+ struct DispatchResult: public LLDispatchListener
+ {
+ using DR = DispatchResult;
+
+ DispatchResult(): LLDispatchListener("results", "op")
+ {
+ add("strfunc", "return string", &DR::strfunc);
+ add("voidfunc", "void function", &DR::voidfunc);
+ add("emptyfunc", "return empty LLSD", &DR::emptyfunc);
+ add("intfunc", "return Integer LLSD", &DR::intfunc);
+ add("llsdfunc", "return passed LLSD", &DR::llsdfunc);
+ add("mapfunc", "return map LLSD", &DR::mapfunc);
+ add("arrayfunc", "return array LLSD", &DR::arrayfunc);
+ }
+
+ std::string strfunc(const std::string& str) const { return "got " + str; }
+ void voidfunc() const {}
+ LLSD emptyfunc() const { return {}; }
+ int intfunc(int i) const { return -i; }
+ LLSD llsdfunc(const LLSD& event) const
+ {
+ LLSD result{ event };
+ result["with"] = "string";
+ return result;
+ }
+ LLSD mapfunc(int i, const std::string& str) const
+ {
+ return llsd::map("i", intfunc(i), "str", strfunc(str));
+ }
+ LLSD arrayfunc(int i, const std::string& str) const
+ {
+ return llsd::array(intfunc(i), strfunc(str));
+ }
+ };
+
+ template<> template<>
+ void object::test<23>()
+ {
+ set_test_name("string result");
+ DispatchResult service;
+ LLSD result{ service("strfunc", "a string") };
+ ensure_equals("strfunc() mismatch", result.asString(), "got a string");
+ }
+
+ template<> template<>
+ void object::test<24>()
+ {
+ set_test_name("void result");
+ DispatchResult service;
+ LLSD result{ service("voidfunc", LLSD()) };
+ ensure("voidfunc() returned defined", result.isUndefined());
+ }
+
+ template<> template<>
+ void object::test<25>()
+ {
+ set_test_name("Integer result");
+ DispatchResult service;
+ LLSD result{ service("intfunc", -17) };
+ ensure_equals("intfunc() mismatch", result.asInteger(), 17);
+ }
+
+ template<> template<>
+ void object::test<26>()
+ {
+ set_test_name("LLSD echo");
+ DispatchResult service;
+ LLSD result{ service("llsdfunc", llsd::map("op", "llsdfunc", "reqid", 17)) };
+ ensure_equals("llsdfunc() mismatch", result,
+ llsd::map("op", "llsdfunc", "reqid", 17, "with", "string"));
+ }
+
+ template<> template<>
+ void object::test<27>()
+ {
+ set_test_name("map LLSD result");
+ DispatchResult service;
+ LLSD result{ service("mapfunc", llsd::array(-12, "value")) };
+ ensure_equals("mapfunc() mismatch", result, llsd::map("i", 12, "str", "got value"));
+ }
+
+ template<> template<>
+ void object::test<28>()
+ {
+ set_test_name("array LLSD result");
+ DispatchResult service;
+ LLSD result{ service("arrayfunc", llsd::array(-8, "word")) };
+ ensure_equals("arrayfunc() mismatch", result, llsd::array(8, "got word"));
+ }
+
+ template<> template<>
+ void object::test<29>()
+ {
+ set_test_name("listener error, no reply");
+ DispatchResult service;
+ tut::call_exc(
+ [&service]()
+ { service.post(llsd::map("op", "nosuchfunc", "reqid", 17)); },
+ "nosuchfunc");
+ }
+
+ template<> template<>
+ void object::test<30>()
+ {
+ set_test_name("listener error with reply");
+ DispatchResult service;
+ LLCaptureListener<LLSD> result;
+ service.post(llsd::map("op", "nosuchfunc", "reqid", 17, "reply", result.getName()));
+ LLSD reply{ result.get() };
+ ensure("no reply", reply.isDefined());
+ ensure_equals("reqid not echoed", reply["reqid"].asInteger(), 17);
+ ensure_has(reply["error"].asString(), "nosuchfunc");
+ }
+
+ template<> template<>
+ void object::test<31>()
+ {
+ set_test_name("listener call to void function");
+ DispatchResult service;
+ LLCaptureListener<LLSD> result;
+ result.set("non-empty");
+ for (const auto& func: StringVec{ "voidfunc", "emptyfunc" })
+ {
+ service.post(llsd::map(
+ "op", func,
+ "reqid", 17,
+ "reply", result.getName()));
+ ensure_equals("reply from " + func, result.get().asString(), "non-empty");
+ }
+ }
+
+ template<> template<>
+ void object::test<32>()
+ {
+ set_test_name("listener call to string function");
+ DispatchResult service;
+ LLCaptureListener<LLSD> result;
+ service.post(llsd::map(
+ "op", "strfunc",
+ "args", llsd::array("a string"),
+ "reqid", 17,
+ "reply", result.getName()));
+ LLSD reply{ result.get() };
+ ensure_equals("reqid not echoed", reply["reqid"].asInteger(), 17);
+ ensure_equals("bad reply from strfunc", reply["data"].asString(), "got a string");
+ }
+
+ template<> template<>
+ void object::test<33>()
+ {
+ set_test_name("listener call to map function");
+ DispatchResult service;
+ LLCaptureListener<LLSD> result;
+ service.post(llsd::map(
+ "op", "mapfunc",
+ "args", llsd::array(-7, "value"),
+ "reqid", 17,
+ "reply", result.getName()));
+ LLSD reply{ result.get() };
+ ensure_equals("reqid not echoed", reply["reqid"].asInteger(), 17);
+ ensure_equals("bad i from mapfunc", reply["i"].asInteger(), 7);
+ ensure_equals("bad str from mapfunc", reply["str"], "got value");
+ }
+
+ template<> template<>
+ void object::test<34>()
+ {
+ set_test_name("batched map success");
+ DispatchResult service;
+ LLCaptureListener<LLSD> result;
+ service.post(llsd::map(
+ "op", llsd::map(
+ "strfunc", "some string",
+ "intfunc", 2,
+ "voidfunc", LLSD(),
+ "arrayfunc", llsd::array(-5, "other string")),
+ "reqid", 17,
+ "reply", result.getName()));
+ LLSD reply{ result.get() };
+ ensure_equals("reqid not echoed", reply["reqid"].asInteger(), 17);
+ reply.erase("reqid");
+ ensure_equals(
+ "bad map batch",
+ reply,
+ llsd::map(
+ "strfunc", "got some string",
+ "intfunc", -2,
+ "voidfunc", LLSD(),
+ "arrayfunc", llsd::array(5, "got other string")));
+ }
+
+ template<> template<>
+ void object::test<35>()
+ {
+ set_test_name("batched map error");
+ DispatchResult service;
+ LLCaptureListener<LLSD> result;
+ service.post(llsd::map(
+ "op", llsd::map(
+ "badfunc", 34, // !
+ "strfunc", "some string",
+ "intfunc", 2,
+ "missing", LLSD(), // !
+ "voidfunc", LLSD(),
+ "arrayfunc", llsd::array(-5, "other string")),
+ "reqid", 17,
+ "reply", result.getName()));
+ LLSD reply{ result.get() };
+ ensure_equals("reqid not echoed", reply["reqid"].asInteger(), 17);
+ reply.erase("reqid");
+ auto error{ reply["error"].asString() };
+ reply.erase("error");
+ ensure_has(error, "badfunc");
+ ensure_has(error, "missing");
+ ensure_equals(
+ "bad partial batch",
+ reply,
+ llsd::map(
+ "strfunc", "got some string",
+ "intfunc", -2,
+ "voidfunc", LLSD(),
+ "arrayfunc", llsd::array(5, "got other string")));
+ }
+
+ template<> template<>
+ void object::test<36>()
+ {
+ set_test_name("batched map exception");
+ DispatchResult service;
+ auto error = tut::call_exc(
+ [&service]()
+ {
+ service.post(llsd::map(
+ "op", llsd::map(
+ "badfunc", 34, // !
+ "strfunc", "some string",
+ "intfunc", 2,
+ "missing", LLSD(), // !
+ "voidfunc", LLSD(),
+ "arrayfunc", llsd::array(-5, "other string")),
+ "reqid", 17));
+ // no "reply"
+ },
+ "badfunc");
+ ensure_has(error, "missing");
+ }
+
+ template<> template<>
+ void object::test<37>()
+ {
+ set_test_name("batched array success");
+ DispatchResult service;
+ LLCaptureListener<LLSD> result;
+ service.post(llsd::map(
+ "op", llsd::array(
+ llsd::array("strfunc", "some string"),
+ llsd::array("intfunc", 2),
+ "arrayfunc",
+ "voidfunc"),
+ "args", llsd::array(
+ LLSD(),
+ LLSD(),
+ llsd::array(-5, "other string")),
+ // args array deliberately short, since the default
+ // [3] is undefined, which should work for voidfunc
+ "reqid", 17,
+ "reply", result.getName()));
+ LLSD reply{ result.get() };
+ ensure_equals("reqid not echoed", reply["reqid"].asInteger(), 17);
+ reply.erase("reqid");
+ ensure_equals(
+ "bad array batch",
+ reply,
+ llsd::map(
+ "data", llsd::array(
+ "got some string",
+ -2,
+ llsd::array(5, "got other string"),
+ LLSD())));
+ }
+
+ template<> template<>
+ void object::test<38>()
+ {
+ set_test_name("batched array error");
+ DispatchResult service;
+ LLCaptureListener<LLSD> result;
+ service.post(llsd::map(
+ "op", llsd::array(
+ llsd::array("strfunc", "some string"),
+ llsd::array("intfunc", 2, "whoops"), // bad form
+ "arrayfunc",
+ "voidfunc"),
+ "args", llsd::array(
+ LLSD(),
+ LLSD(),
+ llsd::array(-5, "other string")),
+ // args array deliberately short, since the default
+ // [3] is undefined, which should work for voidfunc
+ "reqid", 17,
+ "reply", result.getName()));
+ LLSD reply{ result.get() };
+ ensure_equals("reqid not echoed", reply["reqid"].asInteger(), 17);
+ reply.erase("reqid");
+ auto error{ reply["error"] };
+ reply.erase("error");
+ ensure_has(error, "[1]");
+ ensure_has(error, "unsupported");
+ ensure_equals("bad array batch", reply,
+ llsd::map("data", llsd::array("got some string")));
+ }
+
+ template<> template<>
+ void object::test<39>()
+ {
+ set_test_name("batched array exception");
+ DispatchResult service;
+ auto error = tut::call_exc(
+ [&service]()
+ {
+ service.post(llsd::map(
+ "op", llsd::array(
+ llsd::array("strfunc", "some string"),
+ llsd::array("intfunc", 2, "whoops"), // bad form
+ "arrayfunc",
+ "voidfunc"),
+ "args", llsd::array(
+ LLSD(),
+ LLSD(),
+ llsd::array(-5, "other string")),
+ // args array deliberately short, since the default
+ // [3] is undefined, which should work for voidfunc
+ "reqid", 17));
+ // no "reply"
+ },
+ "[1]");
+ ensure_has(error, "unsupported");
+ }
} // namespace tut
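The batched-request tests above imply the following event shape; a sketch of posting a batched map request to the DispatchResult listener defined in the tests, with the reply pump name "myreply" as a placeholder:

static void batched_request_sketch()
{
    DispatchResult service;
    // Each key under "op" names an operation; each value carries its args.
    // The reply contains one key per operation, plus the echoed "reqid".
    service.post(llsd::map(
        "op", llsd::map(
            "strfunc", "some string",
            "intfunc", 2),
        "reqid", 42,
        "reply", "myreply"));
}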
diff --git a/indra/llcommon/tests/workqueue_test.cpp b/indra/llcommon/tests/workqueue_test.cpp
index 7655a7aa1f..df16f4a46e 100644
--- a/indra/llcommon/tests/workqueue_test.cpp
+++ b/indra/llcommon/tests/workqueue_test.cpp
@@ -38,7 +38,7 @@ namespace tut
{
struct workqueue_data
{
- WorkQueue queue{"queue"};
+ WorkSchedule queue{"queue"};
};
typedef test_group<workqueue_data> workqueue_group;
typedef workqueue_group::object object;
@@ -49,8 +49,8 @@ namespace tut
{
set_test_name("name");
ensure_equals("didn't capture name", queue.getKey(), "queue");
- ensure("not findable", WorkQueue::getInstance("queue") == queue.getWeak().lock());
- WorkQueue q2;
+ ensure("not findable", WorkSchedule::getInstance("queue") == queue.getWeak().lock());
+ WorkSchedule q2;
ensure("has no name", LLStringUtil::startsWith(q2.getKey(), "WorkQueue"));
}
@@ -73,16 +73,16 @@ namespace tut
{
set_test_name("postEvery");
// record of runs
- using Shared = std::deque<WorkQueue::TimePoint>;
+ using Shared = std::deque<WorkSchedule::TimePoint>;
// This is an example of how to share data between the originator of
- // postEvery(work) and the work item itself, since usually a WorkQueue
+ // postEvery(work) and the work item itself, since usually a WorkSchedule
// is used to dispatch work to a different thread. Neither of them
// should call any of LLCond's wait methods: you don't want to stall
// either the worker thread or the originating thread (conventionally
// main). Use LLCond or a subclass even if all you want to do is
// signal the work item that it can quit; consider LLOneShotCond.
LLCond<Shared> data;
- auto start = WorkQueue::TimePoint::clock::now();
+ auto start = WorkSchedule::TimePoint::clock::now();
// 2s seems like a long time to wait, since it directly impacts the
// duration of this test program. Unfortunately GitHub's Mac runners
// are pretty wimpy, and we're getting spurious "too late" errors just
@@ -97,7 +97,7 @@ namespace tut
data.update_one(
[](Shared& data)
{
- data.push_back(WorkQueue::TimePoint::clock::now());
+ data.push_back(WorkSchedule::TimePoint::clock::now());
});
// by the 3rd call, return false to stop
return (++count < 3);
@@ -106,7 +106,7 @@ namespace tut
// postEvery() running, so run until we have exhausted the iterations
// or we time out waiting
for (auto finish = start + 10*interval;
- WorkQueue::TimePoint::clock::now() < finish &&
+ WorkSchedule::TimePoint::clock::now() < finish &&
data.get([](const Shared& data){ return data.size(); }) < 3; )
{
queue.runPending();
@@ -143,8 +143,8 @@ namespace tut
void object::test<4>()
{
set_test_name("postTo");
- WorkQueue main("main");
- auto qptr = WorkQueue::getInstance("queue");
+ WorkSchedule main("main");
+ auto qptr = WorkSchedule::getInstance("queue");
int result = 0;
main.postTo(
qptr,
@@ -175,8 +175,8 @@ namespace tut
void object::test<5>()
{
set_test_name("postTo with void return");
- WorkQueue main("main");
- auto qptr = WorkQueue::getInstance("queue");
+ WorkSchedule main("main");
+ auto qptr = WorkSchedule::getInstance("queue");
std::string observe;
main.postTo(
qptr,
@@ -198,7 +198,7 @@ namespace tut
std::string stored;
// Try to call waitForResult() on this thread's main coroutine. It
// should throw because the main coroutine must service the queue.
- auto what{ catch_what<WorkQueue::Error>(
+ auto what{ catch_what<WorkSchedule::Error>(
[this, &stored](){ stored = queue.waitForResult(
[](){ return "should throw"; }); }) };
ensure("lambda should not have run", stored.empty());
diff --git a/indra/llcommon/tests/wrapllerrs.h b/indra/llcommon/tests/wrapllerrs.h
index 3779fb41bc..d657b329bb 100644
--- a/indra/llcommon/tests/wrapllerrs.h
+++ b/indra/llcommon/tests/wrapllerrs.h
@@ -226,6 +226,11 @@ public:
return boost::dynamic_pointer_cast<CaptureLogRecorder>(mRecorder)->streamto(out);
}
+ friend inline std::ostream& operator<<(std::ostream& out, const CaptureLog& self)
+ {
+ return self.streamto(out);
+ }
+
private:
LLError::FatalFunction mFatalFunction;
LLError::SettingsStoragePtr mOldSettings;
diff --git a/indra/llcommon/threadpool.cpp b/indra/llcommon/threadpool.cpp
index d5adf11264..3a9a5a2062 100644
--- a/indra/llcommon/threadpool.cpp
+++ b/indra/llcommon/threadpool.cpp
@@ -17,18 +17,58 @@
// std headers
// external library headers
// other Linden headers
+#include "commoncontrol.h"
#include "llerror.h"
#include "llevents.h"
+#include "llsd.h"
#include "stringize.h"
-LL::ThreadPool::ThreadPool(const std::string& name, size_t threads, size_t capacity):
+#include <boost/fiber/algo/round_robin.hpp>
+
+/*****************************************************************************
+* Custom fiber scheduler for worker threads
+*****************************************************************************/
+// As of 2022-12-06, each of our worker threads only runs a single (default)
+// fiber: we don't launch explicit fibers within worker threads, nor do we
+// anticipate doing so. So a worker thread that's simply waiting for incoming
+// tasks should really sleep a little. Override the default fiber scheduler to
+// implement that.
+struct sleepy_robin: public boost::fibers::algo::round_robin
+{
+ virtual void suspend_until( std::chrono::steady_clock::time_point const&) noexcept
+ {
+#if LL_WINDOWS
+ // round_robin holds a std::condition_variable, and
+ // round_robin::suspend_until() calls
+ // std::condition_variable::wait_until(). On Windows, that call seems
+ // busier than it ought to be. Try just sleeping.
+ Sleep(1);
+#else
+        // Currently unused other than on Windows, but might as well have something here.
+        // usleep() takes different units than Sleep(); we just want to sleep for a de minimis duration.
+ usleep(1);
+#endif
+ }
+
+ virtual void notify() noexcept
+ {
+ // Since our Sleep() call above will wake up on its own, we need not
+ // take any special action to wake it.
+ }
+};
+
+/*****************************************************************************
+* ThreadPoolBase
+*****************************************************************************/
+LL::ThreadPoolBase::ThreadPoolBase(const std::string& name, size_t threads,
+ WorkQueueBase* queue):
super(name),
- mQueue(name, capacity),
mName("ThreadPool:" + name),
- mThreadCount(threads)
+ mThreadCount(getConfiguredWidth(name, threads)),
+ mQueue(queue)
{}
-void LL::ThreadPool::start()
+void LL::ThreadPoolBase::start()
{
for (size_t i = 0; i < mThreadCount; ++i)
{
@@ -56,17 +96,17 @@ void LL::ThreadPool::start()
});
}
-LL::ThreadPool::~ThreadPool()
+LL::ThreadPoolBase::~ThreadPoolBase()
{
close();
}
-void LL::ThreadPool::close()
+void LL::ThreadPoolBase::close()
{
- if (! mQueue.isClosed())
+ if (! mQueue->isClosed())
{
LL_DEBUGS("ThreadPool") << mName << " closing queue and joining threads" << LL_ENDL;
- mQueue.close();
+ mQueue->close();
for (auto& pair: mThreads)
{
LL_DEBUGS("ThreadPool") << mName << " waiting on thread " << pair.first << LL_ENDL;
@@ -76,14 +116,74 @@ void LL::ThreadPool::close()
}
}
-void LL::ThreadPool::run(const std::string& name)
+void LL::ThreadPoolBase::run(const std::string& name)
{
+#if LL_WINDOWS
+ // Try using sleepy_robin fiber scheduler.
+ boost::fibers::use_scheduling_algorithm<sleepy_robin>();
+#endif // LL_WINDOWS
+
LL_DEBUGS("ThreadPool") << name << " starting" << LL_ENDL;
run();
LL_DEBUGS("ThreadPool") << name << " stopping" << LL_ENDL;
}
-void LL::ThreadPool::run()
+void LL::ThreadPoolBase::run()
+{
+ mQueue->runUntilClose();
+}
+
+//static
+size_t LL::ThreadPoolBase::getConfiguredWidth(const std::string& name, size_t dft)
+{
+ LLSD poolSizes;
+ try
+ {
+ poolSizes = LL::CommonControl::get("Global", "ThreadPoolSizes");
+ // "ThreadPoolSizes" is actually a map containing the sizes of
+ // interest -- or should be, if this process has an
+ // LLViewerControlListener instance and its settings include
+ // "ThreadPoolSizes". If we failed to retrieve it, perhaps we're in a
+ // program that doesn't define that, or perhaps there's no such
+ // setting, or perhaps we're asking too early, before the LLEventAPI
+ // itself has been instantiated. In any of those cases, it seems worth
+ // warning.
+ if (! poolSizes.isDefined())
+ {
+ // Note: we don't warn about absence of an override key for a
+ // particular ThreadPool name, that's fine. This warning is about
+ // complete absence of a ThreadPoolSizes setting, which we expect
+ // in a normal viewer session.
+ LL_WARNS("ThreadPool") << "No 'ThreadPoolSizes' setting for ThreadPool '"
+ << name << "'" << LL_ENDL;
+ }
+ }
+ catch (const LL::CommonControl::Error& exc)
+ {
+ // We don't want ThreadPool to *require* LLViewerControlListener.
+ // Just log it and carry on.
+ LL_WARNS("ThreadPool") << "Can't check 'ThreadPoolSizes': " << exc.what() << LL_ENDL;
+ }
+
+ LL_DEBUGS("ThreadPool") << "ThreadPoolSizes = " << poolSizes << LL_ENDL;
+ // LLSD treats an undefined value as an empty map when asked to retrieve a
+ // key, so we don't need this to be conditional.
+ LLSD sizeSpec{ poolSizes[name] };
+ // We retrieve sizeSpec as LLSD, rather than immediately as LLSD::Integer,
+ // so we can distinguish the case when it's undefined.
+ return sizeSpec.isInteger() ? sizeSpec.asInteger() : dft;
+}
+
+//static
+size_t LL::ThreadPoolBase::getWidth(const std::string& name, size_t dft)
{
- mQueue.runUntilClose();
+ auto instance{ getInstance(name) };
+ if (instance)
+ {
+ return instance->getWidth();
+ }
+ else
+ {
+ return getConfiguredWidth(name, dft);
+ }
}
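A sketch of consulting the new statics before constructing a pool; the pool name "General" and the default of 4 threads are illustrative assumptions, not values mandated by this change:

static void width_sketch()
{
    // Use the "ThreadPoolSizes" override for "General" if present, else 4.
    size_t configured = LL::ThreadPoolBase::getConfiguredWidth("General", 4);
    // Prefer the width of an already-instantiated pool with that name,
    // falling back to getConfiguredWidth(), then to the default.
    size_t width = LL::ThreadPoolBase::getWidth("General", 4);
    (void)configured; (void)width;
}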
diff --git a/indra/llcommon/threadpool.h b/indra/llcommon/threadpool.h
index f8eec3b457..60f4a0ce1b 100644
--- a/indra/llcommon/threadpool.h
+++ b/indra/llcommon/threadpool.h
@@ -13,7 +13,9 @@
#if ! defined(LL_THREADPOOL_H)
#define LL_THREADPOOL_H
+#include "threadpool_fwd.h"
#include "workqueue.h"
+#include <memory> // std::unique_ptr
#include <string>
#include <thread>
#include <utility> // std::pair
@@ -22,17 +24,24 @@
namespace LL
{
- class ThreadPool: public LLInstanceTracker<ThreadPool, std::string>
+ class ThreadPoolBase: public LLInstanceTracker<ThreadPoolBase, std::string>
{
private:
- using super = LLInstanceTracker<ThreadPool, std::string>;
+ using super = LLInstanceTracker<ThreadPoolBase, std::string>;
+
public:
/**
- * Pass ThreadPool a string name. This can be used to look up the
+ * Pass ThreadPoolBase a string name. This can be used to look up the
* relevant WorkQueue.
+ *
+ * The number of threads you pass sets the compile-time default. But
+ * if the user has overridden the LLSD map in the "ThreadPoolSizes"
+ * setting with a key matching this ThreadPool name, that setting
+ * overrides this parameter.
*/
- ThreadPool(const std::string& name, size_t threads=1, size_t capacity=1024);
- virtual ~ThreadPool();
+ ThreadPoolBase(const std::string& name, size_t threads,
+ WorkQueueBase* queue);
+ virtual ~ThreadPoolBase();
/**
* Launch the ThreadPool. Until this call, a constructed ThreadPool
@@ -50,8 +59,6 @@ namespace LL
std::string getName() const { return mName; }
size_t getWidth() const { return mThreads.size(); }
- /// obtain a non-const reference to the WorkQueue to post work to it
- WorkQueue& getQueue() { return mQueue; }
/**
* Override run() if you need special processing. The default run()
@@ -59,15 +66,72 @@ namespace LL
*/
virtual void run();
+ /**
+ * getConfiguredWidth() returns the setting, if any, for the specified
+ * ThreadPool name. Returns dft if the "ThreadPoolSizes" map does not
+ * contain the specified name.
+ */
+ static
+ size_t getConfiguredWidth(const std::string& name, size_t dft=0);
+
+ /**
+ * This getWidth() returns the width of the instantiated ThreadPool
+ * with the specified name, if any. If no instance exists, returns its
+         * getConfiguredWidth() if any. If there's no instance and no relevant
+         * override, returns dft. Presumably dft should match the threads
+ * parameter passed to the ThreadPool constructor call that will
+ * eventually instantiate the ThreadPool with that name.
+ */
+ static
+ size_t getWidth(const std::string& name, size_t dft);
+
+ protected:
+ std::unique_ptr<WorkQueueBase> mQueue;
+
private:
void run(const std::string& name);
- WorkQueue mQueue;
std::string mName;
size_t mThreadCount;
std::vector<std::pair<std::string, std::thread>> mThreads;
};
+ /**
+ * Specialize with WorkQueue or, for timestamped tasks, WorkSchedule
+ */
+ template <class QUEUE>
+ struct ThreadPoolUsing: public ThreadPoolBase
+ {
+ using queue_t = QUEUE;
+
+ /**
+ * Pass ThreadPoolUsing a string name. This can be used to look up the
+ * relevant WorkQueue.
+ *
+ * The number of threads you pass sets the compile-time default. But
+ * if the user has overridden the LLSD map in the "ThreadPoolSizes"
+ * setting with a key matching this ThreadPool name, that setting
+ * overrides this parameter.
+ *
+ * Pass an explicit capacity to limit the size of the queue.
+ * Constraining the queue can cause a submitter to block. Do not
+ * constrain any ThreadPool accepting work from the main thread.
+ */
+ ThreadPoolUsing(const std::string& name, size_t threads=1, size_t capacity=1024*1024):
+ ThreadPoolBase(name, threads, new queue_t(name, capacity))
+ {}
+ ~ThreadPoolUsing() override {}
+
+ /**
+ * obtain a non-const reference to the specific WorkQueue subclass to
+ * post work to it
+ */
+ queue_t& getQueue() { return static_cast<queue_t&>(*mQueue); }
+ };
+
+ /// ThreadPool is shorthand for using the simpler WorkQueue
+ using ThreadPool = ThreadPoolUsing<WorkQueue>;
+
} // namespace LL
#endif /* ! defined(LL_THREADPOOL_H) */
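A sketch of the two specializations this header provides; the pool names, thread counts, and lambda bodies are placeholders:

static void threadpool_sketch()
{
    // Plain FIFO pool.
    LL::ThreadPool pool("SketchPool", 2);
    pool.start();
    pool.getQueue().post([]{ /* work item */ });

    // Timestamped pool: getQueue() yields a WorkSchedule, so work can also be
    // scheduled for a future TimePoint.
    LL::ThreadPoolUsing<LL::WorkSchedule> sched("SketchSchedule", 2);
    sched.start();
    sched.getQueue().post([]{ /* work item */ });
}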
diff --git a/indra/llcommon/threadpool_fwd.h b/indra/llcommon/threadpool_fwd.h
new file mode 100644
index 0000000000..1aa3c4a0e2
--- /dev/null
+++ b/indra/llcommon/threadpool_fwd.h
@@ -0,0 +1,25 @@
+/**
+ * @file threadpool_fwd.h
+ * @author Nat Goodspeed
+ * @date 2022-12-09
+ * @brief Forward declarations for ThreadPool et al.
+ *
+ * $LicenseInfo:firstyear=2022&license=viewerlgpl$
+ * Copyright (c) 2022, Linden Research, Inc.
+ * $/LicenseInfo$
+ */
+
+#if ! defined(LL_THREADPOOL_FWD_H)
+#define LL_THREADPOOL_FWD_H
+
+#include "workqueue.h"
+
+namespace LL
+{
+ template <class QUEUE>
+ struct ThreadPoolUsing;
+
+ using ThreadPool = ThreadPoolUsing<WorkQueue>;
+} // namespace LL
+
+#endif /* ! defined(LL_THREADPOOL_FWD_H) */
diff --git a/indra/llcommon/workqueue.cpp b/indra/llcommon/workqueue.cpp
index eb06890468..cf80ce0656 100644
--- a/indra/llcommon/workqueue.cpp
+++ b/indra/llcommon/workqueue.cpp
@@ -26,83 +26,65 @@
using Mutex = LLCoros::Mutex;
using Lock = LLCoros::LockType;
-LL::WorkQueue::WorkQueue(const std::string& name, size_t capacity):
- super(makeName(name)),
- mQueue(capacity)
+/*****************************************************************************
+* WorkQueueBase
+*****************************************************************************/
+LL::WorkQueueBase::WorkQueueBase(const std::string& name):
+ super(makeName(name))
{
// TODO: register for "LLApp" events so we can implicitly close() on
// viewer shutdown.
}
-void LL::WorkQueue::close()
-{
- mQueue.close();
-}
-
-size_t LL::WorkQueue::size()
-{
- return mQueue.size();
-}
-
-bool LL::WorkQueue::isClosed()
-{
- return mQueue.isClosed();
-}
-
-bool LL::WorkQueue::done()
-{
- return mQueue.done();
-}
-
-void LL::WorkQueue::runUntilClose()
+void LL::WorkQueueBase::runUntilClose()
{
try
{
for (;;)
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
- callWork(mQueue.pop());
+ callWork(pop_());
}
}
- catch (const Queue::Closed&)
+ catch (const Closed&)
{
}
}
-bool LL::WorkQueue::runPending()
+bool LL::WorkQueueBase::runPending()
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
- for (Work work; mQueue.tryPop(work); )
+ for (Work work; tryPop_(work); )
{
callWork(work);
}
- return ! mQueue.done();
+ return ! done();
}
-bool LL::WorkQueue::runOne()
+bool LL::WorkQueueBase::runOne()
{
Work work;
- if (mQueue.tryPop(work))
+ if (tryPop_(work))
{
callWork(work);
}
- return ! mQueue.done();
+ return ! done();
}
-bool LL::WorkQueue::runUntil(const TimePoint& until)
+bool LL::WorkQueueBase::runUntil(const TimePoint& until)
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
// Should we subtract some slop to allow for typical Work execution time?
// How much slop?
// runUntil() is simply a time-bounded runPending().
- for (Work work; TimePoint::clock::now() < until && mQueue.tryPop(work); )
+ for (Work work; TimePoint::clock::now() < until && tryPop_(work); )
{
callWork(work);
}
- return ! mQueue.done();
+ return ! done();
}
-std::string LL::WorkQueue::makeName(const std::string& name)
+std::string LL::WorkQueueBase::makeName(const std::string& name)
{
if (! name.empty())
return name;
@@ -120,14 +102,7 @@ std::string LL::WorkQueue::makeName(const std::string& name)
return STRINGIZE("WorkQueue" << num);
}
-void LL::WorkQueue::callWork(const Queue::DataTuple& work)
-{
- // ThreadSafeSchedule::pop() always delivers a tuple, even when
- // there's only one data field per item, as for us.
- callWork(std::get<0>(work));
-}
-
-void LL::WorkQueue::callWork(const Work& work)
+void LL::WorkQueueBase::callWork(const Work& work)
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
try
@@ -142,12 +117,12 @@ void LL::WorkQueue::callWork(const Work& work)
}
}
-void LL::WorkQueue::error(const std::string& msg)
+void LL::WorkQueueBase::error(const std::string& msg)
{
LL_ERRS("WorkQueue") << msg << LL_ENDL;
}
-void LL::WorkQueue::checkCoroutine(const std::string& method)
+void LL::WorkQueueBase::checkCoroutine(const std::string& method)
{
// By convention, the default coroutine on each thread has an empty name
// string. See also LLCoros::logname().
@@ -156,3 +131,115 @@ void LL::WorkQueue::checkCoroutine(const std::string& method)
LLTHROW(Error("Do not call " + method + " from a thread's default coroutine"));
}
}
+
+/*****************************************************************************
+* WorkQueue
+*****************************************************************************/
+LL::WorkQueue::WorkQueue(const std::string& name, size_t capacity):
+ super(name),
+ mQueue(capacity)
+{
+}
+
+void LL::WorkQueue::close()
+{
+ mQueue.close();
+}
+
+size_t LL::WorkQueue::size()
+{
+ return mQueue.size();
+}
+
+bool LL::WorkQueue::isClosed()
+{
+ return mQueue.isClosed();
+}
+
+bool LL::WorkQueue::done()
+{
+ return mQueue.done();
+}
+
+bool LL::WorkQueue::post(const Work& callable)
+{
+ return mQueue.pushIfOpen(callable);
+}
+
+bool LL::WorkQueue::tryPost(const Work& callable)
+{
+ return mQueue.tryPush(callable);
+}
+
+LL::WorkQueue::Work LL::WorkQueue::pop_()
+{
+ return mQueue.pop();
+}
+
+bool LL::WorkQueue::tryPop_(Work& work)
+{
+ return mQueue.tryPop(work);
+}
+
+/*****************************************************************************
+* WorkSchedule
+*****************************************************************************/
+LL::WorkSchedule::WorkSchedule(const std::string& name, size_t capacity):
+ super(name),
+ mQueue(capacity)
+{
+}
+
+void LL::WorkSchedule::close()
+{
+ mQueue.close();
+}
+
+size_t LL::WorkSchedule::size()
+{
+ return mQueue.size();
+}
+
+bool LL::WorkSchedule::isClosed()
+{
+ return mQueue.isClosed();
+}
+
+bool LL::WorkSchedule::done()
+{
+ return mQueue.done();
+}
+
+bool LL::WorkSchedule::post(const Work& callable)
+{
+ // Use TimePoint::clock::now() instead of TimePoint's representation of
+ // the epoch because this WorkSchedule may contain a mix of past-due
+ // TimedWork items and TimedWork items scheduled for the future. Sift this
+ // new item into the correct place.
+ return post(callable, TimePoint::clock::now());
+}
+
+bool LL::WorkSchedule::post(const Work& callable, const TimePoint& time)
+{
+ return mQueue.pushIfOpen(TimedWork(time, callable));
+}
+
+bool LL::WorkSchedule::tryPost(const Work& callable)
+{
+ return tryPost(callable, TimePoint::clock::now());
+}
+
+bool LL::WorkSchedule::tryPost(const Work& callable, const TimePoint& time)
+{
+ return mQueue.tryPush(TimedWork(time, callable));
+}
+
+LL::WorkSchedule::Work LL::WorkSchedule::pop_()
+{
+ return std::get<0>(mQueue.pop());
+}
+
+bool LL::WorkSchedule::tryPop_(Work& work)
+{
+ return mQueue.tryPop(work);
+}
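The split above replaces the single ThreadSafeSchedule-backed WorkQueue with a plain FIFO WorkQueue and a timestamp-aware WorkSchedule. A minimal usage sketch of how the two behave (illustrative only, not part of this diff; the queue names and lambdas are invented):

#include "workqueue.h"          // LL::WorkQueue, LL::WorkSchedule
#include <chrono>
#include <iostream>

void post_sketch()
{
    // WorkQueue: plain FIFO of std::function<void()>, no timestamps.
    LL::WorkQueue plain("sketch-queue");
    plain.post([]{ std::cout << "FIFO item\n"; });

    // WorkSchedule: every item carries a TimePoint. post() without a time
    // stamps the item "now", so it sorts ahead of future-dated work.
    LL::WorkSchedule sched("sketch-schedule");
    auto in2s = LL::WorkSchedule::TimePoint::clock::now() + std::chrono::seconds(2);
    sched.post([]{ std::cout << "runs second, after ~2s\n"; }, in2s);
    sched.post([]{ std::cout << "runs first\n"; });

    // Crude drain loop for illustration: a real consumer thread would call
    // runPending()/runUntil() from its service loop instead of spinning.
    plain.close();
    sched.close();
    while (plain.runPending() || sched.runPending())
        ;
}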
diff --git a/indra/llcommon/workqueue.h b/indra/llcommon/workqueue.h
index 70fd65bd0c..ec0700a718 100644
--- a/indra/llcommon/workqueue.h
+++ b/indra/llcommon/workqueue.h
@@ -15,6 +15,7 @@
#include "llcoros.h"
#include "llexception.h"
#include "llinstancetracker.h"
+#include "llinstancetrackersubclass.h"
#include "threadsafeschedule.h"
#include <chrono>
#include <exception> // std::current_exception
@@ -23,27 +24,23 @@
namespace LL
{
+
+/*****************************************************************************
+* WorkQueueBase: API for WorkQueue and WorkSchedule
+*****************************************************************************/
/**
* A typical WorkQueue has a string name that can be used to find it.
*/
- class WorkQueue: public LLInstanceTracker<WorkQueue, std::string>
+ class WorkQueueBase: public LLInstanceTracker<WorkQueueBase, std::string>
{
private:
- using super = LLInstanceTracker<WorkQueue, std::string>;
+ using super = LLInstanceTracker<WorkQueueBase, std::string>;
public:
using Work = std::function<void()>;
-
- private:
- using Queue = ThreadSafeSchedule<Work>;
- // helper for postEvery()
- template <typename Rep, typename Period, typename CALLABLE>
- class BackJack;
-
- public:
- using TimePoint = Queue::TimePoint;
- using TimedWork = Queue::TimeTuple;
- using Closed = Queue::Closed;
+ using Closed = LLThreadSafeQueueInterrupt;
+ // for runFor(), runUntil()
+ using TimePoint = std::chrono::steady_clock::time_point;
struct Error: public LLException
{
@@ -51,18 +48,18 @@ namespace LL
};
/**
- * You may omit the WorkQueue name, in which case a unique name is
+ * If the WorkQueueBase name is empty, a unique name is
* synthesized; for practical purposes that makes it anonymous.
*/
- WorkQueue(const std::string& name = std::string(), size_t capacity=1024);
+ WorkQueueBase(const std::string& name);
/**
* Since the point of WorkQueue is to pass work to some other worker
- * thread(s) asynchronously, it's important that the WorkQueue continue
- * to exist until the worker thread(s) have drained it. To communicate
- * that it's time for them to quit, close() the queue.
+ * thread(s) asynchronously, it's important that it continue to exist
+ * until the worker thread(s) have drained it. To communicate that
+ * it's time for them to quit, close() the queue.
*/
- void close();
+ virtual void close() = 0;
/**
* WorkQueue supports multiple producers and multiple consumers. In
@@ -78,152 +75,57 @@ namespace LL
* * If you're the only consumer, noticing that size() > 0 is
* meaningful.
*/
- size_t size();
+ virtual size_t size() = 0;
/// producer end: are we prevented from pushing any additional items?
- bool isClosed();
+ virtual bool isClosed() = 0;
/// consumer end: are we done, is the queue entirely drained?
- bool done();
+ virtual bool done() = 0;
/*---------------------- fire and forget API -----------------------*/
- /// fire-and-forget, but at a particular (future?) time
- template <typename CALLABLE>
- void post(const TimePoint& time, CALLABLE&& callable)
- {
- // Defer reifying an arbitrary CALLABLE until we hit this or
- // postIfOpen(). All other methods should accept CALLABLEs of
- // arbitrary type to avoid multiple levels of std::function
- // indirection.
- mQueue.push(TimedWork(time, std::move(callable)));
- }
-
- /// fire-and-forget
- template <typename CALLABLE>
- void post(CALLABLE&& callable)
- {
- // We use TimePoint::clock::now() instead of TimePoint's
- // representation of the epoch because this WorkQueue may contain
- // a mix of past-due TimedWork items and TimedWork items scheduled
- // for the future. Sift this new item into the correct place.
- post(TimePoint::clock::now(), std::move(callable));
- }
-
- /**
- * post work for a particular time, unless the queue is closed before
- * we can post
- */
- template <typename CALLABLE>
- bool postIfOpen(const TimePoint& time, CALLABLE&& callable)
- {
- // Defer reifying an arbitrary CALLABLE until we hit this or
- // post(). All other methods should accept CALLABLEs of arbitrary
- // type to avoid multiple levels of std::function indirection.
- return mQueue.pushIfOpen(TimedWork(time, std::move(callable)));
- }
-
/**
* post work, unless the queue is closed before we can post
*/
- template <typename CALLABLE>
- bool postIfOpen(CALLABLE&& callable)
- {
- return postIfOpen(TimePoint::clock::now(), std::move(callable));
- }
+ virtual bool post(const Work&) = 0;
/**
- * Post work to be run at a specified time to another WorkQueue, which
- * may or may not still exist and be open. Return true if we were able
- * to post.
+ * post work, unless the queue is full
*/
- template <typename CALLABLE>
- static bool postMaybe(weak_t target, const TimePoint& time, CALLABLE&& callable);
+ virtual bool tryPost(const Work&) = 0;
/**
* Post work to another WorkQueue, which may or may not still exist
- * and be open. Return true if we were able to post.
- */
- template <typename CALLABLE>
- static bool postMaybe(weak_t target, CALLABLE&& callable)
- {
- return postMaybe(target, TimePoint::clock::now(),
- std::forward<CALLABLE>(callable));
- }
-
- /**
- * Launch a callable returning bool that will trigger repeatedly at
- * specified interval, until the callable returns false.
- *
- * If you need to signal that callable from outside, DO NOT bind a
- * reference to a simple bool! That's not thread-safe. Instead, bind
- * an LLCond variant, e.g. LLOneShotCond or LLBoolCond.
+ * and be open. Support any post() overload. Return true if we were
+ * able to post.
*/
- template <typename Rep, typename Period, typename CALLABLE>
- void postEvery(const std::chrono::duration<Rep, Period>& interval,
- CALLABLE&& callable);
-
- template <typename CALLABLE>
- bool tryPost(CALLABLE&& callable)
- {
- return mQueue.tryPush(TimedWork(TimePoint::clock::now(), std::move(callable)));
- }
+ template <typename... ARGS>
+ static bool postMaybe(weak_t target, ARGS&&... args);
/*------------------------- handshake API --------------------------*/
/**
- * Post work to another WorkQueue to be run at a specified time,
- * requesting a specific callback to be run on this WorkQueue on
- * completion.
- *
- * Returns true if able to post, false if the other WorkQueue is
- * inaccessible.
- */
- // Apparently some Microsoft header file defines a macro CALLBACK? The
- // natural template argument name CALLBACK produces very weird Visual
- // Studio compile errors that seem utterly unrelated to this source
- // code.
- template <typename CALLABLE, typename FOLLOWUP>
- bool postTo(weak_t target,
- const TimePoint& time, CALLABLE&& callable, FOLLOWUP&& callback);
-
- /**
* Post work to another WorkQueue, requesting a specific callback to
- * be run on this WorkQueue on completion.
+ * be run on this WorkQueue on completion. Optional final argument is
+ * TimePoint for WorkSchedule.
*
* Returns true if able to post, false if the other WorkQueue is
* inaccessible.
*/
- template <typename CALLABLE, typename FOLLOWUP>
- bool postTo(weak_t target, CALLABLE&& callable, FOLLOWUP&& callback)
- {
- return postTo(target, TimePoint::clock::now(),
- std::move(callable), std::move(callback));
- }
-
- /**
- * Post work to another WorkQueue to be run at a specified time,
- * blocking the calling coroutine until then, returning the result to
- * caller on completion.
- *
- * In general, we assume that each thread's default coroutine is busy
- * servicing its WorkQueue or whatever. To try to prevent mistakes, we
- * forbid calling waitForResult() from a thread's default coroutine.
- */
- template <typename CALLABLE>
- auto waitForResult(const TimePoint& time, CALLABLE&& callable);
+ template <typename CALLABLE, typename FOLLOWUP, typename... ARGS>
+ bool postTo(weak_t target, CALLABLE&& callable, FOLLOWUP&& callback,
+ ARGS&&... args);
/**
* Post work to another WorkQueue, blocking the calling coroutine
- * until then, returning the result to caller on completion.
+ * until then, returning the result to caller on completion. Optional
+ * final argument is TimePoint for WorkSchedule.
*
* In general, we assume that each thread's default coroutine is busy
* servicing its WorkQueue or whatever. To try to prevent mistakes, we
* forbid calling waitForResult() from a thread's default coroutine.
*/
- template <typename CALLABLE>
- auto waitForResult(CALLABLE&& callable)
- {
- return waitForResult(TimePoint::clock::now(), std::move(callable));
- }
+ template <typename CALLABLE, typename... ARGS>
+ auto waitForResult(CALLABLE&& callable, ARGS&&... args);
/*--------------------------- worker API ---------------------------*/
@@ -270,7 +172,7 @@ namespace LL
*/
bool runUntil(const TimePoint& until);
- private:
+ protected:
template <typename CALLABLE, typename FOLLOWUP>
static auto makeReplyLambda(CALLABLE&& callable, FOLLOWUP&& callback);
/// general case: arbitrary C++ return type
@@ -290,13 +192,170 @@ namespace LL
static void checkCoroutine(const std::string& method);
static void error(const std::string& msg);
static std::string makeName(const std::string& name);
- void callWork(const Queue::DataTuple& work);
void callWork(const Work& work);
+
+ private:
+ virtual Work pop_() = 0;
+ virtual bool tryPop_(Work&) = 0;
+ };
+
+/*****************************************************************************
+* WorkQueue: no timestamped task support
+*****************************************************************************/
+ class WorkQueue: public LLInstanceTrackerSubclass<WorkQueue, WorkQueueBase>
+ {
+ private:
+ using super = LLInstanceTrackerSubclass<WorkQueue, WorkQueueBase>;
+
+ public:
+ /**
+ * You may omit the WorkQueue name, in which case a unique name is
+ * synthesized; for practical purposes that makes it anonymous.
+ */
+ WorkQueue(const std::string& name = std::string(), size_t capacity=1024);
+
+ /**
+ * Since the point of WorkQueue is to pass work to some other worker
+ * thread(s) asynchronously, it's important that it continue to exist
+ * until the worker thread(s) have drained it. To communicate that
+ * it's time for them to quit, close() the queue.
+ */
+ void close() override;
+
+ /**
+ * WorkQueue supports multiple producers and multiple consumers. In
+ * the general case it's misleading to test size(), since any other
+ * thread might change it the nanosecond the lock is released. On that
+ * basis, some might argue against publishing a size() method at all.
+ *
+ * But there are two specific cases in which a test based on size()
+ * might be reasonable:
+ *
+ * * If you're the only producer, noticing that size() == 0 is
+ * meaningful.
+ * * If you're the only consumer, noticing that size() > 0 is
+ * meaningful.
+ */
+ size_t size() override;
+ /// producer end: are we prevented from pushing any additional items?
+ bool isClosed() override;
+ /// consumer end: are we done, is the queue entirely drained?
+ bool done() override;
+
+ /*---------------------- fire and forget API -----------------------*/
+
+ /**
+ * post work, unless the queue is closed before we can post
+ */
+ bool post(const Work&) override;
+
+ /**
+ * post work, unless the queue is full
+ */
+ bool tryPost(const Work&) override;
+
+ private:
+ using Queue = LLThreadSafeQueue<Work>;
+ Queue mQueue;
+
+ Work pop_() override;
+ bool tryPop_(Work&) override;
+ };
+
+/*****************************************************************************
+* WorkSchedule: add support for timestamped tasks
+*****************************************************************************/
+ class WorkSchedule: public LLInstanceTrackerSubclass<WorkSchedule, WorkQueueBase>
+ {
+ private:
+ using super = LLInstanceTrackerSubclass<WorkSchedule, WorkQueueBase>;
+ using Queue = ThreadSafeSchedule<Work>;
+ // helper for postEvery()
+ template <typename Rep, typename Period, typename CALLABLE>
+ class BackJack;
+
+ public:
+ using TimePoint = Queue::TimePoint;
+ using TimedWork = Queue::TimeTuple;
+
+ /**
+ * You may omit the WorkSchedule name, in which case a unique name is
+ * synthesized; for practical purposes that makes it anonymous.
+ */
+ WorkSchedule(const std::string& name = std::string(), size_t capacity=1024);
+
+ /**
+ * Since the point of WorkSchedule is to pass work to some other worker
+ * thread(s) asynchronously, it's important that the WorkSchedule continue
+ * to exist until the worker thread(s) have drained it. To communicate
+ * that it's time for them to quit, close() the queue.
+ */
+ void close() override;
+
+ /**
+ * WorkSchedule supports multiple producers and multiple consumers. In
+ * the general case it's misleading to test size(), since any other
+ * thread might change it the nanosecond the lock is released. On that
+ * basis, some might argue against publishing a size() method at all.
+ *
+ * But there are two specific cases in which a test based on size()
+ * might be reasonable:
+ *
+ * * If you're the only producer, noticing that size() == 0 is
+ * meaningful.
+ * * If you're the only consumer, noticing that size() > 0 is
+ * meaningful.
+ */
+ size_t size() override;
+ /// producer end: are we prevented from pushing any additional items?
+ bool isClosed() override;
+ /// consumer end: are we done, is the queue entirely drained?
+ bool done() override;
+
+ /*---------------------- fire and forget API -----------------------*/
+
+ /**
+ * post work, unless the queue is closed before we can post
+ */
+ bool post(const Work& callable) override;
+
+ /**
+ * post work for a particular time, unless the queue is closed before
+ * we can post
+ */
+ bool post(const Work& callable, const TimePoint& time);
+
+ /**
+ * post work, unless the queue is full
+ */
+ bool tryPost(const Work& callable) override;
+
+ /**
+ * post work for a particular time, unless the queue is full
+ */
+ bool tryPost(const Work& callable, const TimePoint& time);
+
+ /**
+ * Launch a callable returning bool that will trigger repeatedly at
+ * specified interval, until the callable returns false.
+ *
+ * If you need to signal that callable from outside, DO NOT bind a
+ * reference to a simple bool! That's not thread-safe. Instead, bind
+ * an LLCond variant, e.g. LLOneShotCond or LLBoolCond.
+ */
+ template <typename Rep, typename Period, typename CALLABLE>
+ bool postEvery(const std::chrono::duration<Rep, Period>& interval,
+ CALLABLE&& callable);
+
+ private:
Queue mQueue;
+
+ Work pop_() override;
+ bool tryPop_(Work&) override;
};
/**
- * BackJack is, in effect, a hand-rolled lambda, binding a WorkQueue, a
+ * BackJack is, in effect, a hand-rolled lambda, binding a WorkSchedule, a
* CALLABLE that returns bool, a TimePoint and an interval at which to
* relaunch it. As long as the callable continues returning true, BackJack
* keeps resubmitting it to the target WorkSchedule.
@@ -305,7 +364,7 @@ namespace LL
// class method gets its own 'this' pointer -- which we need to resubmit
// the whole BackJack callable.
template <typename Rep, typename Period, typename CALLABLE>
- class WorkQueue::BackJack
+ class WorkSchedule::BackJack
{
public:
// bind the desired data
@@ -319,9 +378,10 @@ namespace LL
mCallable(std::move(callable))
{}
- // Call by target WorkQueue -- note that although WE require a
- // callable returning bool, WorkQueue wants a void callable. We
- // consume the bool.
+ // This operator() method, called by target WorkSchedule, is what
+ // makes this object a Work item. Although WE require a callable
+ // returning bool, WorkSchedule wants a void callable. We consume the
+ // bool.
void operator()()
{
// If mCallable() throws an exception, don't catch it here: if it
@@ -337,7 +397,7 @@ namespace LL
// register our intent to fire at exact mIntervals.
mStart += mInterval;
- // We're being called at this moment by the target WorkQueue.
+ // We're being called at this moment by the target WorkSchedule.
// Assume it still exists, rather than checking the result of
// lock().
// Resubmit the whole *this callable: that's why we're a class
@@ -345,14 +405,10 @@ namespace LL
// move-only callable; but naturally this statement must be
// the last time we reference this instance, which may become
// moved-from.
- try
- {
- mTarget.lock()->post(mStart, std::move(*this));
- }
- catch (const Closed&)
- {
- // Once this queue is closed, oh well, just stop
- }
+ auto target{ std::dynamic_pointer_cast<WorkSchedule>(mTarget.lock()) };
+ // Discard bool return: once this queue is closed, oh well,
+ // just stop
+ target->post(std::move(*this), mStart);
}
}
@@ -364,8 +420,8 @@ namespace LL
};
template <typename Rep, typename Period, typename CALLABLE>
- void WorkQueue::postEvery(const std::chrono::duration<Rep, Period>& interval,
- CALLABLE&& callable)
+ bool WorkSchedule::postEvery(const std::chrono::duration<Rep, Period>& interval,
+ CALLABLE&& callable)
{
if (interval.count() <= 0)
{
@@ -381,14 +437,14 @@ namespace LL
// Instantiate and post a suitable BackJack, binding a weak_ptr to
// self, the current time, the desired interval and the desired
// callable.
- post(
+ return post(
BackJack<Rep, Period, CALLABLE>(
getWeak(), TimePoint::clock::now(), interval, std::move(callable)));
}
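A sketch of postEvery() in use (illustrative only; the heartbeat lambda and stop flag are invented). The doc comment above recommends an LLCond variant for the stop signal; this sketch substitutes std::atomic<bool> purely to stay self-contained:

#include "workqueue.h"
#include <atomic>
#include <chrono>
#include <memory>

void heartbeat_sketch(LL::WorkSchedule& sched)
{
    auto stop = std::make_shared<std::atomic<bool>>(false);
    // The callable must return bool: true to reschedule, false to stop.
    bool posted = sched.postEvery(
        std::chrono::seconds(1),
        [stop]
        {
            // ... periodic work here ...
            return ! stop->load();      // keep going until *stop is set
        });
    // postEvery() now returns false if the schedule was already closed.
    (void) posted;
    // Later, from any thread: stop->store(true);
}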
/// general case: arbitrary C++ return type
template <typename CALLABLE, typename FOLLOWUP, typename RETURNTYPE>
- struct WorkQueue::MakeReplyLambda
+ struct WorkQueueBase::MakeReplyLambda
{
auto operator()(CALLABLE&& callable, FOLLOWUP&& callback)
{
@@ -409,7 +465,7 @@ namespace LL
/// specialize for CALLABLE returning void
template <typename CALLABLE, typename FOLLOWUP>
- struct WorkQueue::MakeReplyLambda<CALLABLE, FOLLOWUP, void>
+ struct WorkQueueBase::MakeReplyLambda<CALLABLE, FOLLOWUP, void>
{
auto operator()(CALLABLE&& callable, FOLLOWUP&& callback)
{
@@ -421,16 +477,16 @@ namespace LL
};
template <typename CALLABLE, typename FOLLOWUP>
- auto WorkQueue::makeReplyLambda(CALLABLE&& callable, FOLLOWUP&& callback)
+ auto WorkQueueBase::makeReplyLambda(CALLABLE&& callable, FOLLOWUP&& callback)
{
return MakeReplyLambda<CALLABLE, FOLLOWUP,
decltype(std::forward<CALLABLE>(callable)())>()
(std::move(callable), std::move(callback));
}
- template <typename CALLABLE, typename FOLLOWUP>
- bool WorkQueue::postTo(weak_t target,
- const TimePoint& time, CALLABLE&& callable, FOLLOWUP&& callback)
+ template <typename CALLABLE, typename FOLLOWUP, typename... ARGS>
+ bool WorkQueueBase::postTo(weak_t target, CALLABLE&& callable, FOLLOWUP&& callback,
+ ARGS&&... args)
{
LL_PROFILE_ZONE_SCOPED;
// We're being asked to post to the WorkQueue at target.
@@ -443,13 +499,12 @@ namespace LL
// Here we believe target WorkQueue still exists. Post to it a
// lambda that packages our callable, our callback and a weak_ptr
// to this originating WorkQueue.
- tptr->post(
- time,
+ return tptr->post(
[reply = super::getWeak(),
callable = std::move(callable),
callback = std::move(callback)]
- ()
- mutable {
+ () mutable
+ {
// Use postMaybe() below in case this originating WorkQueue
// has been closed or destroyed. Remember, the outer lambda is
// now running on a thread servicing the target WorkQueue, and
@@ -472,44 +527,34 @@ namespace LL
// originating WorkQueue. Once there, rethrow it.
[exc = std::current_exception()](){ std::rethrow_exception(exc); });
}
- });
-
- // looks like we were able to post()
- return true;
+ },
+ // if caller passed a TimePoint, pass it along to post()
+ std::forward<ARGS>(args)...);
}
- template <typename CALLABLE>
- bool WorkQueue::postMaybe(weak_t target, const TimePoint& time, CALLABLE&& callable)
+ template <typename... ARGS>
+ bool WorkQueueBase::postMaybe(weak_t target, ARGS&&... args)
{
LL_PROFILE_ZONE_SCOPED;
// target is a weak_ptr: have to lock it to check it
auto tptr = target.lock();
if (tptr)
{
- try
- {
- tptr->post(time, std::forward<CALLABLE>(callable));
- // we were able to post()
- return true;
- }
- catch (const Closed&)
- {
- // target WorkQueue still exists, but is Closed
- }
+ return tptr->post(std::forward<ARGS>(args)...);
}
- // either target no longer exists, or its WorkQueue is Closed
+ // target no longer exists
return false;
}
/// general case: arbitrary C++ return type
template <typename CALLABLE, typename RETURNTYPE>
- struct WorkQueue::WaitForResult
+ struct WorkQueueBase::WaitForResult
{
- auto operator()(WorkQueue* self, const TimePoint& time, CALLABLE&& callable)
+ template <typename... ARGS>
+ auto operator()(WorkQueueBase* self, CALLABLE&& callable, ARGS&&... args)
{
LLCoros::Promise<RETURNTYPE> promise;
- self->post(
- time,
+ bool posted = self->post(
// We dare to bind a reference to Promise because it's
// specifically designed for cross-thread communication.
[&promise, callable = std::move(callable)]()
@@ -523,7 +568,13 @@ namespace LL
{
promise.set_exception(std::current_exception());
}
- });
+ },
+ // if caller passed a TimePoint, pass it to post()
+ std::forward<ARGS>(args)...);
+ if (! posted)
+ {
+ LLTHROW(WorkQueueBase::Closed());
+ }
auto future{ LLCoros::getFuture(promise) };
// now, on the calling thread, wait for that result
LLCoros::TempStatus st("waiting for WorkQueue::waitForResult()");
@@ -533,13 +584,13 @@ namespace LL
/// specialize for CALLABLE returning void
template <typename CALLABLE>
- struct WorkQueue::WaitForResult<CALLABLE, void>
+ struct WorkQueueBase::WaitForResult<CALLABLE, void>
{
- void operator()(WorkQueue* self, const TimePoint& time, CALLABLE&& callable)
+ template <typename... ARGS>
+ void operator()(WorkQueueBase* self, CALLABLE&& callable, ARGS&&... args)
{
LLCoros::Promise<void> promise;
- self->post(
- time,
+ bool posted = self->post(
// &promise is designed for cross-thread access
[&promise, callable = std::move(callable)]()
mutable {
@@ -552,7 +603,13 @@ namespace LL
{
promise.set_exception(std::current_exception());
}
- });
+ },
+ // if caller passed a TimePoint, pass it to post()
+ std::forward<ARGS>(args)...);
+ if (! posted)
+ {
+ LLTHROW(WorkQueueBase::Closed());
+ }
auto future{ LLCoros::getFuture(promise) };
// block until set_value()
LLCoros::TempStatus st("waiting for void WorkQueue::waitForResult()");
@@ -560,13 +617,13 @@ namespace LL
}
};
- template <typename CALLABLE>
- auto WorkQueue::waitForResult(const TimePoint& time, CALLABLE&& callable)
+ template <typename CALLABLE, typename... ARGS>
+ auto WorkQueueBase::waitForResult(CALLABLE&& callable, ARGS&&... args)
{
checkCoroutine("waitForResult()");
// derive callable's return type so we can specialize for void
return WaitForResult<CALLABLE, decltype(std::forward<CALLABLE>(callable)())>()
- (this, time, std::forward<CALLABLE>(callable));
+ (this, std::forward<CALLABLE>(callable), std::forward<ARGS>(args)...);
}
} // namespace LL
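Taken together, the variadic postTo()/postMaybe()/waitForResult() overloads replace the former TimePoint-first variants; per the new doc comments, a trailing TimePoint can be forwarded when the target is a WorkSchedule. A handshake sketch (illustrative only; the queue objects and lambdas are invented, and it must run from a non-default coroutine while some thread services 'worker'):

#include "workqueue.h"
#include <iostream>
#include <string>

void handshake_sketch(LL::WorkQueue& self, LL::WorkQueue& worker)
{
    // Fire-and-forget to a queue that may already be gone: returns false if
    // the weak_ptr is dead or the target queue is closed.
    LL::WorkQueueBase::postMaybe(worker.getWeak(), []{ /* background work */ });

    // Run work on 'worker', deliver its result back to 'self' via callback.
    self.postTo(
        worker.getWeak(),
        []{ return std::string("computed on worker"); },       // runs on worker
        [](const std::string& s){ std::cout << s << '\n'; });  // runs on self

    // Block this (non-default) coroutine until 'worker' runs the callable.
    // If 'worker' is already closed, post() fails and waitForResult()
    // throws WorkQueueBase::Closed.
    int answer = worker.waitForResult([]{ return 6 * 7; });
    std::cout << answer << '\n';
}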