Diffstat (limited to 'indra')
-rw-r--r--  indra/llcommon/llevents.cpp                  13
-rw-r--r--  indra/llcommon/llevents.h                    45
-rw-r--r--  indra/llcommon/lua_function.cpp             128
-rw-r--r--  indra/newview/scripts/lua/ErrorQueue.lua      4
-rw-r--r--  indra/newview/scripts/lua/WaitQueue.lua      31
-rw-r--r--  indra/newview/scripts/lua/fiber.lua         331
-rw-r--r--  indra/newview/scripts/lua/leap.lua          200
-rw-r--r--  indra/newview/scripts/lua/printf.lua         19
-rw-r--r--  indra/newview/tests/llluamanager_test.cpp    98
9 files changed, 662 insertions(+), 207 deletions(-)
diff --git a/indra/llcommon/llevents.cpp b/indra/llcommon/llevents.cpp
index 01bba7a620..667e047fd3 100644
--- a/indra/llcommon/llevents.cpp
+++ b/indra/llcommon/llevents.cpp
@@ -726,7 +726,7 @@ void LLReqID::stamp(LLSD& response) const
response["reqid"] = mReqid;
}
-bool sendReply(const LLSD& reply, const LLSD& request, const std::string& replyKey)
+bool sendReply(LLSD reply, const LLSD& request, const std::string& replyKey)
{
// If the original request has no value for replyKey, it's pointless to
// construct or send a reply event: on which LLEventPump should we send
@@ -739,13 +739,10 @@ bool sendReply(const LLSD& reply, const LLSD& request, const std::string& replyK
// Here the request definitely contains replyKey; reasonable to proceed.
- // Copy 'reply' to modify it.
- LLSD newreply(reply);
// Get the ["reqid"] element from request
LLReqID reqID(request);
- // and copy it to 'newreply'.
- reqID.stamp(newreply);
- // Send reply on LLEventPump named in request[replyKey]. Don't forget to
- // send the modified 'newreply' instead of the original 'reply'.
- return LLEventPumps::instance().obtain(request[replyKey]).post(newreply);
+ // and copy it to 'reply'.
+ reqID.stamp(reply);
+ // Send reply on LLEventPump named in request[replyKey].
+ return LLEventPumps::instance().obtain(request[replyKey]).post(reply);
}
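On the Lua side, the reqid that sendReply() now stamps directly into the reply is what leap.request() keys its dispatch on. A rough sketch of that round trip, assuming something on the C++ side answers the 'echo' pump with sendReply(data, data) the way the unit tests below do:

    local leap = require 'leap'

    -- request() invents a reqid and stamps it into the outbound event;
    -- sendReply() copies it into the reply, and leap's dispatch() uses it
    -- to find the WaitForReqid created for this call
    local response = leap.request('echo', {data='ping'})
    print(response.data)    -- 'ping'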
diff --git a/indra/llcommon/llevents.h b/indra/llcommon/llevents.h
index c1dbf4392f..ebc893d1e6 100644
--- a/indra/llcommon/llevents.h
+++ b/indra/llcommon/llevents.h
@@ -151,6 +151,8 @@ typedef boost::signals2::signal<bool(const LLSD&), LLStopWhenHandled, float> LL
/// Methods that forward listeners (e.g. constructed with
/// <tt>boost::bind()</tt>) should accept (const LLEventListener&)
typedef LLStandardSignal::slot_type LLEventListener;
+/// Accept a void listener too
+typedef std::function<void(const LLSD&)> LLVoidListener;
/// Result of registering a listener, supports <tt>connected()</tt>,
/// <tt>disconnect()</tt> and <tt>blocked()</tt>
typedef boost::signals2::connection LLBoundListener;
@@ -158,6 +160,23 @@ typedef boost::signals2::connection LLBoundListener;
/// referenced listener when the LLTempBoundListener instance is destroyed.
typedef boost::signals2::scoped_connection LLTempBoundListener;
+/// Accepting (const LLListener&) allows either LLEventListener or LLVoidListener
+/// TODO: but compiler considers the constructor call ambiguous??
+class LLListener
+{
+public:
+ LLListener(const LLEventListener& listener):
+ mListener(listener)
+ {}
+ LLListener(const LLVoidListener& listener):
+ mListener([listener](const LLSD& data){ listener(data); return false; })
+ {}
+ operator LLEventListener() const { return mListener; }
+
+private:
+ LLEventListener mListener;
+};
+
/**
* A common idiom for event-based code is to accept either a callable --
* directly called on completion -- or the string name of an LLEventPump on
@@ -688,6 +707,30 @@ private:
};
/*****************************************************************************
+* LLNamedListener
+*****************************************************************************/
+/**
+ * LLNamedListener bundles a concrete LLEventPump subclass with a specific
+ * listener function, with an LLTempBoundListener to ensure that it's
+ * disconnected before destruction.
+ */
+template <class PUMP=LLEventStream>
+class LL_COMMON_API LLNamedListener: PUMP
+{
+ using pump_t = PUMP;
+public:
+ template <typename LISTENER>
+ LLNamedListener(const std::string& name, LISTENER&& listener):
+ pump_t(name, false), // don't tweak the name
+ mConn(pump_t::listen("func", std::forward<LISTENER>(listener)))
+ {}
+
+private:
+ LLTempBoundListener mConn;
+};
+using LLStreamListener = LLNamedListener<>;
+
+/*****************************************************************************
* LLReqID
*****************************************************************************/
/**
@@ -779,7 +822,7 @@ private:
* Before sending the reply event, sendReply() copies the ["reqid"] item from
* the request to the reply.
*/
-LL_COMMON_API bool sendReply(const LLSD& reply, const LLSD& request,
+LL_COMMON_API bool sendReply(LLSD reply, const LLSD& request,
const std::string& replyKey="reply");
#endif /* ! defined(LL_LLEVENTS_H) */
diff --git a/indra/llcommon/lua_function.cpp b/indra/llcommon/lua_function.cpp
index b5de5099ba..441e17dafd 100644
--- a/indra/llcommon/lua_function.cpp
+++ b/indra/llcommon/lua_function.cpp
@@ -17,6 +17,7 @@
// std headers
#include <algorithm>
#include <exception>
+#include <filesystem>
#include <iomanip> // std::quoted
#include <map>
#include <memory> // std::unique_ptr
@@ -97,8 +98,6 @@ void lua_pushstdstring(lua_State* L, const std::string& str)
// reached by that block raises a Lua error.
LLSD lua_tollsd(lua_State* L, int index)
{
- LL_DEBUGS("Lua") << "lua_tollsd(" << index << ") of " << lua_gettop(L) << " stack entries: "
- << lua_what(L, index) << LL_ENDL;
switch (lua_type(L, index))
{
case LUA_TNONE:
@@ -200,15 +199,12 @@ LLSD lua_tollsd(lua_State* L, int index)
// we do, below: lua_tollsd(L, -1). If 'index' is -1, then when we
// push nil, what we find at index -1 is nil, not the table!
index = lua_absindex(L, index);
- LL_DEBUGS("Lua") << "checking for empty table" << LL_ENDL;
lua_pushnil(L); // first key
- LL_DEBUGS("Lua") << lua_stack(L) << LL_ENDL;
if (! lua_next(L, index))
{
// it's a table, but the table is empty -- no idea if it should be
// modeled as empty array or empty map -- return isUndefined(),
// which can be consumed as either
- LL_DEBUGS("Lua") << "empty table" << LL_ENDL;
return {};
}
// key is at stack index -2, value at index -1
@@ -217,8 +213,6 @@ LLSD lua_tollsd(lua_State* L, int index)
LuaPopper popper(L, 2);
// Remember the type of the first key
auto firstkeytype{ lua_type(L, -2) };
- LL_DEBUGS("Lua") << "table not empty, first key type " << lua_typename(L, firstkeytype)
- << LL_ENDL;
switch (firstkeytype)
{
case LUA_TNUMBER:
@@ -296,7 +290,6 @@ LLSD lua_tollsd(lua_State* L, int index)
// crazy key territory.
return lluau::error(L, "Gaps in Lua table too large for conversion to LLSD array");
}
- LL_DEBUGS("Lua") << "collected " << keys.size() << " keys, max " << highkey << LL_ENDL;
// right away expand the result array to the size we'll need
LLSD result{ LLSD::emptyArray() };
result[highkey - 1] = LLSD();
@@ -307,7 +300,6 @@ LLSD lua_tollsd(lua_State* L, int index)
// key at stack index -2, value at index -1
// We've already validated lua_tointegerx() for each key.
auto key{ lua_tointeger(L, -2) };
- LL_DEBUGS("Lua") << "key " << key << ':' << LL_ENDL;
// Don't forget to subtract 1 from Lua key for LLSD subscript!
result[LLSD::Integer(key) - 1] = lua_tollsd(L, -1);
// remove value, keep key for next iteration
@@ -330,7 +322,6 @@ LLSD lua_tollsd(lua_State* L, int index)
}
auto key{ lua_tostdstring(L, -2) };
- LL_DEBUGS("Lua") << "map key " << std::quoted(key) << ':' << LL_ENDL;
result[key] = lua_tollsd(L, -1);
// remove value, keep key for next iteration
lua_pop(L, 1);
@@ -493,53 +484,100 @@ std::pair<int, LLSD> LuaState::expr(const std::string& desc, const std::string&
// here we believe there was no error -- did the Lua fragment leave
// anything on the stack?
std::pair<int, LLSD> result{ lua_gettop(mState), {} };
- if (! result.first)
- return result;
-
- // aha, at least one entry on the stack!
- if (result.first == 1)
+ if (result.first)
{
- // Don't forget that lua_tollsd() can throw Lua errors.
- try
+ // aha, at least one entry on the stack!
+ if (result.first == 1)
{
- result.second = lua_tollsd(mState, 1);
+ // Don't forget that lua_tollsd() can throw Lua errors.
+ try
+ {
+ result.second = lua_tollsd(mState, 1);
+ }
+ catch (const std::exception& error)
+ {
+ // lua_tollsd() is designed to be called from a lua_function(),
+ // that is, from a C++ function called by Lua. In case of error,
+ // it throws a Lua error to be caught by the Lua runtime. expr()
+ // is a peculiar use case in which our C++ code is calling
+ // lua_tollsd() after return from the Lua runtime. We must catch
+ // the exception thrown for a Lua error, else it will propagate
+ // out to the main coroutine and terminate the viewer -- but since
+ // we instead of the Lua runtime catch it, our lua_State retains
+ // its internal error status. Any subsequent lua_pcall() calls
+ // with this lua_State will report error regardless of whether the
+ // chunk runs successfully. Get a new lua_State().
+ initLuaState();
+ return { -1, stringize(LLError::Log::classname(error), ": ", error.what()) };
+ }
}
- catch (const std::exception& error)
+ else
{
- // lua_tollsd() is designed to be called from a lua_function(),
- // that is, from a C++ function called by Lua. In case of error,
- // it throws a Lua error to be caught by the Lua runtime. expr()
- // is a peculiar use case in which our C++ code is calling
- // lua_tollsd() after return from the Lua runtime. We must catch
- // the exception thrown for a Lua error, else it will propagate
- // out to the main coroutine and terminate the viewer -- but since
- // we instead of the Lua runtime catch it, our lua_State retains
- // its internal error status. Any subsequent lua_pcall() calls
- // with this lua_State will report error regardless of whether the
- // chunk runs successfully. Get a new lua_State().
- initLuaState();
- return { -1, stringize(LLError::Log::classname(error), ": ", error.what()) };
+ // multiple entries on the stack
+ try
+ {
+ for (int index = 1; index <= result.first; ++index)
+ {
+ result.second.append(lua_tollsd(mState, index));
+ }
+ }
+ catch (const std::exception& error)
+ {
+ // see above comments regarding lua_State's error status
+ initLuaState();
+ return { -1, stringize(LLError::Log::classname(error), ": ", error.what()) };
+ }
}
- // pop the result we claimed
- lua_settop(mState, 0);
- return result;
}
+ // pop everything
+ lua_settop(mState, 0);
- // multiple entries on the stack
- try
- {
- for (int index = 1; index <= result.first; ++index)
+ // If we ran a script that loaded the fiber module, finish up with a call
+ // to fiber.run(). That allows a script to kick off some number of fibers,
+ // do some work on the main thread and then fall off the end of the script
+ // without explicitly appending a call to fiber.run(). run() ensures the
+ // rest of the fibers run to completion (or error).
+ luaL_checkstack(mState, 4, nullptr);
+ // Push _MODULES table on stack
+ luaL_findtable(mState, LUA_REGISTRYINDEX, "_MODULES", 1);
+ int index = lua_gettop(mState);
+ bool found = false;
+ // Did this chunk already require('fiber')? To find out, we must search
+ // the _MODULES table, because our require() implementation uses the
+ // pathname of the module file as the key. Push nil key to start.
+ lua_pushnil(mState);
+ while (lua_next(mState, index) != 0)
+ {
+ // key is at index -2, value at index -1
+ // "While traversing a table, do not call lua_tolstring directly on a
+ // key, unless you know that the key is actually a string. Recall that
+ // lua_tolstring changes the value at the given index; this confuses
+ // the next call to lua_next."
+ // https://www.lua.org/manual/5.1/manual.html#lua_next
+ if (lua_type(mState, -2) == LUA_TSTRING &&
+ std::filesystem::path(lua_tostdstring(mState, -2)).stem() == "fiber")
{
- result.second.append(lua_tollsd(mState, index));
+ found = true;
+ break;
}
+ // pop value so key is at top for lua_next()
+ lua_pop(mState, 1);
}
- catch (const std::exception& error)
+ if (found)
{
- // see above comments regarding lua_State's error status
- initLuaState();
- return { -1, stringize(LLError::Log::classname(error), ": ", error.what()) };
+ // okay, index -1 is a table loaded from a file 'fiber.xxx' --
+ // does it have a function named 'run'?
+ auto run_type{ lua_getfield(mState, -1, "run") };
+ if (run_type == LUA_TFUNCTION)
+ {
+ // there's a fiber.run() function sitting on the top of the stack
+ // -- call it with no arguments, discarding anything it returns
+ LL_DEBUGS("Lua") << "Calling fiber.run()" << LL_ENDL;
+ if (! checkLua(desc, lua_pcall(mState, 0, 0, 0)))
+ return { -1, mError };
+ }
}
- // pop everything
+ // pop everything again
lua_settop(mState, 0);
return result;
}
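In practice that means a chunk like the following sketch needs no trailing fiber.run() of its own; all names used here come from the new fiber module:

    local fiber = require 'fiber'

    fiber.launch('greeter', function()
        print('hello from ' .. fiber.get_name())
    end)

    fiber.launch('counter', function()
        for i = 1, 3 do
            print(i)
            fiber.yield()   -- let other fibers interleave
        end
    end)
    -- Falling off the end here is fine: because this chunk require()d
    -- 'fiber', expr() finds the module in _MODULES and calls fiber.run()
    -- itself, so both launched fibers still run to completion.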
diff --git a/indra/newview/scripts/lua/ErrorQueue.lua b/indra/newview/scripts/lua/ErrorQueue.lua
index a6d4470044..076742815a 100644
--- a/indra/newview/scripts/lua/ErrorQueue.lua
+++ b/indra/newview/scripts/lua/ErrorQueue.lua
@@ -3,18 +3,22 @@
-- raise that error.
local WaitQueue = require('WaitQueue')
+-- local debug = require('printf')
+local function debug(...) end
local ErrorQueue = WaitQueue:new()
function ErrorQueue:Error(message)
-- Setting Error() is a marker, like closing the queue. Once we reach the
-- error, every subsequent Dequeue() call will raise the same error.
+ debug('Setting self._closed to %q', message)
self._closed = message
self:_wake_waiters()
end
function ErrorQueue:Dequeue()
local value = WaitQueue.Dequeue(self)
+ debug('ErrorQueue:Dequeue: base Dequeue() got %s', value)
if value ~= nil then
-- queue not yet closed, show caller
return value
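A rough sketch of the behavior ErrorQueue adds over WaitQueue, assuming the usual new()/Dequeue() plumbing inherited from WaitQueue and Queue:

    local fiber = require 'fiber'
    local ErrorQueue = require 'ErrorQueue'

    local queue = ErrorQueue:new()

    fiber.launch('consumer', function()
        -- Error() below closes the queue with a message, so Dequeue()
        -- raises that message instead of returning a value
        local ok, err = pcall(queue.Dequeue, queue)
        print(ok, err)   -- ok is false, err carries 'boom'
    end)

    queue:Error('boom')
    fiber.run()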
diff --git a/indra/newview/scripts/lua/WaitQueue.lua b/indra/newview/scripts/lua/WaitQueue.lua
index 00766ccae7..a34dbef4d7 100644
--- a/indra/newview/scripts/lua/WaitQueue.lua
+++ b/indra/newview/scripts/lua/WaitQueue.lua
@@ -2,8 +2,12 @@
-- the Dequeue() operation blocks the calling coroutine until some other
-- coroutine Enqueue()s a new value.
+local fiber = require('fiber')
local Queue = require('Queue')
+-- local debug = print_debug
+local function debug(...) end
+
local WaitQueue = Queue:new()
function WaitQueue:new()
@@ -32,26 +36,20 @@ function WaitQueue:_wake_waiters()
-- cases. With multiple consumers, if more than one is trying to
-- Dequeue() from an empty WaitQueue, we'll have multiple waiters.
-- Unlike OS threads, with cooperative concurrency it doesn't make sense
- -- to "notify all": we need resume only one of the waiting Dequeue()
- -- callers. But since resuming that caller might entail either Enqueue()
- -- or Dequeue() calls, recheck every time around to see if we must resume
- -- another waiting coroutine.
- while not self:IsEmpty() and #self._waiters > 0 do
+ -- to "notify all": we need wake only one of the waiting Dequeue()
+ -- callers.
+ if ((not self:IsEmpty()) or self._closed) and next(self._waiters) then
-- Pop the oldest waiting coroutine instead of the most recent, for
-- more-or-less round robin fairness. But skip any coroutines that
-- have gone dead in the meantime.
local waiter = table.remove(self._waiters, 1)
- while waiter and coroutine.status(waiter) ~= "suspended" do
+ while waiter and fiber.status(waiter) == "dead" do
waiter = table.remove(self._waiters, 1)
end
-- do we still have at least one waiting coroutine?
if waiter then
-- don't pass the head item: let the resumed coroutine retrieve it
- local ok, message = coroutine.resume(waiter)
- -- if resuming that waiter encountered an error, don't swallow it
- if not ok then
- error(message)
- end
+ fiber.wake(waiter)
end
end
end
@@ -62,18 +60,17 @@ function WaitQueue:Dequeue()
-- the queue while there are still items left, and we want the
-- consumer(s) to retrieve those last few items.
if self._closed then
+ debug('WaitQueue:Dequeue(): closed')
return nil
end
- local coro = coroutine.running()
- if coro == nil then
- error("WaitQueue:Dequeue() trying to suspend main coroutine")
- end
+ debug('WaitQueue:Dequeue(): waiting')
-- add the running coroutine to the list of waiters
- table.insert(self._waiters, coro)
+ table.insert(self._waiters, fiber.running())
-- then let somebody else run
- coroutine.yield()
+ fiber.wait()
end
-- here we're sure this queue isn't empty
+ debug('WaitQueue:Dequeue() calling Queue.Dequeue()')
return Queue.Dequeue(self)
end
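A small producer/consumer sketch of the new fiber-based blocking, assuming the Enqueue() side of WaitQueue.lua that this hunk doesn't show:

    local fiber = require 'fiber'
    local WaitQueue = require 'WaitQueue'

    local queue = WaitQueue:new()

    fiber.launch('consumer', function()
        for i = 1, 3 do
            -- blocks this fiber via fiber.wait() until a value arrives;
            -- works from 'main' too, unlike the old coroutine.yield()
            print('got', queue:Dequeue())
        end
    end)

    fiber.launch('producer', function()
        for i = 1, 3 do
            queue:Enqueue(i)
        end
    end)

    fiber.run()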
diff --git a/indra/newview/scripts/lua/fiber.lua b/indra/newview/scripts/lua/fiber.lua
new file mode 100644
index 0000000000..7dc67f510c
--- /dev/null
+++ b/indra/newview/scripts/lua/fiber.lua
@@ -0,0 +1,331 @@
+-- Organize Lua coroutines into fibers.
+
+-- In this usage, the difference between coroutines and fibers is that fibers
+-- have a scheduler. Yielding a fiber means allowing other fibers, plural, to
+-- run: it's more than just returning control to the specific Lua thread that
+-- resumed the running coroutine.
+
+-- fiber.launch() creates a new fiber ready to run.
+-- fiber.status() reports (augmented) status of the passed fiber: instead of
+-- 'suspended', it returns either 'ready' or 'waiting'
+-- fiber.yield() allows other fibers to run, but leaves the calling fiber
+-- ready to run.
+-- fiber.wait() marks the running fiber not ready, and resumes other fibers.
+-- fiber.wake() marks the designated suspended fiber ready to run, but does
+-- not yet resume it.
+-- fiber.run() runs all current fibers until all have terminated (successfully
+-- or with an error).
+
+local printf = require 'printf'
+-- local debug = printf
+local function debug(...) end
+local coro = require 'coro'
+
+local fiber = {}
+
+-- The tables in which we track fibers must have weak keys so dead fibers
+-- can be garbage-collected.
+local weak_values = {__mode='v'}
+local weak_keys = {__mode='k'}
+
+-- Track each current fiber as being either ready to run or not ready
+-- (waiting). wait() moves the running fiber from ready to waiting; wake()
+-- moves the designated fiber from waiting back to ready.
+-- The ready table is used as a list so yield() can go round robin.
+local ready = setmetatable({'main'}, weak_keys)
+-- The waiting table is used as a set because order doesn't matter.
+local waiting = setmetatable({}, weak_keys)
+
+-- Every fiber has a name, for diagnostic purposes. Names must be unique.
+-- A colliding name will be suffixed with an integer.
+-- Predefine 'main' with our marker so nobody else claims that name.
+local names = setmetatable({main='main'}, weak_keys)
+local byname = setmetatable({main='main'}, weak_values)
+-- each colliding name has its own distinct suffix counter
+local suffix = {}
+
+-- Specify a nullary idle() callback to be called whenever there are no ready
+-- fibers but there are waiting fibers. The idle() callback is responsible for
+-- changing zero or more waiting fibers to ready fibers by calling
+-- fiber.wake(), although a given call may leave them all still waiting.
+-- When there are no ready fibers, it's a good idea for the idle() function to
+-- return control to a higher-level execution agent. Simply returning without
+-- changing any fiber's status will spin the CPU.
+-- The idle() callback can return non-nil to exit fiber.run() with that value.
+function fiber._idle()
+ error('fiber.yield(): you must first call set_idle(nullary idle() function)')
+end
+
+function fiber.set_idle(func)
+ fiber._idle = func
+end
+
+-- Launch a new Lua fiber, ready to run.
+function fiber.launch(name, func, ...)
+ local args = table.pack(...)
+ local co = coroutine.create(function() func(table.unpack(args)) end)
+ -- a new fiber is ready to run
+ table.insert(ready, co)
+ local namekey = name
+ while byname[namekey] do
+ if not suffix[name] then
+ suffix[name] = 1
+ end
+ suffix[name] += 1
+ namekey = name .. tostring(suffix[name])
+ end
+ -- found a namekey not yet in byname: set it
+ byname[namekey] = co
+ -- and remember it as this fiber's name
+ names[co] = namekey
+-- debug('launch(%s)', namekey)
+-- debug('byname[%s] = %s', namekey, tostring(byname[namekey]))
+-- debug('names[%s] = %s', tostring(co), names[co])
+-- debug('ready[-1] = %s', tostring(ready[#ready]))
+end
+
+-- for debugging
+function fiber.print_all()
+ print('Ready fibers:' .. if next(ready) then '' else ' none')
+ for _, co in pairs(ready) do
+ printf(' %s: %s', fiber.get_name(co), fiber.status(co))
+ end
+ print('Waiting fibers:' .. if next(waiting) then '' else ' none')
+ for co in pairs(waiting) do
+ printf(' %s: %s', fiber.get_name(co), fiber.status(co))
+ end
+end
+
+-- return either the running coroutine or, if called from the main thread,
+-- 'main'
+function fiber.running()
+ return coroutine.running() or 'main'
+end
+
+-- Query a fiber's name (nil for the running fiber)
+function fiber.get_name(co)
+ return names[co or fiber.running()] or 'unknown'
+end
+
+-- Query status of the passed fiber
+function fiber.status(co)
+ local running = coroutine.running()
+ if (not co) or co == running then
+ -- silly to ask the status of the running fiber: it's 'running'
+ return 'running'
+ end
+ if co ~= 'main' then
+ -- for any coroutine but main, consult coroutine.status()
+ local status = coroutine.status(co)
+ if status ~= 'suspended' then
+ return status
+ end
+ -- here co is suspended, answer needs further refinement
+ else
+ -- co == 'main'
+ if not running then
+ -- asking about 'main' from the main fiber
+ return 'running'
+ end
+ -- asking about 'main' from some other fiber, so presumably main is suspended
+ end
+ -- here we know co is suspended -- but is it ready to run?
+ if waiting[co] then
+ return 'waiting'
+ end
+ -- not waiting should imply ready: sanity check
+ if table.find(ready, co) then
+ return 'ready'
+ end
+ -- Calls within yield() between popping the next ready fiber and
+ -- re-appending it to the list are in this state. Once we're done
+ -- debugging yield(), we could reinstate either of the below.
+-- error(string.format('fiber.status(%s) is stumped', fiber.get_name(co)))
+-- print(string.format('*** fiber.status(%s) is stumped', fiber.get_name(co)))
+ return '(unknown)'
+end
+
+-- change the running fiber's status to waiting
+local function set_waiting()
+ -- if called from the main fiber, inject a 'main' marker into the list
+ co = fiber.running()
+ -- delete from ready list
+ local i = table.find(ready, co)
+ if i then
+ table.remove(ready, i)
+ end
+ -- add to waiting list
+ waiting[co] = true
+end
+
+-- Suspend the current fiber until some other fiber calls fiber.wake() on it
+function fiber.wait()
+ set_waiting()
+ -- now yield to other fibers
+ fiber.yield()
+end
+
+-- Mark a suspended fiber as being ready to run
+function fiber.wake(co)
+ if not waiting[co] then
+ error(string.format('fiber.wake(%s) but status=%s, ready=%s, waiting=%s',
+ names[co], fiber.status(co), ready[co], waiting[co]))
+ end
+ -- delete from waiting list
+ waiting[co] = nil
+ -- add to end of ready list
+ table.insert(ready, co)
+ -- but don't yet resume it: that happens next time we reach yield()
+end
+
+-- pop and return the next not-dead fiber in the ready list, or nil if none remain
+local function live_ready_iter()
+ -- don't write
+ -- for co in table.remove, ready, 1
+ -- because it would keep passing a new second parameter!
+ for co in function() return table.remove(ready, 1) end do
+ debug('%s live_ready_iter() sees %s, status %s',
+ fiber.get_name(), fiber.get_name(co), fiber.status(co))
+ -- keep removing the head entry until we find one that's not dead,
+ -- discarding any dead coroutines along the way
+ if co == 'main' or coroutine.status(co) ~= 'dead' then
+ debug('%s live_ready_iter() returning %s',
+ fiber.get_name(), fiber.get_name(co))
+ return co
+ end
+ end
+ debug('%s live_ready_iter() returning nil', fiber.get_name())
+ return nil
+end
+
+-- prune the set of waiting fibers
+local function prune_waiting()
+ for waiter in pairs(waiting) do
+ if waiter ~= 'main' and coroutine.status(waiter) == 'dead' then
+ waiting[waiter] = nil
+ end
+ end
+end
+
+-- Run other ready fibers, leaving this one ready, returning after a cycle.
+-- Returns:
+-- * true, nil if there remain other live fibers, whether ready or waiting,
+-- but it's our turn to run
+-- * false, nil if this is the only remaining fiber
+-- * nil, x if configured idle() callback returns non-nil x
+local function scheduler()
+ -- scheduler() is asymmetric because Lua distinguishes the main thread
+ -- from other coroutines. The main thread can't yield; it can only resume
+ -- other coroutines. So although an arbitrary coroutine could resume still
+ -- other arbitrary coroutines, it could NOT resume the main thread because
+ -- the main thread can't yield. Therefore, scheduler() delegates its real
+ -- processing to the main thread. If called from a coroutine, pass control
+ -- back to the main thread.
+ if coroutine.running() then
+ -- seize the opportunity to make sure the viewer isn't shutting down
+-- check_stop()
+ -- this is a real coroutine, yield normally to main thread
+ coroutine.yield()
+ -- main certainly still exists
+ return true
+ end
+
+ -- This is the main fiber: coroutine.yield() doesn't work.
+ -- Instead, resume each of the ready fibers.
+ -- Prune the set of waiting fibers after every time fiber business logic
+ -- runs (i.e. other fibers might have terminated or hit error), such as
+ -- here on entry.
+ prune_waiting()
+ local others, idle_stop
+ repeat
+ for co in live_ready_iter do
+ -- seize the opportunity to make sure the viewer isn't shutting down
+-- check_stop()
+ -- before we re-append co, is it the only remaining entry?
+ others = next(ready)
+ -- co is live, re-append it to the ready list
+ table.insert(ready, co)
+ if co == 'main' then
+ -- Since we know the caller is the main fiber, it's our turn.
+ -- Tell caller if there are other ready or waiting fibers.
+ return others or next(waiting)
+ end
+ -- not main, but some other ready coroutine:
+ -- use coro.resume() so we'll propagate any error encountered
+ coro.resume(co)
+ prune_waiting()
+ end
+ -- Here there are no ready fibers. Are there any waiting fibers?
+ if not next(waiting) then
+ return false
+ end
+ -- there are waiting fibers: call consumer's configured idle() function
+ idle_stop = fiber._idle()
+ if idle_stop ~= nil then
+ return nil, idle_stop
+ end
+ prune_waiting()
+ -- loop "forever", that is, until:
+ -- * main is ready, or
+ -- * there are neither ready fibers nor waiting fibers, or
+ -- * fiber._idle() returned non-nil
+ until false
+end
+
+-- Let other fibers run. This is useful in either of two cases:
+-- * fiber.wait() calls this to run other fibers while this one is waiting.
+-- fiber.yield() (and therefore fiber.wait()) works from the main thread as
+-- well as from explicitly-launched fibers, without the caller having to
+-- care.
+-- * A long-running fiber that doesn't often call fiber.wait() should sprinkle
+-- in fiber.yield() calls to interleave processing on other fibers.
+function fiber.yield()
+ -- The difference between this and fiber.run() is that fiber.yield()
+ -- assumes its caller has work to do. yield() returns to its caller as
+ -- soon as scheduler() pops this fiber from the ready list. fiber.run()
+ -- continues looping until all other fibers have terminated, or the
+ -- set_idle() callback tells it to stop.
+ local others, idle_done = scheduler()
+ -- scheduler() returns either if we're ready, or if idle_done ~= nil.
+ if idle_done ~= nil then
+ -- Returning normally from yield() means the caller can carry on with
+ -- its pending work. But in this case scheduler() returned because the
+ -- configured set_idle() function interrupted it -- not because we're
+ -- actually ready. Don't return normally.
+ error('fiber.set_idle() interrupted yield() with: ' .. tostring(idle_done))
+ end
+ -- We're ready! Just return to caller. In this situation we don't care
+ -- whether there are other ready fibers.
+end
+
+-- Run fibers until all but main have terminated: return nil.
+-- Or until configured idle() callback returns x ~= nil: return x.
+function fiber.run()
+ -- A fiber calling run() is not also doing other useful work. Remove the
+ -- calling fiber from the ready list. Otherwise yield() would keep seeing
+ -- that our caller is ready and return to us, instead of realizing that
+ -- all coroutines are waiting and call idle(). But don't say we're
+ -- waiting, either, because then when all other fibers have terminated
+ -- we'd call idle() forever waiting for something to make us ready again.
+ local i = table.find(ready, fiber.running())
+ if i then
+ table.remove(ready, i)
+ end
+ local others, idle_done
+ repeat
+ debug('%s calling fiber.run() calling scheduler()', fiber.get_name())
+ others, idle_done = scheduler()
+ debug("%s fiber.run()'s scheduler() returned %s, %s", fiber.get_name(),
+ tostring(others), tostring(idle_done))
+ until (not others)
+ debug('%s fiber.run() done', fiber.get_name())
+ -- For whatever it's worth, put our own fiber back in the ready list.
+ table.insert(ready, fiber.running())
+ -- Once there are no more waiting fibers, and the only ready fiber is
+ -- us, return to caller. All previously-launched fibers are done. Possibly
+ -- the chunk is done, or the chunk may decide to launch a new batch of
+ -- fibers.
+ return idle_done
+end
+
+return fiber
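A minimal usage sketch of the API above, using only functions defined in this file:

    local fiber = require 'fiber'

    local waiter_co

    fiber.launch('waiter', function()
        waiter_co = fiber.running()
        print(fiber.get_name(), 'waiting')
        fiber.wait()                 -- not ready again until wake()d
        print(fiber.get_name(), 'woken')
    end)

    fiber.launch('waker', function()
        fiber.yield()                -- give 'waiter' a turn to reach wait()
        fiber.wake(waiter_co)        -- mark it ready; it resumes on a later yield
    end)

    -- returns once both launched fibers have terminated
    fiber.run()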
diff --git a/indra/newview/scripts/lua/leap.lua b/indra/newview/scripts/lua/leap.lua
index 81728e7230..77f3a3e116 100644
--- a/indra/newview/scripts/lua/leap.lua
+++ b/indra/newview/scripts/lua/leap.lua
@@ -38,7 +38,10 @@
-- leap.process(). process() won't notice until the next event from the
-- viewer, though.
+local fiber = require('fiber')
local ErrorQueue = require('ErrorQueue')
+-- local debug = require('printf')
+local function debug(...) end
local leap = {}
@@ -68,11 +71,13 @@ leap._reply, leap._command = get_event_pumps()
-- later one. That means that no incoming event will ever be given to
-- the old WaitForReqid object. Any coroutine waiting on the discarded
-- WaitForReqid object would therefore wait forever.
-leap._pending = {}
+-- these are weak values tables
+local weak_values = {__mode='v'}
+leap._pending = setmetatable({}, weak_values)
-- Our consumer will instantiate some number of WaitFor subclass objects.
-- As these are traversed in descending priority order, we must keep
-- them in a list.
-leap._waitfors = {}
+leap._waitfors = setmetatable({}, weak_values)
-- It has been suggested that we should use UUIDs as ["reqid"] values,
-- since UUIDs are guaranteed unique. However, as the "namespace" for
-- ["reqid"] values is our very own _reply pump, we can get away with
@@ -91,15 +96,13 @@ function leap.cmdpump()
return leap._command
end
--- local inspect = require('inspect')
-
-- Fire and forget. Send the specified request LLSD, expecting no reply.
-- In fact, should the request produce an eventual reply, it will be
-- treated as an unsolicited event.
--
-- See also request(), generate().
function leap.send(pump, data, reqid)
--- print_debug('leap.send('..pump..', '..inspect(data)..', '..reqid..') entry')
+ debug('leap.send(%s, %s, %s) entry', pump, data, reqid)
local data = data
if type(data) == 'table' then
data = table.clone(data)
@@ -108,10 +111,26 @@ function leap.send(pump, data, reqid)
data['reqid'] = reqid
end
end
--- print_debug('leap.send('..pump..', '..inspect(data)..') calling post_on()')
+ debug('leap.send(%s, %s) calling post_on()', pump, data)
post_on(pump, data)
end
+-- common setup code shared by request() and generate()
+local function requestSetup(pump, data)
+ -- invent a new, unique reqid
+ leap._reqid += 1
+ local reqid = leap._reqid
+ -- Instantiate a new WaitForReqid object. The priority is irrelevant
+ -- because, unlike the WaitFor base class, WaitForReqid does not
+ -- self-register on our leap._waitfors list. Instead, capture the new
+ -- WaitForReqid object in leap._pending so dispatch() can find it.
+ leap._pending[reqid] = leap.WaitForReqid:new(reqid)
+ -- Pass reqid to send() to stamp it into (a copy of) the request data.
+ debug('requestSetup(%s, %s)', pump, data)
+ leap.send(pump, data, reqid)
+ return reqid
+end
+
-- Send the specified request LLSD, expecting exactly one reply. Block
-- the calling coroutine until we receive that reply.
--
@@ -131,39 +150,20 @@ end
--
-- See also send(), generate().
function leap.request(pump, data)
- local reqid = leap._requestSetup(pump, data)
+ local reqid = requestSetup(pump, data)
local waitfor = leap._pending[reqid]
--- print_debug('leap.request('..tostring(pump)..', '..inspect(data)..') about to wait on '..
--- tostring(waitfor))
+ debug('leap.request(%s, %s) about to wait on %s', pump, data, tostring(waitfor))
local ok, response = pcall(waitfor.wait, waitfor)
--- print_debug('leap.request('..tostring(pump)..', '..inspect(data)..') got '..
--- tostring(ok)..': '..inspect(response))
+ debug('leap.request(%s, %s) got %s: %s', pump, data, ok, response)
-- kill off temporary WaitForReqid object, even if error
leap._pending[reqid] = nil
if ok then
- response.reqid = nil
return response
else
error(response)
end
end
--- common setup code shared by request() and generate()
-function leap._requestSetup(pump, data)
- -- invent a new, unique reqid
- leap._reqid += 1
- local reqid = leap._reqid
- -- Instantiate a new WaitForReqid object. The priority is irrelevant
- -- because, unlike the WaitFor base class, WaitForReqid does not
- -- self-register on our leap._waitfors list. Instead, capture the new
- -- WaitForReqid object in leap._pending so _dispatch() can find it.
- leap._pending[reqid] = leap.WaitForReqid:new(reqid)
- -- Pass reqid to send() to stamp it into (a copy of) the request data.
--- print_debug('leap._requestSetup('..tostring(pump)..', '..inspect(data)..')')
- leap.send(pump, data, reqid)
- return reqid
-end
-
-- Send the specified request LLSD, expecting an arbitrary number of replies.
-- Each one is yielded on receipt. If you omit checklast, this is an infinite
-- generator; it's up to the caller to recognize when the last reply has been
@@ -178,7 +178,7 @@ function leap.generate(pump, data, checklast)
-- Invent a new, unique reqid. Arrange to handle incoming events
-- bearing that reqid. Stamp the outbound request with that reqid, and
-- send it.
- local reqid = leap._requestSetup(pump, data)
+ local reqid = requestSetup(pump, data)
local waitfor = leap._pending[reqid]
local ok, response
repeat
@@ -186,7 +186,6 @@ function leap.generate(pump, data, checklast)
if not ok then
break
end
- response.reqid = nil
coroutine.yield(response)
until checklast and checklast(response)
-- If we break the above loop, whether or not due to error, clean up.
@@ -196,78 +195,79 @@ function leap.generate(pump, data, checklast)
end
end
--- Kick off response processing. The calling script must create and resume one
--- or more coroutines to perform viewer requests using send(), request() or
--- generate() before calling this function to handle responses.
---
--- While waiting for responses from the viewer, the C++ coroutine running the
--- calling Lua script is blocked: no other Lua coroutine is running.
-function leap.process()
- leap._done = false
- local ok, pump, data
- while not leap._done do
--- print_debug('leap.process() calling get_event_next()')
- ok, pump, data = pcall(get_event_next)
--- print_debug('leap.process() got '..tostring(ok)..': '..pump..', '..inspect(data))
- -- ok false means get_event_next() raised a Lua error
- -- data nil means get_event_next() returned (pump, LLSD()) to indicate done
- if not (ok and data) then
- break
- end
- leap._dispatch(pump, data)
- end
--- print_debug('leap.process() done')
+local function cleanup(message)
-- we're done: clean up all pending coroutines
- -- if ok, then we're just done.
- -- if not ok, then 'pump' is actually the error message.
- message = if ok then 'done' else pump
for i, waitfor in pairs(leap._pending) do
- waitfor:_exception(message)
+ waitfor:close()
end
for i, waitfor in pairs(leap._waitfors) do
- waitfor:_exception(message)
- end
- -- now that we're done with cleanup, propagate the error we caught above
- if not ok then
- error(pump)
+ waitfor:close()
end
end
-function leap.done()
- leap._done = true
+-- Handle an incoming (pump, data) event with no recognizable ['reqid']
+local function unsolicited(pump, data)
+ -- we maintain waitfors in descending priority order, so the first waitfor
+ -- to claim this event is the one with the highest priority
+ for i, waitfor in pairs(leap._waitfors) do
+ debug('unsolicited() checking %s', waitfor.name)
+ if waitfor:handle(pump, data) then
+ return
+ end
+ end
+ print_debug(string.format('unsolicited(%s, %s) discarding unclaimed event', pump, data))
end
-- Route incoming (pump, data) event to the appropriate waiting coroutine.
-function leap._dispatch(pump, data)
+local function dispatch(pump, data)
local reqid = data['reqid']
-- if the response has no 'reqid', it's not from request() or generate()
if reqid == nil then
- return leap._unsolicited(pump, data)
+ return unsolicited(pump, data)
end
-- have reqid; do we have a WaitForReqid?
local waitfor = leap._pending[reqid]
if waitfor == nil then
- return leap._unsolicited(pump, data)
+ return unsolicited(pump, data)
end
-- found the right WaitForReqid object, let it handle the event
- data['reqid'] = nil
- waitfor:_handle(pump, data)
+ waitfor:handle(pump, data)
end
--- Handle an incoming (pump, data) event with no recognizable ['reqid']
-function leap._unsolicited(pump, data)
- -- we maintain waitfors in descending priority order, so the first waitfor
- -- to claim this event is the one with the highest priority
- for i, waitfor in pairs(leap._waitfors) do
- if waitfor:_handle(pump, data) then
- return
- end
+-- We configure fiber.set_idle() function. fiber.yield() calls the configured
+-- idle callback whenever there are waiting fibers but no ready fibers. In
+-- our case, that means it's time to fetch another incoming viewer event.
+fiber.set_idle(function ()
+ -- If someone has called leap.done(), then tell fiber.yield() to break loop.
+ if leap._done then
+ cleanup('done')
+ return 'done'
+ end
+ debug('leap.idle() calling get_event_next()')
+ local ok, pump, data = pcall(get_event_next)
+ debug('leap.idle() got %s: %s, %s', ok, pump, data)
+ -- ok false means get_event_next() raised a Lua error, pump is message
+ if not ok then
+ cleanup(pump)
+ error(pump)
+ end
+ -- data nil means get_event_next() returned (pump, LLSD()) to indicate done
+ if not data then
+ cleanup('end')
+ return 'end'
end
--- print_debug('_unsolicited(', pump, ', ', data, ') discarding unclaimed event')
+ -- got a real pump, data pair
+ dispatch(pump, data)
+ -- return to fiber.yield(): any incoming message might result in one or
+ -- more fibers becoming ready
+end)
+
+function leap.done()
+ leap._done = true
end
-- called by WaitFor.enable()
-function leap._registerWaitFor(waitfor)
+local function registerWaitFor(waitfor)
table.insert(leap._waitfors, waitfor)
-- keep waitfors sorted in descending order of specified priority
table.sort(leap._waitfors,
@@ -275,7 +275,7 @@ function leap._registerWaitFor(waitfor)
end
-- called by WaitFor.disable()
-function leap._unregisterWaitFor(waitfor)
+local function unregisterWaitFor(waitfor)
for i, w in pairs(leap._waitfors) do
if w == waitfor then
leap._waitfors[i] = nil
@@ -322,8 +322,13 @@ end
-- --------------------------------- WaitFor ---------------------------------
leap.WaitFor = { _id=0 }
+function leap.WaitFor.tostring(self)
+ -- Lua (sub)classes have no name; can't prefix with that
+ return self.name
+end
+
function leap.WaitFor:new(priority, name)
- local obj = setmetatable({}, self)
+ local obj = setmetatable({__tostring=leap.WaitFor.tostring}, self)
self.__index = self
obj.priority = priority
@@ -343,16 +348,11 @@ function leap.WaitFor:new(priority, name)
return obj
end
-function leap.WaitFor.tostring(self)
- -- Lua (sub)classes have no name; can't prefix with that
- return self.name
-end
-
-- Re-enable a disable()d WaitFor object. New WaitFor objects are
-- enable()d by default.
function leap.WaitFor:enable()
if not self._registered then
- leap._registerWaitFor(self)
+ registerWaitFor(self)
self._registered = true
end
end
@@ -360,7 +360,7 @@ end
-- Disable an enable()d WaitFor object.
function leap.WaitFor:disable()
if self._registered then
- leap._unregisterWaitFor(self)
+ unregisterWaitFor(self)
self._registered = false
end
end
@@ -368,18 +368,12 @@ end
-- Block the calling coroutine until a suitable unsolicited event (one
-- for which filter() returns the event) arrives.
function leap.WaitFor:wait()
--- print_debug(self.name .. ' about to wait')
- item = self._queue:Dequeue()
--- print_debug(self.name .. ' got ', item)
+ debug('%s about to wait', self.name)
+ local item = self._queue:Dequeue()
+ debug('%s got %s', self.name, item)
return item
end
--- Loop over wait() calls.
-function leap.WaitFor:iterate()
- -- on each iteration, call self.wait(self)
- return self.wait, self, nil
-end
-
-- Override filter() to examine the incoming event in whatever way
-- makes sense.
--
@@ -395,9 +389,10 @@ function leap.WaitFor:filter(pump, data)
error('You must override the WaitFor.filter() method')
end
--- called by leap._unsolicited() for each WaitFor in leap._waitfors
-function leap.WaitFor:_handle(pump, data)
- item = self:filter(pump, data)
+-- called by unsolicited() for each WaitFor in leap._waitfors
+function leap.WaitFor:handle(pump, data)
+ local item = self:filter(pump, data)
+ debug('%s.filter() returned %s', self.name, item)
-- if this item doesn't pass the filter, we're not interested
if not item then
return false
@@ -407,13 +402,18 @@ function leap.WaitFor:_handle(pump, data)
return true
end
--- called by WaitFor:_handle() for an accepted event
+-- called by WaitFor:handle() for an accepted event
function leap.WaitFor:process(item)
self._queue:Enqueue(item)
end
+-- called by cleanup() at end
+function leap.WaitFor:close()
+ self._queue:close()
+end
+
-- called by leap.process() when get_event_next() raises an error
-function leap.WaitFor:_exception(message)
+function leap.WaitFor:exception(message)
print_warning(self.name .. ' error: ' .. message)
self._queue:Error(message)
end
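A condensed Lua sketch of the resulting usage pattern (essentially what test<6> below scripts): WaitFor objects drain unsolicited events by priority, while request() calls block only their own fiber. 'testpump' is a placeholder answered by an LLStreamListener in the test:

    local fiber = require 'fiber'
    local leap = require 'leap'

    -- claim unsolicited events carrying a 'special' key
    local catch_special = leap.WaitFor:new(2, 'catch_special')
    function catch_special:filter(pump, data)
        return if data.special ~= nil then data else nil
    end

    fiber.launch('watcher', function()
        local item = catch_special:wait()
        while item do
            print('special event:', item.special)
            item = catch_special:wait()
        end
    end)

    fiber.launch('requester', function()
        -- blocks only this fiber; leap's set_idle() callback pulls the
        -- next viewer event and dispatch()es the reply carrying our reqid
        local response = leap.request('testpump', {name='a'})
        print('got', response.name)
    end)
    -- the implicit fiber.run() at end of script drives both fibers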
diff --git a/indra/newview/scripts/lua/printf.lua b/indra/newview/scripts/lua/printf.lua
new file mode 100644
index 0000000000..584cd4f391
--- /dev/null
+++ b/indra/newview/scripts/lua/printf.lua
@@ -0,0 +1,19 @@
+-- printf(...) is short for print(string.format(...))
+
+local inspect = require 'inspect'
+
+local function printf(...)
+ -- string.format() only handles numbers and strings.
+ -- Convert anything else to string using the inspect module.
+ local args = {}
+ for _, arg in pairs(table.pack(...)) do
+ if type(arg) == 'number' or type(arg) == 'string' then
+ table.insert(args, arg)
+ else
+ table.insert(args, inspect(arg))
+ end
+ end
+ print(string.format(table.unpack(args)))
+end
+
+return printf
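For example, assuming the bundled 'inspect' module is available on the require path as it is for the other viewer scripts:

    local printf = require 'printf'

    printf('%s replied in %d ms', 'testpump', 42)
    -- non-string, non-number arguments are run through inspect():
    printf('got %s', {pump='testpump', data={1, 2, 3}})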
diff --git a/indra/newview/tests/llluamanager_test.cpp b/indra/newview/tests/llluamanager_test.cpp
index 069e10e9cf..872d7827fe 100644
--- a/indra/newview/tests/llluamanager_test.cpp
+++ b/indra/newview/tests/llluamanager_test.cpp
@@ -105,10 +105,8 @@ namespace tut
void from_lua(const std::string& desc, const std::string_view& construct, const LLSD& expect)
{
LLSD fromlua;
- LLEventStream replypump("testpump");
- LLTempBoundListener conn(
- replypump.listen("llluamanager_test",
- listener([&fromlua](const LLSD& data){ fromlua = data; })));
+ LLStreamListener pump("testpump",
+ listener([&fromlua](const LLSD& data){ fromlua = data; }));
const std::string lua(stringize(
"data = ", construct, "\n"
"post_on('testpump', data)\n"
@@ -137,11 +135,9 @@ namespace tut
{
set_test_name("test post_on(), get_event_pumps(), get_event_next()");
StringVec posts;
- LLEventStream replypump("testpump");
- LLTempBoundListener conn(
- replypump.listen("test<3>",
- listener([&posts](const LLSD& data)
- { posts.push_back(data.asString()); })));
+ LLStreamListener pump("testpump",
+ listener([&posts](const LLSD& data)
+ { posts.push_back(data.asString()); }));
const std::string lua(
"-- test post_on,get_event_pumps,get_event_next\n"
"post_on('testpump', 'entry')\n"
@@ -180,7 +176,7 @@ namespace tut
void round_trip(const std::string& desc, const LLSD& send, const LLSD& expect)
{
- LLEventMailDrop replypump("testpump");
+ LLEventMailDrop testpump("testpump");
const std::string lua(
"-- test LLSD round trip\n"
"replypump, cmdpump = get_event_pumps()\n"
@@ -194,7 +190,7 @@ namespace tut
// reached the get_event_next() call, which suspends the calling C++
// coroutine (including the Lua code running on it) until we post
// something to that reply pump.
- auto luapump{ llcoro::suspendUntilEventOn(replypump).asString() };
+ auto luapump{ llcoro::suspendUntilEventOn(testpump).asString() };
LLEventPumps::instance().post(luapump, send);
// The C++ coroutine running the Lua script is now ready to run. Run
// it so it will echo the LLSD back to us.
@@ -307,27 +303,60 @@ namespace tut
template<> template<>
void object::test<5>()
{
- set_test_name("test leap.lua");
+ set_test_name("leap.request() from main thread");
const std::string lua(
- "-- test leap.lua\n"
+ "-- leap.request() from main thread\n"
"\n"
+ "leap = require 'leap'\n"
+ "\n"
+ "return {\n"
+ " a=leap.request('echo', {data='a'}).data,\n"
+ " b=leap.request('echo', {data='b'}).data\n"
+ "}\n"
+ );
+
+ LLStreamListener pump(
+ "echo",
+ listener([](const LLSD& data)
+ {
+ LL_DEBUGS("Lua") << "echo pump got: " << data << LL_ENDL;
+ sendReply(data, data);
+ }));
+
+ LuaState L;
+ auto [count, result] = LLLUAmanager::waitScriptLine(L, lua);
+ ensure_equals("Lua script didn't return item", count, 1);
+ ensure_equals("echo failed", result, llsd::map("a", "a", "b", "b"));
+ }
+
+ template<> template<>
+ void object::test<6>()
+ {
+ set_test_name("interleave leap.request() responses");
+ const std::string lua(
+ "-- interleave leap.request() responses\n"
+ "\n"
+ "fiber = require('fiber')\n"
"leap = require('leap')\n"
- "coro = require('coro')\n"
+ "-- debug = require('printf')\n"
+ "local function debug(...) end\n"
"\n"
"-- negative priority ensures catchall is always last\n"
"catchall = leap.WaitFor:new(-1, 'catchall')\n"
"function catchall:filter(pump, data)\n"
+ " debug('catchall:filter(%s, %s)', pump, data)\n"
" return data\n"
"end\n"
"\n"
"-- but first, catch events with 'special' key\n"
"catch_special = leap.WaitFor:new(2, 'catch_special')\n"
"function catch_special:filter(pump, data)\n"
+ " debug('catch_special:filter(%s, %s)', pump, data)\n"
" return if data['special'] ~= nil then data else nil\n"
"end\n"
"\n"
"function drain(waitfor)\n"
- " print(waitfor.name .. ' start')\n"
+ " debug('%s start', waitfor.name)\n"
" -- It seems as though we ought to be able to code this loop\n"
" -- over waitfor:wait() as:\n"
" -- for item in waitfor.wait, waitfor do\n"
@@ -335,40 +364,37 @@ namespace tut
" -- the coroutine call stack, which prohibits coroutine.yield():\n"
" -- 'attempt to yield across metamethod/C-call boundary'\n"
" -- So we resort to two different calls to waitfor:wait().\n"
- " item = waitfor:wait()\n"
+ " local item = waitfor:wait()\n"
" while item do\n"
- " print(waitfor.name .. ' caught', item)\n"
+ " debug('%s caught %s', waitfor.name, item)\n"
" item = waitfor:wait()\n"
" end\n"
- " print(waitfor.name .. ' done')\n"
+ " debug('%s done', waitfor.name)\n"
"end\n"
"\n"
"function requester(name)\n"
- " print('requester('..name..') start')\n"
- " response = leap.request('testpump', {name=name})\n"
- " print('requester('..name..') got '..tostring(response))\n"
+ " debug('requester(%s) start', name)\n"
+ " local response = leap.request('testpump', {name=name})\n"
+ " debug('requester(%s) got %s', name, response)\n"
" -- verify that the correct response was dispatched to this coroutine\n"
" assert(response.name == name)\n"
"end\n"
"\n"
- "coro.launch(drain, catchall)\n"
- "coro.launch(drain, catch_special)\n"
- "coro.launch(requester, 'a')\n"
- "coro.launch(requester, 'b')\n"
- "\n"
- "leap.process()\n"
+ "-- fiber.print_all()\n"
+ "fiber.launch('catchall', drain, catchall)\n"
+ "fiber.launch('catch_special', drain, catch_special)\n"
+ "fiber.launch('requester(a)', requester, 'a')\n"
+ "fiber.launch('requester(b)', requester, 'b')\n"
);
LLSD requests;
- LLEventStream pump("testpump", false);
- LLTempBoundListener conn{
- pump.listen("test<5>()",
- listener([&requests](const LLSD& data)
- {
- LL_DEBUGS("Lua") << "testpump got: " << data << LL_ENDL;
- requests.append(data);
- }))
- };
+ LLStreamListener pump(
+ "testpump",
+ listener([&requests](const LLSD& data)
+ {
+ LL_DEBUGS("Lua") << "testpump got: " << data << LL_ENDL;
+ requests.append(data);
+ }));
LuaState L;
auto future = LLLUAmanager::startScriptLine(L, lua);