author | Mnikolenko Productengine <mnikolenko@productengine.com> | 2024-03-15 21:11:39 +0200
committer | Mnikolenko Productengine <mnikolenko@productengine.com> | 2024-03-15 21:13:35 +0200
commit | 5a6ec3e26ad9a6fb33e18a3d0198482c0685a4fa (patch)
tree | 9303750372967dced1f06d1e5d9b6f8a64bce317
parent | fdfff5305bc9d97bb27d36a7fe18bab693e42aed (diff)
parent | c5dec705cf6bb3dce9381c9ec574335356051297 (diff)
Merge branch 'release/luau-scripting' into lua-floater
-rw-r--r-- | indra/cmake/Prebuilt.cmake | 1
-rw-r--r-- | indra/llcommon/lua_function.cpp | 38
-rw-r--r-- | indra/newview/llluamanager.cpp | 9
-rw-r--r-- | indra/newview/scripts/lua/ErrorQueue.lua | 30
-rw-r--r-- | indra/newview/scripts/lua/LLFloaterAbout.lua | 11
-rw-r--r-- | indra/newview/scripts/lua/LLGesture.lua | 23
-rw-r--r-- | indra/newview/scripts/lua/Queue.lua | 41
-rw-r--r-- | indra/newview/scripts/lua/UI.lua | 16
-rw-r--r-- | indra/newview/scripts/lua/WaitQueue.lua | 87
-rw-r--r-- | indra/newview/scripts/lua/coro.lua | 67
-rw-r--r-- | indra/newview/scripts/lua/inspect.lua | 371
-rw-r--r-- | indra/newview/scripts/lua/leap.lua | 442
-rw-r--r-- | indra/newview/scripts/lua/qtest.lua | 146
-rw-r--r-- | indra/newview/scripts/lua/test_LLFloaterAbout.lua | 14
-rw-r--r-- | indra/newview/scripts/lua/test_LLGesture.lua | 32
-rw-r--r-- | indra/newview/scripts/lua/util.lua | 39
-rw-r--r-- | indra/newview/tests/llluamanager_test.cpp | 110
17 files changed, 1442 insertions, 35 deletions
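
For orientation, a minimal usage sketch of how the new modules fit together, condensed from the bundled test_LLFloaterAbout.lua and test_LLGesture.lua scripts in the diff below; it assumes the script runs inside the viewer's Lua environment, where the get_event_pumps(), post_on() and get_event_next() built-ins that leap.lua relies on are available:

-- sketch: drive an LLEventAPI from a Lua coroutine via leap + coro
local leap = require 'leap'
local coro = require 'coro'
local inspect = require 'inspect'

coro.launch(function ()
    -- leap.request() blocks this coroutine until the viewer replies;
    -- this is the call that LLFloaterAbout.getInfo() wraps
    local info = leap.request('LLFloaterAbout', {op='getInfo'})
    print(inspect(info))
    -- break out of leap.process() once we're done
    leap.done()
end)

-- dispatch incoming viewer events on the main coroutine
leap.process()
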
diff --git a/indra/cmake/Prebuilt.cmake b/indra/cmake/Prebuilt.cmake index 634cc15c21..a8c702bfef 100644 --- a/indra/cmake/Prebuilt.cmake +++ b/indra/cmake/Prebuilt.cmake @@ -40,6 +40,7 @@ macro (use_prebuilt_binary _binary) --install-dir=${AUTOBUILD_INSTALL_DIR} ${_binary} ") endif(DEBUG_PREBUILT) + message(STATUS "Installing ${_binary}...") execute_process(COMMAND "${AUTOBUILD_EXECUTABLE}" install --install-dir=${AUTOBUILD_INSTALL_DIR} diff --git a/indra/llcommon/lua_function.cpp b/indra/llcommon/lua_function.cpp index 78abb8ba7e..b5de5099ba 100644 --- a/indra/llcommon/lua_function.cpp +++ b/indra/llcommon/lua_function.cpp @@ -16,9 +16,11 @@ // STL headers // std headers #include <algorithm> +#include <exception> #include <iomanip> // std::quoted #include <map> #include <memory> // std::unique_ptr +#include <typeinfo> // external library headers // other Linden headers #include "hexdump.h" @@ -26,6 +28,7 @@ #include "llsd.h" #include "llsdutil.h" #include "lualistener.h" +#include "stringize.h" /***************************************************************************** * luau namespace @@ -496,16 +499,45 @@ std::pair<int, LLSD> LuaState::expr(const std::string& desc, const std::string& // aha, at least one entry on the stack! if (result.first == 1) { - result.second = lua_tollsd(mState, 1); + // Don't forget that lua_tollsd() can throw Lua errors. + try + { + result.second = lua_tollsd(mState, 1); + } + catch (const std::exception& error) + { + // lua_tollsd() is designed to be called from a lua_function(), + // that is, from a C++ function called by Lua. In case of error, + // it throws a Lua error to be caught by the Lua runtime. expr() + // is a peculiar use case in which our C++ code is calling + // lua_tollsd() after return from the Lua runtime. We must catch + // the exception thrown for a Lua error, else it will propagate + // out to the main coroutine and terminate the viewer -- but since + // we instead of the Lua runtime catch it, our lua_State retains + // its internal error status. Any subsequent lua_pcall() calls + // with this lua_State will report error regardless of whether the + // chunk runs successfully. Get a new lua_State(). + initLuaState(); + return { -1, stringize(LLError::Log::classname(error), ": ", error.what()) }; + } // pop the result we claimed lua_settop(mState, 0); return result; } // multiple entries on the stack - for (int index = 1; index <= result.first; ++index) + try + { + for (int index = 1; index <= result.first; ++index) + { + result.second.append(lua_tollsd(mState, index)); + } + } + catch (const std::exception& error) { - result.second.append(lua_tollsd(mState, index)); + // see above comments regarding lua_State's error status + initLuaState(); + return { -1, stringize(LLError::Log::classname(error), ": ", error.what()) }; } // pop everything lua_settop(mState, 0); diff --git a/indra/newview/llluamanager.cpp b/indra/newview/llluamanager.cpp index cd663021a1..c65f062fd7 100644 --- a/indra/newview/llluamanager.cpp +++ b/indra/newview/llluamanager.cpp @@ -435,7 +435,14 @@ void LLRequireResolver::findModule() fail(); } - std::vector<std::string> lib_paths {gDirUtilp->getExpandedFilename(LL_PATH_SCRIPTS, "lua")}; + std::vector<std::string> lib_paths + { + gDirUtilp->getExpandedFilename(LL_PATH_SCRIPTS, "lua"), +#ifdef LL_TEST + // Build-time tests don't have the app bundle - use source tree. 
+ std::filesystem::path(__FILE__).parent_path() / "scripts" / "lua", +#endif + }; for (const auto& path : lib_paths) { diff --git a/indra/newview/scripts/lua/ErrorQueue.lua b/indra/newview/scripts/lua/ErrorQueue.lua new file mode 100644 index 0000000000..a6d4470044 --- /dev/null +++ b/indra/newview/scripts/lua/ErrorQueue.lua @@ -0,0 +1,30 @@ +-- ErrorQueue isa WaitQueue with the added feature that a producer can push an +-- error through the queue. Once that error is dequeued, every consumer will +-- raise that error. + +local WaitQueue = require('WaitQueue') + +local ErrorQueue = WaitQueue:new() + +function ErrorQueue:Error(message) + -- Setting Error() is a marker, like closing the queue. Once we reach the + -- error, every subsequent Dequeue() call will raise the same error. + self._closed = message + self:_wake_waiters() +end + +function ErrorQueue:Dequeue() + local value = WaitQueue.Dequeue(self) + if value ~= nil then + -- queue not yet closed, show caller + return value + end + if self._closed == true then + -- WaitQueue:close() sets true: queue has only been closed, tell caller + return nil + end + -- self._closed is a message set by Error() + error(self._closed) +end + +return ErrorQueue diff --git a/indra/newview/scripts/lua/LLFloaterAbout.lua b/indra/newview/scripts/lua/LLFloaterAbout.lua new file mode 100644 index 0000000000..44afee2e5c --- /dev/null +++ b/indra/newview/scripts/lua/LLFloaterAbout.lua @@ -0,0 +1,11 @@ +-- Engage the LLFloaterAbout LLEventAPI + +leap = require 'leap' + +local LLFloaterAbout = {} + +function LLFloaterAbout.getInfo() + return leap.request('LLFloaterAbout', {op='getInfo'}) +end + +return LLFloaterAbout diff --git a/indra/newview/scripts/lua/LLGesture.lua b/indra/newview/scripts/lua/LLGesture.lua new file mode 100644 index 0000000000..cb410446d7 --- /dev/null +++ b/indra/newview/scripts/lua/LLGesture.lua @@ -0,0 +1,23 @@ +-- Engage the LLGesture LLEventAPI + +leap = require 'leap' + +local LLGesture = {} + +function LLGesture.getActiveGestures() + return leap.request('LLGesture', {op='getActiveGestures'})['gestures'] +end + +function LLGesture.isGesturePlaying(id) + return leap.request('LLGesture', {op='isGesturePlaying', id=id})['playing'] +end + +function LLGesture.startGesture(id) + leap.send('LLGesture', {op='startGesture', id=id}) +end + +function LLGesture.stopGesture(id) + leap.send('LLGesture', {op='stopGesture', id=id}) +end + +return LLGesture diff --git a/indra/newview/scripts/lua/Queue.lua b/indra/newview/scripts/lua/Queue.lua index e178ad9969..b0a5a87f87 100644 --- a/indra/newview/scripts/lua/Queue.lua +++ b/indra/newview/scripts/lua/Queue.lua @@ -1,40 +1,41 @@ --- from https://create.roblox.com/docs/luau/queues#implementing-queues +-- from https://create.roblox.com/docs/luau/queues#implementing-queues, +-- amended per https://www.lua.org/pil/16.1.html local Queue = {} -Queue.__index = Queue -function Queue.new() - local self = setmetatable({}, Queue) +function Queue:new() + local obj = setmetatable({}, self) + self.__index = self - self._first = 0 - self._last = -1 - self._queue = {} + obj._first = 0 + obj._last = -1 + obj._queue = {} - return self + return obj end -- Check if the queue is empty function Queue:IsEmpty() - return self._first > self._last + return self._first > self._last end -- Add a value to the queue function Queue:Enqueue(value) - local last = self._last + 1 - self._last = last - self._queue[last] = value + local last = self._last + 1 + self._last = last + self._queue[last] = value end -- Remove a value from the queue 
function Queue:Dequeue() - local first = self._first - if self:IsEmpty() then - return nil - end - local value = self._queue[first] - self._queue[first] = nil - self._first = first + 1 - return value + if self:IsEmpty() then + return nil + end + local first = self._first + local value = self._queue[first] + self._queue[first] = nil + self._first = first + 1 + return value end return Queue diff --git a/indra/newview/scripts/lua/UI.lua b/indra/newview/scripts/lua/UI.lua new file mode 100644 index 0000000000..f851632bad --- /dev/null +++ b/indra/newview/scripts/lua/UI.lua @@ -0,0 +1,16 @@ +-- Engage the UI LLEventAPI + +leap = require 'leap' + +local UI = {} + +function UI.call(func, parameter) + -- 'call' is fire-and-forget + leap.send('UI', {op='call', ['function']=func, parameter=parameter}) +end + +function UI.getValue(path) + return leap.request('UI', {op='getValue', path=path})['value'] +end + +return UI diff --git a/indra/newview/scripts/lua/WaitQueue.lua b/indra/newview/scripts/lua/WaitQueue.lua new file mode 100644 index 0000000000..00766ccae7 --- /dev/null +++ b/indra/newview/scripts/lua/WaitQueue.lua @@ -0,0 +1,87 @@ +-- WaitQueue isa Queue with the added feature that when the queue is empty, +-- the Dequeue() operation blocks the calling coroutine until some other +-- coroutine Enqueue()s a new value. + +local Queue = require('Queue') + +local WaitQueue = Queue:new() + +function WaitQueue:new() + local obj = Queue:new() + setmetatable(obj, self) + self.__index = self + + obj._waiters = {} + obj._closed = false + return obj +end + +function WaitQueue:Enqueue(value) + if self._closed then + error("can't Enqueue() on closed Queue") + end + -- can't simply call Queue:Enqueue(value)! That calls the method on the + -- Queue class definition, instead of calling Queue:Enqueue() on self. + -- Hand-expand the Queue:Enqueue() syntactic sugar. + Queue.Enqueue(self, value) + self:_wake_waiters() +end + +function WaitQueue:_wake_waiters() + -- WaitQueue is designed to support multi-producer, multi-consumer use + -- cases. With multiple consumers, if more than one is trying to + -- Dequeue() from an empty WaitQueue, we'll have multiple waiters. + -- Unlike OS threads, with cooperative concurrency it doesn't make sense + -- to "notify all": we need resume only one of the waiting Dequeue() + -- callers. But since resuming that caller might entail either Enqueue() + -- or Dequeue() calls, recheck every time around to see if we must resume + -- another waiting coroutine. + while not self:IsEmpty() and #self._waiters > 0 do + -- Pop the oldest waiting coroutine instead of the most recent, for + -- more-or-less round robin fairness. But skip any coroutines that + -- have gone dead in the meantime. + local waiter = table.remove(self._waiters, 1) + while waiter and coroutine.status(waiter) ~= "suspended" do + waiter = table.remove(self._waiters, 1) + end + -- do we still have at least one waiting coroutine? + if waiter then + -- don't pass the head item: let the resumed coroutine retrieve it + local ok, message = coroutine.resume(waiter) + -- if resuming that waiter encountered an error, don't swallow it + if not ok then + error(message) + end + end + end +end + +function WaitQueue:Dequeue() + while self:IsEmpty() do + -- Don't check for closed until the queue is empty: producer can close + -- the queue while there are still items left, and we want the + -- consumer(s) to retrieve those last few items. 
+ if self._closed then + return nil + end + local coro = coroutine.running() + if coro == nil then + error("WaitQueue:Dequeue() trying to suspend main coroutine") + end + -- add the running coroutine to the list of waiters + table.insert(self._waiters, coro) + -- then let somebody else run + coroutine.yield() + end + -- here we're sure this queue isn't empty + return Queue.Dequeue(self) +end + +function WaitQueue:close() + self._closed = true + -- close() is like Enqueueing an end marker. If there are waiting + -- consumers, give them a chance to see we're closed. + self:_wake_waiters() +end + +return WaitQueue diff --git a/indra/newview/scripts/lua/coro.lua b/indra/newview/scripts/lua/coro.lua new file mode 100644 index 0000000000..616a797e95 --- /dev/null +++ b/indra/newview/scripts/lua/coro.lua @@ -0,0 +1,67 @@ +-- Manage Lua coroutines + +local coro = {} + +coro._coros = {} + +-- Launch a Lua coroutine: create and resume. +-- Returns: new coroutine, values yielded or returned from initial resume() +-- If initial resume() encountered an error, propagates the error. +function coro.launch(func, ...) + local co = coroutine.create(func) + table.insert(coro._coros, co) + return co, coro.resume(co, ...) +end + +-- resume() wrapper to propagate errors +function coro.resume(co, ...) + -- if there's an idiom other than table.pack() to assign an arbitrary + -- number of return values, I don't yet know it + local ok_result = table.pack(coroutine.resume(co, ...)) + if not ok_result[1] then + -- if [1] is false, then [2] is the error message + error(ok_result[2]) + end + -- ok is true, whew, just return the rest of the values + return table.unpack(ok_result, 2) +end + +-- yield to other coroutines even if you don't know whether you're in a +-- created coroutine or the main coroutine +function coro.yield(...) + if coroutine.running() then + -- this is a real coroutine, yield normally + return coroutine.yield(...) + else + -- This is the main coroutine: coroutine.yield() doesn't work. + -- But we can take a spin through previously-launched coroutines. + -- Walk a copy of coro._coros in case any of these coroutines launches + -- another: next() forbids creating new entries during traversal. + for co in coro._live_coros_iter, table.clone(coro._coros) do + coro.resume(co) + end + end +end + +-- Walk coro._coros table, returning running or suspended coroutines. +-- Once a coroutine becomes dead, remove it from _coros and don't return it. +function coro._live_coros() + return coro._live_coros_iter, coro._coros +end + +-- iterator function for _live_coros() +function coro._live_coros_iter(t, idx) + local k, co = next(t, idx) + while k and coroutine.status(co) == 'dead' do +-- t[k] = nil + -- See coro.yield(): sometimes we traverse a copy of _coros, but if we + -- discover a dead coroutine in that copy, delete it from _coros + -- anyway. Deleting it from a temporary copy does nothing. 
+ coro._coros[k] = nil + coroutine.close(co) + k, co = next(t, k) + end + return co +end + +return coro diff --git a/indra/newview/scripts/lua/inspect.lua b/indra/newview/scripts/lua/inspect.lua new file mode 100644 index 0000000000..9900a0b81b --- /dev/null +++ b/indra/newview/scripts/lua/inspect.lua @@ -0,0 +1,371 @@ +local _tl_compat; if (tonumber((_VERSION or ''):match('[%d.]*$')) or 0) < 5.3 then local p, m = pcall(require, 'compat53.module'); if p then _tl_compat = m end end; local math = _tl_compat and _tl_compat.math or math; local string = _tl_compat and _tl_compat.string or string; local table = _tl_compat and _tl_compat.table or table +local inspect = {Options = {}, } + + + + + + + + + + + + + + + + + +inspect._VERSION = 'inspect.lua 3.1.0' +inspect._URL = 'http://github.com/kikito/inspect.lua' +inspect._DESCRIPTION = 'human-readable representations of tables' +inspect._LICENSE = [[ + MIT LICENSE + + Copyright (c) 2022 Enrique GarcĂa Cota + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +]] +inspect.KEY = setmetatable({}, { __tostring = function() return 'inspect.KEY' end }) +inspect.METATABLE = setmetatable({}, { __tostring = function() return 'inspect.METATABLE' end }) + +local tostring = tostring +local rep = string.rep +local match = string.match +local char = string.char +local gsub = string.gsub +local fmt = string.format + +local _rawget +if rawget then + _rawget = rawget +else + _rawget = function(t, k) return t[k] end +end + +local function rawpairs(t) + return next, t, nil +end + + + +local function smartQuote(str) + if match(str, '"') and not match(str, "'") then + return "'" .. str .. "'" + end + return '"' .. gsub(str, '"', '\\"') .. '"' +end + + +local shortControlCharEscapes = { + ["\a"] = "\\a", ["\b"] = "\\b", ["\f"] = "\\f", ["\n"] = "\\n", + ["\r"] = "\\r", ["\t"] = "\\t", ["\v"] = "\\v", ["\127"] = "\\127", +} +local longControlCharEscapes = { ["\127"] = "\127" } +for i = 0, 31 do + local ch = char(i) + if not shortControlCharEscapes[ch] then + shortControlCharEscapes[ch] = "\\" .. 
i + longControlCharEscapes[ch] = fmt("\\%03d", i) + end +end + +local function escape(str) + return (gsub(gsub(gsub(str, "\\", "\\\\"), + "(%c)%f[0-9]", longControlCharEscapes), + "%c", shortControlCharEscapes)) +end + +local luaKeywords = { + ['and'] = true, + ['break'] = true, + ['do'] = true, + ['else'] = true, + ['elseif'] = true, + ['end'] = true, + ['false'] = true, + ['for'] = true, + ['function'] = true, + ['goto'] = true, + ['if'] = true, + ['in'] = true, + ['local'] = true, + ['nil'] = true, + ['not'] = true, + ['or'] = true, + ['repeat'] = true, + ['return'] = true, + ['then'] = true, + ['true'] = true, + ['until'] = true, + ['while'] = true, +} + +local function isIdentifier(str) + return type(str) == "string" and + not not str:match("^[_%a][_%a%d]*$") and + not luaKeywords[str] +end + +local flr = math.floor +local function isSequenceKey(k, sequenceLength) + return type(k) == "number" and + flr(k) == k and + 1 <= (k) and + k <= sequenceLength +end + +local defaultTypeOrders = { + ['number'] = 1, ['boolean'] = 2, ['string'] = 3, ['table'] = 4, + ['function'] = 5, ['userdata'] = 6, ['thread'] = 7, +} + +local function sortKeys(a, b) + local ta, tb = type(a), type(b) + + + if ta == tb and (ta == 'string' or ta == 'number') then + return (a) < (b) + end + + local dta = defaultTypeOrders[ta] or 100 + local dtb = defaultTypeOrders[tb] or 100 + + + return dta == dtb and ta < tb or dta < dtb +end + +local function getKeys(t) + + local seqLen = 1 + while _rawget(t, seqLen) ~= nil do + seqLen = seqLen + 1 + end + seqLen = seqLen - 1 + + local keys, keysLen = {}, 0 + for k in rawpairs(t) do + if not isSequenceKey(k, seqLen) then + keysLen = keysLen + 1 + keys[keysLen] = k + end + end + table.sort(keys, sortKeys) + return keys, keysLen, seqLen +end + +local function countCycles(x, cycles) + if type(x) == "table" then + if cycles[x] then + cycles[x] = cycles[x] + 1 + else + cycles[x] = 1 + for k, v in rawpairs(x) do + countCycles(k, cycles) + countCycles(v, cycles) + end + countCycles(getmetatable(x), cycles) + end + end +end + +local function makePath(path, a, b) + local newPath = {} + local len = #path + for i = 1, len do newPath[i] = path[i] end + + newPath[len + 1] = a + newPath[len + 2] = b + + return newPath +end + + +local function processRecursive(process, + item, + path, + visited) + if item == nil then return nil end + if visited[item] then return visited[item] end + + local processed = process(item, path) + if type(processed) == "table" then + local processedCopy = {} + visited[item] = processedCopy + local processedKey + + for k, v in rawpairs(processed) do + processedKey = processRecursive(process, k, makePath(path, k, inspect.KEY), visited) + if processedKey ~= nil then + processedCopy[processedKey] = processRecursive(process, v, makePath(path, processedKey), visited) + end + end + + local mt = processRecursive(process, getmetatable(processed), makePath(path, inspect.METATABLE), visited) + if type(mt) ~= 'table' then mt = nil end + setmetatable(processedCopy, mt) + processed = processedCopy + end + return processed +end + +local function puts(buf, str) + buf.n = buf.n + 1 + buf[buf.n] = str +end + + + +local Inspector = {} + + + + + + + + + + +local Inspector_mt = { __index = Inspector } + +local function tabify(inspector) + puts(inspector.buf, inspector.newline .. 
rep(inspector.indent, inspector.level)) +end + +function Inspector:getId(v) + local id = self.ids[v] + local ids = self.ids + if not id then + local tv = type(v) + id = (ids[tv] or 0) + 1 + ids[v], ids[tv] = id, id + end + return tostring(id) +end + +function Inspector:putValue(v) + local buf = self.buf + local tv = type(v) + if tv == 'string' then + puts(buf, smartQuote(escape(v))) + elseif tv == 'number' or tv == 'boolean' or tv == 'nil' or + tv == 'cdata' or tv == 'ctype' then + puts(buf, tostring(v)) + elseif tv == 'table' and not self.ids[v] then + local t = v + + if t == inspect.KEY or t == inspect.METATABLE then + puts(buf, tostring(t)) + elseif self.level >= self.depth then + puts(buf, '{...}') + else + if self.cycles[t] > 1 then puts(buf, fmt('<%d>', self:getId(t))) end + + local keys, keysLen, seqLen = getKeys(t) + + puts(buf, '{') + self.level = self.level + 1 + + for i = 1, seqLen + keysLen do + if i > 1 then puts(buf, ',') end + if i <= seqLen then + puts(buf, ' ') + self:putValue(t[i]) + else + local k = keys[i - seqLen] + tabify(self) + if isIdentifier(k) then + puts(buf, k) + else + puts(buf, "[") + self:putValue(k) + puts(buf, "]") + end + puts(buf, ' = ') + self:putValue(t[k]) + end + end + + local mt = getmetatable(t) + if type(mt) == 'table' then + if seqLen + keysLen > 0 then puts(buf, ',') end + tabify(self) + puts(buf, '<metatable> = ') + self:putValue(mt) + end + + self.level = self.level - 1 + + if keysLen > 0 or type(mt) == 'table' then + tabify(self) + elseif seqLen > 0 then + puts(buf, ' ') + end + + puts(buf, '}') + end + + else + puts(buf, fmt('<%s %d>', tv, self:getId(v))) + end +end + + + + +function inspect.inspect(root, options) + options = options or {} + + local depth = options.depth or (math.huge) + local newline = options.newline or '\n' + local indent = options.indent or ' ' + local process = options.process + + if process then + root = processRecursive(process, root, {}, {}) + end + + local cycles = {} + countCycles(root, cycles) + + local inspector = setmetatable({ + buf = { n = 0 }, + ids = {}, + cycles = cycles, + depth = depth, + level = 0, + newline = newline, + indent = indent, + }, Inspector_mt) + + inspector:putValue(root) + + return table.concat(inspector.buf) +end + +setmetatable(inspect, { + __call = function(_, root, options) + return inspect.inspect(root, options) + end, +}) + +return inspect diff --git a/indra/newview/scripts/lua/leap.lua b/indra/newview/scripts/lua/leap.lua new file mode 100644 index 0000000000..81728e7230 --- /dev/null +++ b/indra/newview/scripts/lua/leap.lua @@ -0,0 +1,442 @@ +-- Lua implementation of LEAP (LLSD Event API Plugin) protocol +-- +-- This module supports Lua scripts run by the Second Life viewer. +-- +-- LEAP protocol passes LLSD objects, converted to/from Lua tables, in both +-- directions. A typical LLSD object is a map containing keys 'pump' and +-- 'data'. +-- +-- The viewer's Lua post_to(pump, data) function posts 'data' to the +-- LLEventPump 'pump'. This is typically used to engage an LLEventAPI method. +-- +-- Similarly, the viewer gives each Lua script its own LLEventPump with a +-- unique name. That name is returned by get_event_pumps(). Every event +-- received on that LLEventPump is queued for retrieval by get_event_next(), +-- which returns (pump, data): the name of the LLEventPump on which the event +-- was received and the received event data. When the queue is empty, +-- get_event_next() blocks the calling Lua script until the next event is +-- received. +-- +-- Usage: +-- 1. 
Launch some number of Lua coroutines. The code in each coroutine may +-- call leap.send(), leap.request() or leap.generate(). leap.send() returns +-- immediately ("fire and forget"). leap.request() blocks the calling +-- coroutine until it receives and returns the viewer's response to its +-- request. leap.generate() expects an arbitrary number of responses to the +-- original request. +-- 2. To handle events from the viewer other than direct responses to +-- requests, instantiate a leap.WaitFor object with a filter(pump, data) +-- override method that returns non-nil for desired events. A coroutine may +-- call wait() on any such WaitFor. +-- 3. Once the coroutines have been launched, call leap.process() on the main +-- coroutine. process() retrieves incoming events from the viewer and +-- dispatches them to waiting request() or generate() calls, or to +-- appropriate WaitFor instances. process() returns when either +-- get_event_next() raises an error or the viewer posts nil to the script's +-- reply pump to indicate it's done. +-- 4. Alternatively, a running coroutine may call leap.done() to break out of +-- leap.process(). process() won't notice until the next event from the +-- viewer, though. + +local ErrorQueue = require('ErrorQueue') + +local leap = {} + +-- _reply: string name of reply LLEventPump. Any events the viewer posts to +-- this pump will be queued for get_event_next(). We usually specify it as the +-- reply pump for requests to internal viewer services. +-- _command: string name of command LLEventPump. post_to(_command, ...) +-- engages LLLeapListener operations such as listening on a specified other +-- LLEventPump, etc. +leap._reply, leap._command = get_event_pumps() +-- Dict of features added to the LEAP protocol since baseline implementation. +-- Before engaging a new feature that might break an older viewer, we can +-- check for the presence of that feature key. This table is solely about the +-- LEAP protocol itself, the way we communicate with the viewer. To discover +-- whether a given listener exists, or supports a particular operation, use +-- _command's "getAPI" operation. +-- For Lua, _command's "getFeatures" operation suffices? +-- leap._features = {} + +-- Each outstanding request() or generate() call has a corresponding +-- WaitForReqid object (later in this module) to handle the +-- response(s). If an incoming event contains an echoed ["reqid"] key, +-- we can look up the appropriate WaitForReqid object more efficiently +-- in a dict than by tossing such objects into the usual waitfors list. +-- Note: the ["reqid"] must be unique, otherwise we could end up +-- replacing an earlier WaitForReqid object in self.pending with a +-- later one. That means that no incoming event will ever be given to +-- the old WaitForReqid object. Any coroutine waiting on the discarded +-- WaitForReqid object would therefore wait forever. +leap._pending = {} +-- Our consumer will instantiate some number of WaitFor subclass objects. +-- As these are traversed in descending priority order, we must keep +-- them in a list. +leap._waitfors = {} +-- It has been suggested that we should use UUIDs as ["reqid"] values, +-- since UUIDs are guaranteed unique. However, as the "namespace" for +-- ["reqid"] values is our very own _reply pump, we can get away with +-- an integer. 
+leap._reqid = 0 +-- break leap.process() loop +leap._done = false + +-- get the name of the reply pump +function leap.replypump() + return leap._reply +end + +-- get the name of the command pump +function leap.cmdpump() + return leap._command +end + +-- local inspect = require('inspect') + +-- Fire and forget. Send the specified request LLSD, expecting no reply. +-- In fact, should the request produce an eventual reply, it will be +-- treated as an unsolicited event. +-- +-- See also request(), generate(). +function leap.send(pump, data, reqid) +-- print_debug('leap.send('..pump..', '..inspect(data)..', '..reqid..') entry') + local data = data + if type(data) == 'table' then + data = table.clone(data) + data['reply'] = leap._reply + if reqid ~= nil then + data['reqid'] = reqid + end + end +-- print_debug('leap.send('..pump..', '..inspect(data)..') calling post_on()') + post_on(pump, data) +end + +-- Send the specified request LLSD, expecting exactly one reply. Block +-- the calling coroutine until we receive that reply. +-- +-- Every request() (or generate()) LLSD block we send will get stamped +-- with a distinct ["reqid"] value. The requested event API must echo the +-- same ["reqid"] field in each reply associated with that request. This way +-- we can correctly dispatch interleaved replies from different requests. +-- +-- If the desired event API doesn't support the ["reqid"] echo convention, +-- you should use send() instead -- since request() or generate() would +-- wait forever for a reply stamped with that ["reqid"] -- and intercept +-- any replies using WaitFor. +-- +-- Unless the request data already contains a ["reply"] key, we insert +-- reply=self.replypump to try to ensure that the expected reply will be +-- returned over the socket. +-- +-- See also send(), generate(). +function leap.request(pump, data) + local reqid = leap._requestSetup(pump, data) + local waitfor = leap._pending[reqid] +-- print_debug('leap.request('..tostring(pump)..', '..inspect(data)..') about to wait on '.. +-- tostring(waitfor)) + local ok, response = pcall(waitfor.wait, waitfor) +-- print_debug('leap.request('..tostring(pump)..', '..inspect(data)..') got '.. +-- tostring(ok)..': '..inspect(response)) + -- kill off temporary WaitForReqid object, even if error + leap._pending[reqid] = nil + if ok then + response.reqid = nil + return response + else + error(response) + end +end + +-- common setup code shared by request() and generate() +function leap._requestSetup(pump, data) + -- invent a new, unique reqid + leap._reqid += 1 + local reqid = leap._reqid + -- Instantiate a new WaitForReqid object. The priority is irrelevant + -- because, unlike the WaitFor base class, WaitForReqid does not + -- self-register on our leap._waitfors list. Instead, capture the new + -- WaitForReqid object in leap._pending so _dispatch() can find it. + leap._pending[reqid] = leap.WaitForReqid:new(reqid) + -- Pass reqid to send() to stamp it into (a copy of) the request data. +-- print_debug('leap._requestSetup('..tostring(pump)..', '..inspect(data)..')') + leap.send(pump, data, reqid) + return reqid +end + +-- Send the specified request LLSD, expecting an arbitrary number of replies. +-- Each one is yielded on receipt. If you omit checklast, this is an infinite +-- generator; it's up to the caller to recognize when the last reply has been +-- received, and stop resuming for more. +-- +-- If you pass checklast=<callable accepting(event)>, each response event is +-- passed to that callable (after the yield). 
When the callable returns +-- True, the generator terminates in the usual way. +-- +-- See request() remarks about ["reqid"]. +function leap.generate(pump, data, checklast) + -- Invent a new, unique reqid. Arrange to handle incoming events + -- bearing that reqid. Stamp the outbound request with that reqid, and + -- send it. + local reqid = leap._requestSetup(pump, data) + local waitfor = leap._pending[reqid] + local ok, response + repeat + ok, response = pcall(waitfor.wait, waitfor) + if not ok then + break + end + response.reqid = nil + coroutine.yield(response) + until checklast and checklast(response) + -- If we break the above loop, whether or not due to error, clean up. + leap._pending[reqid] = nil + if not ok then + error(response) + end +end + +-- Kick off response processing. The calling script must create and resume one +-- or more coroutines to perform viewer requests using send(), request() or +-- generate() before calling this function to handle responses. +-- +-- While waiting for responses from the viewer, the C++ coroutine running the +-- calling Lua script is blocked: no other Lua coroutine is running. +function leap.process() + leap._done = false + local ok, pump, data + while not leap._done do +-- print_debug('leap.process() calling get_event_next()') + ok, pump, data = pcall(get_event_next) +-- print_debug('leap.process() got '..tostring(ok)..': '..pump..', '..inspect(data)) + -- ok false means get_event_next() raised a Lua error + -- data nil means get_event_next() returned (pump, LLSD()) to indicate done + if not (ok and data) then + break + end + leap._dispatch(pump, data) + end +-- print_debug('leap.process() done') + -- we're done: clean up all pending coroutines + -- if ok, then we're just done. + -- if not ok, then 'pump' is actually the error message. + message = if ok then 'done' else pump + for i, waitfor in pairs(leap._pending) do + waitfor:_exception(message) + end + for i, waitfor in pairs(leap._waitfors) do + waitfor:_exception(message) + end + -- now that we're done with cleanup, propagate the error we caught above + if not ok then + error(pump) + end +end + +function leap.done() + leap._done = true +end + +-- Route incoming (pump, data) event to the appropriate waiting coroutine. +function leap._dispatch(pump, data) + local reqid = data['reqid'] + -- if the response has no 'reqid', it's not from request() or generate() + if reqid == nil then + return leap._unsolicited(pump, data) + end + -- have reqid; do we have a WaitForReqid? 
+ local waitfor = leap._pending[reqid] + if waitfor == nil then + return leap._unsolicited(pump, data) + end + -- found the right WaitForReqid object, let it handle the event + data['reqid'] = nil + waitfor:_handle(pump, data) +end + +-- Handle an incoming (pump, data) event with no recognizable ['reqid'] +function leap._unsolicited(pump, data) + -- we maintain waitfors in descending priority order, so the first waitfor + -- to claim this event is the one with the highest priority + for i, waitfor in pairs(leap._waitfors) do + if waitfor:_handle(pump, data) then + return + end + end +-- print_debug('_unsolicited(', pump, ', ', data, ') discarding unclaimed event') +end + +-- called by WaitFor.enable() +function leap._registerWaitFor(waitfor) + table.insert(leap._waitfors, waitfor) + -- keep waitfors sorted in descending order of specified priority + table.sort(leap._waitfors, + function (lhs, rhs) return lhs.priority > rhs.priority end) +end + +-- called by WaitFor.disable() +function leap._unregisterWaitFor(waitfor) + for i, w in pairs(leap._waitfors) do + if w == waitfor then + leap._waitfors[i] = nil + break + end + end +end + +-- ****************************************************************************** +-- WaitFor and friends +-- ****************************************************************************** + +-- An unsolicited event is handled by the highest-priority WaitFor subclass +-- object willing to accept it. If no such object is found, the unsolicited +-- event is discarded. +-- +-- * First, instantiate a WaitFor subclass object to register its interest in +-- some incoming event(s). WaitFor instances are self-registering; merely +-- instantiating the object suffices. +-- * Any coroutine may call a given WaitFor object's wait() method. This blocks +-- the calling coroutine until a suitable event arrives. +-- * WaitFor's constructor accepts a float priority. Every incoming event +-- (other than those claimed by request() or generate()) is passed to each +-- extant WaitFor.filter() method in descending priority order. The first +-- such filter() to return nontrivial data claims that event. +-- * At that point, the blocked wait() call on that WaitFor object returns the +-- item returned by filter(). +-- * WaitFor contains a queue. Multiple arriving events claimed by that WaitFor +-- object's filter() method are added to the queue. Naturally, until the +-- queue is empty, calling wait() immediately returns the front entry. +-- +-- It's reasonable to instantiate a WaitFor subclass whose filter() method +-- unconditionally returns the incoming event, and whose priority places it +-- last in the list. This object will enqueue every unsolicited event left +-- unclaimed by other WaitFor subclass objects. +-- +-- It's not strictly necessary to associate a WaitFor object with exactly one +-- coroutine. You might have multiple "worker" coroutines drawing from the same +-- WaitFor object, useful if the work being done per event might itself involve +-- "blocking" operations. Or a given coroutine might sample a number of WaitFor +-- objects in round-robin fashion... etc. etc. Nonetheless, it's +-- straightforward to designate one coroutine for each WaitFor object. + +-- --------------------------------- WaitFor --------------------------------- +leap.WaitFor = { _id=0 } + +function leap.WaitFor:new(priority, name) + local obj = setmetatable({}, self) + self.__index = self + + obj.priority = priority + if name then + obj.name = name + else + self._id += 1 + obj.name = 'WaitFor' .. 
self._id + end + obj._queue = ErrorQueue:new() + obj._registered = false + -- if no priority, then don't enable() - remember 0 is truthy + if priority then + obj:enable() + end + + return obj +end + +function leap.WaitFor.tostring(self) + -- Lua (sub)classes have no name; can't prefix with that + return self.name +end + +-- Re-enable a disable()d WaitFor object. New WaitFor objects are +-- enable()d by default. +function leap.WaitFor:enable() + if not self._registered then + leap._registerWaitFor(self) + self._registered = true + end +end + +-- Disable an enable()d WaitFor object. +function leap.WaitFor:disable() + if self._registered then + leap._unregisterWaitFor(self) + self._registered = false + end +end + +-- Block the calling coroutine until a suitable unsolicited event (one +-- for which filter() returns the event) arrives. +function leap.WaitFor:wait() +-- print_debug(self.name .. ' about to wait') + item = self._queue:Dequeue() +-- print_debug(self.name .. ' got ', item) + return item +end + +-- Loop over wait() calls. +function leap.WaitFor:iterate() + -- on each iteration, call self.wait(self) + return self.wait, self, nil +end + +-- Override filter() to examine the incoming event in whatever way +-- makes sense. +-- +-- Return nil to ignore this event. +-- +-- To claim the event, return the item you want placed in the queue. +-- Typically you'd write: +-- return data +-- or perhaps +-- return {pump=pump, data=data} +-- or some variation. +function leap.WaitFor:filter(pump, data) + error('You must override the WaitFor.filter() method') +end + +-- called by leap._unsolicited() for each WaitFor in leap._waitfors +function leap.WaitFor:_handle(pump, data) + item = self:filter(pump, data) + -- if this item doesn't pass the filter, we're not interested + if not item then + return false + end + -- okay, filter() claims this event + self:process(item) + return true +end + +-- called by WaitFor:_handle() for an accepted event +function leap.WaitFor:process(item) + self._queue:Enqueue(item) +end + +-- called by leap.process() when get_event_next() raises an error +function leap.WaitFor:_exception(message) + print_warning(self.name .. ' error: ' .. message) + self._queue:Error(message) +end + +-- ------------------------------ WaitForReqid ------------------------------- +leap.WaitForReqid = leap.WaitFor:new() + +function leap.WaitForReqid:new(reqid) + -- priority is meaningless, since this object won't be added to the + -- priority-sorted ViewerClient.waitfors list. Use the reqid as the + -- debugging name string. + local obj = leap.WaitFor:new(nil, 'WaitForReqid(' .. reqid .. ')') + setmetatable(obj, self) + self.__index = self + + return obj +end + +function leap.WaitForReqid:filter(pump, data) + -- Because we expect to directly look up the WaitForReqid object of + -- interest based on the incoming ["reqid"] value, it's not necessary + -- to test the event again. Accept every such event. + return data +end + +return leap diff --git a/indra/newview/scripts/lua/qtest.lua b/indra/newview/scripts/lua/qtest.lua new file mode 100644 index 0000000000..009446d0c3 --- /dev/null +++ b/indra/newview/scripts/lua/qtest.lua @@ -0,0 +1,146 @@ +-- Exercise the Queue, WaitQueue, ErrorQueue family + +Queue = require('Queue') +WaitQueue = require('WaitQueue') +ErrorQueue = require('ErrorQueue') +util = require('util') + +inspect = require('inspect') + +-- resume() wrapper to propagate errors +function resume(co, ...) 
+ -- if there's an idiom other than table.pack() to assign an arbitrary + -- number of return values, I don't yet know it + local ok_result = table.pack(coroutine.resume(co, ...)) + if not ok_result[1] then + -- if [1] is false, then [2] is the error message + error(ok_result[2]) + end + -- ok is true, whew, just return the rest of the values + return table.unpack(ok_result, 2) +end + +-- ------------------ Queue variables are instance-specific ------------------ +q1 = Queue:new() +q2 = Queue:new() + +q1:Enqueue(17) + +assert(not q1:IsEmpty()) +assert(q2:IsEmpty()) +assert(q1:Dequeue() == 17) +assert(q1:Dequeue() == nil) +assert(q2:Dequeue() == nil) + +-- ----------------------------- test WaitQueue ------------------------------ +q1 = WaitQueue:new() +q2 = WaitQueue:new() +result = {} +values = { 1, 1, 2, 3, 5, 8, 13, 21 } + +for i, value in pairs(values) do + q1:Enqueue(value) +end +-- close() while not empty tests that queue drains before reporting done +q1:close() + +-- ensure that WaitQueue instance variables are in fact independent +assert(q2:IsEmpty()) + +-- consumer() coroutine to pull from the passed q until closed +function consumer(desc, q) + print(string.format('consumer(%s) start', desc)) + local value = q:Dequeue() + while value ~= nil do + print(string.format('consumer(%s) got %q', desc, value)) + table.insert(result, value) + value = q:Dequeue() + end + print(string.format('consumer(%s) done', desc)) +end + +-- run two consumers +coa = coroutine.create(consumer) +cob = coroutine.create(consumer) +-- Since consumer() doesn't yield while it can still retrieve values, +-- consumer(a) will dequeue all values from q1 and return when done. +resume(coa, 'a', q1) +-- consumer(b) will wake up to find the queue empty and closed. +resume(cob, 'b', q1) +coroutine.close(coa) +coroutine.close(cob) + +print('values:', inspect(values)) +print('result:', inspect(result)) + +assert(util.equal(values, result)) + +-- try incrementally enqueueing values +q3 = WaitQueue:new() +result = {} +values = { 'This', 'is', 'a', 'test', 'script' } + +coros = {} +for _, name in {'a', 'b'} do + local coro = coroutine.create(consumer) + table.insert(coros, coro) + -- Resuming both coroutines should leave them both waiting for a queue item. + resume(coro, name, q3) +end + +for _, s in pairs(values) do + print(string.format('Enqueue(%q)', s)) + q3:Enqueue(s) +end +q3:close() + +function joinall(coros) + local running + local errors = 0 + repeat + running = false + for i, coro in pairs(coros) do + if coroutine.status(coro) == 'suspended' then + running = true + -- directly call coroutine.resume() instead of our resume() + -- wrapper because we explicitly check for errors here + local ok, message = coroutine.resume(coro) + if not ok then + print('*** ' .. message) + errors += 1 + end + if coroutine.status(coro) == 'dead' then + coros[i] = nil + end + end + end + until not running + return errors +end + +joinall(coros) + +print(string.format('%q', table.concat(result, ' '))) +assert(util.equal(values, result)) + +-- ----------------------------- test ErrorQueue ----------------------------- +q4 = ErrorQueue:new() +result = {} +values = { 'This', 'is', 'a', 'test', 'script' } + +coros = {} +for _, name in {'a', 'b'} do + local coro = coroutine.create(consumer) + table.insert(coros, coro) + -- Resuming both coroutines should leave them both waiting for a queue item. 
+ resume(coro, name, q4) +end + +for i = 1, 4 do + print(string.format('Enqueue(%q)', values[i])) + q4:Enqueue(values[i]) +end +q4:Error('something went wrong') + +assert(joinall(coros) == 2) + diff --git a/indra/newview/scripts/lua/test_LLFloaterAbout.lua b/indra/newview/scripts/lua/test_LLFloaterAbout.lua new file mode 100644 index 0000000000..7abc437b79 --- /dev/null +++ b/indra/newview/scripts/lua/test_LLFloaterAbout.lua @@ -0,0 +1,14 @@ +-- test LLFloaterAbout + +LLFloaterAbout = require('LLFloaterAbout') +leap = require('leap') +coro = require('coro') +inspect = require('inspect') + +coro.launch(function () + print(inspect(LLFloaterAbout.getInfo())) + leap.done() +end) + +leap.process() + diff --git a/indra/newview/scripts/lua/test_LLGesture.lua b/indra/newview/scripts/lua/test_LLGesture.lua new file mode 100644 index 0000000000..5c0db6c063 --- /dev/null +++ b/indra/newview/scripts/lua/test_LLGesture.lua @@ -0,0 +1,32 @@ +-- exercise LLGesture API + +LLGesture = require 'LLGesture' +inspect = require 'inspect' +coro = require 'coro' +leap = require 'leap' + +coro.launch(function() + -- getActiveGestures() returns {<UUID>: {name, playing, trigger}} + gestures_uuid = LLGesture.getActiveGestures() + -- convert to {<name>: <uuid>} + gestures = {} + for uuid, info in pairs(gestures_uuid) do + gestures[info.name] = uuid + end + -- now run through the list + for name, uuid in pairs(gestures) do + if name == 'afk' then + -- afk has a long timeout, and isn't interesting to look at + continue + end + print(name) + LLGesture.startGesture(uuid) + repeat + sleep(1) + until not LLGesture.isGesturePlaying(uuid) + end + print('Done.') + leap.done() +end) + +leap.process() diff --git a/indra/newview/scripts/lua/util.lua b/indra/newview/scripts/lua/util.lua new file mode 100644 index 0000000000..e3af633ea7 --- /dev/null +++ b/indra/newview/scripts/lua/util.lua @@ -0,0 +1,39 @@ +-- utility functions, in alpha order + +local util = {} + +-- cheap test whether table t is empty +function util.empty(t) + return not next(t) +end + +-- reliable count of the number of entries in table t +-- (since #t is unreliable) +function util.count(t) + local count = 0 + for _ in pairs(t) do + count += 1 + end + return count +end + +-- recursive table equality +function util.equal(t1, t2) + if not (type(t1) == 'table' and type(t2) == 'table') then + return t1 == t2 + end + -- both t1 and t2 are tables: get modifiable copy of t2 + local temp = table.clone(t2) + for k, v in pairs(t1) do + -- if any key in t1 doesn't have same value in t2, not equal + if not util.equal(v, temp[k]) then + return false + end + -- temp[k] == t1[k], delete temp[k] + temp[k] = nil + end + -- All keys in t1 have equal values in t2; t2 == t1 if there are no extra keys in t2 + return util.empty(temp) +end + +return util diff --git a/indra/newview/tests/llluamanager_test.cpp b/indra/newview/tests/llluamanager_test.cpp index 81ec186cf4..069e10e9cf 100644 --- a/indra/newview/tests/llluamanager_test.cpp +++ b/indra/newview/tests/llluamanager_test.cpp @@ -28,6 +28,7 @@ #include "lluri.h" #include "lluuid.h" #include "lua_function.h" +#include "lualistener.h" #include "stringize.h" class LLTestApp : public LLApp @@ -94,9 +95,10 @@ namespace tut { auto [count, result] = LLLUAmanager::waitScriptLine(L, "return " + luax.expr); - auto desc{ stringize("waitScriptLine(", luax.desc, ")") }; - ensure_equals(desc + ".count", count, 1); - ensure_equals(desc + ".result", result, luax.expect); + auto desc{ stringize("waitScriptLine(", luax.desc, "): ") }; + // if 
count < 0, report Lua error message + ensure_equals(desc + result.asString(), count, 1); + ensure_equals(desc + "result", result, luax.expect); } } @@ -116,7 +118,7 @@ namespace tut // We woke up again ourselves because the coroutine running Lua has // finished. But our Lua chunk didn't actually return anything, so we // expect count to be 0 and result to be undefined. - ensure_equals(desc + " count", count, 0); + ensure_equals(desc + ": " + result.asString(), count, 0); ensure_equals(desc, fromlua, expect); } @@ -172,16 +174,13 @@ namespace tut << "': post('" << expected[4] << "')" << LL_ENDL; luapump.post(expected[4]); auto [count, result] = future.get(); + ensure_equals("post_on(): " + result.asString(), count, 0); ensure_equals("post_on() sequence", posts, expected); } void round_trip(const std::string& desc, const LLSD& send, const LLSD& expect) { - LLSD reply; - LLEventStream replypump("testpump"); - LLTempBoundListener conn( - replypump.listen("llluamanager_test", - listener([&reply](const LLSD& post){ reply = post; }))); + LLEventMailDrop replypump("testpump"); const std::string lua( "-- test LLSD round trip\n" "replypump, cmdpump = get_event_pumps()\n" @@ -195,12 +194,12 @@ namespace tut // reached the get_event_next() call, which suspends the calling C++ // coroutine (including the Lua code running on it) until we post // something to that reply pump. - auto luapump{ reply.asString() }; - reply.clear(); + auto luapump{ llcoro::suspendUntilEventOn(replypump).asString() }; LLEventPumps::instance().post(luapump, send); // The C++ coroutine running the Lua script is now ready to run. Run // it so it will echo the LLSD back to us. auto [count, result] = future.get(); + ensure_equals(stringize("round_trip(", desc, "): ", result.asString()), count, 1); ensure_equals(desc, result, expect); } @@ -304,4 +303,93 @@ namespace tut } round_trip("nested map", send_map, expect_map); } + + template<> template<> + void object::test<5>() + { + set_test_name("test leap.lua"); + const std::string lua( + "-- test leap.lua\n" + "\n" + "leap = require('leap')\n" + "coro = require('coro')\n" + "\n" + "-- negative priority ensures catchall is always last\n" + "catchall = leap.WaitFor:new(-1, 'catchall')\n" + "function catchall:filter(pump, data)\n" + " return data\n" + "end\n" + "\n" + "-- but first, catch events with 'special' key\n" + "catch_special = leap.WaitFor:new(2, 'catch_special')\n" + "function catch_special:filter(pump, data)\n" + " return if data['special'] ~= nil then data else nil\n" + "end\n" + "\n" + "function drain(waitfor)\n" + " print(waitfor.name .. ' start')\n" + " -- It seems as though we ought to be able to code this loop\n" + " -- over waitfor:wait() as:\n" + " -- for item in waitfor.wait, waitfor do\n" + " -- However, that seems to stitch a detour through C code into\n" + " -- the coroutine call stack, which prohibits coroutine.yield():\n" + " -- 'attempt to yield across metamethod/C-call boundary'\n" + " -- So we resort to two different calls to waitfor:wait().\n" + " item = waitfor:wait()\n" + " while item do\n" + " print(waitfor.name .. ' caught', item)\n" + " item = waitfor:wait()\n" + " end\n" + " print(waitfor.name .. 
' done')\n" + "end\n" + "\n" + "function requester(name)\n" + " print('requester('..name..') start')\n" + " response = leap.request('testpump', {name=name})\n" + " print('requester('..name..') got '..tostring(response))\n" + " -- verify that the correct response was dispatched to this coroutine\n" + " assert(response.name == name)\n" + "end\n" + "\n" + "coro.launch(drain, catchall)\n" + "coro.launch(drain, catch_special)\n" + "coro.launch(requester, 'a')\n" + "coro.launch(requester, 'b')\n" + "\n" + "leap.process()\n" + ); + + LLSD requests; + LLEventStream pump("testpump", false); + LLTempBoundListener conn{ + pump.listen("test<5>()", + listener([&requests](const LLSD& data) + { + LL_DEBUGS("Lua") << "testpump got: " << data << LL_ENDL; + requests.append(data); + })) + }; + + LuaState L; + auto future = LLLUAmanager::startScriptLine(L, lua); + auto replyname{ L.obtainListener()->getReplyName() }; + auto& replypump{ LLEventPumps::instance().obtain(replyname) }; + // By the time leap.process() calls get_event_next() and wakes us up, + // we expect that both requester() coroutines have posted and are + // waiting for a reply. + ensure_equals("didn't get both requests", requests.size(), 2); + // moreover, we expect they arrived in the order they were created + ensure_equals("a wasn't first", requests[0]["name"].asString(), "a"); + ensure_equals("b wasn't second", requests[1]["name"].asString(), "b"); + replypump.post(llsd::map("special", "K")); + // respond to requester(b) FIRST + replypump.post(requests[1]); + replypump.post(llsd::map("name", "not special")); + // now respond to requester(a) + replypump.post(requests[0]); + // tell leap.process() we're done + replypump.post(LLSD()); + auto [count, result] = future.get(); + ensure_equals("leap.lua: " + result.asString(), count, 0); + } } // namespace tut |
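
The queue classes at the heart of the change follow the classic Lua OO pattern (Queue:new() plus setmetatable/__index), with WaitQueue adding coroutine blocking and ErrorQueue adding error propagation. A condensed producer/consumer sketch, drawing on the first WaitQueue test in qtest.lua and assuming only the modules added by this commit:

-- sketch: producer fills and closes a WaitQueue, then a consumer coroutine drains it
local WaitQueue = require('WaitQueue')
local coro = require('coro')

local q = WaitQueue:new()

-- producer side: queue a few items, then close() as an end marker;
-- closing while the queue is non-empty lets consumers drain the
-- remaining items before they see nil
for _, v in ipairs({ 1, 1, 2, 3, 5, 8 }) do
    q:Enqueue(v)
end
q:close()

-- consumer side: Dequeue() returns items until the queue is drained and
-- closed, then nil; on an empty queue that is still open it would instead
-- suspend this coroutine until some other coroutine Enqueue()s
coro.launch(function ()
    local value = q:Dequeue()
    while value ~= nil do
        print('got', value)
        value = q:Dequeue()
    end
    print('queue closed')
end)

Substituting ErrorQueue for WaitQueue and calling q:Error('something went wrong') instead of q:close() makes every Dequeue() after the remaining items are drained raise that error, which is how leap.lua's WaitFor propagates a get_event_next() failure to blocked coroutines.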