Diffstat (limited to 'indra/newview/scripts/lua/require')
18 files changed, 2188 insertions, 0 deletions
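The files below are new Lua modules loaded with require() by scripts that the viewer runs; they all assume the viewer-provided LL table (LL.get_event_pumps(), LL.post_on(), and so on). A minimal usage sketch tying a few of them together (the debug-setting key is illustrative, not something this change defines):

    local LLChat = require 'LLChat'
    local LLDebugSettings = require 'LLDebugSettings'
    local popup = require 'popup'

    -- say something in nearby chat via the LLChatBar event pump
    LLChat.sendNearby('Hello from Lua')

    -- read a named debug setting; 'RenderFarClip' is just an example key
    local farclip = LLDebugSettings.get('RenderFarClip')

    -- surface the result to the user as a notification tip
    popup:tip(('RenderFarClip is currently %s'):format(farclip))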
diff --git a/indra/newview/scripts/lua/require/ErrorQueue.lua b/indra/newview/scripts/lua/require/ErrorQueue.lua new file mode 100644 index 0000000000..e6e9a5ef48 --- /dev/null +++ b/indra/newview/scripts/lua/require/ErrorQueue.lua @@ -0,0 +1,37 @@ +-- ErrorQueue isa WaitQueue with the added feature that a producer can push an +-- error through the queue. Once that error is dequeued, every consumer will +-- raise that error. + +local WaitQueue = require('WaitQueue') +local function dbg(...) end +-- local dbg = require('printf') +local util = require('util') + +local ErrorQueue = WaitQueue() + +util.classctor(ErrorQueue) + +function ErrorQueue:Error(message) + -- Setting Error() is a marker, like closing the queue. Once we reach the + -- error, every subsequent Dequeue() call will raise the same error. + dbg('Setting self._closed to %q', message) + self._closed = message + self:_wake_waiters() +end + +function ErrorQueue:Dequeue() + local value = WaitQueue.Dequeue(self) + dbg('ErrorQueue:Dequeue: base Dequeue() got %s', value) + if value ~= nil then + -- queue not yet closed, show caller + return value + end + if self._closed == true then + -- WaitQueue:close() sets true: queue has only been closed, tell caller + return nil + end + -- self._closed is a message set by Error() + error(self._closed) +end + +return ErrorQueue diff --git a/indra/newview/scripts/lua/require/Floater.lua b/indra/newview/scripts/lua/require/Floater.lua new file mode 100644 index 0000000000..d057a74386 --- /dev/null +++ b/indra/newview/scripts/lua/require/Floater.lua @@ -0,0 +1,146 @@ +-- Floater base class + +local leap = require 'leap' +local fiber = require 'fiber' +local util = require 'util' + +-- list of all the events that a LLLuaFloater might send +local event_list = leap.request("LLFloaterReg", {op="getFloaterEvents"}).events +local event_set = {} +for _, event in pairs(event_list) do + event_set[event] = true +end + +local function _event(event_name) + if not event_set[event_name] then + error("Incorrect event name: " .. event_name, 3) + end + return event_name +end + +-- --------------------------------------------------------------------------- +local Floater = {} + +-- Pass: +-- relative file path to floater's XUI definition file +-- optional: sign up for additional events for defined control +-- {<control_name>={action1, action2, ...}} +function Floater:new(path, extra) + local obj = setmetatable({}, self) + self.__index = self + + local path_parts = string.split(path, '/') + obj.name = 'Floater ' .. path_parts[#path_parts] + + obj._command = {op="showLuaFloater", xml_path=LL.abspath(path)} + if extra then + -- validate each of the actions for each specified control + for control, actions in pairs(extra) do + for _, action in pairs(actions) do + _event(action) + end + end + obj._command.extra_events = extra + end + + return obj +end + +util.classctor(Floater) + +function Floater:show() + -- leap.eventstream() returns the first response, and launches a + -- background fiber to call the passed callback with all subsequent + -- responses. + local event = leap.eventstream( + 'LLFloaterReg', + self._command, + -- handleEvents() returns false when done. + -- eventstream() expects a true return when done. + function(event) return not self:handleEvents(event) end) + self._pump = event.command_name + -- we might need the returned reqid to cancel the eventstream() fiber + self.reqid = event.reqid + + -- The response to 'showLuaFloater' *is* the 'post_build' event. Check if + -- subclass has a post_build() method. 
Honor the convention that if + -- handleEvents() returns false, we're done. + if not self:handleEvents(event) then + return + end +end + +function Floater:post(action) + leap.send(self._pump, action) +end + +function Floater:request(action) + return leap.request(self._pump, action) +end + +-- local inspect = require 'inspect' + +function Floater:handleEvents(event_data) + local event = event_data.event + if event_set[event] == nil then + LL.print_warning(string.format('%s received unknown event %q', self.name, event)) + end + + -- Before checking for a general (e.g.) commit() method, first look for + -- commit_ctrl_name(): in other words, concatenate the event name with the + -- ctrl_name, with an underscore between. If there exists such a specific + -- method, call that. + local handler, ret + if event_data.ctrl_name then + local specific = event .. '_' .. event_data.ctrl_name + handler = self[specific] + if handler then + ret = handler(self, event_data) + -- Avoid 'return ret or true' because we explicitly want to allow + -- the handler to return false. + if ret ~= nil then + return ret + else + return true + end + end + end + + -- No specific "event_on_ctrl()" method found; try just "event()" + handler = self[event] + if handler then + ret = handler(self, event_data) + if ret ~= nil then + return ret + end +-- else +-- print(string.format('%s ignoring event %s', self.name, inspect(event_data))) + end + + -- We check for event() method before recognizing floater_close in case + -- the consumer needs to react specially to closing the floater. Now that + -- we've checked, recognize it ourselves. Returning false terminates the + -- anonymous fiber function launched by leap.eventstream(). + if event == _event('floater_close') then + LL.print_warning(self.name .. ' closed') + return false + end + return true +end + +-- onCtrl() permits a different dispatch style in which the general event() +-- method explicitly calls (e.g.) +-- self:onCtrl(event_data, { +-- ctrl_name=function() +-- self:post(...) +-- end, +-- ... 
+-- }) +function Floater:onCtrl(event_data, ctrl_map) + local handler = ctrl_map[event_data.ctrl_name] + if handler then + handler() + end +end + +return Floater diff --git a/indra/newview/scripts/lua/require/LLChat.lua b/indra/newview/scripts/lua/require/LLChat.lua new file mode 100644 index 0000000000..78dca765e8 --- /dev/null +++ b/indra/newview/scripts/lua/require/LLChat.lua @@ -0,0 +1,17 @@ +local leap = require 'leap' + +local LLChat = {} + +function LLChat.sendNearby(msg) + leap.send('LLChatBar', {op='sendChat', message=msg}) +end + +function LLChat.sendWhisper(msg) + leap.send('LLChatBar', {op='sendChat', type='whisper', message=msg}) +end + +function LLChat.sendShout(msg) + leap.send('LLChatBar', {op='sendChat', type='shout', message=msg}) +end + +return LLChat diff --git a/indra/newview/scripts/lua/require/LLDebugSettings.lua b/indra/newview/scripts/lua/require/LLDebugSettings.lua new file mode 100644 index 0000000000..cff1a63c21 --- /dev/null +++ b/indra/newview/scripts/lua/require/LLDebugSettings.lua @@ -0,0 +1,17 @@ +local leap = require 'leap' + +local LLDebugSettings = {} + +function LLDebugSettings.set(name, value) + leap.request('LLViewerControl', {op='set', group='Global', key=name, value=value}) +end + +function LLDebugSettings.toggle(name) + leap.request('LLViewerControl', {op='toggle', group='Global', key=name}) +end + +function LLDebugSettings.get(name) + return leap.request('LLViewerControl', {op='get', group='Global', key=name})['value'] +end + +return LLDebugSettings diff --git a/indra/newview/scripts/lua/require/LLFloaterAbout.lua b/indra/newview/scripts/lua/require/LLFloaterAbout.lua new file mode 100644 index 0000000000..a6e42d364f --- /dev/null +++ b/indra/newview/scripts/lua/require/LLFloaterAbout.lua @@ -0,0 +1,11 @@ +-- Engage the LLFloaterAbout LLEventAPI + +local leap = require 'leap' + +local LLFloaterAbout = {} + +function LLFloaterAbout.getInfo() + return leap.request('LLFloaterAbout', {op='getInfo'}) +end + +return LLFloaterAbout diff --git a/indra/newview/scripts/lua/require/LLGesture.lua b/indra/newview/scripts/lua/require/LLGesture.lua new file mode 100644 index 0000000000..343b611e2c --- /dev/null +++ b/indra/newview/scripts/lua/require/LLGesture.lua @@ -0,0 +1,23 @@ +-- Engage the LLGesture LLEventAPI + +local leap = require 'leap' + +local LLGesture = {} + +function LLGesture.getActiveGestures() + return leap.request('LLGesture', {op='getActiveGestures'})['gestures'] +end + +function LLGesture.isGesturePlaying(id) + return leap.request('LLGesture', {op='isGesturePlaying', id=id})['playing'] +end + +function LLGesture.startGesture(id) + leap.send('LLGesture', {op='startGesture', id=id}) +end + +function LLGesture.stopGesture(id) + leap.send('LLGesture', {op='stopGesture', id=id}) +end + +return LLGesture diff --git a/indra/newview/scripts/lua/require/Queue.lua b/indra/newview/scripts/lua/require/Queue.lua new file mode 100644 index 0000000000..5bc72e4057 --- /dev/null +++ b/indra/newview/scripts/lua/require/Queue.lua @@ -0,0 +1,51 @@ +-- from https://create.roblox.com/docs/luau/queues#implementing-queues, +-- amended per https://www.lua.org/pil/16.1.html + +-- While coding some scripting in Lua +-- I found that I needed a queua +-- I thought of linked list +-- But had to resist +-- For fear it might be too obscua. 
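For reference, the interface defined in this file is a plain FIFO. A usage sketch (Queue() works as a constructor on the assumption that util.classctor(), called below, installs a __call metamethod forwarding to Queue:new()):

    local Queue = require 'Queue'
    local q = Queue()
    q:Enqueue('a')
    q:Enqueue('b')
    print(q:IsEmpty())  -- false
    print(q:Dequeue())  -- 'a'
    print(q:Dequeue())  -- 'b'
    print(q:Dequeue())  -- nil: Dequeue() on an empty Queue returns nil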
+ +local util = require 'util' + +local Queue = {} + +function Queue:new() + local obj = setmetatable({}, self) + self.__index = self + + obj._first = 0 + obj._last = -1 + obj._queue = {} + + return obj +end + +util.classctor(Queue) + +-- Check if the queue is empty +function Queue:IsEmpty() + return self._first > self._last +end + +-- Add a value to the queue +function Queue:Enqueue(value) + local last = self._last + 1 + self._last = last + self._queue[last] = value +end + +-- Remove a value from the queue +function Queue:Dequeue() + if self:IsEmpty() then + return nil + end + local first = self._first + local value = self._queue[first] + self._queue[first] = nil + self._first = first + 1 + return value +end + +return Queue diff --git a/indra/newview/scripts/lua/require/UI.lua b/indra/newview/scripts/lua/require/UI.lua new file mode 100644 index 0000000000..eb1a4017c7 --- /dev/null +++ b/indra/newview/scripts/lua/require/UI.lua @@ -0,0 +1,125 @@ +-- Engage the viewer's UI + +local leap = require 'leap' +local Timer = (require 'timers').Timer +local mapargs = require 'mapargs' + +local UI = {} + +-- *************************************************************************** +-- registered menu actions +-- *************************************************************************** +function UI.call(func, parameter) + -- 'call' is fire-and-forget + leap.request('UI', {op='call', ['function']=func, parameter=parameter}) +end + +function UI.getValue(path) + return leap.request('UI', {op='getValue', path=path})['value'] +end + +-- *************************************************************************** +-- UI views +-- *************************************************************************** +-- Either: +-- wreq{op='Something', a=1, b=2, ...} +-- or: +-- (args should be local, as this wreq() call modifies it) +-- local args = {a=1, b=2, ...} +-- wreq('Something', args) +local function wreq(op_or_data, data_if_op) + if data_if_op ~= nil then + -- this is the wreq(op, data) form + data_if_op.op = op_or_data + op_or_data = data_if_op + end + return leap.request('LLWindow', op_or_data) +end + +-- omit 'parent' to list all view paths +function UI.listviews(parent) + return wreq{op='getPaths', under=parent} +end + +function UI.viewinfo(path) + return wreq{op='getInfo', path=path} +end + +-- *************************************************************************** +-- mouse actions +-- *************************************************************************** +-- pass a table: +-- UI.click{path=path +-- [, button='LEFT' | 'CENTER' | 'RIGHT'] +-- [, x=x, y=y] +-- [, hold=duration]} +function UI.click(...) + local args = mapargs('path,button,x,y,hold', ...) + args.button = args.button or 'LEFT' + local hold = args.hold or 1.0 + wreq('mouseMove', args) + wreq('mouseDown', args) + Timer(hold, 'wait') + wreq('mouseUp', args) +end + +-- pass a table as for UI.click() +function UI.doubleclick(...) + local args = mapargs('path,button,x,y', ...) + args.button = args.button or 'LEFT' + wreq('mouseDown', args) + wreq('mouseUp', args) + wreq('mouseDown', args) + wreq('mouseUp', args) +end + +-- UI.drag{path=, xoff=, yoff=} +function UI.drag(...) + local args = mapargs('path,xoff,yoff', ...) 
+ -- query the specified path + local rect = UI.viewinfo(args.path).rect + local centerx = math.floor(rect.left + (rect.right - rect.left)/2) + local centery = math.floor(rect.bottom + (rect.top - rect.bottom)/2) + wreq{op='mouseMove', path=args.path, x=centerx, y=centery} + wreq{op='mouseDown', path=args.path, button='LEFT'} + wreq{op='mouseMove', path=args.path, x=centerx + args.xoff, y=centery + args.yoff} + wreq{op='mouseUp', path=args.path, button='LEFT'} +end + +-- *************************************************************************** +-- keyboard actions +-- *************************************************************************** +-- pass a table: +-- UI.keypress{ +-- [path=path] -- if omitted, default input field +-- [, char='x'] -- requires one of char, keycode, keysym +-- [, keycode=120] +-- keysym per https://github.com/secondlife/viewer/blob/main/indra/llwindow/llkeyboard.cpp#L68-L124 +-- [, keysym='Enter'] +-- [, mask={'SHIFT', 'CTL', 'ALT', 'MAC_CONTROL'}] -- some subset of these +-- } +function UI.keypress(...) + local args = mapargs('path,char,keycode,keysym,mask', ...) + if args.char == '\n' then + args.char = nil + args.keysym = 'Enter' + end + return wreq('keyDown', args) +end + +-- UI.type{text=, path=} +function UI.type(...) + local args = mapargs('text,path', ...) + if #args.text > 0 then + -- The caller's path may be specified in a way that requires recursively + -- searching parts of the LLView tree. No point in doing that more than + -- once. Capture the actual path found by that first call and use that for + -- subsequent calls. + local path = UI.keypress{path=args.path, char=string.sub(args.text, 1, 1)}.path + for i = 2, #args.text do + UI.keypress{path=path, char=string.sub(args.text, i, i)} + end + end +end + +return UI diff --git a/indra/newview/scripts/lua/require/WaitQueue.lua b/indra/newview/scripts/lua/require/WaitQueue.lua new file mode 100644 index 0000000000..7e10d03295 --- /dev/null +++ b/indra/newview/scripts/lua/require/WaitQueue.lua @@ -0,0 +1,88 @@ +-- WaitQueue isa Queue with the added feature that when the queue is empty, +-- the Dequeue() operation blocks the calling coroutine until some other +-- coroutine Enqueue()s a new value. + +local fiber = require('fiber') +local Queue = require('Queue') +local util = require('util') + +local function dbg(...) end +-- local dbg = require('printf') + +local WaitQueue = Queue() + +function WaitQueue:new() + local obj = Queue() + setmetatable(obj, self) + self.__index = self + + obj._waiters = {} + obj._closed = false + return obj +end + +util.classctor(WaitQueue) + +function WaitQueue:Enqueue(value) + if self._closed then + error("can't Enqueue() on closed Queue") + end + -- can't simply call Queue:Enqueue(value)! That calls the method on the + -- Queue class definition, instead of calling Queue:Enqueue() on self. + -- Hand-expand the Queue:Enqueue() syntactic sugar. + Queue.Enqueue(self, value) + self:_wake_waiters() +end + +function WaitQueue:_wake_waiters() + -- WaitQueue is designed to support multi-producer, multi-consumer use + -- cases. With multiple consumers, if more than one is trying to + -- Dequeue() from an empty WaitQueue, we'll have multiple waiters. + -- Unlike OS threads, with cooperative concurrency it doesn't make sense + -- to "notify all": we need wake only one of the waiting Dequeue() + -- callers. 
+ if ((not self:IsEmpty()) or self._closed) and next(self._waiters) then + -- Pop the oldest waiting coroutine instead of the most recent, for + -- more-or-less round robin fairness. But skip any coroutines that + -- have gone dead in the meantime. + local waiter = table.remove(self._waiters, 1) + while waiter and fiber.status(waiter) == "dead" do + waiter = table.remove(self._waiters, 1) + end + -- do we still have at least one waiting coroutine? + if waiter then + -- don't pass the head item: let the resumed coroutine retrieve it + fiber.wake(waiter) + end + end +end + +function WaitQueue:Dequeue() + while self:IsEmpty() do + -- Don't check for closed until the queue is empty: producer can close + -- the queue while there are still items left, and we want the + -- consumer(s) to retrieve those last few items. + if self._closed then + dbg('WaitQueue:Dequeue(): closed') + return nil + end + dbg('WaitQueue:Dequeue(): waiting') + -- add the running coroutine to the list of waiters + dbg('WaitQueue:Dequeue() running %s', tostring(coroutine.running() or 'main')) + table.insert(self._waiters, fiber.running()) + -- then let somebody else run + fiber.wait() + end + -- here we're sure this queue isn't empty + dbg('WaitQueue:Dequeue() calling Queue.Dequeue()') + return Queue.Dequeue(self) +end + +function WaitQueue:close() + self._closed = true + -- close() is like Enqueueing an end marker. If there are waiting + -- consumers, give them a chance to see we're closed. + self:_wake_waiters() +end + +return WaitQueue diff --git a/indra/newview/scripts/lua/require/coro.lua b/indra/newview/scripts/lua/require/coro.lua new file mode 100644 index 0000000000..616a797e95 --- /dev/null +++ b/indra/newview/scripts/lua/require/coro.lua @@ -0,0 +1,67 @@ +-- Manage Lua coroutines + +local coro = {} + +coro._coros = {} + +-- Launch a Lua coroutine: create and resume. +-- Returns: new coroutine, values yielded or returned from initial resume() +-- If initial resume() encountered an error, propagates the error. +function coro.launch(func, ...) + local co = coroutine.create(func) + table.insert(coro._coros, co) + return co, coro.resume(co, ...) +end + +-- resume() wrapper to propagate errors +function coro.resume(co, ...) + -- if there's an idiom other than table.pack() to assign an arbitrary + -- number of return values, I don't yet know it + local ok_result = table.pack(coroutine.resume(co, ...)) + if not ok_result[1] then + -- if [1] is false, then [2] is the error message + error(ok_result[2]) + end + -- ok is true, whew, just return the rest of the values + return table.unpack(ok_result, 2) +end + +-- yield to other coroutines even if you don't know whether you're in a +-- created coroutine or the main coroutine +function coro.yield(...) + if coroutine.running() then + -- this is a real coroutine, yield normally + return coroutine.yield(...) + else + -- This is the main coroutine: coroutine.yield() doesn't work. + -- But we can take a spin through previously-launched coroutines. + -- Walk a copy of coro._coros in case any of these coroutines launches + -- another: next() forbids creating new entries during traversal. + for co in coro._live_coros_iter, table.clone(coro._coros) do + coro.resume(co) + end + end +end + +-- Walk coro._coros table, returning running or suspended coroutines. +-- Once a coroutine becomes dead, remove it from _coros and don't return it. 
+function coro._live_coros() + return coro._live_coros_iter, coro._coros +end + +-- iterator function for _live_coros() +function coro._live_coros_iter(t, idx) + local k, co = next(t, idx) + while k and coroutine.status(co) == 'dead' do +-- t[k] = nil + -- See coro.yield(): sometimes we traverse a copy of _coros, but if we + -- discover a dead coroutine in that copy, delete it from _coros + -- anyway. Deleting it from a temporary copy does nothing. + coro._coros[k] = nil + coroutine.close(co) + k, co = next(t, k) + end + return co +end + +return coro diff --git a/indra/newview/scripts/lua/require/fiber.lua b/indra/newview/scripts/lua/require/fiber.lua new file mode 100644 index 0000000000..cae27b936b --- /dev/null +++ b/indra/newview/scripts/lua/require/fiber.lua @@ -0,0 +1,340 @@ +-- Organize Lua coroutines into fibers. + +-- In this usage, the difference between coroutines and fibers is that fibers +-- have a scheduler. Yielding a fiber means allowing other fibers, plural, to +-- run: it's more than just returning control to the specific Lua thread that +-- resumed the running coroutine. + +-- fiber.launch() creates a new fiber ready to run. +-- fiber.status() reports (augmented) status of the passed fiber: instead of +-- 'suspended', it returns either 'ready' or 'waiting' +-- fiber.yield() allows other fibers to run, but leaves the calling fiber +-- ready to run. +-- fiber.wait() marks the running fiber not ready, and resumes other fibers. +-- fiber.wake() marks the designated suspended fiber ready to run, but does +-- not yet resume it. +-- fiber.run() runs all current fibers until all have terminated (successfully +-- or with an error). + +local printf = require 'printf' +local function dbg(...) end +-- local dbg = printf +local coro = require 'coro' + +local fiber = {} + +-- The tables in which we track fibers must have weak keys so dead fibers +-- can be garbage-collected. +local weak_values = {__mode='v'} +local weak_keys = {__mode='k'} + +-- Track each current fiber as being either ready to run or not ready +-- (waiting). wait() moves the running fiber from ready to waiting; wake() +-- moves the designated fiber from waiting back to ready. +-- The ready table is used as a list so yield() can go round robin. +local ready = setmetatable({'main'}, weak_keys) +-- The waiting table is used as a set because order doesn't matter. +local waiting = setmetatable({}, weak_keys) + +-- Every fiber has a name, for diagnostic purposes. Names must be unique. +-- A colliding name will be suffixed with an integer. +-- Predefine 'main' with our marker so nobody else claims that name. +local names = setmetatable({main='main'}, weak_keys) +local byname = setmetatable({main='main'}, weak_values) +-- each colliding name has its own distinct suffix counter +local suffix = {} + +-- Specify a nullary idle() callback to be called whenever there are no ready +-- fibers but there are waiting fibers. The idle() callback is responsible for +-- changing zero or more waiting fibers to ready fibers by calling +-- fiber.wake(), although a given call may leave them all still waiting. +-- When there are no ready fibers, it's a good idea for the idle() function to +-- return control to a higher-level execution agent. Simply returning without +-- changing any fiber's status will spin the CPU. +-- The idle() callback can return non-nil to exit fiber.run() with that value. 
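To make the idle() contract concrete, a small sketch (assumes the viewer's Lua runtime, since the scheduler calls LL.check_stop(); nothing ever wakes the worker here, so the idle() callback's non-nil return is what ends fiber.run()):

    local fiber = require 'fiber'

    fiber.set_idle(function()
        -- no ready fibers and at least one waiter: give up instead of spinning
        return 'nothing left to wake'
    end)

    fiber.launch('worker', function()
        print('worker: waiting forever')
        fiber.wait()        -- suspends this fiber; nothing ever calls fiber.wake() on it
    end)

    print(fiber.run())      -- runs 'worker', then prints 'nothing left to wake'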
+function fiber._idle() + error('fiber.yield(): you must first call set_idle(nullary idle() function)') +end + +function fiber.set_idle(func) + fiber._idle = func +end + +-- Launch a new Lua fiber, ready to run. +function fiber.launch(name, func, ...) + local args = table.pack(...) + local co = coroutine.create(function() func(table.unpack(args)) end) + -- a new fiber is ready to run + table.insert(ready, co) + local namekey = name + while byname[namekey] do + if not suffix[name] then + suffix[name] = 1 + end + suffix[name] += 1 + namekey = name .. tostring(suffix[name]) + end + -- found a namekey not yet in byname: set it + byname[namekey] = co + -- and remember it as this fiber's name + names[co] = namekey +-- dbg('launch(%s)', namekey) +-- dbg('byname[%s] = %s', namekey, tostring(byname[namekey])) +-- dbg('names[%s] = %s', tostring(co), names[co]) +-- dbg('ready[-1] = %s', tostring(ready[#ready])) +end + +-- for debugging +function format_all() + output = {} + table.insert(output, 'Ready fibers:' .. if next(ready) then '' else ' none') + for _, co in pairs(ready) do + table.insert(output, string.format(' %s: %s', fiber.get_name(co), fiber.status(co))) + end + table.insert(output, 'Waiting fibers:' .. if next(waiting) then '' else ' none') + for co in pairs(waiting) do + table.insert(output, string.format(' %s: %s', fiber.get_name(co), fiber.status(co))) + end + return table.concat(output, '\n') +end + +function fiber.print_all() + print(format_all()) +end + +-- return either the running coroutine or, if called from the main thread, +-- 'main' +function fiber.running() + return coroutine.running() or 'main' +end + +-- Query a fiber's name (nil for the running fiber) +function fiber.get_name(co) + return names[co or fiber.running()] or 'unknown' +end + +-- Query status of the passed fiber +function fiber.status(co) + local running = coroutine.running() + if (not co) or co == running then + -- silly to ask the status of the running fiber: it's 'running' + return 'running' + end + if co ~= 'main' then + -- for any coroutine but main, consult coroutine.status() + local status = coroutine.status(co) + if status ~= 'suspended' then + return status + end + -- here co is suspended, answer needs further refinement + else + -- co == 'main' + if not running then + -- asking about 'main' from the main fiber + return 'running' + end + -- asking about 'main' from some other fiber, so presumably main is suspended + end + -- here we know co is suspended -- but is it ready to run? + if waiting[co] then + return 'waiting' + end + -- not waiting should imply ready: sanity check + if table.find(ready, co) then + return 'ready' + end + -- Calls within yield() between popping the next ready fiber and + -- re-appending it to the list are in this state. Once we're done + -- debugging yield(), we could reinstate either of the below. 
+-- error(string.format('fiber.status(%s) is stumped', fiber.get_name(co))) +-- print(string.format('*** fiber.status(%s) is stumped', fiber.get_name(co))) + return '(unknown)' +end + +-- change the running fiber's status to waiting +local function set_waiting() + -- if called from the main fiber, inject a 'main' marker into the list + co = fiber.running() + -- delete from ready list + local i = table.find(ready, co) + if i then + table.remove(ready, i) + end + -- add to waiting list + waiting[co] = true +end + +-- Suspend the current fiber until some other fiber calls fiber.wake() on it +function fiber.wait() + dbg('Fiber %q waiting', fiber.get_name()) + set_waiting() + -- now yield to other fibers + fiber.yield() +end + +-- Mark a suspended fiber as being ready to run +function fiber.wake(co) + if not waiting[co] then + error(string.format('fiber.wake(%s) but status=%s, ready=%s, waiting=%s', + names[co], fiber.status(co), ready[co], waiting[co])) + end + -- delete from waiting list + waiting[co] = nil + -- add to end of ready list + table.insert(ready, co) + dbg('Fiber %q ready', fiber.get_name(co)) + -- but don't yet resume it: that happens next time we reach yield() +end + +-- pop and return the next not-dead fiber in the ready list, or nil if none remain +local function live_ready_iter() + -- don't write: + -- for co in table.remove, ready, 1 + -- because it would keep passing a new second parameter! + for co in function() return table.remove(ready, 1) end do + dbg('%s live_ready_iter() sees %s, status %s', + fiber.get_name(), fiber.get_name(co), fiber.status(co)) + -- keep removing the head entry until we find one that's not dead, + -- discarding any dead coroutines along the way + if co == 'main' or coroutine.status(co) ~= 'dead' then + dbg('%s live_ready_iter() returning %s', + fiber.get_name(), fiber.get_name(co)) + return co + end + end + dbg('%s live_ready_iter() returning nil', fiber.get_name()) + return nil +end + +-- prune the set of waiting fibers +local function prune_waiting() + for waiter in pairs(waiting) do + if waiter ~= 'main' and coroutine.status(waiter) == 'dead' then + waiting[waiter] = nil + end + end +end + +-- Run other ready fibers, leaving this one ready, returning after a cycle. +-- Returns: +-- * true, nil if there remain other live fibers, whether ready or waiting, +-- but it's our turn to run +-- * false, nil if this is the only remaining fiber +-- * nil, x if configured idle() callback returns non-nil x +local function scheduler() + dbg('scheduler():\n%s', format_all()) + -- scheduler() is asymmetric because Lua distinguishes the main thread + -- from other coroutines. The main thread can't yield; it can only resume + -- other coroutines. So although an arbitrary coroutine could resume still + -- other arbitrary coroutines, it could NOT resume the main thread because + -- the main thread can't yield. Therefore, scheduler() delegates its real + -- processing to the main thread. If called from a coroutine, pass control + -- back to the main thread. + if coroutine.running() then + -- this is a real coroutine, yield normally to main thread + coroutine.yield() + -- main certainly still exists + return true + end + + -- This is the main fiber: coroutine.yield() doesn't work. + -- Instead, resume each of the ready fibers. + -- Prune the set of waiting fibers after every time fiber business logic + -- runs (i.e. other fibers might have terminated or hit error), such as + -- here on entry. 
+ prune_waiting() + local others, idle_stop + repeat + for co in live_ready_iter do + -- seize the opportunity to make sure the viewer isn't shutting down + LL.check_stop() + -- before we re-append co, is it the only remaining entry? + others = next(ready) + -- co is live, re-append it to the ready list + table.insert(ready, co) + if co == 'main' then + -- Since we know the caller is the main fiber, it's our turn. + -- Tell caller if there are other ready or waiting fibers. + return others or next(waiting) + end + -- not main, but some other ready coroutine: + -- use coro.resume() so we'll propagate any error encountered + coro.resume(co) + prune_waiting() + end + -- Here there are no ready fibers. Are there any waiting fibers? + if not next(waiting) then + return false + end + -- there are waiting fibers: call consumer's configured idle() function + idle_stop = fiber._idle() + if idle_stop ~= nil then + return nil, idle_stop + end + prune_waiting() + -- loop "forever", that is, until: + -- * main is ready, or + -- * there are neither ready fibers nor waiting fibers, or + -- * fiber._idle() returned non-nil + until false +end + +-- Let other fibers run. This is useful in either of two cases: +-- * fiber.wait() calls this to run other fibers while this one is waiting. +-- fiber.yield() (and therefore fiber.wait()) works from the main thread as +-- well as from explicitly-launched fibers, without the caller having to +-- care. +-- * A long-running fiber that doesn't often call fiber.wait() should sprinkle +-- in fiber.yield() calls to interleave processing on other fibers. +function fiber.yield() + -- The difference between this and fiber.run() is that fiber.yield() + -- assumes its caller has work to do. yield() returns to its caller as + -- soon as scheduler() pops this fiber from the ready list. fiber.run() + -- continues looping until all other fibers have terminated, or the + -- set_idle() callback tells it to stop. + local others, idle_done = scheduler() + -- scheduler() returns either if we're ready, or if idle_done ~= nil. + if idle_done ~= nil then + -- Returning normally from yield() means the caller can carry on with + -- its pending work. But in this case scheduler() returned because the + -- configured set_idle() function interrupted it -- not because we're + -- actually ready. Don't return normally. + error('fiber.set_idle() interrupted yield() with: ' .. tostring(idle_done)) + end + -- We're ready! Just return to caller. In this situation we don't care + -- whether there are other ready fibers. + dbg('fiber.yield() returning to %s (%sothers are ready)', + fiber.get_name(), ((not others) and "no " or "")) +end + +-- Run fibers until all but main have terminated: return nil. +-- Or until configured idle() callback returns x ~= nil: return x. +function fiber.run() + -- A fiber calling run() is not also doing other useful work. Remove the + -- calling fiber from the ready list. Otherwise yield() would keep seeing + -- that our caller is ready and return to us, instead of realizing that + -- all coroutines are waiting and call idle(). But don't say we're + -- waiting, either, because then when all other fibers have terminated + -- we'd call idle() forever waiting for something to make us ready again. 
+ local i = table.find(ready, fiber.running()) + if i then + table.remove(ready, i) + end + local others, idle_done + repeat + dbg('%s calling fiber.run() calling scheduler()', fiber.get_name()) + others, idle_done = scheduler() + dbg("%s fiber.run()'s scheduler() returned %s, %s", fiber.get_name(), + tostring(others), tostring(idle_done)) + until (not others) + dbg('%s fiber.run() done', fiber.get_name()) + -- For whatever it's worth, put our own fiber back in the ready list. + table.insert(ready, fiber.running()) + -- Once there are no more waiting fibers, and the only ready fiber is + -- us, return to caller. All previously-launched fibers are done. Possibly + -- the chunk is done, or the chunk may decide to launch a new batch of + -- fibers. + return idle_done +end + +return fiber diff --git a/indra/newview/scripts/lua/require/inspect.lua b/indra/newview/scripts/lua/require/inspect.lua new file mode 100644 index 0000000000..9900a0b81b --- /dev/null +++ b/indra/newview/scripts/lua/require/inspect.lua @@ -0,0 +1,371 @@ +local _tl_compat; if (tonumber((_VERSION or ''):match('[%d.]*$')) or 0) < 5.3 then local p, m = pcall(require, 'compat53.module'); if p then _tl_compat = m end end; local math = _tl_compat and _tl_compat.math or math; local string = _tl_compat and _tl_compat.string or string; local table = _tl_compat and _tl_compat.table or table +local inspect = {Options = {}, } + + + + + + + + + + + + + + + + + +inspect._VERSION = 'inspect.lua 3.1.0' +inspect._URL = 'http://github.com/kikito/inspect.lua' +inspect._DESCRIPTION = 'human-readable representations of tables' +inspect._LICENSE = [[ + MIT LICENSE + + Copyright (c) 2022 Enrique GarcÃa Cota + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +]] +inspect.KEY = setmetatable({}, { __tostring = function() return 'inspect.KEY' end }) +inspect.METATABLE = setmetatable({}, { __tostring = function() return 'inspect.METATABLE' end }) + +local tostring = tostring +local rep = string.rep +local match = string.match +local char = string.char +local gsub = string.gsub +local fmt = string.format + +local _rawget +if rawget then + _rawget = rawget +else + _rawget = function(t, k) return t[k] end +end + +local function rawpairs(t) + return next, t, nil +end + + + +local function smartQuote(str) + if match(str, '"') and not match(str, "'") then + return "'" .. str .. "'" + end + return '"' .. gsub(str, '"', '\\"') .. 
'"' +end + + +local shortControlCharEscapes = { + ["\a"] = "\\a", ["\b"] = "\\b", ["\f"] = "\\f", ["\n"] = "\\n", + ["\r"] = "\\r", ["\t"] = "\\t", ["\v"] = "\\v", ["\127"] = "\\127", +} +local longControlCharEscapes = { ["\127"] = "\127" } +for i = 0, 31 do + local ch = char(i) + if not shortControlCharEscapes[ch] then + shortControlCharEscapes[ch] = "\\" .. i + longControlCharEscapes[ch] = fmt("\\%03d", i) + end +end + +local function escape(str) + return (gsub(gsub(gsub(str, "\\", "\\\\"), + "(%c)%f[0-9]", longControlCharEscapes), + "%c", shortControlCharEscapes)) +end + +local luaKeywords = { + ['and'] = true, + ['break'] = true, + ['do'] = true, + ['else'] = true, + ['elseif'] = true, + ['end'] = true, + ['false'] = true, + ['for'] = true, + ['function'] = true, + ['goto'] = true, + ['if'] = true, + ['in'] = true, + ['local'] = true, + ['nil'] = true, + ['not'] = true, + ['or'] = true, + ['repeat'] = true, + ['return'] = true, + ['then'] = true, + ['true'] = true, + ['until'] = true, + ['while'] = true, +} + +local function isIdentifier(str) + return type(str) == "string" and + not not str:match("^[_%a][_%a%d]*$") and + not luaKeywords[str] +end + +local flr = math.floor +local function isSequenceKey(k, sequenceLength) + return type(k) == "number" and + flr(k) == k and + 1 <= (k) and + k <= sequenceLength +end + +local defaultTypeOrders = { + ['number'] = 1, ['boolean'] = 2, ['string'] = 3, ['table'] = 4, + ['function'] = 5, ['userdata'] = 6, ['thread'] = 7, +} + +local function sortKeys(a, b) + local ta, tb = type(a), type(b) + + + if ta == tb and (ta == 'string' or ta == 'number') then + return (a) < (b) + end + + local dta = defaultTypeOrders[ta] or 100 + local dtb = defaultTypeOrders[tb] or 100 + + + return dta == dtb and ta < tb or dta < dtb +end + +local function getKeys(t) + + local seqLen = 1 + while _rawget(t, seqLen) ~= nil do + seqLen = seqLen + 1 + end + seqLen = seqLen - 1 + + local keys, keysLen = {}, 0 + for k in rawpairs(t) do + if not isSequenceKey(k, seqLen) then + keysLen = keysLen + 1 + keys[keysLen] = k + end + end + table.sort(keys, sortKeys) + return keys, keysLen, seqLen +end + +local function countCycles(x, cycles) + if type(x) == "table" then + if cycles[x] then + cycles[x] = cycles[x] + 1 + else + cycles[x] = 1 + for k, v in rawpairs(x) do + countCycles(k, cycles) + countCycles(v, cycles) + end + countCycles(getmetatable(x), cycles) + end + end +end + +local function makePath(path, a, b) + local newPath = {} + local len = #path + for i = 1, len do newPath[i] = path[i] end + + newPath[len + 1] = a + newPath[len + 2] = b + + return newPath +end + + +local function processRecursive(process, + item, + path, + visited) + if item == nil then return nil end + if visited[item] then return visited[item] end + + local processed = process(item, path) + if type(processed) == "table" then + local processedCopy = {} + visited[item] = processedCopy + local processedKey + + for k, v in rawpairs(processed) do + processedKey = processRecursive(process, k, makePath(path, k, inspect.KEY), visited) + if processedKey ~= nil then + processedCopy[processedKey] = processRecursive(process, v, makePath(path, processedKey), visited) + end + end + + local mt = processRecursive(process, getmetatable(processed), makePath(path, inspect.METATABLE), visited) + if type(mt) ~= 'table' then mt = nil end + setmetatable(processedCopy, mt) + processed = processedCopy + end + return processed +end + +local function puts(buf, str) + buf.n = buf.n + 1 + buf[buf.n] = str +end + + + +local Inspector = 
{} + + + + + + + + + + +local Inspector_mt = { __index = Inspector } + +local function tabify(inspector) + puts(inspector.buf, inspector.newline .. rep(inspector.indent, inspector.level)) +end + +function Inspector:getId(v) + local id = self.ids[v] + local ids = self.ids + if not id then + local tv = type(v) + id = (ids[tv] or 0) + 1 + ids[v], ids[tv] = id, id + end + return tostring(id) +end + +function Inspector:putValue(v) + local buf = self.buf + local tv = type(v) + if tv == 'string' then + puts(buf, smartQuote(escape(v))) + elseif tv == 'number' or tv == 'boolean' or tv == 'nil' or + tv == 'cdata' or tv == 'ctype' then + puts(buf, tostring(v)) + elseif tv == 'table' and not self.ids[v] then + local t = v + + if t == inspect.KEY or t == inspect.METATABLE then + puts(buf, tostring(t)) + elseif self.level >= self.depth then + puts(buf, '{...}') + else + if self.cycles[t] > 1 then puts(buf, fmt('<%d>', self:getId(t))) end + + local keys, keysLen, seqLen = getKeys(t) + + puts(buf, '{') + self.level = self.level + 1 + + for i = 1, seqLen + keysLen do + if i > 1 then puts(buf, ',') end + if i <= seqLen then + puts(buf, ' ') + self:putValue(t[i]) + else + local k = keys[i - seqLen] + tabify(self) + if isIdentifier(k) then + puts(buf, k) + else + puts(buf, "[") + self:putValue(k) + puts(buf, "]") + end + puts(buf, ' = ') + self:putValue(t[k]) + end + end + + local mt = getmetatable(t) + if type(mt) == 'table' then + if seqLen + keysLen > 0 then puts(buf, ',') end + tabify(self) + puts(buf, '<metatable> = ') + self:putValue(mt) + end + + self.level = self.level - 1 + + if keysLen > 0 or type(mt) == 'table' then + tabify(self) + elseif seqLen > 0 then + puts(buf, ' ') + end + + puts(buf, '}') + end + + else + puts(buf, fmt('<%s %d>', tv, self:getId(v))) + end +end + + + + +function inspect.inspect(root, options) + options = options or {} + + local depth = options.depth or (math.huge) + local newline = options.newline or '\n' + local indent = options.indent or ' ' + local process = options.process + + if process then + root = processRecursive(process, root, {}, {}) + end + + local cycles = {} + countCycles(root, cycles) + + local inspector = setmetatable({ + buf = { n = 0 }, + ids = {}, + cycles = cycles, + depth = depth, + level = 0, + newline = newline, + indent = indent, + }, Inspector_mt) + + inspector:putValue(root) + + return table.concat(inspector.buf) +end + +setmetatable(inspect, { + __call = function(_, root, options) + return inspect.inspect(root, options) + end, +}) + +return inspect diff --git a/indra/newview/scripts/lua/require/leap.lua b/indra/newview/scripts/lua/require/leap.lua new file mode 100644 index 0000000000..82f91ce9e9 --- /dev/null +++ b/indra/newview/scripts/lua/require/leap.lua @@ -0,0 +1,550 @@ +-- Lua implementation of LEAP (LLSD Event API Plugin) protocol +-- +-- This module supports Lua scripts run by the Second Life viewer. +-- +-- LEAP protocol passes LLSD objects, converted to/from Lua tables, in both +-- directions. A typical LLSD object is a map containing keys 'pump' and +-- 'data'. +-- +-- The viewer's Lua post_to(pump, data) function posts 'data' to the +-- LLEventPump 'pump'. This is typically used to engage an LLEventAPI method. +-- +-- Similarly, the viewer gives each Lua script its own LLEventPump with a +-- unique name. That name is returned by get_event_pumps(). 
Every event +-- received on that LLEventPump is queued for retrieval by get_event_next(), +-- which returns (pump, data): the name of the LLEventPump on which the event +-- was received and the received event data. When the queue is empty, +-- get_event_next() blocks the calling Lua script until the next event is +-- received. +-- +-- Usage: +-- 1. Launch some number of Lua coroutines. The code in each coroutine may +-- call leap.send(), leap.request() or leap.generate(). leap.send() returns +-- immediately ("fire and forget"). leap.request() blocks the calling +-- coroutine until it receives and returns the viewer's response to its +-- request. leap.generate() expects an arbitrary number of responses to the +-- original request. +-- 2. To handle events from the viewer other than direct responses to +-- requests, instantiate a leap.WaitFor object with a filter(pump, data) +-- override method that returns non-nil for desired events. A coroutine may +-- call wait() on any such WaitFor. +-- 3. Once the coroutines have been launched, call leap.process() on the main +-- coroutine. process() retrieves incoming events from the viewer and +-- dispatches them to waiting request() or generate() calls, or to +-- appropriate WaitFor instances. process() returns when either +-- get_event_next() raises an error or the viewer posts nil to the script's +-- reply pump to indicate it's done. +-- 4. Alternatively, a running coroutine may call leap.done() to break out of +-- leap.process(). process() won't notice until the next event from the +-- viewer, though. + +local fiber = require('fiber') +local ErrorQueue = require('ErrorQueue') +local inspect = require('inspect') +local function dbg(...) end +-- local dbg = require('printf') +local util = require('util') + +local leap = {} + +-- reply: string name of reply LLEventPump. Any events the viewer posts to +-- this pump will be queued for get_event_next(). We usually specify it as the +-- reply pump for requests to internal viewer services. +-- command: string name of command LLEventPump. post_to(command, ...) +-- engages LLLeapListener operations such as listening on a specified other +-- LLEventPump, etc. +local reply, command = LL.get_event_pumps() +-- Dict of features added to the LEAP protocol since baseline implementation. +-- Before engaging a new feature that might break an older viewer, we can +-- check for the presence of that feature key. This table is solely about the +-- LEAP protocol itself, the way we communicate with the viewer. To discover +-- whether a given listener exists, or supports a particular operation, use +-- command's "getAPI" operation. +-- For Lua, command's "getFeatures" operation suffices? +-- leap._features = {} + +-- Each outstanding request() or generate() call has a corresponding +-- WaitForReqid object (later in this module) to handle the +-- response(s). If an incoming event contains an echoed ["reqid"] key, +-- we can look up the appropriate WaitForReqid object more efficiently +-- in a dict than by tossing such objects into the usual waitfors list. +-- Note: the ["reqid"] must be unique, otherwise we could end up +-- replacing an earlier WaitForReqid object in pending with a +-- later one. That means that no incoming event will ever be given to +-- the old WaitForReqid object. Any coroutine waiting on the discarded +-- WaitForReqid object would therefore wait forever. +-- pending is NOT a weak table because the caller of request() or generate() +-- never sees the WaitForReqid object. 
pending holds the only reference, so +-- it should NOT be garbage-collected. +local pending = {} +-- Our consumer will instantiate some number of WaitFor subclass objects. +-- As these are traversed in descending priority order, we must keep +-- them in a list. +-- Anyone who instantiates a WaitFor subclass object should retain a reference +-- to it. Once the consuming script drops the reference, allow Lua to +-- garbage-collect the WaitFor despite its entry in waitfors. +local weak_values = {__mode='v'} +local waitfors = setmetatable({}, weak_values) +-- It has been suggested that we should use UUIDs as ["reqid"] values, +-- since UUIDs are guaranteed unique. However, as the "namespace" for +-- ["reqid"] values is our very own reply pump, we can get away with +-- an integer. +leap._reqid = 0 +-- break leap.process() loop +leap._done = false + +-- get the name of the reply pump +function leap.replypump() + return reply +end + +-- get the name of the command pump +function leap.cmdpump() + return command +end + +-- Fire and forget. Send the specified request LLSD, expecting no reply. +-- In fact, should the request produce an eventual reply, it will be +-- treated as an unsolicited event. +-- +-- See also request(), generate(). +function leap.send(pump, data, reqid) + local data = data + if type(data) == 'table' then + data = table.clone(data) + data['reply'] = reply + if reqid ~= nil then + data['reqid'] = reqid + end + end + dbg('leap.send(%s, %s) calling post_on()', pump, data) + LL.post_on(pump, data) +end + +-- common setup code shared by request() and generate() +local function requestSetup(pump, data) + -- invent a new, unique reqid + leap._reqid += 1 + local reqid = leap._reqid + -- Instantiate a new WaitForReqid object. The priority is irrelevant + -- because, unlike the WaitFor base class, WaitForReqid does not + -- self-register on our waitfors list. Instead, capture the new + -- WaitForReqid object in pending so dispatch() can find it. + local waitfor = leap.WaitForReqid(reqid) + pending[reqid] = waitfor + -- Pass reqid to send() to stamp it into (a copy of) the request data. + dbg('requestSetup(%s, %s) storing %s', pump, data, waitfor.name) + leap.send(pump, data, reqid) + return reqid, waitfor +end + +-- Send the specified request LLSD, expecting exactly one reply. Block +-- the calling coroutine until we receive that reply. +-- +-- Every request() (or generate()) LLSD block we send will get stamped +-- with a distinct ["reqid"] value. The requested event API must echo the +-- same ["reqid"] field in each reply associated with that request. This way +-- we can correctly dispatch interleaved replies from different requests. +-- +-- If the desired event API doesn't support the ["reqid"] echo convention, +-- you should use send() instead -- since request() or generate() would +-- wait forever for a reply stamped with that ["reqid"] -- and intercept +-- any replies using WaitFor. +-- +-- Unless the request data already contains a ["reply"] key, we insert +-- reply=self.replypump to try to ensure that the expected reply will be +-- returned over the socket. +-- +-- See also send(), generate(). 
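As a usage sketch of the send()/request() split described above (run from a viewer Lua script; the CHANNEL field is an assumption about what the 'getInfo' reply contains, not something this module guarantees):

    local leap = require 'leap'

    -- fire and forget: no reqid is stamped, no reply is awaited
    leap.send('LLChatBar', {op='sendChat', message='hello'})

    -- request(): stamps a reqid and blocks this fiber until the echoed reply arrives
    local info = leap.request('LLFloaterAbout', {op='getInfo'})
    print(info.CHANNEL)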
+function leap.request(pump, data) + local reqid, waitfor = requestSetup(pump, data) + dbg('leap.request(%s, %s) about to wait on %s', pump, data, tostring(waitfor)) + local ok, response = pcall(waitfor.wait, waitfor) + dbg('leap.request(%s, %s) got %s: %s', pump, data, ok, response) + -- kill off temporary WaitForReqid object, even if error + pending[reqid] = nil + if not ok then + error(response) + elseif response.error then + error(response.error) + else + return response + end +end + +-- Send the specified request LLSD, expecting an arbitrary number of replies. +-- Each one is returned on request. +-- +-- Usage: +-- sequence = leap.generate(pump, data) +-- repeat +-- response = sequence.next() +-- until last(response) +-- (last() means whatever test the caller wants to perform on response) +-- sequence.done() +-- +-- See request() remarks about ["reqid"]. +-- +-- Note: this seems like a prime use case for Lua coroutines. But in a script +-- using fibers.lua, a "wild" coroutine confuses the fiber scheduler. If +-- generate() were itself a coroutine, it would call WaitForReqid:wait(), +-- which would yield -- thereby resuming generate() WITHOUT waiting. +function leap.generate(pump, data, checklast) + -- Invent a new, unique reqid. Arrange to handle incoming events + -- bearing that reqid. Stamp the outbound request with that reqid, and + -- send it. + local reqid, waitfor = requestSetup(pump, data) + return { + next = function() + dbg('leap.generate(%s).next() about to wait on %s', reqid, tostring(waitfor)) + local ok, response = pcall(waitfor.wait, waitfor) + dbg('leap.generate(%s).next() got %s: %s', reqid, ok, response) + if not ok then + error(response) + elseif response.error then + error(response.error) + else + return response + end + end, + done = function() + -- cleanup consists of removing our WaitForReqid from pending + pending[reqid] = nil + end + } +end + +-- Send the specified request LLSD, expecting an immediate reply followed by +-- an arbitrary number of subsequent replies with the same reqid. Block the +-- calling coroutine until the first (immediate) reply, but launch a separate +-- fiber on which to call the passed callback with later replies. +-- +-- Once the callback returns true, the background fiber terminates. +function leap.eventstream(pump, data, callback) + local reqid, waitfor = requestSetup(pump, data) + local response = waitfor:wait() + if response.error then + -- clean up our WaitForReqid + waitfor:close() + error(response.error) + end + -- No error, so far so good: + -- call the callback with the first response just in case + dbg('leap.eventstream(%s): first callback', reqid) + local ok, done = pcall(callback, response) + dbg('leap.eventstream(%s) got %s, %s', reqid, ok, done) + if not ok then + -- clean up our WaitForReqid + waitfor:close() + error(done) + end + if done then + return response + end + -- callback didn't throw an error, and didn't say stop, + -- so set up to handle subsequent events + -- TODO: distinguish "daemon" fibers that can be terminated even if waiting + fiber.launch( + pump, + function () + local ok, done + local nth = 1 + repeat + event = waitfor:wait() + if not event then + -- wait() returns nil once the queue is closed (e.g. 
cancelreq()) + ok, done = true, true + else + nth += 1 + dbg('leap.eventstream(%s): callback %d', reqid, nth) + ok, done = pcall(callback, event) + dbg('leap.eventstream(%s) got %s, %s', reqid, ok, done) + end + -- not ok means callback threw an error (caught as 'done') + -- done means callback succeeded but wants to stop + until (not ok) or done + -- once we break this loop, clean up our WaitForReqid + waitfor:close() + if not ok then + -- can't reflect the error back to our caller + LL.print_warning(fiber.get_name() .. ': ' .. done) + end + end) + return response +end + +-- we might want to clean up after leap.eventstream() even if the callback has +-- not yet returned true +function leap.cancelreq(reqid) + dbg('cancelreq(%s)', reqid) + local waitfor = pending[reqid] + if waitfor ~= nil then + -- close() removes the pending entry and also closes the queue, + -- breaking the background fiber's wait loop. + dbg('cancelreq(%s) canceling %s', reqid, waitfor.name) + waitfor:close() + end +end + +local function cleanup(message) + -- We're done: clean up all pending coroutines. + -- Iterate over copies of the pending and waitfors tables, since the + -- close() operation modifies the real tables. + for i, waitfor in pairs(table.clone(pending)) do + waitfor:close() + end + for i, waitfor in pairs(table.clone(waitfors)) do + waitfor:close() + end +end + +-- Handle an incoming (pump, data) event with no recognizable ['reqid'] +local function unsolicited(pump, data) + -- we maintain waitfors in descending priority order, so the first waitfor + -- to claim this event is the one with the highest priority + for i, waitfor in pairs(waitfors) do + dbg('unsolicited() checking %s', waitfor.name) + if waitfor:handle(pump, data) then + return + end + end + LL.print_debug(string.format('unsolicited(%s, %s) discarding unclaimed event', + pump, inspect(data))) +end + +-- Route incoming (pump, data) event to the appropriate waiting coroutine. +local function dispatch(pump, data) + local reqid = data['reqid'] + -- if the response has no 'reqid', it's not from request() or generate() + if reqid == nil then +-- dbg('dispatch() found no reqid; calling unsolicited(%s, %s)', pump, data) + return unsolicited(pump, data) + end + -- have reqid; do we have a WaitForReqid? + local waitfor = pending[reqid] + if waitfor == nil then +-- dbg('dispatch() found no WaitForReqid(%s); calling unsolicited(%s, %s)', reqid, pump, data) + return unsolicited(pump, data) + end + -- found the right WaitForReqid object, let it handle the event +-- dbg('dispatch() calling %s.handle(%s, %s)', waitfor.name, pump, data) + waitfor:handle(pump, data) +end + +-- We configure fiber.set_idle() function. fiber.yield() calls the configured +-- idle callback whenever there are waiting fibers but no ready fibers. In +-- our case, that means it's time to fetch another incoming viewer event. +fiber.set_idle(function () + -- If someone has called leap.done(), then tell fiber.yield() to break loop. 
+ if leap._done then + cleanup('done') + return 'done' + end + dbg('leap.idle() calling get_event_next()') + local ok, pump, data = pcall(LL.get_event_next) + dbg('leap.idle() got %s: %s, %s', ok, pump, data) + -- ok false means get_event_next() raised a Lua error, pump is message + if not ok then + cleanup(pump) + error(pump) + end + -- data nil means get_event_next() returned (pump, LLSD()) to indicate done + if not data then + cleanup('end') + return 'end' + end + -- got a real pump, data pair + dispatch(pump, data) + -- return to fiber.yield(): any incoming message might result in one or + -- more fibers becoming ready +end) + +function leap.done() + leap._done = true +end + +-- called by WaitFor.enable() +local function registerWaitFor(waitfor) + table.insert(waitfors, waitfor) + -- keep waitfors sorted in descending order of specified priority + table.sort(waitfors, + function (lhs, rhs) return lhs.priority > rhs.priority end) +end + +-- called by WaitFor.disable() +local function unregisterWaitFor(waitfor) + local i = table.find(waitfors, waitfor) + if i ~= nil then + waitfors[i] = nil + end +end + +-- ****************************************************************************** +-- WaitFor and friends +-- ****************************************************************************** + +-- An unsolicited event is handled by the highest-priority WaitFor subclass +-- object willing to accept it. If no such object is found, the unsolicited +-- event is discarded. +-- +-- * First, instantiate a WaitFor subclass object to register its interest in +-- some incoming event(s). WaitFor instances are self-registering; merely +-- instantiating the object suffices. +-- * Any coroutine may call a given WaitFor object's wait() method. This blocks +-- the calling coroutine until a suitable event arrives. +-- * WaitFor's constructor accepts a float priority. Every incoming event +-- (other than those claimed by request() or generate()) is passed to each +-- extant WaitFor.filter() method in descending priority order. The first +-- such filter() to return nontrivial data claims that event. +-- * At that point, the blocked wait() call on that WaitFor object returns the +-- item returned by filter(). +-- * WaitFor contains a queue. Multiple arriving events claimed by that WaitFor +-- object's filter() method are added to the queue. Naturally, until the +-- queue is empty, calling wait() immediately returns the front entry. +-- +-- It's reasonable to instantiate a WaitFor subclass whose filter() method +-- unconditionally returns the incoming event, and whose priority places it +-- last in the list. This object will enqueue every unsolicited event left +-- unclaimed by other WaitFor subclass objects. +-- +-- It's not strictly necessary to associate a WaitFor object with exactly one +-- coroutine. You might have multiple "worker" coroutines drawing from the same +-- WaitFor object, useful if the work being done per event might itself involve +-- "blocking" operations. Or a given coroutine might sample a number of WaitFor +-- objects in round-robin fashion... etc. etc. Nonetheless, it's +-- straightforward to designate one coroutine for each WaitFor object. 
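A sketch of that pattern (the 'ChatEvents' pump name is purely illustrative; overriding filter() per instance works because handle() dispatches through self:filter()):

    local leap = require 'leap'
    local fiber = require 'fiber'

    -- priority 1: considered after any higher-priority waitfors decline the event
    local chatwatch = leap.WaitFor(1, 'chatwatch')
    function chatwatch:filter(pump, data)
        if pump == 'ChatEvents' then
            return data
        end
    end

    fiber.launch('chat watcher', function()
        while true do
            -- wait() blocks this fiber until filter() claims an event
            local event = chatwatch:wait()
            print('chat watcher got:', event)
        end
    end)

    -- drive the fibers; returns once leap sees the viewer is done or leap.done() is called
    fiber.run()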
+ +-- --------------------------------- WaitFor --------------------------------- +leap.WaitFor = { _id=0 } + +function leap.WaitFor.tostring(self) + -- Lua (sub)classes have no name; can't prefix with that + return self.name +end + +function leap.WaitFor:new(priority, name) + local obj = setmetatable({__tostring=leap.WaitFor.tostring}, self) + self.__index = self + + obj.priority = priority + if name then + obj.name = name + else + self._id += 1 + obj.name = 'WaitFor' .. self._id + end + obj._queue = ErrorQueue() + obj._registered = false + -- if no priority, then don't enable() - remember 0 is truthy + if priority then + obj:enable() + end + + return obj +end + +util.classctor(leap.WaitFor) + +-- Re-enable a disable()d WaitFor object. New WaitFor objects are +-- enable()d by default. +function leap.WaitFor:enable() + if not self._registered then + registerWaitFor(self) + self._registered = true + end +end + +-- Disable an enable()d WaitFor object. +function leap.WaitFor:disable() + if self._registered then + unregisterWaitFor(self) + self._registered = false + end +end + +-- Block the calling coroutine until a suitable unsolicited event (one +-- for which filter() returns the event) arrives. +function leap.WaitFor:wait() + dbg('%s about to wait', self.name) + local item = self._queue:Dequeue() + dbg('%s got %s', self.name, item) + return item +end + +-- Override filter() to examine the incoming event in whatever way +-- makes sense. +-- +-- Return nil to ignore this event. +-- +-- To claim the event, return the item you want placed in the queue. +-- Typically you'd write: +-- return data +-- or perhaps +-- return {pump=pump, data=data} +-- or some variation. +function leap.WaitFor:filter(pump, data) + error('You must override the WaitFor.filter() method') +end + +-- called by unsolicited() for each WaitFor in waitfors +function leap.WaitFor:handle(pump, data) + local item = self:filter(pump, data) + dbg('%s.filter() returned %s', self.name, item) + -- if this item doesn't pass the filter, we're not interested + if not item then + return false + end + -- okay, filter() claims this event + self:process(item) + return true +end + +-- called by WaitFor:handle() for an accepted event +function leap.WaitFor:process(item) + self._queue:Enqueue(item) +end + +-- called by cleanup() at end +function leap.WaitFor:close() + self:disable() + self._queue:close() +end + +-- called by leap.process() when get_event_next() raises an error +function leap.WaitFor:exception(message) + LL.print_warning(self.name .. ' error: ' .. message) + self._queue:Error(message) +end + +-- ------------------------------ WaitForReqid ------------------------------- +leap.WaitForReqid = leap.WaitFor() + +function leap.WaitForReqid:new(reqid) + -- priority is meaningless, since this object won't be added to the + -- priority-sorted waitfors list. Use the reqid as the debugging name + -- string. + local obj = leap.WaitFor(nil, 'WaitForReqid(' .. reqid .. ')') + setmetatable(obj, self) + self.__index = self + + obj.reqid = reqid + + return obj +end + +util.classctor(leap.WaitForReqid) + +function leap.WaitForReqid:filter(pump, data) + -- Because we expect to directly look up the WaitForReqid object of + -- interest based on the incoming ["reqid"] value, it's not necessary + -- to test the event again. Accept every such event. 
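+    -- (dispatch() has already looked this object up in the 'pending' table
+    -- by the event's ["reqid"] before calling handle(), so filtering again
+    -- here would be redundant.)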
+ return data +end + +function leap.WaitForReqid:close() + -- remove this entry from pending table + pending[self.reqid] = nil + self._queue:close() +end + +return leap diff --git a/indra/newview/scripts/lua/require/popup.lua b/indra/newview/scripts/lua/require/popup.lua new file mode 100644 index 0000000000..3aaadf85ba --- /dev/null +++ b/indra/newview/scripts/lua/require/popup.lua @@ -0,0 +1,53 @@ +local leap = require 'leap' +local mapargs = require 'mapargs' + +-- notification is any name defined in notifications.xml as +-- <notification name=> +-- vars is a table providing values for [VAR] substitution keys in the +-- notification body +-- payload prepopulates the response table +-- wait=false means fire and forget, otherwise wait for user response +local popup_meta = { + -- setting this function as getmetatable(popup).__call() means this gets + -- called when a consumer calls popup(notification, vars, payload) + __call = function(self, ...) + local args = mapargs('notification,vars,payload,wait', ...) + -- we use convenience argument names different from 'LLNotifications' + -- listener + args.name = args.notification + args.notification = nil + args.substitutions = args.vars + args.vars = nil + local wait = args.wait + args.wait = nil + args.op = 'requestAdd' + -- Specifically test (wait == false), NOT (not wait), because we treat + -- nil (omitted, default true) differently than false (explicitly + -- DON'T wait). + if wait == false then + leap.send('LLNotifications', args) + else + return leap.request('LLNotifications', args).response + end + end +} + +local popup = setmetatable({}, popup_meta) + +function popup:alert(message) + return self('GenericAlert', {MESSAGE=message}) +end + +function popup:alertOK(message) + return self('GenericAlertOK', {MESSAGE=message}) +end + +function popup:alertYesCancel(message) + return self('GenericAlertYesCancel', {MESSAGE=message}) +end + +function popup:tip(message) + self{'SystemMessageTip', {MESSAGE=message}, wait=false} +end + +return popup diff --git a/indra/newview/scripts/lua/require/printf.lua b/indra/newview/scripts/lua/require/printf.lua new file mode 100644 index 0000000000..e84b2024df --- /dev/null +++ b/indra/newview/scripts/lua/require/printf.lua @@ -0,0 +1,19 @@ +-- printf(...) is short for print(string.format(...)) + +local inspect = require 'inspect' + +local function printf(format, ...) + -- string.format() only handles numbers and strings. + -- Convert anything else to string using the inspect module. + local args = {} + for _, arg in pairs(table.pack(...)) do + if type(arg) == 'number' or type(arg) == 'string' then + table.insert(args, arg) + else + table.insert(args, inspect(arg)) + end + end + print(string.format(format, table.unpack(args))) +end + +return printf diff --git a/indra/newview/scripts/lua/require/startup.lua b/indra/newview/scripts/lua/require/startup.lua new file mode 100644 index 0000000000..c3040f94b8 --- /dev/null +++ b/indra/newview/scripts/lua/require/startup.lua @@ -0,0 +1,100 @@ +-- query, wait for or mandate a particular viewer startup state + +-- During startup, the viewer steps through a sequence of numbered (and named) +-- states. This can be used to detect when, for instance, the login screen is +-- displayed, or when the viewer has finished logging in and is fully +-- in-world. + +local fiber = require 'fiber' +local leap = require 'leap' +local inspect = require 'inspect' +local function dbg(...) 
end +-- local dbg = require 'printf' + +-- --------------------------------------------------------------------------- +local startup = {} + +-- Get the list of startup states from the viewer. +local bynum = leap.request('LLStartUp', {op='getStateTable'})['table'] + +local byname = setmetatable( + {}, + -- set metatable to throw an error if you look up invalid state name + {__index=function(t, k) + local v = rawget(t, k) + if v then + return v + end + error(string.format('startup module passed invalid state %q', k), 2) + end}) + +-- derive byname as a lookup table to find the 0-based index for a given name +for i, name in pairs(bynum) do + -- the viewer's states are 0-based, not 1-based like Lua indexes + byname[name] = i - 1 +end +-- dbg('startup states: %s', inspect(byname)) + +-- specialize a WaitFor to track the viewer's startup state +local startup_pump = 'StartupState' +local waitfor = leap.WaitFor(0, startup_pump) +function waitfor:filter(pump, data) + if pump == self.name then + return data + end +end + +function waitfor:process(data) + -- keep updating startup._state for interested parties + startup._state = data.str + dbg('startup updating state to %q', data.str) + -- now pass data along to base-class method to queue + leap.WaitFor.process(self, data) +end + +-- listen for StartupState events +leap.request(leap.cmdpump(), + {op='listen', source=startup_pump, listener='startup.lua', tweak=true}) +-- poke LLStartUp to make sure we get an event +leap.send('LLStartUp', {op='postStartupState'}) + +-- --------------------------------------------------------------------------- +-- wait for response from postStartupState +while not startup._state do + dbg('startup.state() waiting for first StartupState event') + waitfor:wait() +end + +-- return a list of all known startup states +function startup.list() + return bynum +end + +-- report whether state with string name 'left' is before string name 'right' +function startup.before(left, right) + return byname[left] < byname[right] +end + +-- report the viewer's current startup state +function startup.state() + return startup._state +end + +-- error if script is called before specified state string name +function startup.ensure(state) + if startup.before(startup.state(), state) then + -- tell error() to pretend this error was thrown by our caller + error('must not be called before startup state ' .. state, 2) + end +end + +-- block calling fiber until viewer has reached state with specified string name +function startup.wait(state) + dbg('startup.wait(%q)', state) + while startup.before(startup.state(), state) do + local item = waitfor:wait() + dbg('startup.wait(%q) sees %s', state, item) + end +end + +return startup diff --git a/indra/newview/scripts/lua/require/timers.lua b/indra/newview/scripts/lua/require/timers.lua new file mode 100644 index 0000000000..e4938078dc --- /dev/null +++ b/indra/newview/scripts/lua/require/timers.lua @@ -0,0 +1,104 @@ +-- Access to the viewer's time-delay facilities + +local leap = require 'leap' +local util = require 'util' + +local timers = {} + +local function dbg(...) 
end +-- local dbg = require 'printf' + +timers.Timer = {} + +-- delay: time in seconds until callback +-- callback: 'wait', or function to call when timer fires (self:tick if nil) +-- iterate: if non-nil, call callback repeatedly until it returns non-nil +-- (ignored if 'wait') +function timers.Timer:new(delay, callback, iterate) + local obj = setmetatable({}, self) + self.__index = self + + if callback == 'wait' then + dbg('scheduleAfter(%d):', delay) + sequence = leap.generate('Timers', {op='scheduleAfter', after=delay}) + -- ignore the immediate return + dbg('scheduleAfter(%d) -> %s', delay, + sequence.next()) + -- this call is where we wait for real + dbg('next():') + dbg('next() -> %s', + sequence.next()) + sequence.done() + return + end + + callback = callback or function() obj:tick() end + + local first = true + if iterate then + obj.id = leap.eventstream( + 'Timers', + {op='scheduleEvery', every=delay}, + function (event) + local reqid = event.reqid + if first then + first = false + dbg('timer(%s) first callback', reqid) + -- discard the first (immediate) response: don't call callback + return nil + else + dbg('timer(%s) nth callback', reqid) + return callback(event) + end + end + ).reqid + else + obj.id = leap.eventstream( + 'Timers', + {op='scheduleAfter', after=delay}, + function (event) + -- Arrange to return nil the first time, true the second. This + -- callback is called immediately with the response to + -- 'scheduleAfter', and if we immediately returned true, we'd + -- be done, and the subsequent timer event would be discarded. + if first then + first = false + -- Caller doesn't expect an immediate callback. + return nil + else + callback(event) + -- Since caller doesn't want to iterate, the value + -- returned by the callback is irrelevant: just stop after + -- this one and only call. + return true + end + end + ).reqid + end + + return obj +end + +util.classctor(timers.Timer) + +function timers.Timer:tick() + error('Pass a callback to Timer:new(), or override Timer:tick()') +end + +function timers.Timer:cancel() + local ok = leap.request('Timers', {op='cancel', id=self.id}).ok + leap.cancelreq(self.id) + return ok +end + +function timers.Timer:isRunning() + return leap.request('Timers', {op='isRunning', id=self.id}).running +end + +-- returns (true, seconds left) for a live timer, else (false, 0) +function timers.Timer:timeUntilCall() + local result = leap.request('Timers', {op='timeUntilCall', id=self.id}) + return result.ok, result.remaining +end + +return timers diff --git a/indra/newview/scripts/lua/require/util.lua b/indra/newview/scripts/lua/require/util.lua new file mode 100644 index 0000000000..bfbfc8637c --- /dev/null +++ b/indra/newview/scripts/lua/require/util.lua @@ -0,0 +1,69 @@ +-- utility functions, in alpha order + +local util = {} + +-- Allow MyClass(ctor args...) equivalent to MyClass:new(ctor args...) +-- Usage: +-- local MyClass = {} +-- function MyClass:new(...) +-- ... +-- end +-- ... +-- util.classctor(MyClass) +-- or if your constructor is named something other than MyClass:new(), e.g. 
+-- MyClass:construct(): +-- util.classctor(MyClass, MyClass.construct) +-- return MyClass +function util.classctor(class, ctor) + -- get the metatable for the passed class + local mt = getmetatable(class) + if mt == nil then + -- if it doesn't already have a metatable, then create one + mt = {} + setmetatable(class, mt) + end + -- now that class has a metatable, set its __call method to the specified + -- constructor method (class.new if not specified) + mt.__call = ctor or class.new +end + +-- check if array-like table contains certain value +function util.contains(t, v) + return table.find(t, v) ~= nil +end + +-- reliable count of the number of entries in table t +-- (since #t is unreliable) +function util.count(t) + local count = 0 + for _ in pairs(t) do + count += 1 + end + return count +end + +-- cheap test whether table t is empty +function util.empty(t) + return not next(t) +end + +-- recursive table equality +function util.equal(t1, t2) + if not (type(t1) == 'table' and type(t2) == 'table') then + return t1 == t2 + end + -- both t1 and t2 are tables: get modifiable copy of t2 + local temp = table.clone(t2) + for k, v in pairs(t1) do + -- if any key in t1 doesn't have same value in t2, not equal + if not util.equal(v, temp[k]) then + return false + end + -- temp[k] == t1[k], delete temp[k] + temp[k] = nil + end + -- All keys in t1 have equal values in t2; t2 == t1 if there are no extra keys in t2 + return util.empty(temp) +end + +return util |