Diffstat (limited to 'indra/lib/python')
-rw-r--r--  indra/lib/python/indra/__init__.py                     |  27
-rw-r--r--  indra/lib/python/indra/base/__init__.py                |   2
-rw-r--r--  indra/lib/python/indra/base/cllsd_test.py              |   2
-rw-r--r--  indra/lib/python/indra/base/config.py                  |  32
-rw-r--r--  indra/lib/python/indra/base/llsd.py                    | 294
-rw-r--r--  indra/lib/python/indra/base/lluuid.py                  |  12
-rw-r--r--  indra/lib/python/indra/base/metrics.py                 |  96
-rw-r--r--  indra/lib/python/indra/ipc/__init__.py                 |   2
-rw-r--r--  indra/lib/python/indra/ipc/compatibility.py            |   2
-rw-r--r--  indra/lib/python/indra/ipc/llmessage.py                |  13
-rw-r--r--  indra/lib/python/indra/ipc/llsdhttp.py                 |  10
-rw-r--r--  indra/lib/python/indra/ipc/mysql_pool.py               |  15
-rw-r--r--  indra/lib/python/indra/ipc/russ.py                     |   2
-rw-r--r--  indra/lib/python/indra/ipc/servicebuilder.py           |  53
-rw-r--r--  indra/lib/python/indra/ipc/siesta.py                   | 136
-rw-r--r--  indra/lib/python/indra/ipc/tokenstream.py              |   2
-rw-r--r--  indra/lib/python/indra/ipc/webdav.py                   |   2
-rw-r--r--  indra/lib/python/indra/ipc/xml_rpc.py                  |   2
-rw-r--r--  indra/lib/python/indra/util/__init__.py                |   2
-rw-r--r--  indra/lib/python/indra/util/fastest_elementtree.py     |  32
-rw-r--r--  indra/lib/python/indra/util/helpformatter.py           |   2
-rw-r--r--  indra/lib/python/indra/util/iterators.py               |  63
-rwxr-xr-x  indra/lib/python/indra/util/iterators_test.py          |  72
-rw-r--r--  indra/lib/python/indra/util/llmanifest.py              |  25
-rwxr-xr-x  indra/lib/python/indra/util/llperformance.py           | 158
-rw-r--r--  indra/lib/python/indra/util/llsubprocess.py            |  13
-rw-r--r--  indra/lib/python/indra/util/llversion.py               |  41
-rw-r--r--  indra/lib/python/indra/util/named_query.py             |  95
-rw-r--r--  indra/lib/python/indra/util/shutil2.py                 |   2
-rwxr-xr-x  indra/lib/python/indra/util/simperf_host_xml_parser.py | 338
-rwxr-xr-x  indra/lib/python/indra/util/simperf_oprof_interface.py | 167
-rwxr-xr-x  indra/lib/python/indra/util/simperf_proc_interface.py  | 191
-rw-r--r--  indra/lib/python/indra/util/term.py                    |  24
-rw-r--r--  indra/lib/python/indra/util/test_win32_manifest.py     | 143
34 files changed, 1852 insertions, 220 deletions
diff --git a/indra/lib/python/indra/__init__.py b/indra/lib/python/indra/__init__.py
index 353a93ffae..e010741c1c 100644
--- a/indra/lib/python/indra/__init__.py
+++ b/indra/lib/python/indra/__init__.py
@@ -2,19 +2,24 @@
@file __init__.py
@brief Initialization file for the indra module.
-$LicenseInfo:firstyear=2006&license=internal$
+$LicenseInfo:firstyear=2006&license=viewerlgpl$
+Second Life Viewer Source Code
+Copyright (C) 2010, Linden Research, Inc.
-Copyright (c) 2006-2007, Linden Research, Inc.
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation;
+version 2.1 of the License only.
-The following source code is PROPRIETARY AND CONFIDENTIAL. Use of
-this source code is governed by the Linden Lab Source Code Disclosure
-Agreement ("Agreement") previously entered between you and Linden
-Lab. By accessing, using, copying, modifying or distributing this
-software, you acknowledge that you have been informed of your
-obligations under the Agreement and agree to abide by those obligations.
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Lesser General Public License for more details.
-ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO
-WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY,
-COMPLETENESS OR PERFORMANCE.
+You should have received a copy of the GNU Lesser General Public
+License along with this library; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
$/LicenseInfo$
"""
diff --git a/indra/lib/python/indra/base/__init__.py b/indra/lib/python/indra/base/__init__.py
index 913164d090..2904fd3380 100644
--- a/indra/lib/python/indra/base/__init__.py
+++ b/indra/lib/python/indra/base/__init__.py
@@ -4,7 +4,7 @@
$LicenseInfo:firstyear=2007&license=mit$
-Copyright (c) 2007, Linden Research, Inc.
+Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/indra/lib/python/indra/base/cllsd_test.py b/indra/lib/python/indra/base/cllsd_test.py
index 3af59e741a..0b20d99d80 100644
--- a/indra/lib/python/indra/base/cllsd_test.py
+++ b/indra/lib/python/indra/base/cllsd_test.py
@@ -10,7 +10,7 @@ values = (
'&<>',
u'\u81acj',
llsd.uri('http://foo<'),
- lluuid.LLUUID(),
+ lluuid.UUID(),
llsd.LLSD(['thing']),
1,
myint(31337),
diff --git a/indra/lib/python/indra/base/config.py b/indra/lib/python/indra/base/config.py
index 9d8da7dd15..adafa29b51 100644
--- a/indra/lib/python/indra/base/config.py
+++ b/indra/lib/python/indra/base/config.py
@@ -4,7 +4,7 @@
$LicenseInfo:firstyear=2006&license=mit$
-Copyright (c) 2006-2007, Linden Research, Inc.
+Copyright (c) 2006-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -27,6 +27,7 @@ $/LicenseInfo$
"""
import copy
+import errno
import os
import traceback
import time
@@ -62,6 +63,8 @@ class IndraConfig(object):
self._load()
def _load(self):
+ # if you initialize the IndraConfig with None, no attempt
+ # is made to load any files
if self._indra_config_file is None:
return
@@ -164,18 +167,37 @@ class IndraConfig(object):
"""
return copy.deepcopy(self._combined_dict)
-def load(indra_xml_file = None):
+def load(config_xml_file = None):
global _g_config
- if indra_xml_file is None:
+ load_default_files = config_xml_file is None
+ if load_default_files:
## going from:
## "/opt/linden/indra/lib/python/indra/base/config.py"
## to:
## "/opt/linden/etc/indra.xml"
- indra_xml_file = realpath(
+ config_xml_file = realpath(
dirname(realpath(__file__)) + "../../../../../../etc/indra.xml")
- _g_config = IndraConfig(indra_xml_file)
+ try:
+ _g_config = IndraConfig(config_xml_file)
+ except IOError:
+ # Failure to load passed in file
+ # or indra.xml default file
+ if load_default_files:
+ try:
+ config_xml_file = realpath(
+ dirname(realpath(__file__)) + "../../../../../../etc/globals.xml")
+ _g_config = IndraConfig(config_xml_file)
+ return
+ except IOError:
+ # Failure to load globals.xml
+ # fall to code below
+ pass
+
+ # Either failed to load passed in file
+ # or failed to load all default files
+ _g_config = IndraConfig(None)
def dump(indra_xml_file, indra_cfg = None, update_in_mem=False):
'''
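
The reworked load() above now degrades gracefully: it tries the caller-supplied path (or etc/indra.xml when none is given), falls back to etc/globals.xml, and finally constructs an empty IndraConfig rather than raising IOError. A minimal usage sketch; the explicit path below is illustrative, borrowed from the default config directory mentioned in the siesta changes further down:

    from indra.base import config

    # Explicit file: if it cannot be read, load() now ends up with an
    # empty IndraConfig(None) instead of propagating IOError.
    config.load('/local/linden/etc/indra.xml')

    # No argument: tries etc/indra.xml relative to the install tree,
    # then etc/globals.xml, then gives up and uses an empty config.
    config.load()
    base_url = config.get('services-base-url')
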
diff --git a/indra/lib/python/indra/base/llsd.py b/indra/lib/python/indra/base/llsd.py
index 9561a56710..4527b115f9 100644
--- a/indra/lib/python/indra/base/llsd.py
+++ b/indra/lib/python/indra/base/llsd.py
@@ -4,7 +4,7 @@
$LicenseInfo:firstyear=2006&license=mit$
-Copyright (c) 2006-2007, Linden Research, Inc.
+Copyright (c) 2006-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -28,24 +28,35 @@ $/LicenseInfo$
import datetime
import base64
+import string
import struct
import time
import types
import re
-from indra.util.fastest_elementtree import fromstring
+from indra.util.fastest_elementtree import ElementTreeError, fromstring
from indra.base import lluuid
-int_regex = re.compile("[-+]?\d+")
-real_regex = re.compile("[-+]?(\d+(\.\d*)?|\d*\.\d+)([eE][-+]?\d+)?")
-alpha_regex = re.compile("[a-zA-Z]+")
-date_regex = re.compile("(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})T(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2})(?P<second_float>\.\d{2})?Z")
-#date: d"YYYY-MM-DDTHH:MM:SS.FFZ"
+# cllsd.c in server/server-1.25 has memory leaks,
+# so disabling cllsd for now
+#try:
+# import cllsd
+#except ImportError:
+# cllsd = None
+cllsd = None
+
+int_regex = re.compile(r"[-+]?\d+")
+real_regex = re.compile(r"[-+]?(\d+(\.\d*)?|\d*\.\d+)([eE][-+]?\d+)?")
+alpha_regex = re.compile(r"[a-zA-Z]+")
+date_regex = re.compile(r"(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})T"
+ r"(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2})"
+ r"(?P<second_float>(\.\d+)?)Z")
+#date: d"YYYY-MM-DDTHH:MM:SS.FFFFFFZ"
class LLSDParseError(Exception):
pass
-class LLSDSerializationError(Exception):
+class LLSDSerializationError(TypeError):
pass
@@ -61,15 +72,11 @@ BOOL_FALSE = ('0', '0.0', 'false', '')
def format_datestr(v):
- """ Formats a datetime object into the string format shared by xml and notation serializations."""
- second_str = ""
- if v.microsecond > 0:
- seconds = v.second + float(v.microsecond) / 1000000
- second_str = "%05.2f" % seconds
+ """ Formats a datetime or date object into the string format shared by xml and notation serializations."""
+ if hasattr(v, 'microsecond'):
+ return v.isoformat() + 'Z'
else:
- second_str = "%d" % v.second
- return '%s%sZ' % (v.strftime('%Y-%m-%dT%H:%M:'), second_str)
-
+ return v.strftime('%Y-%m-%dT%H:%M:%SZ')
def parse_datestr(datestr):
"""Parses a datetime object from the string format shared by xml and notation serializations."""
@@ -89,7 +96,7 @@ def parse_datestr(datestr):
seconds_float = match.group('second_float')
microsecond = 0
if seconds_float:
- microsecond = int(seconds_float[1:]) * 10000
+ microsecond = int(float('0' + seconds_float) * 1e6)
return datetime.datetime(year, month, day, hour, minute, second, microsecond)
@@ -116,7 +123,7 @@ def uuid_to_python(node):
return lluuid.UUID(node.text)
def str_to_python(node):
- return unicode(node.text or '').encode('utf8', 'replace')
+ return node.text or ''
def bin_to_python(node):
return binary(base64.decodestring(node.text or ''))
@@ -126,6 +133,7 @@ def date_to_python(node):
if not val:
val = "1970-01-01T00:00:00Z"
return parse_datestr(val)
+
def uri_to_python(node):
val = node.text or ''
@@ -178,6 +186,7 @@ class LLSDXMLFormatter(object):
unicode : self.STRING,
uri : self.URI,
datetime.datetime : self.DATE,
+ datetime.date : self.DATE,
list : self.ARRAY,
tuple : self.ARRAY,
types.GeneratorType : self.ARRAY,
@@ -189,9 +198,13 @@ class LLSDXMLFormatter(object):
if(contents is None or contents is ''):
return "<%s />" % (name,)
else:
+ if type(contents) is unicode:
+ contents = contents.encode('utf-8')
return "<%s>%s</%s>" % (name, contents, name)
def xml_esc(self, v):
+ if type(v) is unicode:
+ v = v.encode('utf-8')
return v.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
def LLSD(self, v):
@@ -225,7 +238,7 @@ class LLSDXMLFormatter(object):
def MAP(self, v):
return self.elt(
'map',
- ''.join(["%s%s" % (self.elt('key', key), self.generate(value))
+ ''.join(["%s%s" % (self.elt('key', self.xml_esc(str(key))), self.generate(value))
for key, value in v.items()]))
typeof = type
@@ -237,9 +250,14 @@ class LLSDXMLFormatter(object):
raise LLSDSerializationError("Cannot serialize unknown type: %s (%s)" % (
t, something))
- def format(self, something):
+ def _format(self, something):
return '<?xml version="1.0" ?>' + self.elt("llsd", self.generate(something))
+ def format(self, something):
+ if cllsd:
+ return cllsd.llsd_to_xml(something)
+ return self._format(something)
+
_g_xml_formatter = None
def format_xml(something):
global _g_xml_formatter
@@ -247,6 +265,78 @@ def format_xml(something):
_g_xml_formatter = LLSDXMLFormatter()
return _g_xml_formatter.format(something)
+class LLSDXMLPrettyFormatter(LLSDXMLFormatter):
+ def __init__(self, indent_atom = None):
+ # Call the super class constructor so that we have the type map
+ super(LLSDXMLPrettyFormatter, self).__init__()
+
+ # Override the type map to use our specialized formatters to
+ # emit the pretty output.
+ self.type_map[list] = self.PRETTY_ARRAY
+ self.type_map[tuple] = self.PRETTY_ARRAY
+        self.type_map[types.GeneratorType] = self.PRETTY_ARRAY
+ self.type_map[dict] = self.PRETTY_MAP
+
+ # Private data used for indentation.
+ self._indent_level = 1
+ if indent_atom is None:
+ self._indent_atom = ' '
+ else:
+ self._indent_atom = indent_atom
+
+ def _indent(self):
+ "Return an indentation based on the atom and indentation level."
+ return self._indent_atom * self._indent_level
+
+ def PRETTY_ARRAY(self, v):
+ rv = []
+ rv.append('<array>\n')
+ self._indent_level = self._indent_level + 1
+ rv.extend(["%s%s\n" %
+ (self._indent(),
+ self.generate(item))
+ for item in v])
+ self._indent_level = self._indent_level - 1
+ rv.append(self._indent())
+ rv.append('</array>')
+ return ''.join(rv)
+
+ def PRETTY_MAP(self, v):
+ rv = []
+ rv.append('<map>\n')
+ self._indent_level = self._indent_level + 1
+ keys = v.keys()
+ keys.sort()
+ rv.extend(["%s%s\n%s%s\n" %
+ (self._indent(),
+ self.elt('key', key),
+ self._indent(),
+ self.generate(v[key]))
+ for key in keys])
+ self._indent_level = self._indent_level - 1
+ rv.append(self._indent())
+ rv.append('</map>')
+ return ''.join(rv)
+
+ def format(self, something):
+ data = []
+ data.append('<?xml version="1.0" ?>\n<llsd>')
+ data.append(self.generate(something))
+ data.append('</llsd>\n')
+ return '\n'.join(data)
+
+def format_pretty_xml(something):
+ """@brief Serialize a python object as 'pretty' llsd xml.
+
+ The output conforms to the LLSD DTD, unlike the output from the
+ standard python xml.dom DOM::toprettyxml() method which does not
+ preserve significant whitespace.
+ This function is not necessarily suited for serializing very large
+ objects. It is not optimized by the cllsd module, and sorts on
+ dict (llsd map) keys alphabetically to ease human reading.
+ """
+ return LLSDXMLPrettyFormatter().format(something)
+
class LLSDNotationFormatter(object):
def __init__(self):
self.type_map = {
@@ -261,6 +351,7 @@ class LLSDNotationFormatter(object):
unicode : self.STRING,
uri : self.URI,
datetime.datetime : self.DATE,
+ datetime.date : self.DATE,
list : self.ARRAY,
tuple : self.ARRAY,
types.GeneratorType : self.ARRAY,
@@ -284,8 +375,10 @@ class LLSDNotationFormatter(object):
def UUID(self, v):
return "u%s" % v
def BINARY(self, v):
- raise LLSDSerializationError("binary notation not yet supported")
+ return 'b64"' + base64.encodestring(v) + '"'
def STRING(self, v):
+ if isinstance(v, unicode):
+ v = v.encode('utf-8')
return "'%s'" % v.replace("\\", "\\\\").replace("'", "\\'")
def URI(self, v):
return 'l"%s"' % str(v).replace("\\", "\\\\").replace('"', '\\"')
@@ -294,16 +387,24 @@ class LLSDNotationFormatter(object):
def ARRAY(self, v):
return "[%s]" % ','.join([self.generate(item) for item in v])
def MAP(self, v):
- return "{%s}" % ','.join(["'%s':%s" % (key.replace("\\", "\\\\").replace("'", "\\'"), self.generate(value))
+ def fix(key):
+ if isinstance(key, unicode):
+ return key.encode('utf-8')
+ return key
+ return "{%s}" % ','.join(["'%s':%s" % (fix(key).replace("\\", "\\\\").replace("'", "\\'"), self.generate(value))
for key, value in v.items()])
def generate(self, something):
t = type(something)
- if self.type_map.has_key(t):
- return self.type_map[t](something)
+ handler = self.type_map.get(t)
+ if handler:
+ return handler(something)
else:
- raise LLSDSerializationError("Cannot serialize unknown type: %s (%s)" % (
- t, something))
+ try:
+ return self.ARRAY(iter(something))
+ except TypeError:
+ raise LLSDSerializationError(
+ "Cannot serialize unknown type: %s (%s)" % (t, something))
def format(self, something):
return self.generate(something)
@@ -407,7 +508,6 @@ class LLSDBinaryParser(object):
raise LLSDParseError("invalid map key at byte %d." % (
self._index - 1,))
value = self._parse()
- #print "kv:",key,value
rv[key] = value
count += 1
cc = self._buffer[self._index]
@@ -564,11 +664,23 @@ class LLSDNotationParser(object):
# 'd' = date in seconds since epoch
return self._parse_date()
elif cc == 'b':
- raise LLSDParseError("binary notation not yet supported")
+ return self._parse_binary()
else:
raise LLSDParseError("invalid token at index %d: %d" % (
self._index - 1, ord(cc)))
+ def _parse_binary(self):
+ i = self._index
+ if self._buffer[i:i+2] == '64':
+ q = self._buffer[i+2]
+ e = self._buffer.find(q, i+3)
+ try:
+ return base64.decodestring(self._buffer[i+3:e])
+ finally:
+ self._index = e + 1
+ else:
+ raise LLSDParseError('random horrible binary format not supported')
+
def _parse_map(self):
""" map: { string:object, string:object } """
rv = {}
@@ -581,30 +693,23 @@ class LLSDNotationParser(object):
if cc in ("'", '"', 's'):
key = self._parse_string(cc)
found_key = True
- #print "key:",key
elif cc.isspace() or cc == ',':
cc = self._buffer[self._index]
self._index += 1
else:
raise LLSDParseError("invalid map key at byte %d." % (
self._index - 1,))
+ elif cc.isspace() or cc == ':':
+ cc = self._buffer[self._index]
+ self._index += 1
+ continue
else:
- if cc.isspace() or cc == ':':
- #print "skipping whitespace '%s'" % cc
- cc = self._buffer[self._index]
- self._index += 1
- continue
self._index += 1
value = self._parse()
- #print "kv:",key,value
rv[key] = value
found_key = False
cc = self._buffer[self._index]
self._index += 1
- #if cc == '}':
- # break
- #cc = self._buffer[self._index]
- #self._index += 1
return rv
@@ -768,6 +873,14 @@ def format_binary(something):
return '<?llsd/binary?>\n' + _format_binary_recurse(something)
def _format_binary_recurse(something):
+ def _format_list(something):
+ array_builder = []
+ array_builder.append('[' + struct.pack('!i', len(something)))
+ for item in something:
+ array_builder.append(_format_binary_recurse(item))
+ array_builder.append(']')
+ return ''.join(array_builder)
+
if something is None:
return '!'
elif isinstance(something, LLSD):
@@ -785,7 +898,10 @@ def _format_binary_recurse(something):
return 'u' + something._bits
elif isinstance(something, binary):
return 'b' + struct.pack('!i', len(something)) + something
- elif isinstance(something, (str, unicode)):
+ elif isinstance(something, str):
+ return 's' + struct.pack('!i', len(something)) + something
+ elif isinstance(something, unicode):
+ something = something.encode('utf-8')
return 's' + struct.pack('!i', len(something)) + something
elif isinstance(something, uri):
return 'l' + struct.pack('!i', len(something)) + something
@@ -793,35 +909,52 @@ def _format_binary_recurse(something):
seconds_since_epoch = time.mktime(something.timetuple())
return 'd' + struct.pack('!d', seconds_since_epoch)
elif isinstance(something, (list, tuple)):
- array_builder = []
- array_builder.append('[' + struct.pack('!i', len(something)))
- for item in something:
- array_builder.append(_format_binary_recurse(item))
- array_builder.append(']')
- return ''.join(array_builder)
+ return _format_list(something)
elif isinstance(something, dict):
map_builder = []
map_builder.append('{' + struct.pack('!i', len(something)))
for key, value in something.items():
+ if isinstance(key, unicode):
+ key = key.encode('utf-8')
map_builder.append('k' + struct.pack('!i', len(key)) + key)
map_builder.append(_format_binary_recurse(value))
map_builder.append('}')
return ''.join(map_builder)
else:
- raise LLSDSerializationError("Cannot serialize unknown type: %s (%s)" % (
- type(something), something))
+ try:
+ return _format_list(list(something))
+ except TypeError:
+ raise LLSDSerializationError(
+ "Cannot serialize unknown type: %s (%s)" %
+ (type(something), something))
+def parse_binary(binary):
+ if binary.startswith('<?llsd/binary?>'):
+ just_binary = binary.split('\n', 1)[1]
+ else:
+ just_binary = binary
+ return LLSDBinaryParser().parse(just_binary)
+
+def parse_xml(something):
+ try:
+ return to_python(fromstring(something)[0])
+ except ElementTreeError, err:
+ raise LLSDParseError(*err.args)
+
+def parse_notation(something):
+ return LLSDNotationParser().parse(something)
+
def parse(something):
try:
+        something = string.lstrip(something)   # remove any leading whitespace
if something.startswith('<?llsd/binary?>'):
- just_binary = something.split('\n', 1)[1]
- return LLSDBinaryParser().parse(just_binary)
+ return parse_binary(something)
# This should be better.
elif something.startswith('<'):
- return to_python(fromstring(something)[0])
+ return parse_xml(something)
else:
- return LLSDNotationParser().parse(something)
+ return parse_notation(something)
except KeyError, e:
raise Exception('LLSD could not be parsed: %s' % (e,))
@@ -834,12 +967,16 @@ class LLSD(object):
parse = staticmethod(parse)
toXML = staticmethod(format_xml)
+ toPrettyXML = staticmethod(format_pretty_xml)
toBinary = staticmethod(format_binary)
toNotation = staticmethod(format_notation)
undef = LLSD(None)
+XML_MIME_TYPE = 'application/llsd+xml'
+BINARY_MIME_TYPE = 'application/llsd+binary'
+
# register converters for llsd in mulib, if it is available
try:
from mulib import stacked, mu
@@ -849,7 +986,7 @@ except:
# mulib not available, don't print an error message since this is normal
pass
else:
- mu.add_parser(parse, 'application/llsd+xml')
+ mu.add_parser(parse, XML_MIME_TYPE)
mu.add_parser(parse, 'application/llsd+binary')
def llsd_convert_xml(llsd_stuff, request):
@@ -858,11 +995,58 @@ else:
def llsd_convert_binary(llsd_stuff, request):
request.write(format_binary(llsd_stuff))
- for typ in [LLSD, dict, list, tuple, str, int, float, bool, unicode, type(None)]:
- stacked.add_producer(typ, llsd_convert_xml, 'application/llsd+xml')
+ for typ in [LLSD, dict, list, tuple, str, int, long, float, bool, unicode, type(None)]:
+ stacked.add_producer(typ, llsd_convert_xml, XML_MIME_TYPE)
stacked.add_producer(typ, llsd_convert_xml, 'application/xml')
stacked.add_producer(typ, llsd_convert_xml, 'text/xml')
stacked.add_producer(typ, llsd_convert_binary, 'application/llsd+binary')
stacked.add_producer(LLSD, llsd_convert_xml, '*/*')
+
+ # in case someone is using the legacy mu.xml wrapper, we need to
+ # tell mu to produce application/xml or application/llsd+xml
+ # (based on the accept header) from raw xml. Phoenix 2008-07-21
+ stacked.add_producer(mu.xml, mu.produce_raw, XML_MIME_TYPE)
+ stacked.add_producer(mu.xml, mu.produce_raw, 'application/xml')
+
+
+
+# mulib wsgi stuff
+# try:
+# from mulib import mu, adapters
+#
+# # try some known attributes from mulib to be ultra-sure we've imported it
+# mu.get_current
+# adapters.handlers
+# except:
+# # mulib not available, don't print an error message since this is normal
+# pass
+# else:
+# def llsd_xml_handler(content_type):
+# def handle_llsd_xml(env, start_response):
+# llsd_stuff, _ = mu.get_current(env)
+# result = format_xml(llsd_stuff)
+# start_response("200 OK", [('Content-Type', content_type)])
+# env['mu.negotiated_type'] = content_type
+# yield result
+# return handle_llsd_xml
+#
+# def llsd_binary_handler(content_type):
+# def handle_llsd_binary(env, start_response):
+# llsd_stuff, _ = mu.get_current(env)
+# result = format_binary(llsd_stuff)
+# start_response("200 OK", [('Content-Type', content_type)])
+# env['mu.negotiated_type'] = content_type
+# yield result
+# return handle_llsd_binary
+#
+# adapters.DEFAULT_PARSERS[XML_MIME_TYPE] = parse
+
+# for typ in [LLSD, dict, list, tuple, str, int, float, bool, unicode, type(None)]:
+# for content_type in (XML_MIME_TYPE, 'application/xml'):
+# adapters.handlers.set_handler(typ, llsd_xml_handler(content_type), content_type)
+#
+# adapters.handlers.set_handler(typ, llsd_binary_handler(BINARY_MIME_TYPE), BINARY_MIME_TYPE)
+#
+# adapters.handlers.set_handler(LLSD, llsd_xml_handler(XML_MIME_TYPE), '*/*')
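
Taken together, the llsd.py changes split parse() into explicit parse_binary()/parse_xml()/parse_notation() entry points, add format_pretty_xml(), and disable the leaky cllsd extension. A short usage sketch of those entry points (the sample dict is illustrative):

    from indra.base import llsd

    doc = {'name': 'box', 'count': 3}

    compact  = llsd.format_xml(doc)         # pure-python path; cllsd is disabled above
    pretty   = llsd.format_pretty_xml(doc)  # indented output, map keys sorted
    notation = llsd.format_notation(doc)    # binary values now serialize as b64"..."

    # parse() strips leading whitespace, then sniffs the serialization:
    # a '<?llsd/binary?>' header selects parse_binary, a leading '<'
    # selects parse_xml, and anything else falls through to parse_notation.
    assert llsd.parse(pretty) == llsd.parse(compact)
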
diff --git a/indra/lib/python/indra/base/lluuid.py b/indra/lib/python/indra/base/lluuid.py
index bd6c4320f3..1cdd8e915b 100644
--- a/indra/lib/python/indra/base/lluuid.py
+++ b/indra/lib/python/indra/base/lluuid.py
@@ -4,7 +4,7 @@
$LicenseInfo:firstyear=2004&license=mit$
-Copyright (c) 2004-2007, Linden Research, Inc.
+Copyright (c) 2004-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -26,8 +26,14 @@ THE SOFTWARE.
$/LicenseInfo$
"""
-import md5, random, socket, string, time, re
+import random, socket, string, time, re
import uuid
+try:
+ # Python 2.6
+ from hashlib import md5
+except ImportError:
+ # Python 2.5 and earlier
+ from md5 import new as md5
def _int2binstr(i,l):
s=''
@@ -196,7 +202,7 @@ class UUID(object):
from c++ implementation for portability reasons.
Returns self.
"""
- m = md5.new()
+ m = md5()
m.update(uuid.uuid1().bytes)
self._bits = m.digest()
return self
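
The hashlib shim above keeps UUID generation working on Python 2.6, where the standalone md5 module is deprecated, as well as on 2.5 and earlier. Usage is unchanged; this sketch just exercises the code path touched here:

    from indra.base import lluuid

    u = lluuid.UUID()   # note: cllsd_test.py above now uses this spelling too
    u.generate()        # hashes uuid.uuid1().bytes with md5() from hashlib when available
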
diff --git a/indra/lib/python/indra/base/metrics.py b/indra/lib/python/indra/base/metrics.py
index d26f571be7..ff8380265f 100644
--- a/indra/lib/python/indra/base/metrics.py
+++ b/indra/lib/python/indra/base/metrics.py
@@ -6,7 +6,7 @@
$LicenseInfo:firstyear=2007&license=mit$
-Copyright (c) 2007, Linden Research, Inc.
+Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -29,25 +29,93 @@ $/LicenseInfo$
"""
import sys
-from indra.base import llsd
+try:
+ import syslog
+except ImportError:
+ # Windows
+ import sys
+ class syslog(object):
+ # wrap to a lame syslog for windows
+ _logfp = sys.stderr
+ def syslog(msg):
+            syslog._logfp.write(msg)
+            if not msg.endswith('\n'):
+                syslog._logfp.write('\n')
+ syslog = staticmethod(syslog)
-_sequence_id = 0
+from indra.base.llsd import format_notation
-def record_metrics(table, stats, dest=None):
+def record_metrics(table, stats):
"Write a standard metrics log"
- _log("LLMETRICS", table, stats, dest)
+ _log("LLMETRICS", table, stats)
-def record_event(table, data, dest=None):
+def record_event(table, data):
"Write a standard logmessage log"
- _log("LLLOGMESSAGE", table, data, dest)
+ _log("LLLOGMESSAGE", table, data)
+
+def set_destination(dest):
+ """Set the destination of metrics logs for this process.
-def _log(header, table, data, dest):
+ If you do not call this function prior to calling a logging
+ method, that function will open sys.stdout as a destination.
+ Attempts to set dest to None will throw a RuntimeError.
+ @param dest a file-like object which will be the destination for logs."""
if dest is None:
- # do this check here in case sys.stdout changes at some
- # point. as a default parameter, it will never be
- # re-evaluated.
- dest = sys.stdout
+ raise RuntimeError("Attempt to unset metrics destination.")
+ global _destination
+ _destination = dest
+
+def destination():
+ """Get the destination of the metrics logs for this process.
+ Returns None if no destination is set"""
+ global _destination
+ return _destination
+
+class SysLogger(object):
+ "A file-like object which writes to syslog."
+ def __init__(self, ident='indra', logopt = None, facility = None):
+ try:
+ if logopt is None:
+ logopt = syslog.LOG_CONS | syslog.LOG_PID
+ if facility is None:
+ facility = syslog.LOG_LOCAL0
+ syslog.openlog(ident, logopt, facility)
+ import atexit
+ atexit.register(syslog.closelog)
+ except AttributeError:
+ # No syslog module on Windows
+ pass
+
+ def write(str):
+ syslog.syslog(str)
+ write = staticmethod(write)
+
+ def flush():
+ pass
+ flush = staticmethod(flush)
+
+#
+# internal API
+#
+_sequence_id = 0
+_destination = None
+
+def _next_id():
global _sequence_id
- print >>dest, header, "(" + str(_sequence_id) + ")",
- print >>dest, table, llsd.format_notation(data)
+ next = _sequence_id
_sequence_id += 1
+ return next
+
+def _dest():
+ global _destination
+ if _destination is None:
+ # this default behavior is documented in the metrics functions above.
+ _destination = sys.stdout
+ return _destination
+
+def _log(header, table, data):
+ log_line = "%s (%d) %s %s" \
+ % (header, _next_id(), table, format_notation(data))
+ dest = _dest()
+ dest.write(log_line)
+ dest.flush()
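
The metrics rework replaces the per-call dest argument with a module-level destination plus a SysLogger helper. A minimal sketch of the intended call pattern, using only the functions added above (table names and payloads are illustrative):

    from indra.base import metrics

    # Route metrics to syslog; without this call, _dest() falls back to
    # sys.stdout the first time something is logged.
    metrics.set_destination(metrics.SysLogger(ident='myservice'))

    metrics.record_metrics('sim_stats', {'agents': 12, 'fps': 44.5})
    metrics.record_event('login', {'status': 'ok'})
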
diff --git a/indra/lib/python/indra/ipc/__init__.py b/indra/lib/python/indra/ipc/__init__.py
index 4395361323..302bbf4a03 100644
--- a/indra/lib/python/indra/ipc/__init__.py
+++ b/indra/lib/python/indra/ipc/__init__.py
@@ -4,7 +4,7 @@
$LicenseInfo:firstyear=2006&license=mit$
-Copyright (c) 2006-2007, Linden Research, Inc.
+Copyright (c) 2006-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/indra/lib/python/indra/ipc/compatibility.py b/indra/lib/python/indra/ipc/compatibility.py
index 8435528787..b9045c22f3 100644
--- a/indra/lib/python/indra/ipc/compatibility.py
+++ b/indra/lib/python/indra/ipc/compatibility.py
@@ -4,7 +4,7 @@
$LicenseInfo:firstyear=2007&license=mit$
-Copyright (c) 2007, Linden Research, Inc.
+Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/indra/lib/python/indra/ipc/llmessage.py b/indra/lib/python/indra/ipc/llmessage.py
index 2497393cbd..91fb36b72c 100644
--- a/indra/lib/python/indra/ipc/llmessage.py
+++ b/indra/lib/python/indra/ipc/llmessage.py
@@ -4,7 +4,7 @@
$LicenseInfo:firstyear=2007&license=mit$
-Copyright (c) 2007, Linden Research, Inc.
+Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -26,8 +26,6 @@ THE SOFTWARE.
$/LicenseInfo$
"""
-from sets import Set, ImmutableSet
-
from compatibility import Incompatible, Older, Newer, Same
from tokenstream import TokenStream
@@ -44,8 +42,8 @@ class Template:
def compatibleWithBase(self, base):
messagenames = (
- ImmutableSet(self.messages.keys())
- | ImmutableSet(base.messages.keys())
+ frozenset(self.messages.keys())
+ | frozenset(base.messages.keys())
)
compatibility = Same()
@@ -86,8 +84,9 @@ class Message:
NOTDEPRECATED = "NotDeprecated"
DEPRECATED = "Deprecated"
UDPDEPRECATED = "UDPDeprecated"
- deprecations = [ NOTDEPRECATED, UDPDEPRECATED, DEPRECATED ]
- # in order of increasing deprecation
+ UDPBLACKLISTED = "UDPBlackListed"
+ deprecations = [ NOTDEPRECATED, UDPDEPRECATED, UDPBLACKLISTED, DEPRECATED ]
+ # in order of increasing deprecation
def __init__(self, name, number, priority, trust, coding):
self.name = name
diff --git a/indra/lib/python/indra/ipc/llsdhttp.py b/indra/lib/python/indra/ipc/llsdhttp.py
index 12d759d3a0..cbe8ee1eca 100644
--- a/indra/lib/python/indra/ipc/llsdhttp.py
+++ b/indra/lib/python/indra/ipc/llsdhttp.py
@@ -4,7 +4,7 @@
$LicenseInfo:firstyear=2006&license=mit$
-Copyright (c) 2006-2007, Linden Research, Inc.
+Copyright (c) 2006-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -48,7 +48,13 @@ put_ = suite.put_
request = suite.request
request_ = suite.request_
-for x in (httpc.ConnectionError, httpc.NotFound, httpc.Forbidden):
+# import every httpc error exception into our namespace for convenience
+for x in httpc.status_to_error_map.itervalues():
+ globals()[x.__name__] = x
+ConnectionError = httpc.ConnectionError
+Retriable = httpc.Retriable
+
+for x in (httpc.ConnectionError,):
globals()[x.__name__] = x
diff --git a/indra/lib/python/indra/ipc/mysql_pool.py b/indra/lib/python/indra/ipc/mysql_pool.py
index 2a5a916e74..e5855a3091 100644
--- a/indra/lib/python/indra/ipc/mysql_pool.py
+++ b/indra/lib/python/indra/ipc/mysql_pool.py
@@ -4,7 +4,7 @@
$LicenseInfo:firstyear=2007&license=mit$
-Copyright (c) 2007, Linden Research, Inc.
+Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -30,8 +30,10 @@ import MySQLdb
from eventlet import db_pool
class DatabaseConnector(db_pool.DatabaseConnector):
- def __init__(self, credentials, min_size = 0, max_size = 4, *args, **kwargs):
- super(DatabaseConnector, self).__init__(MySQLdb, credentials, min_size, max_size, conn_pool=db_pool.ConnectionPool, *args, **kwargs)
+ def __init__(self, credentials, *args, **kwargs):
+ super(DatabaseConnector, self).__init__(MySQLdb, credentials,
+ conn_pool=db_pool.ConnectionPool,
+ *args, **kwargs)
# get is extended relative to eventlet.db_pool to accept a port argument
def get(self, host, dbname, port=3306):
@@ -42,7 +44,7 @@ class DatabaseConnector(db_pool.DatabaseConnector):
new_kwargs['host'] = host
new_kwargs['port'] = port
new_kwargs.update(self.credentials_for(host))
- dbpool = ConnectionPool(self._min_size, self._max_size, *self._args, **new_kwargs)
+ dbpool = ConnectionPool(*self._args, **new_kwargs)
self._databases[key] = dbpool
return self._databases[key]
@@ -51,8 +53,8 @@ class ConnectionPool(db_pool.TpooledConnectionPool):
"""A pool which gives out saranwrapped MySQLdb connections from a pool
"""
- def __init__(self, min_size = 0, max_size = 4, *args, **kwargs):
- super(ConnectionPool, self).__init__(MySQLdb, min_size, max_size, *args, **kwargs)
+ def __init__(self, *args, **kwargs):
+ super(ConnectionPool, self).__init__(MySQLdb, *args, **kwargs)
def get(self):
conn = super(ConnectionPool, self).get()
@@ -76,3 +78,4 @@ class ConnectionPool(db_pool.TpooledConnectionPool):
converted_kwargs.update(self._kwargs)
conn.connection_parameters = converted_kwargs
return conn
+
diff --git a/indra/lib/python/indra/ipc/russ.py b/indra/lib/python/indra/ipc/russ.py
index bd50569d3a..35d8afb158 100644
--- a/indra/lib/python/indra/ipc/russ.py
+++ b/indra/lib/python/indra/ipc/russ.py
@@ -11,7 +11,7 @@ implementations section.
$LicenseInfo:firstyear=2007&license=mit$
-Copyright (c) 2007, Linden Research, Inc.
+Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/indra/lib/python/indra/ipc/servicebuilder.py b/indra/lib/python/indra/ipc/servicebuilder.py
index ebd2583385..0a0ce2b4e2 100644
--- a/indra/lib/python/indra/ipc/servicebuilder.py
+++ b/indra/lib/python/indra/ipc/servicebuilder.py
@@ -5,7 +5,7 @@
$LicenseInfo:firstyear=2007&license=mit$
-Copyright (c) 2007, Linden Research, Inc.
+Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -39,6 +39,12 @@ except:
pass
_g_builder = None
+def _builder():
+ global _g_builder
+ if _g_builder is None:
+ _g_builder = ServiceBuilder()
+ return _g_builder
+
def build(name, context={}, **kwargs):
""" Convenience method for using a global, singleton, service builder. Pass arguments either via a dict or via python keyword arguments, or both!
@@ -51,12 +57,15 @@ def build(name, context={}, **kwargs):
> servicebuilder.build('version-manager-version', context, version='1.18.1.2')
'http://int.util.vaak.lindenlab.com/channel/Second%20Life%20Release/1.18.1.2'
"""
- context = context.copy() # shouldn't modify the caller's dictionary
- context.update(kwargs)
global _g_builder
if _g_builder is None:
_g_builder = ServiceBuilder()
- return _g_builder.buildServiceURL(name, context)
+ return _g_builder.buildServiceURL(name, context, **kwargs)
+
+def build_path(name, context={}, **kwargs):
+ context = context.copy() # shouldn't modify the caller's dictionary
+ context.update(kwargs)
+ return _builder().buildPath(name, context)
class ServiceBuilder(object):
def __init__(self, services_definition = services_config):
@@ -75,19 +84,51 @@ class ServiceBuilder(object):
continue
if isinstance(service_builder, dict):
# We will be constructing several builders
- for name, builder in service_builder.items():
+ for name, builder in service_builder.iteritems():
full_builder_name = service['name'] + '-' + name
self.builders[full_builder_name] = builder
else:
self.builders[service['name']] = service_builder
- def buildServiceURL(self, name, context):
+ def buildPath(self, name, context):
+ """\
+ @brief given the environment on construction, return a service path.
+ @param name The name of the service.
+ @param context A dict of name value lookups for the service.
+        @returns Returns the service path.
+ """
+ return russ.format(self.builders[name], context)
+
+ def buildServiceURL(self, name, context={}, **kwargs):
"""\
@brief given the environment on construction, return a service URL.
@param name The name of the service.
@param context A dict of name value lookups for the service.
+ @param kwargs Any keyword arguments are treated as members of the
+            context. This allows you to write calls like:
+ servicebuilder.build('name', param=value)
@returns Returns the
"""
+ context = context.copy() # shouldn't modify the caller's dictionary
+ context.update(kwargs)
base_url = config.get('services-base-url')
svc_path = russ.format(self.builders[name], context)
return base_url + svc_path
+
+
+def on_in(query_name, host_key, schema_key):
+ """\
+ @brief Constructs an on/in snippet (for running named queries)
+ from a schema name and two keys referencing values stored in
+ indra.xml.
+
+ @param query_name Name of the query.
+ @param host_key Logical name of destination host. Will be
+ looked up in indra.xml.
+ @param schema_key Logical name of destination schema. Will
+ be looked up in indra.xml.
+ """
+ return "on/config:%s/in/config:%s/%s" % (host_key.strip('/'),
+ schema_key.strip('/'),
+ query_name.lstrip('/'))
+
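
The servicebuilder changes move the context/kwargs merge into buildServiceURL(), add a path-only build_path() helper, and introduce on_in() for named-query routing. A hedged sketch: the first call mirrors the docstring example above (the template may require more context keys than shown), and the query and key names in the last call are placeholders:

    from indra.ipc import servicebuilder

    # keyword arguments are merged into a copy of the context dict
    url = servicebuilder.build('version-manager-version', version='1.18.1.2')

    # same template expansion, but without the services-base-url prefix
    path = servicebuilder.build_path('version-manager-version', version='1.18.1.2')

    # builds "on/config:<host_key>/in/config:<schema_key>/<query>"; both keys
    # are logical names that get resolved through indra.xml later on
    snippet = servicebuilder.on_in('my_query', 'my-db-host', 'my-db-schema')
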
diff --git a/indra/lib/python/indra/ipc/siesta.py b/indra/lib/python/indra/ipc/siesta.py
index 5fbea29339..d867e71537 100644
--- a/indra/lib/python/indra/ipc/siesta.py
+++ b/indra/lib/python/indra/ipc/siesta.py
@@ -1,3 +1,32 @@
+"""\
+@file siesta.py
+@brief A tiny llsd based RESTful web services framework
+
+$LicenseInfo:firstyear=2008&license=mit$
+
+Copyright (c) 2008, Linden Research, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+$/LicenseInfo$
+"""
+
+from indra.base import config
from indra.base import llsd
from webob import exc
import webob
@@ -24,9 +53,9 @@ except ImportError:
llsd_parsers = {
'application/json': json_decode,
- 'application/llsd+binary': llsd.parse_binary,
+ llsd.BINARY_MIME_TYPE: llsd.parse_binary,
'application/llsd+notation': llsd.parse_notation,
- 'application/llsd+xml': llsd.parse_xml,
+ llsd.XML_MIME_TYPE: llsd.parse_xml,
'application/xml': llsd.parse_xml,
}
@@ -37,11 +66,11 @@ def mime_type(content_type):
return content_type.split(';', 1)[0].strip().lower()
class BodyLLSD(object):
- '''Give a webob Request or Response an llsd property.
+ '''Give a webob Request or Response an llsd based "content" property.
- Getting the llsd property parses the body, and caches the result.
+ Getting the content property parses the body, and caches the result.
- Setting the llsd property formats a payload, and the body property
+ Setting the content property formats a payload, and the body property
is set.'''
def _llsd__get(self):
@@ -80,7 +109,7 @@ class BodyLLSD(object):
if hasattr(self, '_llsd'):
del self._llsd
- llsd = property(_llsd__get, _llsd__set, _llsd__del)
+ content = property(_llsd__get, _llsd__set, _llsd__del)
class Response(webob.Response, BodyLLSD):
@@ -114,10 +143,10 @@ class Request(webob.Request, BodyLLSD):
Sensible content type and accept headers are used by default.
- Setting the llsd property also sets the body. Getting the llsd
+ Setting the content property also sets the body. Getting the content
property parses the body if necessary.
- If you set the body property directly, the llsd property will be
+ If you set the body property directly, the content property will be
deleted.'''
default_content_type = 'application/llsd+xml'
@@ -149,11 +178,11 @@ class Request(webob.Request, BodyLLSD):
body = property(webob.Request._body__get, _body__set,
webob.Request._body__del, webob.Request._body__get.__doc__)
- def create_response(self, llsd=None, status='200 OK',
+ def create_response(self, content=None, status='200 OK',
conditional_response=webob.NoDefault):
resp = self.ResponseClass(status=status, request=self,
conditional_response=conditional_response)
- resp.llsd = llsd
+ resp.content = content
return resp
def curl(self):
@@ -196,12 +225,18 @@ llsd_formatters = {
'application/xml': llsd.format_xml,
}
+formatter_qualities = (
+ ('application/llsd+xml', 1.0),
+ ('application/llsd+notation', 0.5),
+ ('application/llsd+binary', 0.4),
+ ('application/xml', 0.3),
+ ('application/json', 0.2),
+ )
def formatter_for_mime_type(mime_type):
'''Return a formatter that encodes to the given MIME type.
The result is a pair of function and MIME type.'''
-
try:
return llsd_formatters[mime_type], mime_type
except KeyError:
@@ -214,21 +249,19 @@ def formatter_for_request(req):
'''Return a formatter that encodes to the preferred type of the client.
The result is a pair of function and actual MIME type.'''
-
- for ctype in req.accept.best_matches('application/llsd+xml'):
- try:
- return llsd_formatters[ctype], ctype
- except KeyError:
- pass
- else:
+ ctype = req.accept.best_match(formatter_qualities)
+ try:
+ return llsd_formatters[ctype], ctype
+ except KeyError:
raise exc.HTTPNotAcceptable().exception
def wsgi_adapter(func, environ, start_response):
'''Adapt a Siesta callable to act as a WSGI application.'''
-
+ # Process the request as appropriate.
try:
req = Request(environ)
+ #print req.urlvars
resp = func(req, **req.urlvars)
if not isinstance(resp, webob.Response):
try:
@@ -281,7 +314,8 @@ def llsd_class(cls):
allowed = [m for m in http11_methods
if hasattr(instance, 'handle_' + m.lower())]
raise exc.HTTPMethodNotAllowed(
- headers={'Allowed': ', '.join(allowed)}).exception
+ headers={'Allow': ', '.join(allowed)}).exception
+ #print "kwargs: ", kwargs
return handler(req, **kwargs)
def replacement(environ, start_response):
@@ -336,7 +370,7 @@ def curl(reqs):
route_re = re.compile(r'''
\{ # exact character "{"
- (\w+) # variable name (restricted to a-z, 0-9, _)
+ (\w*) # "config" or variable (restricted to a-z, 0-9, _)
(?:([:~])([^}]+))? # optional :type or ~regex part
\} # exact character "}"
''', re.VERBOSE)
@@ -344,27 +378,37 @@ route_re = re.compile(r'''
predefined_regexps = {
'uuid': r'[a-f0-9][a-f0-9-]{31,35}',
'int': r'\d+',
+ 'host': r'[a-z0-9][a-z0-9\-\.]*',
}
def compile_route(route):
fp = StringIO()
last_pos = 0
for match in route_re.finditer(route):
+ #print "matches: ", match.groups()
fp.write(re.escape(route[last_pos:match.start()]))
var_name = match.group(1)
sep = match.group(2)
expr = match.group(3)
- if expr:
- if sep == ':':
- expr = predefined_regexps[expr]
- # otherwise, treat what follows '~' as a regexp
+ if var_name == 'config':
+ expr = re.escape(str(config.get(var_name)))
else:
- expr = '[^/]+'
- expr = '(?P<%s>%s)' % (var_name, expr)
+ if expr:
+ if sep == ':':
+ expr = predefined_regexps[expr]
+ # otherwise, treat what follows '~' as a regexp
+ else:
+ expr = '[^/]+'
+ if var_name != '':
+ expr = '(?P<%s>%s)' % (var_name, expr)
+ else:
+ expr = '(%s)' % (expr,)
fp.write(expr)
last_pos = match.end()
fp.write(re.escape(route[last_pos:]))
- return '^%s$' % fp.getvalue()
+ compiled_route = '^%s$' % fp.getvalue()
+ #print route, "->", compiled_route
+ return compiled_route
class Router(object):
'''WSGI routing class. Parses a URL and hands off a request to
@@ -372,21 +416,43 @@ class Router(object):
responds with a 404.'''
def __init__(self):
- self.routes = []
- self.paths = []
+ self._new_routes = []
+ self._routes = []
+ self._paths = []
def add(self, route, app, methods=None):
- self.paths.append(route)
- self.routes.append((re.compile(compile_route(route)), app,
- methods and dict.fromkeys(methods)))
+ self._new_routes.append((route, app, methods))
+
+ def _create_routes(self):
+ for route, app, methods in self._new_routes:
+ self._paths.append(route)
+ self._routes.append(
+ (re.compile(compile_route(route)),
+ app,
+ methods and dict.fromkeys(methods)))
+ self._new_routes = []
def __call__(self, environ, start_response):
+ # load up the config from the config file. Only needs to be
+ # done once per interpreter. This is the entry point of all
+ # siesta applications, so this is where we trap it.
+ _conf = config.get_config()
+ if _conf is None:
+ import os.path
+ fname = os.path.join(
+ environ.get('ll.config_dir', '/local/linden/etc'),
+ 'indra.xml')
+ config.load(fname)
+
+ # proceed with handling the request
+ self._create_routes()
path_info = environ['PATH_INFO']
request_method = environ['REQUEST_METHOD']
allowed = []
- for regex, app, methods in self.routes:
+ for regex, app, methods in self._routes:
m = regex.match(path_info)
if m:
+ #print "groupdict:",m.groupdict()
if not methods or request_method in methods:
environ['paste.urlvars'] = m.groupdict()
return app(environ, start_response)
@@ -396,7 +462,7 @@ class Router(object):
allowed = dict.fromkeys(allows).keys()
allowed.sort()
resp = exc.HTTPMethodNotAllowed(
- headers={'Allowed': ', '.join(allowed)})
+ headers={'Allow': ', '.join(allowed)})
else:
resp = exc.HTTPNotFound()
return resp(environ, start_response)
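
On the routing side, compile_route() now understands a bare {config} substitution and named, typed captures, and the Router defers both route compilation and config loading until the first request. A rough sketch of wiring up a handler, assuming the siesta conventions shown above (class, path, and variable names are illustrative):

    from indra.ipc import siesta

    class AgentGreeting(object):
        def handle_get(self, req, agent_id):
            # urlvars captured by the route arrive as keyword arguments;
            # non-Response return values are handled by the wsgi adapter
            return {'agent_id': agent_id, 'message': 'hello'}

    router = siesta.Router()
    # {agent_id:uuid} uses the predefined uuid regexp; :int and :host work
    # the same way, and {name~regex} accepts a raw regular expression.
    router.add('/agent/{agent_id:uuid}/greeting',
               siesta.llsd_class(AgentGreeting),
               methods=['GET'])
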
diff --git a/indra/lib/python/indra/ipc/tokenstream.py b/indra/lib/python/indra/ipc/tokenstream.py
index 37896d30d5..b96f26d3ff 100644
--- a/indra/lib/python/indra/ipc/tokenstream.py
+++ b/indra/lib/python/indra/ipc/tokenstream.py
@@ -4,7 +4,7 @@
$LicenseInfo:firstyear=2007&license=mit$
-Copyright (c) 2007, Linden Research, Inc.
+Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/indra/lib/python/indra/ipc/webdav.py b/indra/lib/python/indra/ipc/webdav.py
index 66e55ca426..98b8499b6a 100644
--- a/indra/lib/python/indra/ipc/webdav.py
+++ b/indra/lib/python/indra/ipc/webdav.py
@@ -4,7 +4,7 @@
$LicenseInfo:firstyear=2007&license=mit$
-Copyright (c) 2007, Linden Research, Inc.
+Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/indra/lib/python/indra/ipc/xml_rpc.py b/indra/lib/python/indra/ipc/xml_rpc.py
index dc8f0aac4b..47536c10c3 100644
--- a/indra/lib/python/indra/ipc/xml_rpc.py
+++ b/indra/lib/python/indra/ipc/xml_rpc.py
@@ -4,7 +4,7 @@
$LicenseInfo:firstyear=2006&license=mit$
-Copyright (c) 2006-2007, Linden Research, Inc.
+Copyright (c) 2006-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/indra/lib/python/indra/util/__init__.py b/indra/lib/python/indra/util/__init__.py
index 3eda1849ce..b004e5804f 100644
--- a/indra/lib/python/indra/util/__init__.py
+++ b/indra/lib/python/indra/util/__init__.py
@@ -4,7 +4,7 @@
$LicenseInfo:firstyear=2006&license=mit$
-Copyright (c) 2006-2007, Linden Research, Inc.
+Copyright (c) 2006-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/indra/lib/python/indra/util/fastest_elementtree.py b/indra/lib/python/indra/util/fastest_elementtree.py
index 6661580463..4fcf662dd9 100644
--- a/indra/lib/python/indra/util/fastest_elementtree.py
+++ b/indra/lib/python/indra/util/fastest_elementtree.py
@@ -2,9 +2,9 @@
@file fastest_elementtree.py
@brief Concealing some gnarly import logic in here. This should export the interface of elementtree.
-$LicenseInfo:firstyear=2006&license=mit$
+$LicenseInfo:firstyear=2008&license=mit$
-Copyright (c) 2006-2007, Linden Research, Inc.
+Copyright (c) 2008-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -26,27 +26,39 @@ THE SOFTWARE.
$/LicenseInfo$
"""
-# Using celementree might cause some unforeseen problems so here's a
-# convenient off switch.
+# The parsing exception raised by the underlying library depends
+# on the ElementTree implementation we're using, so we provide an
+# alias here.
+#
+# Use ElementTreeError as the exception type for catching parsing
+# errors.
+
-# *NOTE: turned off cause of problems. :-( *TODO: debug
-use_celementree = False
+# Using cElementTree might cause some unforeseen problems, so here's a
+# convenient off switch.
+use_celementree = True
try:
if not use_celementree:
raise ImportError()
- from cElementTree import * ## This does not work under Windows
+ # Python 2.3 and 2.4.
+ from cElementTree import *
+ ElementTreeError = SyntaxError
except ImportError:
try:
if not use_celementree:
raise ImportError()
- ## This is the name of cElementTree under python 2.5
+ # Python 2.5 and above.
from xml.etree.cElementTree import *
+ ElementTreeError = SyntaxError
except ImportError:
+ # Pure Python code.
try:
- ## This is the old name of elementtree, for use with 2.3
+ # Python 2.3 and 2.4.
from elementtree.ElementTree import *
except ImportError:
- ## This is the name of elementtree under python 2.5
+ # Python 2.5 and above.
from xml.etree.ElementTree import *
+ # The pure Python ElementTree module uses Expat for parsing.
+ from xml.parsers.expat import ExpatError as ElementTreeError
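
With ElementTreeError exported, callers can catch parse failures without knowing whether cElementTree, the pure-Python ElementTree, or Expat did the parsing; the llsd.parse_xml() change earlier in this diff relies on exactly that. A minimal sketch:

    from indra.util.fastest_elementtree import ElementTreeError, fromstring

    def try_parse(xml_text):
        try:
            return fromstring(xml_text)
        except ElementTreeError:
            # same exception alias regardless of which implementation loaded
            return None
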
diff --git a/indra/lib/python/indra/util/helpformatter.py b/indra/lib/python/indra/util/helpformatter.py
index c4ff27f616..ba5c9b67d1 100644
--- a/indra/lib/python/indra/util/helpformatter.py
+++ b/indra/lib/python/indra/util/helpformatter.py
@@ -5,7 +5,7 @@
$LicenseInfo:firstyear=2007&license=mit$
-Copyright (c) 2007, Linden Research, Inc.
+Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/indra/lib/python/indra/util/iterators.py b/indra/lib/python/indra/util/iterators.py
new file mode 100644
index 0000000000..9013fa6303
--- /dev/null
+++ b/indra/lib/python/indra/util/iterators.py
@@ -0,0 +1,63 @@
+"""\
+@file iterators.py
+@brief Useful general-purpose iterators.
+
+$LicenseInfo:firstyear=2008&license=mit$
+
+Copyright (c) 2008-2009, Linden Research, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+$/LicenseInfo$
+"""
+
+from __future__ import nested_scopes
+
+def iter_chunks(rows, aggregate_size=100):
+ """
+ Given an iterable set of items (@p rows), produces lists of up to @p
+ aggregate_size items at a time, for example:
+
+ iter_chunks([1,2,3,4,5,6,7,8,9,10], 3)
+
+ Values for @p aggregate_size < 1 will raise ValueError.
+
+ Will return a generator that produces, in the following order:
+ - [1, 2, 3]
+ - [4, 5, 6]
+ - [7, 8, 9]
+ - [10]
+ """
+ if aggregate_size < 1:
+ raise ValueError()
+
+ def iter_chunks_inner():
+ row_iter = iter(rows)
+ done = False
+ agg = []
+ while not done:
+ try:
+ row = row_iter.next()
+ agg.append(row)
+ except StopIteration:
+ done = True
+ if agg and (len(agg) >= aggregate_size or done):
+ yield agg
+ agg = []
+
+ return iter_chunks_inner()
diff --git a/indra/lib/python/indra/util/iterators_test.py b/indra/lib/python/indra/util/iterators_test.py
new file mode 100755
index 0000000000..66928c8e7d
--- /dev/null
+++ b/indra/lib/python/indra/util/iterators_test.py
@@ -0,0 +1,72 @@
+"""\
+@file iterators_test.py
+@brief Test cases for iterators module.
+
+$LicenseInfo:firstyear=2008&license=mit$
+
+Copyright (c) 2008-2009, Linden Research, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+$/LicenseInfo$
+"""
+
+import unittest
+
+from indra.util.iterators import iter_chunks
+
+class TestIterChunks(unittest.TestCase):
+ """Unittests for iter_chunks"""
+ def test_bad_agg_size(self):
+ rows = [1,2,3,4]
+ self.assertRaises(ValueError, iter_chunks, rows, 0)
+ self.assertRaises(ValueError, iter_chunks, rows, -1)
+
+ try:
+ for i in iter_chunks(rows, 0):
+ pass
+ except ValueError:
+ pass
+ else:
+ self.fail()
+
+ try:
+ result = list(iter_chunks(rows, 0))
+ except ValueError:
+ pass
+ else:
+ self.fail()
+ def test_empty(self):
+ rows = []
+ result = list(iter_chunks(rows))
+ self.assertEqual(result, [])
+ def test_small(self):
+ rows = [[1]]
+ result = list(iter_chunks(rows, 2))
+ self.assertEqual(result, [[[1]]])
+ def test_size(self):
+ rows = [[1],[2]]
+ result = list(iter_chunks(rows, 2))
+ self.assertEqual(result, [[[1],[2]]])
+ def test_multi_agg(self):
+ rows = [[1],[2],[3],[4],[5]]
+ result = list(iter_chunks(rows, 2))
+ self.assertEqual(result, [[[1],[2]],[[3],[4]],[[5]]])
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/indra/lib/python/indra/util/llmanifest.py b/indra/lib/python/indra/util/llmanifest.py
index 467517756a..c33a03034a 100644
--- a/indra/lib/python/indra/util/llmanifest.py
+++ b/indra/lib/python/indra/util/llmanifest.py
@@ -5,7 +5,7 @@
$LicenseInfo:firstyear=2007&license=mit$
-Copyright (c) 2007-2008, Linden Research, Inc.
+Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -39,6 +39,7 @@ import shutil
import sys
import tarfile
import errno
+import subprocess
def path_ancestors(path):
drive, path = os.path.splitdrive(os.path.normpath(path))
@@ -119,10 +120,10 @@ ARGUMENTS=[
On Linux this would try to use Linux_i686Manifest.""",
default=""),
dict(name='build', description='Build directory.', default=DEFAULT_SRCTREE),
+    dict(name='buildtype', description='Build type (e.g. Debug, Release, RelWithDebInfo).', default=None),
dict(name='configuration',
- description="""The build configuration used. Only used on OS X for
- now, but it could be used for other platforms as well.""",
- default="Universal"),
+ description="""The build configuration used.""",
+ default="Release"),
dict(name='dest', description='Destination directory.', default=DEFAULT_SRCTREE),
dict(name='grid',
description="""Which grid the client will try to connect to. Even
@@ -366,20 +367,23 @@ class LLManifest(object):
def run_command(self, command):
""" Runs an external command, and returns the output. Raises
- an exception if the command reurns a nonzero status code. For
- debugging/informational purpoases, prints out the command's
+ an exception if the command returns a nonzero status code. For
+ debugging/informational purposes, prints out the command's
output as it is received."""
print "Running command:", command
- fd = os.popen(command, 'r')
+ sys.stdout.flush()
+ child = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+ shell=True)
lines = []
while True:
- lines.append(fd.readline())
+ lines.append(child.stdout.readline())
if lines[-1] == '':
break
else:
print lines[-1],
output = ''.join(lines)
- status = fd.close()
+ child.stdout.close()
+ status = child.wait()
if status:
raise RuntimeError(
"Command %s returned non-zero status (%s) \noutput:\n%s"
@@ -584,7 +588,7 @@ class LLManifest(object):
def wildcard_regex(self, src_glob, dst_glob):
src_re = re.escape(src_glob)
- src_re = src_re.replace('\*', '([-a-zA-Z0-9._ ]+)')
+ src_re = src_re.replace('\*', '([-a-zA-Z0-9._ ]*)')
dst_temp = dst_glob
i = 1
while dst_temp.count("*") > 0:
@@ -621,6 +625,7 @@ class LLManifest(object):
count = 0
if self.wildcard_pattern.search(src):
for s,d in self.expand_globs(src, dst):
+ assert(s != d)
count += self.process_file(s, d)
else:
# if we're specifying a single path (not a glob),
diff --git a/indra/lib/python/indra/util/llperformance.py b/indra/lib/python/indra/util/llperformance.py
new file mode 100755
index 0000000000..7c52730b5e
--- /dev/null
+++ b/indra/lib/python/indra/util/llperformance.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python
+
+# ------------------------------------------------
+# Sim metrics utility functions.
+
+import glob, os, time, sys, stat, exceptions
+
+from indra.base import llsd
+
+gBlockMap = {} #Map of performance metric data with function hierarchy information.
+gCurrentStatPath = ""
+
+gIsLoggingEnabled=False
+
+class LLPerfStat:
+ def __init__(self,key):
+ self.mTotalTime = 0
+ self.mNumRuns = 0
+ self.mName=key
+ self.mTimeStamp = int(time.time()*1000)
+ self.mUTCTime = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
+
+ def __str__(self):
+ return "%f" % self.mTotalTime
+
+ def start(self):
+ self.mStartTime = int(time.time() * 1000000)
+ self.mNumRuns += 1
+
+ def stop(self):
+ execution_time = int(time.time() * 1000000) - self.mStartTime
+ self.mTotalTime += execution_time
+
+ def get_map(self):
+ results={}
+ results['name']=self.mName
+ results['utc_time']=self.mUTCTime
+ results['timestamp']=self.mTimeStamp
+ results['us']=self.mTotalTime
+ results['count']=self.mNumRuns
+ return results
+
+class PerfError(exceptions.Exception):
+ def __init__(self):
+ return
+
+    def __str__(self):
+        return "Unfinished LLPerfBlock"
+
+class LLPerfBlock:
+ def __init__( self, key ):
+ global gBlockMap
+ global gCurrentStatPath
+ global gIsLoggingEnabled
+
+ #Check to see if we're running metrics right now.
+ if gIsLoggingEnabled:
+ self.mRunning = True #Mark myself as running.
+
+ self.mPreviousStatPath = gCurrentStatPath
+ gCurrentStatPath += "/" + key
+ if gCurrentStatPath not in gBlockMap:
+ gBlockMap[gCurrentStatPath] = LLPerfStat(key)
+
+ self.mStat = gBlockMap[gCurrentStatPath]
+ self.mStat.start()
+
+ def finish( self ):
+        global gBlockMap
+        global gCurrentStatPath
+        global gIsLoggingEnabled
+
+ if gIsLoggingEnabled:
+ self.mStat.stop()
+ self.mRunning = False
+ gCurrentStatPath = self.mPreviousStatPath
+
+# def __del__( self ):
+# if self.mRunning:
+# #SPATTERS FIXME
+# raise PerfError
+
+class LLPerformance:
+ #--------------------------------------------------
+ # Determine whether or not we want to log statistics
+
+ def __init__( self, process_name = "python" ):
+ self.process_name = process_name
+ self.init_testing()
+ self.mTimeStamp = int(time.time()*1000)
+ self.mUTCTime = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
+
+ def init_testing( self ):
+ global gIsLoggingEnabled
+
+ host_performance_file = "/dev/shm/simperf/simperf_proc_config.llsd"
+
+ #If file exists, open
+        if os.path.exists(host_performance_file):
+            config_file = open(host_performance_file, 'r')
+
+            #Read serialized LLSD from file, then release the handle.
+            body = llsd.parse(config_file.read())
+            config_file.close()
+
+ #Calculate time since file last modified.
+ stats = os.stat(host_performance_file)
+ now = time.time()
+ mod = stats[stat.ST_MTIME]
+ age = now - mod
+
+ if age < ( body['duration'] ):
+ gIsLoggingEnabled = True
+
+
+ def get ( self ):
+ global gIsLoggingEnabled
+ return gIsLoggingEnabled
+
+ #def output(self,ptr,path):
+ # if 'stats' in ptr:
+ # stats = ptr['stats']
+ # self.mOutputPtr[path] = stats.get_map()
+
+ # if 'children' in ptr:
+ # children=ptr['children']
+
+ # curptr = self.mOutputPtr
+ # curchildren={}
+ # curptr['children'] = curchildren
+
+ # for key in children:
+ # curchildren[key]={}
+ # self.mOutputPtr = curchildren[key]
+ # self.output(children[key],path + '/' + key)
+
+ def done(self):
+ global gBlockMap
+
+ if not self.get():
+ return
+
+ output_name = "/dev/shm/simperf/%s_proc.%d.llsd" % (self.process_name, os.getpid())
+ output_file = open(output_name, 'w')
+ process_info = {
+ "name" : self.process_name,
+ "pid" : os.getpid(),
+ "ppid" : os.getppid(),
+ "timestamp" : self.mTimeStamp,
+ "utc_time" : self.mUTCTime,
+ }
+ output_file.write(llsd.format_notation(process_info))
+ output_file.write('\n')
+
+ for key in gBlockMap.keys():
+ gBlockMap[key] = gBlockMap[key].get_map()
+ output_file.write(llsd.format_notation(gBlockMap))
+ output_file.write('\n')
+ output_file.close()
+
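A usage sketch inferred from the classes above (the block key and process name are illustrative): time a section of code with LLPerfBlock, then emit the collected stats with LLPerformance.done().

    from indra.util.llperformance import LLPerfBlock, LLPerformance

    perf = LLPerformance("my_script")   # logging turns on only if the simperf config file exists

    block = LLPerfBlock("busy_loop")    # starts timing under the current stat path
    total = sum(range(100000))          # ...the code being measured...
    block.finish()                      # stops timing and pops the stat path

    perf.done()                         # writes /dev/shm/simperf/my_script_proc.<pid>.llsd if enabled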
diff --git a/indra/lib/python/indra/util/llsubprocess.py b/indra/lib/python/indra/util/llsubprocess.py
index b6082de74a..7e0e115d14 100644
--- a/indra/lib/python/indra/util/llsubprocess.py
+++ b/indra/lib/python/indra/util/llsubprocess.py
@@ -6,7 +6,7 @@
$LicenseInfo:firstyear=2007&license=mit$
-Copyright (c) 2007, Linden Research, Inc.
+Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -90,6 +90,17 @@ all the output, and get the result.
child.tochild.close()
result = child.poll()
if result != -1:
+ # At this point, the child process has exited and result
+ # is the return value from the process. Between the time
+ # we called select() and poll() the process may have
+ # exited so read all the data left on the child process
+ # stdout and stderr.
+ last = child.fromchild.read()
+ if last:
+ out.append(last)
+ last = child.childerr.read()
+ if last:
+ err.append(last)
child.tochild.close()
child.fromchild.close()
child.childerr.close()
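The same race exists with the newer subprocess module: poll() can report the exit while output is still buffered in the pipes. A minimal sketch of the equivalent drain step (the helper name is illustrative):

    import subprocess

    def drain_after_exit(child, out, err):
        # child is a subprocess.Popen created with stdout=PIPE and stderr=PIPE.
        # Once poll() reports an exit status, read whatever is still buffered
        # so the tail of the output is not lost.
        if child.poll() is not None:
            last = child.stdout.read()
            if last:
                out.append(last)
            last = child.stderr.read()
            if last:
                err.append(last)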
diff --git a/indra/lib/python/indra/util/llversion.py b/indra/lib/python/indra/util/llversion.py
index 5e699d58ba..2718a85f41 100644
--- a/indra/lib/python/indra/util/llversion.py
+++ b/indra/lib/python/indra/util/llversion.py
@@ -1,11 +1,11 @@
"""@file llversion.py
@brief Utility for parsing llcommon/llversion${server}.h
for the version string and channel string
- Utility that parses svn info for branch and revision
+ Utility that parses hg or svn info for branch and revision
$LicenseInfo:firstyear=2006&license=mit$
-Copyright (c) 2006-2007, Linden Research, Inc.
+Copyright (c) 2006-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -79,8 +79,8 @@ def get_svn_status_matching(regular_expression):
status, output = commands.getstatusoutput('svn info %s' % get_src_root())
m = regular_expression.search(output)
if not m:
- print "Failed to parse svn info output, resultfollows:"
- print output
+ print >> sys.stderr, "Failed to parse svn info output, result follows:"
+ print >> sys.stderr, output
raise Exception, "No matching svn status in "+src_root
return m.group(1)
@@ -92,4 +92,35 @@ def get_svn_revision():
last_rev_re = re.compile('Last Changed Rev: (\d+)')
return get_svn_status_matching(last_rev_re)
-
+def get_hg_repo():
+ status, output = commands.getstatusoutput('hg showconfig paths.default')
+ if status:
+ print >> sys.stderr, output
+ sys.exit(1)
+ if not output:
+ print >> sys.stderr, 'ERROR: cannot find repo we cloned from'
+ sys.exit(1)
+ return output
+
+def get_hg_changeset():
+ # The right thing to do:
+ # status, output = commands.getstatusoutput('hg id -i')
+ # if status:
+ # print >> sys.stderr, output
+ # sys.exit(1)
+
+ # The temporary hack:
+ status, output = commands.getstatusoutput('hg parents --template "{rev}"')
+ if status:
+ print >> sys.stderr, output
+ sys.exit(1)
+ lines = output.splitlines()
+ if len(lines) > 1:
+ print >> sys.stderr, 'ERROR: working directory has %d parents' % len(lines)
+ return lines[0]
+
+def using_svn():
+ return os.path.isdir(os.path.join(get_src_root(), '.svn'))
+
+def using_hg():
+ return os.path.isdir(os.path.join(get_src_root(), '.hg'))
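With both back ends in place, a caller would normally pick the revision source from the working-copy type; a sketch using the helpers above (the wrapper name is illustrative, not part of llversion):

    def get_revision():
        # Prefer Mercurial metadata when the tree is an hg clone,
        # otherwise fall back to the existing svn support.
        if using_hg():
            return get_hg_changeset()
        elif using_svn():
            return get_svn_revision()
        return None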
diff --git a/indra/lib/python/indra/util/named_query.py b/indra/lib/python/indra/util/named_query.py
index 063ef7932e..5c19368240 100644
--- a/indra/lib/python/indra/util/named_query.py
+++ b/indra/lib/python/indra/util/named_query.py
@@ -6,7 +6,7 @@
$LicenseInfo:firstyear=2007&license=mit$
-Copyright (c) 2007, Linden Research, Inc.
+Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -47,7 +47,8 @@ except NameError:
from indra.base import llsd
from indra.base import config
-NQ_FILE_SUFFIX = config.get('named-query-file-suffix', '')
+DEBUG = False
+NQ_FILE_SUFFIX = config.get('named-query-file-suffix', '.nq')
NQ_FILE_SUFFIX_LEN = len(NQ_FILE_SUFFIX)
_g_named_manager = None
@@ -58,22 +59,29 @@ def _init_g_named_manager(sql_dir = None):
This function is intended entirely for testing purposes,
because it's tricky to control the config from inside a test."""
+ global NQ_FILE_SUFFIX
+ NQ_FILE_SUFFIX = config.get('named-query-file-suffix', '.nq')
+ global NQ_FILE_SUFFIX_LEN
+ NQ_FILE_SUFFIX_LEN = len(NQ_FILE_SUFFIX)
+
if sql_dir is None:
sql_dir = config.get('named-query-base-dir')
# extra fallback directory in case config doesn't return what we want
if sql_dir is None:
- sql_dir = os.path.dirname(__file__) + "../../../../web/dataservice/sql"
+ sql_dir = os.path.abspath(
+ os.path.join(
+ os.path.realpath(os.path.dirname(__file__)), "..", "..", "..", "..", "web", "dataservice", "sql"))
global _g_named_manager
_g_named_manager = NamedQueryManager(
os.path.abspath(os.path.realpath(sql_dir)))
-def get(name):
+def get(name, schema = None):
"Get the named query object to be used to perform queries"
if _g_named_manager is None:
_init_g_named_manager()
- return _g_named_manager.get(name)
+ return _g_named_manager.get(name).for_schema(schema)
def sql(connection, name, params):
# use module-global NamedQuery object to perform default substitution
@@ -103,11 +111,12 @@ class NamedQuery(object):
def __init__(self, name, filename):
""" Construct a NamedQuery object. The name argument is an
arbitrary name as a handle for the query, and the filename is
- a path to a file containing an llsd named query document."""
+ a path to a file or a file-like object containing an llsd named
+ query document."""
self._stat_interval_seconds = 5 # 5 seconds
self._name = name
- if (filename is not None) \
- and (NQ_FILE_SUFFIX != filename[-NQ_FILE_SUFFIX_LEN:]):
+ if (filename is not None and isinstance(filename, (str, unicode))
+ and NQ_FILE_SUFFIX != filename[-NQ_FILE_SUFFIX_LEN:]):
filename = filename + NQ_FILE_SUFFIX
self._location = filename
self._alternative = dict()
@@ -122,8 +131,8 @@ class NamedQuery(object):
def get_modtime(self):
""" Returns the mtime (last modified time) of the named query
- file, if such exists."""
- if self._location:
+        filename.  For file-like objects, a modtime of 0 is returned."""
+ if self._location and isinstance(self._location, (str, unicode)):
return os.path.getmtime(self._location)
return 0
@@ -131,7 +140,12 @@ class NamedQuery(object):
""" Loads and parses the named query file into self. Does
nothing if self.location is nonexistant."""
if self._location:
- self._reference_contents(llsd.parse(open(self._location).read()))
+ if isinstance(self._location, (str, unicode)):
+ contents = llsd.parse(open(self._location).read())
+ else:
+ # we probably have a file-like object. Godspeed!
+ contents = llsd.parse(self._location.read())
+ self._reference_contents(contents)
# Check for alternative implementations
try:
for name, alt in self._contents['alternative'].items():
@@ -182,6 +196,16 @@ class NamedQuery(object):
ready them for use in LIKE statements"""
if sql:
#print >>sys.stderr, "sql:",sql
+
+ # This first sub is to properly escape any % signs that
+ # are meant to be literally passed through to mysql in the
+ # query. It leaves any %'s that are used for
+ # like-expressions.
+ expr = re.compile("(?<=[^a-zA-Z0-9_-])%(?=[^:])")
+ sql = expr.sub('%%', sql)
+
+ # This should tackle the rest of the %'s in the query, by
+ # converting them to LIKE clauses.
expr = re.compile("(%?):([a-zA-Z][a-zA-Z0-9_-]*)%")
sql = expr.sub(self._prepare_like, sql)
expr = re.compile("#:([a-zA-Z][a-zA-Z0-9_-]*)")
@@ -270,7 +294,10 @@ class NamedQuery(object):
So, we need a vendor (or extention) for LIKE_STRING. Anyone
want to write it?"""
- utf8_value = unicode(value, "utf-8")
+ if isinstance(value, unicode):
+ utf8_value = value
+ else:
+ utf8_value = unicode(value, "utf-8")
esc_list = []
remove_chars = set(u"%_")
for glyph in utf8_value:
@@ -307,6 +334,8 @@ class NamedQuery(object):
def for_schema(self, db_name):
"Look trough the alternates and return the correct query"
+ if db_name is None:
+ return self
try:
return self._alternative[db_name]
except KeyError, e:
@@ -331,20 +360,21 @@ class NamedQuery(object):
cursor = connection.cursor(MySQLdb.cursors.DictCursor)
else:
cursor = connection.cursor()
-
- statement = self.sql(connection, params)
- #print "SQL:", statement
- rows = cursor.execute(statement)
-
+
+ full_query, params = self._construct_sql(params)
+ if DEBUG:
+ print "SQL:", self.sql(connection, params)
+ rows = cursor.execute(full_query, params)
+
# *NOTE: the expect_rows argument is a very cheesy way to get some
# validation on the result set. If you want to add more expectation
- # logic, do something more object-oriented and flexible. Or use an ORM.
+ # logic, do something more object-oriented and flexible. Or use an ORM.
if(self._return_as_map):
expect_rows = 1
if expect_rows is not None and rows != expect_rows:
cursor.close()
- raise ExpectationFailed("Statement expected %s rows, got %s. Sql: %s" % (
- expect_rows, rows, statement))
+ raise ExpectationFailed("Statement expected %s rows, got %s. Sql: '%s' %s" % (
+ expect_rows, rows, full_query, params))
# convert to dicts manually if we're not using a dictcursor
if use_dictcursor:
@@ -370,11 +400,9 @@ class NamedQuery(object):
return result_set[0]
return result_set
- def sql(self, connection, params):
- """ Generates an SQL statement from the named query document
- and a dictionary of parameters.
-
- """
+ def _construct_sql(self, params):
+ """ Returns a query string and a dictionary of parameters,
+ suitable for directly passing to the execute() method."""
self.refresh()
# build the query from the options available and the params
@@ -422,10 +450,23 @@ class NamedQuery(object):
new_params[self._build_integer_key(key)] = int(params[key])
params.update(new_params)
+ return full_query, params
+
+ def sql(self, connection, params):
+ """ Generates an SQL statement from the named query document
+ and a dictionary of parameters.
+
+ *NOTE: Only use for debugging, because it uses the
+ non-standard MySQLdb 'literal' method.
+ """
+ if not DEBUG:
+ import warnings
+ warnings.warn("Don't use named_query.sql() when not debugging. Used on %s" % self._location)
# do substitution using the mysql (non-standard) 'literal'
# function to do the escaping.
- sql = full_query % connection.literal(params)
- return sql
+ full_query, params = self._construct_sql(params)
+ return full_query % connection.literal(params)
+
def refresh(self):
""" Refresh self from the file on the filesystem.
diff --git a/indra/lib/python/indra/util/shutil2.py b/indra/lib/python/indra/util/shutil2.py
index 3acb44bf6f..9e2e7a6ded 100644
--- a/indra/lib/python/indra/util/shutil2.py
+++ b/indra/lib/python/indra/util/shutil2.py
@@ -4,7 +4,7 @@
$LicenseInfo:firstyear=2007&license=mit$
-Copyright (c) 2007, Linden Research, Inc.
+Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/indra/lib/python/indra/util/simperf_host_xml_parser.py b/indra/lib/python/indra/util/simperf_host_xml_parser.py
new file mode 100755
index 0000000000..672c1050c2
--- /dev/null
+++ b/indra/lib/python/indra/util/simperf_host_xml_parser.py
@@ -0,0 +1,338 @@
+#!/usr/bin/env python
+"""\
+@file simperf_host_xml_parser.py
+@brief Digest collector's XML dump and convert to simple dict/list structure
+
+$LicenseInfo:firstyear=2008&license=mit$
+
+Copyright (c) 2008-2009, Linden Research, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+$/LicenseInfo$
+"""
+
+import sys, os, getopt, time
+import simplejson
+from xml import sax
+
+
+def usage():
+ print "Usage:"
+ print sys.argv[0] + " [options]"
+ print " Convert RRD's XML dump to JSON. Script to convert the simperf_host_collector-"
+ print " generated RRD dump into JSON. Steps include converting selected named"
+ print " fields from GAUGE type to COUNTER type by computing delta with preceding"
+ print " values. Top-level named fields are:"
+ print
+ print " lastupdate Time (javascript timestamp) of last data sample"
+ print " step Time in seconds between samples"
+ print " ds Data specification (name/type) for each column"
+ print " database Table of data samples, one time step per row"
+ print
+ print "Options:"
+ print " -i, --in Input settings filename. (Default: stdin)"
+ print " -o, --out Output settings filename. (Default: stdout)"
+ print " -h, --help Print this message and exit."
+ print
+ print "Example: %s -i rrddump.xml -o rrddump.json" % sys.argv[0]
+ print
+ print "Interfaces:"
+ print " class SimPerfHostXMLParser() # SAX content handler"
+ print " def simperf_host_xml_fixup(parser) # post-parse value fixup"
+
+class SimPerfHostXMLParser(sax.handler.ContentHandler):
+
+ def __init__(self):
+ pass
+
+ def startDocument(self):
+ self.rrd_last_update = 0 # public
+ self.rrd_step = 0 # public
+ self.rrd_ds = [] # public
+ self.rrd_records = [] # public
+ self._rrd_level = 0
+ self._rrd_parse_state = 0
+ self._rrd_chars = ""
+ self._rrd_capture = False
+ self._rrd_ds_val = {}
+ self._rrd_data_row = []
+ self._rrd_data_row_has_nan = False
+
+ def endDocument(self):
+ pass
+
+ # Nasty little ad-hoc state machine to extract the elements that are
+ # necessary from the 'rrdtool dump' XML output. The same element
+ # name '<ds>' is used for two different data sets so we need to pay
+ # some attention to the actual structure to get the ones we want
+ # and ignore the ones we don't.
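+    # The relevant shape of an 'rrdtool dump', as assumed by the states
+    # below (everything else is ignored):
+    #
+    #   <rrd>
+    #     <lastupdate>...</lastupdate>
+    #     <step>...</step>
+    #     <ds> <name>...</name> <type>...</type> ... </ds>
+    #     <rra>
+    #       <database>
+    #         <row> <v>...</v> <v>...</v> ... </row>
+    #       </database>
+    #     </rra>
+    #   </rrd>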
+
+ def startElement(self, name, attrs):
+ self._rrd_level = self._rrd_level + 1
+ self._rrd_capture = False
+ if self._rrd_level == 1:
+ if name == "rrd" and self._rrd_parse_state == 0:
+ self._rrd_parse_state = 1 # In <rrd>
+ self._rrd_capture = True
+ self._rrd_chars = ""
+ elif self._rrd_level == 2:
+ if self._rrd_parse_state == 1:
+ if name == "lastupdate":
+ self._rrd_parse_state = 2 # In <rrd><lastupdate>
+ self._rrd_capture = True
+ self._rrd_chars = ""
+ elif name == "step":
+ self._rrd_parse_state = 3 # In <rrd><step>
+ self._rrd_capture = True
+ self._rrd_chars = ""
+ elif name == "ds":
+ self._rrd_parse_state = 4 # In <rrd><ds>
+ self._rrd_ds_val = {}
+ self._rrd_chars = ""
+ elif name == "rra":
+ self._rrd_parse_state = 5 # In <rrd><rra>
+ elif self._rrd_level == 3:
+ if self._rrd_parse_state == 4:
+ if name == "name":
+ self._rrd_parse_state = 6 # In <rrd><ds><name>
+ self._rrd_capture = True
+ self._rrd_chars = ""
+ elif name == "type":
+ self._rrd_parse_state = 7 # In <rrd><ds><type>
+ self._rrd_capture = True
+ self._rrd_chars = ""
+ elif self._rrd_parse_state == 5:
+ if name == "database":
+ self._rrd_parse_state = 8 # In <rrd><rra><database>
+ elif self._rrd_level == 4:
+ if self._rrd_parse_state == 8:
+ if name == "row":
+ self._rrd_parse_state = 9 # In <rrd><rra><database><row>
+ self._rrd_data_row = []
+ self._rrd_data_row_has_nan = False
+ elif self._rrd_level == 5:
+ if self._rrd_parse_state == 9:
+ if name == "v":
+ self._rrd_parse_state = 10 # In <rrd><rra><database><row><v>
+ self._rrd_capture = True
+ self._rrd_chars = ""
+
+ def endElement(self, name):
+ self._rrd_capture = False
+ if self._rrd_parse_state == 10:
+ self._rrd_capture = self._rrd_level == 6
+ if self._rrd_level == 5:
+ if self._rrd_chars == "NaN":
+ self._rrd_data_row_has_nan = True
+ else:
+ self._rrd_data_row.append(self._rrd_chars)
+ self._rrd_parse_state = 9 # In <rrd><rra><database><row>
+ elif self._rrd_parse_state == 9:
+ if self._rrd_level == 4:
+ if not self._rrd_data_row_has_nan:
+ self.rrd_records.append(self._rrd_data_row)
+ self._rrd_parse_state = 8 # In <rrd><rra><database>
+ elif self._rrd_parse_state == 8:
+ if self._rrd_level == 3:
+ self._rrd_parse_state = 5 # In <rrd><rra>
+ elif self._rrd_parse_state == 7:
+ if self._rrd_level == 3:
+ self._rrd_ds_val["type"] = self._rrd_chars
+ self._rrd_parse_state = 4 # In <rrd><ds>
+ elif self._rrd_parse_state == 6:
+ if self._rrd_level == 3:
+ self._rrd_ds_val["name"] = self._rrd_chars
+ self._rrd_parse_state = 4 # In <rrd><ds>
+ elif self._rrd_parse_state == 5:
+ if self._rrd_level == 2:
+ self._rrd_parse_state = 1 # In <rrd>
+ elif self._rrd_parse_state == 4:
+ if self._rrd_level == 2:
+ self.rrd_ds.append(self._rrd_ds_val)
+ self._rrd_parse_state = 1 # In <rrd>
+ elif self._rrd_parse_state == 3:
+ if self._rrd_level == 2:
+ self.rrd_step = long(self._rrd_chars)
+ self._rrd_parse_state = 1 # In <rrd>
+ elif self._rrd_parse_state == 2:
+ if self._rrd_level == 2:
+ self.rrd_last_update = long(self._rrd_chars)
+ self._rrd_parse_state = 1 # In <rrd>
+ elif self._rrd_parse_state == 1:
+ if self._rrd_level == 1:
+ self._rrd_parse_state = 0 # At top
+
+ if self._rrd_level:
+ self._rrd_level = self._rrd_level - 1
+
+ def characters(self, content):
+ if self._rrd_capture:
+ self._rrd_chars = self._rrd_chars + content.strip()
+
+def _make_numeric(value):
+ try:
+ value = float(value)
+    except (ValueError, TypeError):
+ value = ""
+ return value
+
+def simperf_host_xml_fixup(parser, filter_start_time = None, filter_end_time = None):
+ # Fixup for GAUGE fields that are really COUNTS. They
+ # were forced to GAUGE to try to disable rrdtool's
+ # data interpolation/extrapolation for non-uniform time
+ # samples.
+ fixup_tags = [ "cpu_user",
+ "cpu_nice",
+ "cpu_sys",
+ "cpu_idle",
+ "cpu_waitio",
+ "cpu_intr",
+ # "file_active",
+ # "file_free",
+ # "inode_active",
+ # "inode_free",
+ "netif_in_kb",
+ "netif_in_pkts",
+ "netif_in_errs",
+ "netif_in_drop",
+ "netif_out_kb",
+ "netif_out_pkts",
+ "netif_out_errs",
+ "netif_out_drop",
+ "vm_page_in",
+ "vm_page_out",
+ "vm_swap_in",
+ "vm_swap_out",
+ #"vm_mem_total",
+ #"vm_mem_used",
+ #"vm_mem_active",
+ #"vm_mem_inactive",
+ #"vm_mem_free",
+ #"vm_mem_buffer",
+ #"vm_swap_cache",
+ #"vm_swap_total",
+ #"vm_swap_used",
+ #"vm_swap_free",
+ "cpu_interrupts",
+ "cpu_switches",
+ "cpu_forks" ]
+
+ col_count = len(parser.rrd_ds)
+ row_count = len(parser.rrd_records)
+
+ # Process the last row separately, just to make all values numeric.
+ for j in range(col_count):
+ parser.rrd_records[row_count - 1][j] = _make_numeric(parser.rrd_records[row_count - 1][j])
+
+ # Process all other row/columns.
+ last_different_row = row_count - 1
+ current_row = row_count - 2
+ while current_row >= 0:
+ # Check for a different value than the previous row. If everything is the same
+ # then this is probably just a filler/bogus entry.
+ is_different = False
+ for j in range(col_count):
+ parser.rrd_records[current_row][j] = _make_numeric(parser.rrd_records[current_row][j])
+ if parser.rrd_records[current_row][j] != parser.rrd_records[last_different_row][j]:
+ # We're good. This is a different row.
+ is_different = True
+
+ if not is_different:
+ # This is a filler/bogus entry. Just ignore it.
+ for j in range(col_count):
+ parser.rrd_records[current_row][j] = float('nan')
+ else:
+ # Some tags need to be converted into deltas.
+ for j in range(col_count):
+ if parser.rrd_ds[j]["name"] in fixup_tags:
+ parser.rrd_records[last_different_row][j] = \
+ parser.rrd_records[last_different_row][j] - parser.rrd_records[current_row][j]
+ last_different_row = current_row
+
+ current_row -= 1
+
+ # Set fixup_tags in the first row to 'nan' since they aren't useful anymore.
+ for j in range(col_count):
+ if parser.rrd_ds[j]["name"] in fixup_tags:
+ parser.rrd_records[0][j] = float('nan')
+
+ # Add a timestamp to each row and to the catalog. Format and name
+ # chosen to match other simulator logging (hopefully).
+ start_time = parser.rrd_last_update - (parser.rrd_step * (row_count - 1))
+ # Build a filtered list of rrd_records if we are limited to a time range.
+ filter_records = False
+ if filter_start_time is not None or filter_end_time is not None:
+ filter_records = True
+ filtered_rrd_records = []
+ if filter_start_time is None:
+ filter_start_time = start_time * 1000
+ if filter_end_time is None:
+ filter_end_time = parser.rrd_last_update * 1000
+
+ for i in range(row_count):
+ record_timestamp = (start_time + (i * parser.rrd_step)) * 1000
+ parser.rrd_records[i].insert(0, record_timestamp)
+ if filter_records:
+ if filter_start_time <= record_timestamp and record_timestamp <= filter_end_time:
+ filtered_rrd_records.append(parser.rrd_records[i])
+
+ if filter_records:
+ parser.rrd_records = filtered_rrd_records
+
+ parser.rrd_ds.insert(0, {"type": "GAUGE", "name": "javascript_timestamp"})
+
+
+def main(argv=None):
+ opts, args = getopt.getopt(sys.argv[1:], "i:o:h", ["in=", "out=", "help"])
+ input_file = sys.stdin
+ output_file = sys.stdout
+ for o, a in opts:
+ if o in ("-i", "--in"):
+ input_file = open(a, 'r')
+ if o in ("-o", "--out"):
+ output_file = open(a, 'w')
+ if o in ("-h", "--help"):
+ usage()
+ sys.exit(0)
+
+ # Using the SAX parser as it is at least 4X faster and far, far
+ # smaller on this dataset than the DOM-based interface in xml.dom.minidom.
+ # With SAX and a 5.4MB xml file, this requires about seven seconds of
+ # wall-clock time and 32MB VSZ. With the DOM interface, about 22 seconds
+ # and over 270MB VSZ.
+
+ handler = SimPerfHostXMLParser()
+ sax.parse(input_file, handler)
+ if input_file != sys.stdin:
+ input_file.close()
+
+ # Various format fixups: string-to-num, gauge-to-counts, add
+ # a time stamp, etc.
+ simperf_host_xml_fixup(handler)
+
+ # Create JSONable dict with interesting data and format/print it
+ print >>output_file, simplejson.dumps({ "step" : handler.rrd_step,
+ "lastupdate": handler.rrd_last_update * 1000,
+ "ds" : handler.rrd_ds,
+ "database" : handler.rrd_records })
+
+ return 0
+
+if __name__ == "__main__":
+ sys.exit(main())
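The GAUGE-to-COUNTER fixup is easiest to see on synthetic data; a small illustration of simperf_host_xml_fixup (the field name and values are made up):

    from indra.util.simperf_host_xml_parser import simperf_host_xml_fixup

    class FakeParser(object):
        """Stand-in for SimPerfHostXMLParser carrying synthetic data."""
        pass

    p = FakeParser()
    p.rrd_last_update = 1000                            # seconds
    p.rrd_step = 10                                     # seconds per sample
    p.rrd_ds = [{"name": "cpu_user", "type": "GAUGE"}]
    p.rrd_records = [["100"], ["150"], ["170"]]

    simperf_host_xml_fixup(p)
    print p.rrd_records
    # -> [[980000, nan], [990000, 50.0], [1000000, 20.0]]
    # Cumulative cpu_user samples become per-step deltas, the first row is
    # NaN'd out, and a javascript-style timestamp column is prepended.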
diff --git a/indra/lib/python/indra/util/simperf_oprof_interface.py b/indra/lib/python/indra/util/simperf_oprof_interface.py
new file mode 100755
index 0000000000..547d2f9980
--- /dev/null
+++ b/indra/lib/python/indra/util/simperf_oprof_interface.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python
+"""\
+@file simperf_oprof_interface.py
+@brief Manage OProfile data collection on a host
+
+$LicenseInfo:firstyear=2008&license=mit$
+
+Copyright (c) 2008-2009, Linden Research, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+$/LicenseInfo$
+"""
+
+import sys, os, getopt
+import simplejson
+
+
+def usage():
+ print "Usage:"
+ print sys.argv[0] + " [options]"
+ print " Digest the OProfile report forms that come out of the"
+ print " simperf_oprof_ctl program's -r/--report command. The result"
+ print " is an array of dictionaires with the following keys:"
+ print
+ print " symbol Name of sampled, calling, or called procedure"
+ print " file Executable or library where symbol resides"
+ print " percentage Percentage contribution to profile, calls or called"
+ print " samples Sample count"
+ print " calls Methods called by the method in question (full only)"
+ print " called_by Methods calling the method (full only)"
+ print
+ print " For 'full' reports the two keys 'calls' and 'called_by' are"
+ print " themselves arrays of dictionaries based on the first four keys."
+ print
+ print "Return Codes:"
+ print " None. Aggressively digests everything. Will likely mung results"
+ print " if a program or library has whitespace in its name."
+ print
+ print "Options:"
+ print " -i, --in Input settings filename. (Default: stdin)"
+ print " -o, --out Output settings filename. (Default: stdout)"
+ print " -h, --help Print this message and exit."
+ print
+ print "Interfaces:"
+ print " class SimPerfOProfileInterface()"
+
+class SimPerfOProfileInterface:
+ def __init__(self):
+ self.isBrief = True # public
+ self.isValid = False # public
+ self.result = [] # public
+
+ def parse(self, input):
+ in_samples = False
+ for line in input:
+ if in_samples:
+ if line[0:6] == "------":
+ self.isBrief = False
+ self._parseFull(input)
+ else:
+ self._parseBrief(input, line)
+ self.isValid = True
+ return
+ try:
+ hd1, remain = line.split(None, 1)
+ if hd1 == "samples":
+ in_samples = True
+ except ValueError:
+ pass
+
+ def _parseBrief(self, input, line1):
+ try:
+ fld1, fld2, fld3, fld4 = line1.split(None, 3)
+ self.result.append({"samples" : fld1,
+ "percentage" : fld2,
+ "file" : fld3,
+ "symbol" : fld4.strip("\n")})
+ except ValueError:
+ pass
+ for line in input:
+ try:
+ fld1, fld2, fld3, fld4 = line.split(None, 3)
+ self.result.append({"samples" : fld1,
+ "percentage" : fld2,
+ "file" : fld3,
+ "symbol" : fld4.strip("\n")})
+ except ValueError:
+ pass
+
+ def _parseFull(self, input):
+ state = 0 # In 'called_by' section
+ calls = []
+ called_by = []
+ current = {}
+ for line in input:
+ if line[0:6] == "------":
+ if len(current):
+ current["calls"] = calls
+ current["called_by"] = called_by
+ self.result.append(current)
+ state = 0
+ calls = []
+ called_by = []
+ current = {}
+ else:
+ try:
+ fld1, fld2, fld3, fld4 = line.split(None, 3)
+ tmp = {"samples" : fld1,
+ "percentage" : fld2,
+ "file" : fld3,
+ "symbol" : fld4.strip("\n")}
+ except ValueError:
+ continue
+ if line[0] != " ":
+ current = tmp
+ state = 1 # In 'calls' section
+ elif state == 0:
+ called_by.append(tmp)
+ else:
+ calls.append(tmp)
+ if len(current):
+ current["calls"] = calls
+ current["called_by"] = called_by
+ self.result.append(current)
+
+
+def main(argv=None):
+ opts, args = getopt.getopt(sys.argv[1:], "i:o:h", ["in=", "out=", "help"])
+ input_file = sys.stdin
+ output_file = sys.stdout
+ for o, a in opts:
+ if o in ("-i", "--in"):
+ input_file = open(a, 'r')
+ if o in ("-o", "--out"):
+ output_file = open(a, 'w')
+ if o in ("-h", "--help"):
+ usage()
+ sys.exit(0)
+
+ oprof = SimPerfOProfileInterface()
+ oprof.parse(input_file)
+ if input_file != sys.stdin:
+ input_file.close()
+
+ # Create JSONable dict with interesting data and format/print it
+ print >>output_file, simplejson.dumps(oprof.result)
+
+ return 0
+
+if __name__ == "__main__":
+ sys.exit(main())
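A small, self-contained illustration of the parser on a synthetic 'brief' report (the image and symbol names are made up; only the shape matters: a header line whose first field is 'samples', then one "samples percentage image symbol" line per row):

    from StringIO import StringIO
    from indra.util.simperf_oprof_interface import SimPerfOProfileInterface

    report = StringIO(
        "samples  %        image name   symbol name\n"
        "1234     56.7890  simulator    LLAgent::updateAgent(float)\n"
        "321      12.3456  libc.so      memcpy\n")

    oprof = SimPerfOProfileInterface()
    oprof.parse(report)
    print oprof.isBrief, oprof.isValid    # True True
    print oprof.result                    # two dicts with samples/percentage/file/symbol keys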
diff --git a/indra/lib/python/indra/util/simperf_proc_interface.py b/indra/lib/python/indra/util/simperf_proc_interface.py
new file mode 100755
index 0000000000..da6304a274
--- /dev/null
+++ b/indra/lib/python/indra/util/simperf_proc_interface.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+"""\
+@file simperf_proc_interface.py
+@brief Utility to extract log messages from *.<pid>.llsd files containing performance statistics.
+
+$LicenseInfo:firstyear=2008&license=mit$
+
+Copyright (c) 2008-2009, Linden Research, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+$/LicenseInfo$
+"""
+
+# ----------------------------------------------------
+# Utility to extract log messages from *.<pid>.llsd
+# files that contain performance statistics.
+
+# ----------------------------------------------------
+import sys, os
+
+if os.path.exists("setup-path.py"):
+ execfile("setup-path.py")
+
+from indra.base import llsd
+
+DEFAULT_PATH="/dev/shm/simperf/"
+
+
+# ----------------------------------------------------
+# Pull out the stats and return a single document
+def parse_logfile(filename, target_column=None, verbose=False):
+ full_doc = []
+ # Open source temp log file. Let exceptions percolate up.
+ sourcefile = open( filename,'r')
+
+ if verbose:
+ print "Reading " + filename
+
+ # Parse and output all lines from the temp file
+ for line in sourcefile.xreadlines():
+ partial_doc = llsd.parse(line)
+ if partial_doc is not None:
+ if target_column is None:
+ full_doc.append(partial_doc)
+ else:
+ trim_doc = { target_column: partial_doc[target_column] }
+ if target_column != "fps":
+ trim_doc[ 'fps' ] = partial_doc[ 'fps' ]
+ trim_doc[ '/total_time' ] = partial_doc[ '/total_time' ]
+ trim_doc[ 'utc_time' ] = partial_doc[ 'utc_time' ]
+ full_doc.append(trim_doc)
+
+ sourcefile.close()
+ return full_doc
+
+# Extract just the meta info line, and the timestamp of the first/last frame entry.
+def parse_logfile_info(filename, verbose=False):
+ # Open source temp log file. Let exceptions percolate up.
+ sourcefile = open(filename, 'rU') # U is to open with Universal newline support
+
+ if verbose:
+ print "Reading " + filename
+
+ # The first line is the meta info line.
+ info_line = sourcefile.readline()
+ if not info_line:
+ sourcefile.close()
+ return None
+
+ # The rest of the lines are frames. Read the first and last to get the time range.
+ info = llsd.parse( info_line )
+ info['start_time'] = None
+ info['end_time'] = None
+ first_frame = sourcefile.readline()
+ if first_frame:
+ try:
+ info['start_time'] = int(llsd.parse(first_frame)['timestamp'])
+ except:
+ pass
+
+ # Read the file backwards to find the last two lines.
+ sourcefile.seek(0, 2)
+ file_size = sourcefile.tell()
+ offset = 1024
+ num_attempts = 0
+ end_time = None
+ if file_size < offset:
+ offset = file_size
+ while 1:
+ sourcefile.seek(-1*offset, 2)
+ read_str = sourcefile.read(offset)
+ # Remove newline at the end
+ if read_str[offset - 1] == '\n':
+ read_str = read_str[0:-1]
+ lines = read_str.split('\n')
+ full_line = None
+        if len(lines) > 2: # Got at least two complete lines
+ try:
+ end_time = llsd.parse(lines[-1])['timestamp']
+ except:
+ # We couldn't parse this line. Try once more.
+ try:
+ end_time = llsd.parse(lines[-2])['timestamp']
+ except:
+ # Nope. Just move on.
+ pass
+ break
+ if len(read_str) == file_size: # Reached the beginning
+ break
+ offset += 1024
+
+    if end_time is not None:
+        info['end_time'] = int(end_time)
+
+ sourcefile.close()
+ return info
+
+
+def parse_proc_filename(filename):
+ try:
+ name_as_list = filename.split(".")
+ cur_stat_type = name_as_list[0].split("_")[0]
+ cur_pid = name_as_list[1]
+    except (IndexError, ValueError):
+ return (None, None)
+ return (cur_pid, cur_stat_type)
+
+# ----------------------------------------------------
+def get_simstats_list(path=None):
+ """ Return stats (pid, type) listed in <type>_proc.<pid>.llsd """
+ if path is None:
+ path = DEFAULT_PATH
+ simstats_list = []
+ for file_name in os.listdir(path):
+ if file_name.endswith(".llsd") and file_name != "simperf_proc_config.llsd":
+ simstats_info = parse_logfile_info(path + file_name)
+ if simstats_info is not None:
+ simstats_list.append(simstats_info)
+ return simstats_list
+
+def get_log_info_list(pid=None, stat_type=None, path=None, target_column=None, verbose=False):
+ """ Return data from all llsd files matching the pid and stat type """
+ if path is None:
+ path = DEFAULT_PATH
+ log_info_list = {}
+ for file_name in os.listdir ( path ):
+ if file_name.endswith(".llsd") and file_name != "simperf_proc_config.llsd":
+ (cur_pid, cur_stat_type) = parse_proc_filename(file_name)
+ if cur_pid is None:
+ continue
+ if pid is not None and pid != cur_pid:
+ continue
+ if stat_type is not None and stat_type != cur_stat_type:
+ continue
+ log_info_list[cur_pid] = parse_logfile(path + file_name, target_column, verbose)
+ return log_info_list
+
+def delete_simstats_files(pid=None, stat_type=None, path=None):
+ """ Delete *.<pid>.llsd files """
+ if path is None:
+ path = DEFAULT_PATH
+ del_list = []
+ for file_name in os.listdir(path):
+ if file_name.endswith(".llsd") and file_name != "simperf_proc_config.llsd":
+ (cur_pid, cur_stat_type) = parse_proc_filename(file_name)
+ if cur_pid is None:
+ continue
+ if pid is not None and pid != cur_pid:
+ continue
+ if stat_type is not None and stat_type != cur_stat_type:
+ continue
+ del_list.append(cur_pid)
+ # Allow delete related exceptions to percolate up if this fails.
+            os.unlink(os.path.join(path, file_name))
+ return del_list
+
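A usage sketch for the helpers above, assuming stats files exist under /dev/shm/simperf/ (the stat type and column name are illustrative):

    from indra.util.simperf_proc_interface import \
        get_simstats_list, get_log_info_list, delete_simstats_files

    # Meta info plus first/last timestamps for every stats file present.
    for info in get_simstats_list():
        print info

    # Pull one column of frame data for a given process type.
    data = get_log_info_list(stat_type="python", target_column="fps", verbose=True)

    # Remove the stats files once they have been consumed.
    delete_simstats_files(stat_type="python")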
diff --git a/indra/lib/python/indra/util/term.py b/indra/lib/python/indra/util/term.py
index 1f9dd55091..8c316a1f12 100644
--- a/indra/lib/python/indra/util/term.py
+++ b/indra/lib/python/indra/util/term.py
@@ -4,7 +4,7 @@
$LicenseInfo:firstyear=2007&license=mit$
-Copyright (c) 2007, Linden Research, Inc.
+Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -13,17 +13,17 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- THE SOFTWARE.
- $/LicenseInfo$
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+$/LicenseInfo$
'''
#http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/475116
diff --git a/indra/lib/python/indra/util/test_win32_manifest.py b/indra/lib/python/indra/util/test_win32_manifest.py
new file mode 100644
index 0000000000..786521c068
--- /dev/null
+++ b/indra/lib/python/indra/util/test_win32_manifest.py
@@ -0,0 +1,143 @@
+#!/usr/bin/env python
+# @file test_win32_manifest.py
+# @brief Test an assembly binding version and uniqueness in a windows dll or exe.
+#
+# $LicenseInfo:firstyear=2009&license=viewerlgpl$
+# Second Life Viewer Source Code
+# Copyright (C) 2010, Linden Research, Inc.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation;
+# version 2.1 of the License only.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
+# $/LicenseInfo$
+
+import sys, os
+import tempfile
+from xml.dom.minidom import parse
+
+class AssemblyTestException(Exception):
+ pass
+
+class NoManifestException(AssemblyTestException):
+ pass
+
+class MultipleBindingsException(AssemblyTestException):
+ pass
+
+class UnexpectedVersionException(AssemblyTestException):
+ pass
+
+class NoMatchingAssemblyException(AssemblyTestException):
+ pass
+
+def get_HKLM_registry_value(key_str, value_str):
+ import _winreg
+ reg = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
+ key = _winreg.OpenKey(reg, key_str)
+ value = _winreg.QueryValueEx(key, value_str)[0]
+ #print 'Found: %s' % value
+ return value
+
+def find_vc_dir():
+ supported_versions = (r'8.0', r'9.0')
+ value_str = (r'ProductDir')
+
+ for version in supported_versions:
+ key_str = (r'SOFTWARE\Microsoft\VisualStudio\%s\Setup\VC' %
+ version)
+ try:
+ return get_HKLM_registry_value(key_str, value_str)
+ except WindowsError, err:
+ x64_key_str = (r'SOFTWARE\Wow6432Node\Microsoft\VisualStudio\%s\Setup\VS' %
+ version)
+ try:
+ return get_HKLM_registry_value(x64_key_str, value_str)
+ except:
+ print >> sys.stderr, "Didn't find MS VC version %s " % version
+
+ raise
+
+def find_mt_path():
+ vc_dir = find_vc_dir()
+ mt_path = '\"%sbin\\mt.exe\"' % vc_dir
+ return mt_path
+
+def test_assembly_binding(src_filename, assembly_name, assembly_ver):
+ print "checking %s dependency %s..." % (src_filename, assembly_name)
+
+ (tmp_file_fd, tmp_file_name) = tempfile.mkstemp(suffix='.xml')
+ tmp_file = os.fdopen(tmp_file_fd)
+ tmp_file.close()
+
+ mt_path = find_mt_path()
+ resource_id = ""
+ if os.path.splitext(src_filename)[1].lower() == ".dll":
+ resource_id = ";#2"
+ system_call = '%s -nologo -inputresource:%s%s -out:%s > NUL' % (mt_path, src_filename, resource_id, tmp_file_name)
+ print "Executing: %s" % system_call
+ mt_result = os.system(system_call)
+ if mt_result == 31:
+ print "No manifest found in %s" % src_filename
+ raise NoManifestException()
+
+ manifest_dom = parse(tmp_file_name)
+ nodes = manifest_dom.getElementsByTagName('assemblyIdentity')
+
+ versions = list()
+ for node in nodes:
+ if node.getAttribute('name') == assembly_name:
+ versions.append(node.getAttribute('version'))
+
+ if len(versions) == 0:
+ print "No matching assemblies found in %s" % src_filename
+ raise NoMatchingAssemblyException()
+
+ elif len(versions) > 1:
+ print "Multiple bindings to %s found:" % assembly_name
+ print versions
+ print
+ raise MultipleBindingsException(versions)
+
+ elif versions[0] != assembly_ver:
+ print "Unexpected version found for %s:" % assembly_name
+ print "Wanted %s, found %s" % (assembly_ver, versions[0])
+ print
+ raise UnexpectedVersionException(assembly_ver, versions[0])
+
+ os.remove(tmp_file_name)
+
+ print "SUCCESS: %s OK!" % src_filename
+ print
+
+if __name__ == '__main__':
+
+ print
+ print "Running test_win32_manifest.py..."
+
+    usage = 'test_win32_manifest <srcFileName> <assemblyName> <assemblyVersion>'
+
+ try:
+ src_filename = sys.argv[1]
+ assembly_name = sys.argv[2]
+ assembly_ver = sys.argv[3]
+ except:
+ print "Usage:"
+ print usage
+ print
+ raise
+
+ test_assembly_binding(src_filename, assembly_name, assembly_ver)
+
+
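An illustrative programmatic call (the binary name, assembly name, and version are placeholders):

    from indra.util.test_win32_manifest import test_assembly_binding

    # Verify that the binary's embedded manifest binds exactly one, expected
    # version of the named assembly; raises an AssemblyTestException subclass
    # on any mismatch.
    test_assembly_binding('secondlife-bin.exe', 'Microsoft.VC80.CRT', '8.0.50727.4053')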