Diffstat (limited to 'indra/lib/python')
36 files changed, 3162 insertions, 409 deletions
diff --git a/indra/lib/python/indra/__init__.py b/indra/lib/python/indra/__init__.py index 353a93ffae..0c5053cf49 100644 --- a/indra/lib/python/indra/__init__.py +++ b/indra/lib/python/indra/__init__.py @@ -2,19 +2,24 @@ @file __init__.py @brief Initialization file for the indra module. -$LicenseInfo:firstyear=2006&license=internal$ +$LicenseInfo:firstyear=2006&license=viewerlgpl$ +Second Life Viewer Source Code +Copyright (C) 2006-2010, Linden Research, Inc. -Copyright (c) 2006-2007, Linden Research, Inc. +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; +version 2.1 of the License only. -The following source code is PROPRIETARY AND CONFIDENTIAL. Use of -this source code is governed by the Linden Lab Source Code Disclosure -Agreement ("Agreement") previously entered between you and Linden -Lab. By accessing, using, copying, modifying or distributing this -software, you acknowledge that you have been informed of your -obligations under the Agreement and agree to abide by those obligations. +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. -ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO -WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, -COMPLETENESS OR PERFORMANCE. +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + +Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA $/LicenseInfo$ """ diff --git a/indra/lib/python/indra/base/__init__.py b/indra/lib/python/indra/base/__init__.py index 913164d090..2904fd3380 100644 --- a/indra/lib/python/indra/base/__init__.py +++ b/indra/lib/python/indra/base/__init__.py @@ -4,7 +4,7 @@ $LicenseInfo:firstyear=2007&license=mit$ -Copyright (c) 2007, Linden Research, Inc. +Copyright (c) 2007-2009, Linden Research, Inc. 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/indra/lib/python/indra/base/cllsd_test.py b/indra/lib/python/indra/base/cllsd_test.py new file mode 100644 index 0000000000..0b20d99d80 --- /dev/null +++ b/indra/lib/python/indra/base/cllsd_test.py @@ -0,0 +1,51 @@ +from indra.base import llsd, lluuid +from datetime import datetime +import cllsd +import time, sys + +class myint(int): + pass + +values = ( + '&<>', + u'\u81acj', + llsd.uri('http://foo<'), + lluuid.UUID(), + llsd.LLSD(['thing']), + 1, + myint(31337), + sys.maxint + 10, + llsd.binary('foo'), + [], + {}, + {u'f&\u1212': 3}, + 3.1, + True, + None, + datetime.fromtimestamp(time.time()), + ) + +def valuator(values): + for v in values: + yield v + +longvalues = () # (values, list(values), iter(values), valuator(values)) + +for v in values + longvalues: + print '%r => %r' % (v, cllsd.llsd_to_xml(v)) + +a = [[{'a':3}]] * 1000000 + +s = time.time() +print hash(cllsd.llsd_to_xml(a)) +e = time.time() +t1 = e - s +print t1 + +s = time.time() +print hash(llsd.LLSDXMLFormatter()._format(a)) +e = time.time() +t2 = e - s +print t2 + +print 'Speedup:', t2 / t1 diff --git a/indra/lib/python/indra/base/config.py b/indra/lib/python/indra/base/config.py index a28c59c702..adafa29b51 100644 --- a/indra/lib/python/indra/base/config.py +++ b/indra/lib/python/indra/base/config.py @@ -4,7 +4,7 @@ $LicenseInfo:firstyear=2006&license=mit$ -Copyright (c) 2006-2007, Linden Research, Inc. +Copyright (c) 2006-2009, Linden Research, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -26,74 +26,241 @@ THE SOFTWARE. $/LicenseInfo$ """ -from os.path import dirname, join, realpath +import copy +import errno +import os +import traceback +import time import types + +from os.path import dirname, getmtime, join, realpath from indra.base import llsd -_g_config_dict = None - -def load(indra_xml_file=None): - global _g_config_dict - if _g_config_dict == None: - if indra_xml_file is None: - ## going from: - ## "/opt/linden/indra/lib/python/indra/base/config.py" - ## to: - ## "/opt/linden/etc/indra.xml" - indra_xml_file = realpath( - dirname(realpath(__file__)) + "../../../../../../etc/indra.xml") - config_file = file(indra_xml_file) - _g_config_dict = llsd.LLSD().parse(config_file.read()) +_g_config = None + +class IndraConfig(object): + """ + IndraConfig loads a 'indra' xml configuration file + and loads into memory. This representation in memory + can get updated to overwrite values or add new values. + + The xml configuration file is considered a live file and changes + to the file are checked and reloaded periodically. 
If a value had + been overwritten via the update or set method, the loaded values + from the file are ignored (the values from the update/set methods + override) + """ + def __init__(self, indra_config_file): + self._indra_config_file = indra_config_file + self._reload_check_interval = 30 # seconds + self._last_check_time = 0 + self._last_mod_time = 0 + + self._config_overrides = {} + self._config_file_dict = {} + self._combined_dict = {} + + self._load() + + def _load(self): + # if you initialize the IndraConfig with None, no attempt + # is made to load any files + if self._indra_config_file is None: + return + + config_file = open(self._indra_config_file) + self._config_file_dict = llsd.parse(config_file.read()) + self._combine_dictionaries() config_file.close() - #print "loaded config from",indra_xml_file,"into",_g_config_dict -def dump(indra_xml_file, indra_cfg={}, update_in_mem=False): + self._last_mod_time = self._get_last_modified_time() + self._last_check_time = time.time() # now + + def _get_last_modified_time(self): + """ + Returns the mtime (last modified time) of the config file, + if such exists. + """ + if self._indra_config_file is not None: + return os.path.getmtime(self._indra_config_file) + + return 0 + + def _combine_dictionaries(self): + self._combined_dict = {} + self._combined_dict.update(self._config_file_dict) + self._combined_dict.update(self._config_overrides) + + def _reload_if_necessary(self): + now = time.time() + + if (now - self._last_check_time) > self._reload_check_interval: + self._last_check_time = now + try: + modtime = self._get_last_modified_time() + if modtime > self._last_mod_time: + self._load() + except OSError, e: + if e.errno == errno.ENOENT: # file not found + # someone messed with our internal state + # or removed the file + + print 'WARNING: Configuration file has been removed ' + (self._indra_config_file) + print 'Disabling reloading of configuration file.' + + traceback.print_exc() + + self._indra_config_file = None + self._last_check_time = 0 + self._last_mod_time = 0 + else: + raise # pass the exception along to the caller + + def __getitem__(self, key): + self._reload_if_necessary() + + return self._combined_dict[key] + + def get(self, key, default = None): + try: + return self.__getitem__(key) + except KeyError: + return default + + def __setitem__(self, key, value): + """ + Sets the value of the config setting of key to be newval + + Once any key/value pair is changed via the set method, + that key/value pair will remain set with that value until + change via the update or set method + """ + self._config_overrides[key] = value + self._combine_dictionaries() + + def set(self, key, newval): + return self.__setitem__(key, newval) + + def update(self, new_conf): + """ + Load an XML file and apply its map as overrides or additions + to the existing config. Update can be a file or a dict. 
+ + Once any key/value pair is changed via the update method, + that key/value pair will remain set with that value until + change via the update or set method + """ + if isinstance(new_conf, dict): + overrides = new_conf + else: + # assuming that it is a filename + config_file = open(new_conf) + overrides = llsd.parse(config_file.read()) + config_file.close() + + self._config_overrides.update(overrides) + self._combine_dictionaries() + + def as_dict(self): + """ + Returns immutable copy of the IndraConfig as a dictionary + """ + return copy.deepcopy(self._combined_dict) + +def load(config_xml_file = None): + global _g_config + + load_default_files = config_xml_file is None + if load_default_files: + ## going from: + ## "/opt/linden/indra/lib/python/indra/base/config.py" + ## to: + ## "/opt/linden/etc/indra.xml" + config_xml_file = realpath( + dirname(realpath(__file__)) + "../../../../../../etc/indra.xml") + + try: + _g_config = IndraConfig(config_xml_file) + except IOError: + # Failure to load passed in file + # or indra.xml default file + if load_default_files: + try: + config_xml_file = realpath( + dirname(realpath(__file__)) + "../../../../../../etc/globals.xml") + _g_config = IndraConfig(config_xml_file) + return + except IOError: + # Failure to load globals.xml + # fall to code below + pass + + # Either failed to load passed in file + # or failed to load all default files + _g_config = IndraConfig(None) + +def dump(indra_xml_file, indra_cfg = None, update_in_mem=False): ''' Dump config contents into a file Kindof reverse of load. Optionally takes a new config to dump. Does NOT update global config unless requested. ''' - global _g_config_dict + global _g_config + if not indra_cfg: - indra_cfg = _g_config_dict + if _g_config is None: + return + + indra_cfg = _g_config.as_dict() + if not indra_cfg: return + config_file = open(indra_xml_file, 'w') _config_xml = llsd.format_xml(indra_cfg) config_file.write(_config_xml) config_file.close() + if update_in_mem: update(indra_cfg) def update(new_conf): - """Load an XML file and apply its map as overrides or additions - to the existing config. The dataserver does this with indra.xml - and dataserver.xml.""" - global _g_config_dict - if _g_config_dict == None: - _g_config_dict = {} - if isinstance(new_conf, dict): - overrides = new_conf - else: - config_file = file(new_conf) - overrides = llsd.LLSD().parse(config_file.read()) - config_file.close() - - _g_config_dict.update(overrides) + global _g_config + + if _g_config is None: + # To keep with how this function behaved + # previously, a call to update + # before the global is defined + # make a new global config which does not + # load data from a file. 
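A minimal usage sketch of the reworked indra.base.config API introduced in this config.py hunk (the file path and key names below are hypothetical, chosen only for illustration):

    from indra.base import config

    config.load('/opt/linden/etc/indra.xml')      # hypothetical path; falls back to an empty config on IOError
    host = config.get('db-host', 'localhost')     # read a value with a default
    config.set('log-level', 'DEBUG')              # override; survives later file reloads
    config.update({'feature-flag': True})         # merge a dict (or a filename) of overrides

    cfg = config.get_config()                     # the underlying IndraConfig instance
    snapshot = cfg.as_dict()                      # deep copy of the combined settings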
+ _g_config = IndraConfig(None) + + return _g_config.update(new_conf) def get(key, default = None): - global _g_config_dict - if _g_config_dict == None: + global _g_config + + if _g_config is None: load() - return _g_config_dict.get(key, default) + + return _g_config.get(key, default) def set(key, newval): - global _g_config_dict - if _g_config_dict == None: - load() - _g_config_dict[key] = newval + """ + Sets the value of the config setting of key to be newval + + Once any key/value pair is changed via the set method, + that key/value pair will remain set with that value until + change via the update or set method or program termination + """ + global _g_config + + if _g_config is None: + _g_config = IndraConfig(None) + + _g_config.set(key, newval) -def as_dict(): - global _g_config_dict - return _g_config_dict +def get_config(): + global _g_config + return _g_config diff --git a/indra/lib/python/indra/base/llsd.py b/indra/lib/python/indra/base/llsd.py index 9e636ea423..4527b115f9 100644 --- a/indra/lib/python/indra/base/llsd.py +++ b/indra/lib/python/indra/base/llsd.py @@ -4,7 +4,7 @@ $LicenseInfo:firstyear=2006&license=mit$ -Copyright (c) 2006-2007, Linden Research, Inc. +Copyright (c) 2006-2009, Linden Research, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -28,31 +28,35 @@ $/LicenseInfo$ import datetime import base64 +import string import struct import time import types import re -#from cElementTree import fromstring ## This does not work under Windows -try: - ## This is the old name of elementtree, for use with 2.3 - from elementtree.ElementTree import fromstring -except ImportError: - ## This is the name of elementtree under python 2.5 - from xml.etree.ElementTree import fromstring - +from indra.util.fastest_elementtree import ElementTreeError, fromstring from indra.base import lluuid -int_regex = re.compile("[-+]?\d+") -real_regex = re.compile("[-+]?(\d+(\.\d*)?|\d*\.\d+)([eE][-+]?\d+)?") -alpha_regex = re.compile("[a-zA-Z]+") -date_regex = re.compile("(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})T(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2})(?P<second_float>\.\d{2})?Z") -#date: d"YYYY-MM-DDTHH:MM:SS.FFZ" +# cllsd.c in server/server-1.25 has memory leaks, +# so disabling cllsd for now +#try: +# import cllsd +#except ImportError: +# cllsd = None +cllsd = None + +int_regex = re.compile(r"[-+]?\d+") +real_regex = re.compile(r"[-+]?(\d+(\.\d*)?|\d*\.\d+)([eE][-+]?\d+)?") +alpha_regex = re.compile(r"[a-zA-Z]+") +date_regex = re.compile(r"(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})T" + r"(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2})" + r"(?P<second_float>(\.\d+)?)Z") +#date: d"YYYY-MM-DDTHH:MM:SS.FFFFFFZ" class LLSDParseError(Exception): pass -class LLSDSerializationError(Exception): +class LLSDSerializationError(TypeError): pass @@ -67,6 +71,35 @@ BOOL_TRUE = ('1', '1.0', 'true') BOOL_FALSE = ('0', '0.0', 'false', '') +def format_datestr(v): + """ Formats a datetime or date object into the string format shared by xml and notation serializations.""" + if hasattr(v, 'microsecond'): + return v.isoformat() + 'Z' + else: + return v.strftime('%Y-%m-%dT%H:%M:%SZ') + +def parse_datestr(datestr): + """Parses a datetime object from the string format shared by xml and notation serializations.""" + if datestr == "": + return datetime.datetime(1970, 1, 1) + + match = re.match(date_regex, datestr) + if not match: + raise LLSDParseError("invalid date string '%s'." 
% datestr) + + year = int(match.group('year')) + month = int(match.group('month')) + day = int(match.group('day')) + hour = int(match.group('hour')) + minute = int(match.group('minute')) + second = int(match.group('second')) + seconds_float = match.group('second_float') + microsecond = 0 + if seconds_float: + microsecond = int(float('0' + seconds_float) * 1e6) + return datetime.datetime(year, month, day, hour, minute, second, microsecond) + + def bool_to_python(node): val = node.text or '' if val in BOOL_TRUE: @@ -90,7 +123,7 @@ def uuid_to_python(node): return lluuid.UUID(node.text) def str_to_python(node): - return unicode(node.text or '').encode('utf8', 'replace') + return node.text or '' def bin_to_python(node): return binary(base64.decodestring(node.text or '')) @@ -99,8 +132,8 @@ def date_to_python(node): val = node.text or '' if not val: val = "1970-01-01T00:00:00Z" - return datetime.datetime( - *time.strptime(val, '%Y-%m-%dT%H:%M:%SZ')[:6]) + return parse_datestr(val) + def uri_to_python(node): val = node.text or '' @@ -153,6 +186,7 @@ class LLSDXMLFormatter(object): unicode : self.STRING, uri : self.URI, datetime.datetime : self.DATE, + datetime.date : self.DATE, list : self.ARRAY, tuple : self.ARRAY, types.GeneratorType : self.ARRAY, @@ -164,9 +198,13 @@ class LLSDXMLFormatter(object): if(contents is None or contents is ''): return "<%s />" % (name,) else: + if type(contents) is unicode: + contents = contents.encode('utf-8') return "<%s>%s</%s>" % (name, contents, name) def xml_esc(self, v): + if type(v) is unicode: + v = v.encode('utf-8') return v.replace('&', '&').replace('<', '<').replace('>', '>') def LLSD(self, v): @@ -194,13 +232,13 @@ class LLSDXMLFormatter(object): def URI(self, v): return self.elt('uri', self.xml_esc(str(v))) def DATE(self, v): - return self.elt('date', v.strftime('%Y-%m-%dT%H:%M:%SZ')) + return self.elt('date', format_datestr(v)) def ARRAY(self, v): return self.elt('array', ''.join([self.generate(item) for item in v])) def MAP(self, v): return self.elt( 'map', - ''.join(["%s%s" % (self.elt('key', key), self.generate(value)) + ''.join(["%s%s" % (self.elt('key', self.xml_esc(str(key))), self.generate(value)) for key, value in v.items()])) typeof = type @@ -212,11 +250,92 @@ class LLSDXMLFormatter(object): raise LLSDSerializationError("Cannot serialize unknown type: %s (%s)" % ( t, something)) - def format(self, something): + def _format(self, something): return '<?xml version="1.0" ?>' + self.elt("llsd", self.generate(something)) + def format(self, something): + if cllsd: + return cllsd.llsd_to_xml(something) + return self._format(something) + +_g_xml_formatter = None def format_xml(something): - return LLSDXMLFormatter().format(something) + global _g_xml_formatter + if _g_xml_formatter is None: + _g_xml_formatter = LLSDXMLFormatter() + return _g_xml_formatter.format(something) + +class LLSDXMLPrettyFormatter(LLSDXMLFormatter): + def __init__(self, indent_atom = None): + # Call the super class constructor so that we have the type map + super(LLSDXMLPrettyFormatter, self).__init__() + + # Override the type map to use our specialized formatters to + # emit the pretty output. + self.type_map[list] = self.PRETTY_ARRAY + self.type_map[tuple] = self.PRETTY_ARRAY + self.type_map[types.GeneratorType] = self.PRETTY_ARRAY, + self.type_map[dict] = self.PRETTY_MAP + + # Private data used for indentation. 
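A hedged round-trip sketch of the llsd.py formatters touched in this hunk, including the new format_pretty_xml; the sample data is made up:

    from indra.base import llsd

    data = {'name': u'Tester', 'score': 3.1, 'tags': ['a', 'b']}

    compact = llsd.format_xml(data)           # uses the cached LLSDXMLFormatter
    pretty = llsd.format_pretty_xml(data)     # indented output, map keys sorted for humans
    notation = llsd.format_notation(data)

    # parse() sniffs the payload: '<?llsd/binary?>' -> binary, leading '<' -> xml,
    # anything else -> notation.
    roundtrip = llsd.parse(compact)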
+ self._indent_level = 1 + if indent_atom is None: + self._indent_atom = ' ' + else: + self._indent_atom = indent_atom + + def _indent(self): + "Return an indentation based on the atom and indentation level." + return self._indent_atom * self._indent_level + + def PRETTY_ARRAY(self, v): + rv = [] + rv.append('<array>\n') + self._indent_level = self._indent_level + 1 + rv.extend(["%s%s\n" % + (self._indent(), + self.generate(item)) + for item in v]) + self._indent_level = self._indent_level - 1 + rv.append(self._indent()) + rv.append('</array>') + return ''.join(rv) + + def PRETTY_MAP(self, v): + rv = [] + rv.append('<map>\n') + self._indent_level = self._indent_level + 1 + keys = v.keys() + keys.sort() + rv.extend(["%s%s\n%s%s\n" % + (self._indent(), + self.elt('key', key), + self._indent(), + self.generate(v[key])) + for key in keys]) + self._indent_level = self._indent_level - 1 + rv.append(self._indent()) + rv.append('</map>') + return ''.join(rv) + + def format(self, something): + data = [] + data.append('<?xml version="1.0" ?>\n<llsd>') + data.append(self.generate(something)) + data.append('</llsd>\n') + return '\n'.join(data) + +def format_pretty_xml(something): + """@brief Serialize a python object as 'pretty' llsd xml. + + The output conforms to the LLSD DTD, unlike the output from the + standard python xml.dom DOM::toprettyxml() method which does not + preserve significant whitespace. + This function is not necessarily suited for serializing very large + objects. It is not optimized by the cllsd module, and sorts on + dict (llsd map) keys alphabetically to ease human reading. + """ + return LLSDXMLPrettyFormatter().format(something) class LLSDNotationFormatter(object): def __init__(self): @@ -232,6 +351,7 @@ class LLSDNotationFormatter(object): unicode : self.STRING, uri : self.URI, datetime.datetime : self.DATE, + datetime.date : self.DATE, list : self.ARRAY, tuple : self.ARRAY, types.GeneratorType : self.ARRAY, @@ -255,32 +375,36 @@ class LLSDNotationFormatter(object): def UUID(self, v): return "u%s" % v def BINARY(self, v): - raise LLSDSerializationError("binary notation not yet supported") + return 'b64"' + base64.encodestring(v) + '"' def STRING(self, v): + if isinstance(v, unicode): + v = v.encode('utf-8') return "'%s'" % v.replace("\\", "\\\\").replace("'", "\\'") def URI(self, v): return 'l"%s"' % str(v).replace("\\", "\\\\").replace('"', '\\"') def DATE(self, v): - second_str = "" - if v.microsecond > 0: - seconds = v.second + float(v.microsecond) / 1000000 - second_str = "%05.2f" % seconds - else: - second_str = "%d" % v.second - return 'd"%s%sZ"' % (v.strftime('%Y-%m-%dT%H:%M:'), second_str) + return 'd"%s"' % format_datestr(v) def ARRAY(self, v): return "[%s]" % ','.join([self.generate(item) for item in v]) def MAP(self, v): - return "{%s}" % ','.join(["'%s':%s" % (key.replace("\\", "\\\\").replace("'", "\\'"), self.generate(value)) + def fix(key): + if isinstance(key, unicode): + return key.encode('utf-8') + return key + return "{%s}" % ','.join(["'%s':%s" % (fix(key).replace("\\", "\\\\").replace("'", "\\'"), self.generate(value)) for key, value in v.items()]) def generate(self, something): t = type(something) - if self.type_map.has_key(t): - return self.type_map[t](something) + handler = self.type_map.get(t) + if handler: + return handler(something) else: - raise LLSDSerializationError("Cannot serialize unknown type: %s (%s)" % ( - t, something)) + try: + return self.ARRAY(iter(something)) + except TypeError: + raise LLSDSerializationError( + "Cannot serialize 
unknown type: %s (%s)" % (t, something)) def format(self, something): return self.generate(something) @@ -384,7 +508,6 @@ class LLSDBinaryParser(object): raise LLSDParseError("invalid map key at byte %d." % ( self._index - 1,)) value = self._parse() - #print "kv:",key,value rv[key] = value count += 1 cc = self._buffer[self._index] @@ -476,10 +599,11 @@ class LLSDNotationParser(object): integer: i#### real: r#### uuid: u#### - string: "g'day" | 'have a "nice" day' | s(size)"raw data" + string: "g\'day" | 'have a "nice" day' | s(size)"raw data" uri: l"escaped" date: d"YYYY-MM-DDTHH:MM:SS.FFZ" - binary: b##"ff3120ab1" | b(size)"raw data" """ + binary: b##"ff3120ab1" | b(size)"raw data" + """ def __init__(self): pass @@ -540,12 +664,23 @@ class LLSDNotationParser(object): # 'd' = date in seconds since epoch return self._parse_date() elif cc == 'b': - raise LLSDParseError("binary notation not yet supported") + return self._parse_binary() else: - print cc raise LLSDParseError("invalid token at index %d: %d" % ( self._index - 1, ord(cc))) + def _parse_binary(self): + i = self._index + if self._buffer[i:i+2] == '64': + q = self._buffer[i+2] + e = self._buffer.find(q, i+3) + try: + return base64.decodestring(self._buffer[i+3:e]) + finally: + self._index = e + 1 + else: + raise LLSDParseError('random horrible binary format not supported') + def _parse_map(self): """ map: { string:object, string:object } """ rv = {} @@ -558,30 +693,23 @@ class LLSDNotationParser(object): if cc in ("'", '"', 's'): key = self._parse_string(cc) found_key = True - #print "key:",key elif cc.isspace() or cc == ',': cc = self._buffer[self._index] self._index += 1 else: raise LLSDParseError("invalid map key at byte %d." % ( self._index - 1,)) + elif cc.isspace() or cc == ':': + cc = self._buffer[self._index] + self._index += 1 + continue else: - if cc.isspace() or cc == ':': - #print "skipping whitespace '%s'" % cc - cc = self._buffer[self._index] - self._index += 1 - continue self._index += 1 value = self._parse() - #print "kv:",key,value rv[key] = value found_key = False cc = self._buffer[self._index] self._index += 1 - #if cc == '}': - # break - #cc = self._buffer[self._index] - #self._index += 1 return rv @@ -623,25 +751,7 @@ class LLSDNotationParser(object): delim = self._buffer[self._index] self._index += 1 datestr = self._parse_string(delim) - - if datestr == "": - return datetime.datetime(1970, 1, 1) - - match = re.match(date_regex, datestr) - if not match: - raise LLSDParseError("invalid date string '%s'." 
% datestr) - - year = int(match.group('year')) - month = int(match.group('month')) - day = int(match.group('day')) - hour = int(match.group('hour')) - minute = int(match.group('minute')) - second = int(match.group('second')) - seconds_float = match.group('second_float') - microsecond = 0 - if seconds_float: - microsecond = int(seconds_float[1:]) * 10000 - return datetime.datetime(year, month, day, hour, minute, second, microsecond) + return parse_datestr(datestr) def _parse_real(self): match = re.match(real_regex, self._buffer[self._index:]) @@ -666,7 +776,7 @@ class LLSDNotationParser(object): return int( self._buffer[start:end] ) def _parse_string(self, delim): - """ string: "g'day" | 'have a "nice" day' | s(size)"raw data" """ + """ string: "g\'day" | 'have a "nice" day' | s(size)"raw data" """ rv = "" if delim in ("'", '"'): @@ -763,6 +873,14 @@ def format_binary(something): return '<?llsd/binary?>\n' + _format_binary_recurse(something) def _format_binary_recurse(something): + def _format_list(something): + array_builder = [] + array_builder.append('[' + struct.pack('!i', len(something))) + for item in something: + array_builder.append(_format_binary_recurse(item)) + array_builder.append(']') + return ''.join(array_builder) + if something is None: return '!' elif isinstance(something, LLSD): @@ -780,7 +898,10 @@ def _format_binary_recurse(something): return 'u' + something._bits elif isinstance(something, binary): return 'b' + struct.pack('!i', len(something)) + something - elif isinstance(something, (str, unicode)): + elif isinstance(something, str): + return 's' + struct.pack('!i', len(something)) + something + elif isinstance(something, unicode): + something = something.encode('utf-8') return 's' + struct.pack('!i', len(something)) + something elif isinstance(something, uri): return 'l' + struct.pack('!i', len(something)) + something @@ -788,35 +909,52 @@ def _format_binary_recurse(something): seconds_since_epoch = time.mktime(something.timetuple()) return 'd' + struct.pack('!d', seconds_since_epoch) elif isinstance(something, (list, tuple)): - array_builder = [] - array_builder.append('[' + struct.pack('!i', len(something))) - for item in something: - array_builder.append(_format_binary_recurse(item)) - array_builder.append(']') - return ''.join(array_builder) + return _format_list(something) elif isinstance(something, dict): map_builder = [] map_builder.append('{' + struct.pack('!i', len(something))) for key, value in something.items(): + if isinstance(key, unicode): + key = key.encode('utf-8') map_builder.append('k' + struct.pack('!i', len(key)) + key) map_builder.append(_format_binary_recurse(value)) map_builder.append('}') return ''.join(map_builder) else: - raise LLSDSerializationError("Cannot serialize unknown type: %s (%s)" % ( - type(something), something)) + try: + return _format_list(list(something)) + except TypeError: + raise LLSDSerializationError( + "Cannot serialize unknown type: %s (%s)" % + (type(something), something)) + +def parse_binary(binary): + if binary.startswith('<?llsd/binary?>'): + just_binary = binary.split('\n', 1)[1] + else: + just_binary = binary + return LLSDBinaryParser().parse(just_binary) + +def parse_xml(something): + try: + return to_python(fromstring(something)[0]) + except ElementTreeError, err: + raise LLSDParseError(*err.args) + +def parse_notation(something): + return LLSDNotationParser().parse(something) def parse(something): try: + something = string.lstrip(something) #remove any pre-trailing whitespace if 
something.startswith('<?llsd/binary?>'): - just_binary = something.split('\n', 1)[1] - return LLSDBinaryParser().parse(just_binary) + return parse_binary(something) # This should be better. elif something.startswith('<'): - return to_python(fromstring(something)[0]) + return parse_xml(something) else: - return LLSDNotationParser().parse(something) + return parse_notation(something) except KeyError, e: raise Exception('LLSD could not be parsed: %s' % (e,)) @@ -829,28 +967,27 @@ class LLSD(object): parse = staticmethod(parse) toXML = staticmethod(format_xml) + toPrettyXML = staticmethod(format_pretty_xml) toBinary = staticmethod(format_binary) toNotation = staticmethod(format_notation) undef = LLSD(None) -# register converters for stacked, if stacked is available +XML_MIME_TYPE = 'application/llsd+xml' +BINARY_MIME_TYPE = 'application/llsd+binary' + +# register converters for llsd in mulib, if it is available try: - from mulib import stacked + from mulib import stacked, mu stacked.NoProducer() # just to exercise stacked + mu.safe_load(None) # just to exercise mu except: - print "Couldn't import mulib.stacked, not registering LLSD converters" + # mulib not available, don't print an error message since this is normal + pass else: - def llsd_convert_json(llsd_stuff, request): - callback = request.get_header('callback') - if callback is not None: - ## See Yahoo's ajax documentation for information about using this - ## callback style of programming - ## http://developer.yahoo.com/common/json.html#callbackparam - req.write("%s(%s)" % (callback, simplejson.dumps(llsd_stuff))) - else: - req.write(simplejson.dumps(llsd_stuff)) + mu.add_parser(parse, XML_MIME_TYPE) + mu.add_parser(parse, 'application/llsd+binary') def llsd_convert_xml(llsd_stuff, request): request.write(format_xml(llsd_stuff)) @@ -858,13 +995,58 @@ else: def llsd_convert_binary(llsd_stuff, request): request.write(format_binary(llsd_stuff)) - for typ in [LLSD, dict, list, tuple, str, int, float, bool, unicode, type(None)]: - stacked.add_producer(typ, llsd_convert_json, 'application/json') - - stacked.add_producer(typ, llsd_convert_xml, 'application/llsd+xml') + for typ in [LLSD, dict, list, tuple, str, int, long, float, bool, unicode, type(None)]: + stacked.add_producer(typ, llsd_convert_xml, XML_MIME_TYPE) stacked.add_producer(typ, llsd_convert_xml, 'application/xml') stacked.add_producer(typ, llsd_convert_xml, 'text/xml') stacked.add_producer(typ, llsd_convert_binary, 'application/llsd+binary') stacked.add_producer(LLSD, llsd_convert_xml, '*/*') + + # in case someone is using the legacy mu.xml wrapper, we need to + # tell mu to produce application/xml or application/llsd+xml + # (based on the accept header) from raw xml. 
Phoenix 2008-07-21 + stacked.add_producer(mu.xml, mu.produce_raw, XML_MIME_TYPE) + stacked.add_producer(mu.xml, mu.produce_raw, 'application/xml') + + + +# mulib wsgi stuff +# try: +# from mulib import mu, adapters +# +# # try some known attributes from mulib to be ultra-sure we've imported it +# mu.get_current +# adapters.handlers +# except: +# # mulib not available, don't print an error message since this is normal +# pass +# else: +# def llsd_xml_handler(content_type): +# def handle_llsd_xml(env, start_response): +# llsd_stuff, _ = mu.get_current(env) +# result = format_xml(llsd_stuff) +# start_response("200 OK", [('Content-Type', content_type)]) +# env['mu.negotiated_type'] = content_type +# yield result +# return handle_llsd_xml +# +# def llsd_binary_handler(content_type): +# def handle_llsd_binary(env, start_response): +# llsd_stuff, _ = mu.get_current(env) +# result = format_binary(llsd_stuff) +# start_response("200 OK", [('Content-Type', content_type)]) +# env['mu.negotiated_type'] = content_type +# yield result +# return handle_llsd_binary +# +# adapters.DEFAULT_PARSERS[XML_MIME_TYPE] = parse + +# for typ in [LLSD, dict, list, tuple, str, int, float, bool, unicode, type(None)]: +# for content_type in (XML_MIME_TYPE, 'application/xml'): +# adapters.handlers.set_handler(typ, llsd_xml_handler(content_type), content_type) +# +# adapters.handlers.set_handler(typ, llsd_binary_handler(BINARY_MIME_TYPE), BINARY_MIME_TYPE) +# +# adapters.handlers.set_handler(LLSD, llsd_xml_handler(XML_MIME_TYPE), '*/*') diff --git a/indra/lib/python/indra/base/lluuid.py b/indra/lib/python/indra/base/lluuid.py index 019ccfc215..1cdd8e915b 100644 --- a/indra/lib/python/indra/base/lluuid.py +++ b/indra/lib/python/indra/base/lluuid.py @@ -4,7 +4,7 @@ $LicenseInfo:firstyear=2004&license=mit$ -Copyright (c) 2004-2007, Linden Research, Inc. +Copyright (c) 2004-2009, Linden Research, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -26,8 +26,14 @@ THE SOFTWARE. $/LicenseInfo$ """ -import md5, random, socket, string, time, re +import random, socket, string, time, re import uuid +try: + # Python 2.6 + from hashlib import md5 +except ImportError: + # Python 2.5 and earlier + from md5 import new as md5 def _int2binstr(i,l): s='' @@ -74,21 +80,29 @@ class UUID(object): hexip = ''.join(["%04x" % long(i) for i in ip.split('.')]) lastid = '' - def __init__(self, string_with_uuid=None): + def __init__(self, possible_uuid=None): """ - Initialize to first valid UUID in string argument, - or to null UUID if none found or string is not supplied. + Initialize to first valid UUID in argument (if a string), + or to null UUID if none found or argument is not supplied. + + If the argument is a UUID, the constructed object will be a copy of it. 
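A short sketch of the lluuid.UUID constructor behaviour described in the docstring above, including the copy-construction added in this change (the literal UUID and surrounding text are made up):

    from indra.base import lluuid

    null_id = lluuid.UUID()                   # null UUID when no argument is given
    fresh = lluuid.UUID()
    fresh.generate()                          # fill in a new md5-based UUID; returns self
    dupe = lluuid.UUID(fresh)                 # copy-construct from another UUID (new here)
    parsed = lluuid.UUID('id=771f89e4-0a1b-4c2d-8e3f-5a6b7c8d9e0f;x=1')  # first UUID found in the string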
""" self._bits = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" - if string_with_uuid: - uuid_match = UUID.uuid_regex.search(string_with_uuid) - if uuid_match: - uuid_string = uuid_match.group() - s = string.replace(uuid_string, '-', '') - self._bits = _int2binstr(string.atol(s[:8],16),4) + \ - _int2binstr(string.atol(s[8:16],16),4) + \ - _int2binstr(string.atol(s[16:24],16),4) + \ - _int2binstr(string.atol(s[24:],16),4) + if possible_uuid is None: + return + + if isinstance(possible_uuid, type(self)): + self.set(possible_uuid) + return + + uuid_match = UUID.uuid_regex.search(possible_uuid) + if uuid_match: + uuid_string = uuid_match.group() + s = string.replace(uuid_string, '-', '') + self._bits = _int2binstr(string.atol(s[:8],16),4) + \ + _int2binstr(string.atol(s[8:16],16),4) + \ + _int2binstr(string.atol(s[16:24],16),4) + \ + _int2binstr(string.atol(s[24:],16),4) def __len__(self): """ @@ -188,7 +202,7 @@ class UUID(object): from c++ implementation for portability reasons. Returns self. """ - m = md5.new() + m = md5() m.update(uuid.uuid1().bytes) self._bits = m.digest() return self diff --git a/indra/lib/python/indra/base/metrics.py b/indra/lib/python/indra/base/metrics.py index d26f571be7..ff8380265f 100644 --- a/indra/lib/python/indra/base/metrics.py +++ b/indra/lib/python/indra/base/metrics.py @@ -6,7 +6,7 @@ $LicenseInfo:firstyear=2007&license=mit$ -Copyright (c) 2007, Linden Research, Inc. +Copyright (c) 2007-2009, Linden Research, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -29,25 +29,93 @@ $/LicenseInfo$ """ import sys -from indra.base import llsd +try: + import syslog +except ImportError: + # Windows + import sys + class syslog(object): + # wrap to a lame syslog for windows + _logfp = sys.stderr + def syslog(msg): + _logfp.write(msg) + if not msg.endswith('\n'): + _logfp.write('\n') + syslog = staticmethod(syslog) -_sequence_id = 0 +from indra.base.llsd import format_notation -def record_metrics(table, stats, dest=None): +def record_metrics(table, stats): "Write a standard metrics log" - _log("LLMETRICS", table, stats, dest) + _log("LLMETRICS", table, stats) -def record_event(table, data, dest=None): +def record_event(table, data): "Write a standard logmessage log" - _log("LLLOGMESSAGE", table, data, dest) + _log("LLLOGMESSAGE", table, data) + +def set_destination(dest): + """Set the destination of metrics logs for this process. -def _log(header, table, data, dest): + If you do not call this function prior to calling a logging + method, that function will open sys.stdout as a destination. + Attempts to set dest to None will throw a RuntimeError. + @param dest a file-like object which will be the destination for logs.""" if dest is None: - # do this check here in case sys.stdout changes at some - # point. as a default parameter, it will never be - # re-evaluated. - dest = sys.stdout + raise RuntimeError("Attempt to unset metrics destination.") + global _destination + _destination = dest + +def destination(): + """Get the destination of the metrics logs for this process. + Returns None if no destination is set""" + global _destination + return _destination + +class SysLogger(object): + "A file-like object which writes to syslog." 
+ def __init__(self, ident='indra', logopt = None, facility = None): + try: + if logopt is None: + logopt = syslog.LOG_CONS | syslog.LOG_PID + if facility is None: + facility = syslog.LOG_LOCAL0 + syslog.openlog(ident, logopt, facility) + import atexit + atexit.register(syslog.closelog) + except AttributeError: + # No syslog module on Windows + pass + + def write(str): + syslog.syslog(str) + write = staticmethod(write) + + def flush(): + pass + flush = staticmethod(flush) + +# +# internal API +# +_sequence_id = 0 +_destination = None + +def _next_id(): global _sequence_id - print >>dest, header, "(" + str(_sequence_id) + ")", - print >>dest, table, llsd.format_notation(data) + next = _sequence_id _sequence_id += 1 + return next + +def _dest(): + global _destination + if _destination is None: + # this default behavior is documented in the metrics functions above. + _destination = sys.stdout + return _destination + +def _log(header, table, data): + log_line = "%s (%d) %s %s" \ + % (header, _next_id(), table, format_notation(data)) + dest = _dest() + dest.write(log_line) + dest.flush() diff --git a/indra/lib/python/indra/ipc/__init__.py b/indra/lib/python/indra/ipc/__init__.py index 4395361323..302bbf4a03 100644 --- a/indra/lib/python/indra/ipc/__init__.py +++ b/indra/lib/python/indra/ipc/__init__.py @@ -4,7 +4,7 @@ $LicenseInfo:firstyear=2006&license=mit$ -Copyright (c) 2006-2007, Linden Research, Inc. +Copyright (c) 2006-2009, Linden Research, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/indra/lib/python/indra/ipc/compatibility.py b/indra/lib/python/indra/ipc/compatibility.py index 8435528787..b9045c22f3 100644 --- a/indra/lib/python/indra/ipc/compatibility.py +++ b/indra/lib/python/indra/ipc/compatibility.py @@ -4,7 +4,7 @@ $LicenseInfo:firstyear=2007&license=mit$ -Copyright (c) 2007, Linden Research, Inc. +Copyright (c) 2007-2009, Linden Research, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/indra/lib/python/indra/ipc/llmessage.py b/indra/lib/python/indra/ipc/llmessage.py index 2497393cbd..91fb36b72c 100644 --- a/indra/lib/python/indra/ipc/llmessage.py +++ b/indra/lib/python/indra/ipc/llmessage.py @@ -4,7 +4,7 @@ $LicenseInfo:firstyear=2007&license=mit$ -Copyright (c) 2007, Linden Research, Inc. +Copyright (c) 2007-2009, Linden Research, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -26,8 +26,6 @@ THE SOFTWARE. 
$/LicenseInfo$ """ -from sets import Set, ImmutableSet - from compatibility import Incompatible, Older, Newer, Same from tokenstream import TokenStream @@ -44,8 +42,8 @@ class Template: def compatibleWithBase(self, base): messagenames = ( - ImmutableSet(self.messages.keys()) - | ImmutableSet(base.messages.keys()) + frozenset(self.messages.keys()) + | frozenset(base.messages.keys()) ) compatibility = Same() @@ -86,8 +84,9 @@ class Message: NOTDEPRECATED = "NotDeprecated" DEPRECATED = "Deprecated" UDPDEPRECATED = "UDPDeprecated" - deprecations = [ NOTDEPRECATED, UDPDEPRECATED, DEPRECATED ] - # in order of increasing deprecation + UDPBLACKLISTED = "UDPBlackListed" + deprecations = [ NOTDEPRECATED, UDPDEPRECATED, UDPBLACKLISTED, DEPRECATED ] + # in order of increasing deprecation def __init__(self, name, number, priority, trust, coding): self.name = name diff --git a/indra/lib/python/indra/ipc/llsdhttp.py b/indra/lib/python/indra/ipc/llsdhttp.py index eb9247da93..cbe8ee1eca 100644 --- a/indra/lib/python/indra/ipc/llsdhttp.py +++ b/indra/lib/python/indra/ipc/llsdhttp.py @@ -4,7 +4,7 @@ $LicenseInfo:firstyear=2006&license=mit$ -Copyright (c) 2006-2007, Linden Research, Inc. +Copyright (c) 2006-2009, Linden Research, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -34,7 +34,7 @@ from indra.base import llsd from eventlet import httpc -suite = httpc.HttpSuite(llsd.format_xml, llsd.parse, 'application/xml+llsd') +suite = httpc.HttpSuite(llsd.format_xml, llsd.parse, 'application/llsd+xml') delete = suite.delete delete_ = suite.delete_ get = suite.get @@ -48,7 +48,13 @@ put_ = suite.put_ request = suite.request request_ = suite.request_ -for x in (httpc.ConnectionError, httpc.NotFound, httpc.Forbidden): +# import every httpc error exception into our namespace for convenience +for x in httpc.status_to_error_map.itervalues(): + globals()[x.__name__] = x +ConnectionError = httpc.ConnectionError +Retriable = httpc.Retriable + +for x in (httpc.ConnectionError,): globals()[x.__name__] = x @@ -60,21 +66,22 @@ def postFile(url, filename): return post_(url, llsd_body) +# deprecated in favor of get_ def getStatus(url, use_proxy=False): status, _headers, _body = get_(url, use_proxy=use_proxy) return status - +# deprecated in favor of put_ def putStatus(url, data): status, _headers, _body = put_(url, data) return status - +# deprecated in favor of delete_ def deleteStatus(url): status, _headers, _body = delete_(url) return status - +# deprecated in favor of post_ def postStatus(url, data): status, _headers, _body = post_(url, data) return status diff --git a/indra/lib/python/indra/ipc/mysql_pool.py b/indra/lib/python/indra/ipc/mysql_pool.py index 827b6d42e9..e5855a3091 100644 --- a/indra/lib/python/indra/ipc/mysql_pool.py +++ b/indra/lib/python/indra/ipc/mysql_pool.py @@ -1,10 +1,10 @@ """\ @file mysql_pool.py -@brief Uses saranwrap to implement a pool of nonblocking database connections to a mysql server. +@brief Thin wrapper around eventlet.db_pool that chooses MySQLdb and Tpool. $LicenseInfo:firstyear=2007&license=mit$ -Copyright (c) 2007, Linden Research, Inc. +Copyright (c) 2007-2009, Linden Research, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -26,44 +26,16 @@ THE SOFTWARE. 
$/LicenseInfo$ """ -import os - -from eventlet.pools import Pool -from eventlet.processes import DeadProcess -from indra.ipc import saranwrap - import MySQLdb +from eventlet import db_pool -# method 2: better -- admits the existence of the pool -# dbp = my_db_connector.get() -# dbh = dbp.get() -# dbc = dbh.cursor() -# dbc.execute(named_query) -# dbc.close() -# dbp.put(dbh) - -class DatabaseConnector(object): - """\ -@brief This is an object which will maintain a collection of database -connection pools keyed on host,databasename""" - def __init__(self, credentials, min_size = 0, max_size = 4, *args, **kwargs): - """\ - @brief constructor - @param min_size the minimum size of a child pool. - @param max_size the maximum size of a child pool.""" - self._min_size = min_size - self._max_size = max_size - self._args = args - self._kwargs = kwargs - self._credentials = credentials # this is a map of hostname to username/password - self._databases = {} - - def credentials_for(self, host): - if host in self._credentials: - return self._credentials[host] - else: - return self._credentials.get('default', None) +class DatabaseConnector(db_pool.DatabaseConnector): + def __init__(self, credentials, *args, **kwargs): + super(DatabaseConnector, self).__init__(MySQLdb, credentials, + conn_pool=db_pool.ConnectionPool, + *args, **kwargs) + # get is extended relative to eventlet.db_pool to accept a port argument def get(self, host, dbname, port=3306): key = (host, dbname, port) if key not in self._databases: @@ -72,33 +44,38 @@ connection pools keyed on host,databasename""" new_kwargs['host'] = host new_kwargs['port'] = port new_kwargs.update(self.credentials_for(host)) - dbpool = ConnectionPool(self._min_size, self._max_size, *self._args, **new_kwargs) + dbpool = ConnectionPool(*self._args, **new_kwargs) self._databases[key] = dbpool return self._databases[key] - -class ConnectionPool(Pool): +class ConnectionPool(db_pool.TpooledConnectionPool): """A pool which gives out saranwrapped MySQLdb connections from a pool """ - def __init__(self, min_size = 0, max_size = 4, *args, **kwargs): - self._args = args - self._kwargs = kwargs - Pool.__init__(self, min_size, max_size) - def create(self): - return saranwrap.wrap(MySQLdb).connect(*self._args, **self._kwargs) + def __init__(self, *args, **kwargs): + super(ConnectionPool, self).__init__(MySQLdb, *args, **kwargs) + + def get(self): + conn = super(ConnectionPool, self).get() + # annotate the connection object with the details on the + # connection; this is used elsewhere to check that you haven't + # suddenly changed databases in midstream while making a + # series of queries on a connection. + arg_names = ['host','user','passwd','db','port','unix_socket','conv','connect_timeout', + 'compress', 'named_pipe', 'init_command', 'read_default_file', 'read_default_group', + 'cursorclass', 'use_unicode', 'charset', 'sql_mode', 'client_flag', 'ssl', + 'local_infile'] + # you could have constructed this connectionpool with a mix of + # keyword and non-keyword arguments, but we want to annotate + # the connection object with a dict so it's easy to check + # against so here we are converting the list of non-keyword + # arguments (in self._args) into a dict of keyword arguments, + # and merging that with the actual keyword arguments + # (self._kwargs). The arg_names variable lists the + # constructor arguments for MySQLdb Connection objects. 
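A usage sketch of the slimmed-down mysql_pool wrapper in this hunk, assuming MySQLdb and eventlet.db_pool are available; the credentials, host, and query are made up:

    from indra.ipc import mysql_pool

    # Hypothetical credential map: hostname (or 'default') -> MySQLdb connect() keywords.
    credentials = {'default': {'user': 'indra', 'passwd': 'secret'}}

    connector = mysql_pool.DatabaseConnector(credentials)
    pool = connector.get('db.example.com', 'indra')   # port defaults to 3306

    conn = pool.get()                 # tpool-wrapped MySQLdb connection
    try:
        cursor = conn.cursor()
        cursor.execute('SELECT 1')
        print cursor.fetchone()
        cursor.close()
    finally:
        pool.put(conn)                # hand the connection back to the pool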
+ converted_kwargs = dict([ (arg_names[i], arg) for i, arg in enumerate(self._args) ]) + converted_kwargs.update(self._kwargs) + conn.connection_parameters = converted_kwargs + return conn - def put(self, conn): - # rollback any uncommitted changes, so that the next process - # has a clean slate. This also pokes the process to see if - # it's dead or None - try: - conn.rollback() - except (AttributeError, DeadProcess), e: - conn = self.create() - # TODO figure out if we're still connected to the database - if conn is not None: - Pool.put(self, conn) - else: - self.current_size -= 1 diff --git a/indra/lib/python/indra/ipc/russ.py b/indra/lib/python/indra/ipc/russ.py index bd50569d3a..35d8afb158 100644 --- a/indra/lib/python/indra/ipc/russ.py +++ b/indra/lib/python/indra/ipc/russ.py @@ -11,7 +11,7 @@ implementations section. $LicenseInfo:firstyear=2007&license=mit$ -Copyright (c) 2007, Linden Research, Inc. +Copyright (c) 2007-2009, Linden Research, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/indra/lib/python/indra/ipc/servicebuilder.py b/indra/lib/python/indra/ipc/servicebuilder.py index ebd2583385..0a0ce2b4e2 100644 --- a/indra/lib/python/indra/ipc/servicebuilder.py +++ b/indra/lib/python/indra/ipc/servicebuilder.py @@ -5,7 +5,7 @@ $LicenseInfo:firstyear=2007&license=mit$ -Copyright (c) 2007, Linden Research, Inc. +Copyright (c) 2007-2009, Linden Research, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -39,6 +39,12 @@ except: pass _g_builder = None +def _builder(): + global _g_builder + if _g_builder is None: + _g_builder = ServiceBuilder() + return _g_builder + def build(name, context={}, **kwargs): """ Convenience method for using a global, singleton, service builder. Pass arguments either via a dict or via python keyword arguments, or both! @@ -51,12 +57,15 @@ def build(name, context={}, **kwargs): > servicebuilder.build('version-manager-version', context, version='1.18.1.2') 'http://int.util.vaak.lindenlab.com/channel/Second%20Life%20Release/1.18.1.2' """ - context = context.copy() # shouldn't modify the caller's dictionary - context.update(kwargs) global _g_builder if _g_builder is None: _g_builder = ServiceBuilder() - return _g_builder.buildServiceURL(name, context) + return _g_builder.buildServiceURL(name, context, **kwargs) + +def build_path(name, context={}, **kwargs): + context = context.copy() # shouldn't modify the caller's dictionary + context.update(kwargs) + return _builder().buildPath(name, context) class ServiceBuilder(object): def __init__(self, services_definition = services_config): @@ -75,19 +84,51 @@ class ServiceBuilder(object): continue if isinstance(service_builder, dict): # We will be constructing several builders - for name, builder in service_builder.items(): + for name, builder in service_builder.iteritems(): full_builder_name = service['name'] + '-' + name self.builders[full_builder_name] = builder else: self.builders[service['name']] = service_builder - def buildServiceURL(self, name, context): + def buildPath(self, name, context): + """\ + @brief given the environment on construction, return a service path. + @param name The name of the service. + @param context A dict of name value lookups for the service. 
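A small sketch of the on_in() helper added at the end of this servicebuilder.py hunk (shown just below); the query name and config keys are hypothetical:

    from indra.ipc import servicebuilder

    # 'agent-db-host' and 'agent-db-schema' would be keys looked up in indra.xml.
    snippet = servicebuilder.on_in('get_user_by_id', 'agent-db-host', 'agent-db-schema')
    # -> 'on/config:agent-db-host/in/config:agent-db-schema/get_user_by_id'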
+ @returns Returns the + """ + return russ.format(self.builders[name], context) + + def buildServiceURL(self, name, context={}, **kwargs): """\ @brief given the environment on construction, return a service URL. @param name The name of the service. @param context A dict of name value lookups for the service. + @param kwargs Any keyword arguments are treated as members of the + context, this allows you to be all 31337 by writing shit like: + servicebuilder.build('name', param=value) @returns Returns the """ + context = context.copy() # shouldn't modify the caller's dictionary + context.update(kwargs) base_url = config.get('services-base-url') svc_path = russ.format(self.builders[name], context) return base_url + svc_path + + +def on_in(query_name, host_key, schema_key): + """\ + @brief Constructs an on/in snippet (for running named queries) + from a schema name and two keys referencing values stored in + indra.xml. + + @param query_name Name of the query. + @param host_key Logical name of destination host. Will be + looked up in indra.xml. + @param schema_key Logical name of destination schema. Will + be looked up in indra.xml. + """ + return "on/config:%s/in/config:%s/%s" % (host_key.strip('/'), + schema_key.strip('/'), + query_name.lstrip('/')) + diff --git a/indra/lib/python/indra/ipc/siesta.py b/indra/lib/python/indra/ipc/siesta.py new file mode 100644 index 0000000000..d867e71537 --- /dev/null +++ b/indra/lib/python/indra/ipc/siesta.py @@ -0,0 +1,468 @@ +"""\ +@file siesta.py +@brief A tiny llsd based RESTful web services framework + +$LicenseInfo:firstyear=2008&license=mit$ + +Copyright (c) 2008, Linden Research, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+$/LicenseInfo$ +""" + +from indra.base import config +from indra.base import llsd +from webob import exc +import webob +import re, socket + +try: + from cStringIO import StringIO +except ImportError: + from StringIO import StringIO + +try: + import cjson + json_decode = cjson.decode + json_encode = cjson.encode + JsonDecodeError = cjson.DecodeError + JsonEncodeError = cjson.EncodeError +except ImportError: + import simplejson + json_decode = simplejson.loads + json_encode = simplejson.dumps + JsonDecodeError = ValueError + JsonEncodeError = TypeError + + +llsd_parsers = { + 'application/json': json_decode, + llsd.BINARY_MIME_TYPE: llsd.parse_binary, + 'application/llsd+notation': llsd.parse_notation, + llsd.XML_MIME_TYPE: llsd.parse_xml, + 'application/xml': llsd.parse_xml, + } + + +def mime_type(content_type): + '''Given a Content-Type header, return only the MIME type.''' + + return content_type.split(';', 1)[0].strip().lower() + +class BodyLLSD(object): + '''Give a webob Request or Response an llsd based "content" property. + + Getting the content property parses the body, and caches the result. + + Setting the content property formats a payload, and the body property + is set.''' + + def _llsd__get(self): + '''Get, set, or delete the LLSD value stored in this object.''' + + try: + return self._llsd + except AttributeError: + if not self.body: + raise AttributeError('No llsd attribute has been set') + else: + mtype = mime_type(self.content_type) + try: + parser = llsd_parsers[mtype] + except KeyError: + raise exc.HTTPUnsupportedMediaType( + 'Content type %s not supported' % mtype).exception + try: + self._llsd = parser(self.body) + except (llsd.LLSDParseError, JsonDecodeError, TypeError), err: + raise exc.HTTPBadRequest( + 'Could not parse body: %r' % err.args).exception + return self._llsd + + def _llsd__set(self, val): + req = getattr(self, 'request', None) + if req is not None: + formatter, ctype = formatter_for_request(req) + self.content_type = ctype + else: + formatter, ctype = formatter_for_mime_type( + mime_type(self.content_type)) + self.body = formatter(val) + + def _llsd__del(self): + if hasattr(self, '_llsd'): + del self._llsd + + content = property(_llsd__get, _llsd__set, _llsd__del) + + +class Response(webob.Response, BodyLLSD): + '''Response class with LLSD support. + + A sensible default content type is used. + + Setting the llsd property also sets the body. Getting the llsd + property parses the body if necessary. + + If you set the body property directly, the llsd property will be + deleted.''' + + default_content_type = 'application/llsd+xml' + + def _body__set(self, body): + if hasattr(self, '_llsd'): + del self._llsd + super(Response, self)._body__set(body) + + def cache_forever(self): + self.cache_expires(86400 * 365) + + body = property(webob.Response._body__get, _body__set, + webob.Response._body__del, + webob.Response._body__get.__doc__) + + +class Request(webob.Request, BodyLLSD): + '''Request class with LLSD support. + + Sensible content type and accept headers are used by default. + + Setting the content property also sets the body. Getting the content + property parses the body if necessary. 
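A minimal sketch of the siesta Request/Response content negotiation described in this docstring, assuming webob is installed; the environ is abbreviated and the payload is made up:

    from indra.ipc import siesta

    environ = {'REQUEST_METHOD': 'GET', 'PATH_INFO': '/hello'}   # abbreviated WSGI environ
    req = siesta.Request(environ)       # default content-type and accept headers filled in
    resp = req.create_response({'message': 'hi'})
    # resp.content_type is negotiated from the Accept header (application/llsd+xml here),
    # and resp.body holds the serialized llsd payload.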
+ + If you set the body property directly, the content property will be + deleted.''' + + default_content_type = 'application/llsd+xml' + default_accept = ('application/llsd+xml; q=0.5, ' + 'application/llsd+notation; q=0.3, ' + 'application/llsd+binary; q=0.2, ' + 'application/xml; q=0.1, ' + 'application/json; q=0.0') + + def __init__(self, environ=None, *args, **kwargs): + if environ is None: + environ = {} + else: + environ = environ.copy() + if 'CONTENT_TYPE' not in environ: + environ['CONTENT_TYPE'] = self.default_content_type + if 'HTTP_ACCEPT' not in environ: + environ['HTTP_ACCEPT'] = self.default_accept + super(Request, self).__init__(environ, *args, **kwargs) + + def _body__set(self, body): + if hasattr(self, '_llsd'): + del self._llsd + super(Request, self)._body__set(body) + + def path_urljoin(self, *parts): + return '/'.join([path_url.rstrip('/')] + list(parts)) + + body = property(webob.Request._body__get, _body__set, + webob.Request._body__del, webob.Request._body__get.__doc__) + + def create_response(self, content=None, status='200 OK', + conditional_response=webob.NoDefault): + resp = self.ResponseClass(status=status, request=self, + conditional_response=conditional_response) + resp.content = content + return resp + + def curl(self): + '''Create and fill out a pycurl easy object from this request.''' + + import pycurl + c = pycurl.Curl() + c.setopt(pycurl.URL, self.url()) + if self.headers: + c.setopt(pycurl.HTTPHEADER, + ['%s: %s' % (k, self.headers[k]) for k in self.headers]) + c.setopt(pycurl.FOLLOWLOCATION, True) + c.setopt(pycurl.AUTOREFERER, True) + c.setopt(pycurl.MAXREDIRS, 16) + c.setopt(pycurl.NOSIGNAL, True) + c.setopt(pycurl.READFUNCTION, self.body_file.read) + c.setopt(pycurl.SSL_VERIFYHOST, 2) + + if self.method == 'POST': + c.setopt(pycurl.POST, True) + post301 = getattr(pycurl, 'POST301', None) + if post301 is not None: + # Added in libcurl 7.17.1. + c.setopt(post301, True) + elif self.method == 'PUT': + c.setopt(pycurl.PUT, True) + elif self.method != 'GET': + c.setopt(pycurl.CUSTOMREQUEST, self.method) + return c + +Request.ResponseClass = Response +Response.RequestClass = Request + + +llsd_formatters = { + 'application/json': json_encode, + 'application/llsd+binary': llsd.format_binary, + 'application/llsd+notation': llsd.format_notation, + 'application/llsd+xml': llsd.format_xml, + 'application/xml': llsd.format_xml, + } + +formatter_qualities = ( + ('application/llsd+xml', 1.0), + ('application/llsd+notation', 0.5), + ('application/llsd+binary', 0.4), + ('application/xml', 0.3), + ('application/json', 0.2), + ) + +def formatter_for_mime_type(mime_type): + '''Return a formatter that encodes to the given MIME type. + + The result is a pair of function and MIME type.''' + try: + return llsd_formatters[mime_type], mime_type + except KeyError: + raise exc.HTTPInternalServerError( + 'Could not use MIME type %r to format response' % + mime_type).exception + + +def formatter_for_request(req): + '''Return a formatter that encodes to the preferred type of the client. + + The result is a pair of function and actual MIME type.''' + ctype = req.accept.best_match(formatter_qualities) + try: + return llsd_formatters[ctype], ctype + except KeyError: + raise exc.HTTPNotAcceptable().exception + + +def wsgi_adapter(func, environ, start_response): + '''Adapt a Siesta callable to act as a WSGI application.''' + # Process the request as appropriate. 
+ try: + req = Request(environ) + #print req.urlvars + resp = func(req, **req.urlvars) + if not isinstance(resp, webob.Response): + try: + formatter, ctype = formatter_for_request(req) + resp = req.ResponseClass(formatter(resp), content_type=ctype) + resp._llsd = resp + except (JsonEncodeError, TypeError), err: + resp = exc.HTTPInternalServerError( + detail='Could not format response') + except exc.HTTPException, e: + resp = e + except socket.error, e: + resp = exc.HTTPInternalServerError(detail=e.args[1]) + return resp(environ, start_response) + + +def llsd_callable(func): + '''Turn a callable into a Siesta application.''' + + def replacement(environ, start_response): + return wsgi_adapter(func, environ, start_response) + + return replacement + + +def llsd_method(http_method, func): + def replacement(environ, start_response): + if environ['REQUEST_METHOD'] == http_method: + return wsgi_adapter(func, environ, start_response) + return exc.HTTPMethodNotAllowed()(environ, start_response) + + return replacement + + +http11_methods = 'OPTIONS GET HEAD POST PUT DELETE TRACE CONNECT'.split() +http11_methods.sort() + +def llsd_class(cls): + '''Turn a class into a Siesta application. + + A new instance is created for each request. A HTTP method FOO is + turned into a call to the handle_foo method of the instance.''' + + def foo(req, **kwargs): + instance = cls() + method = req.method.lower() + try: + handler = getattr(instance, 'handle_' + method) + except AttributeError: + allowed = [m for m in http11_methods + if hasattr(instance, 'handle_' + m.lower())] + raise exc.HTTPMethodNotAllowed( + headers={'Allow': ', '.join(allowed)}).exception + #print "kwargs: ", kwargs + return handler(req, **kwargs) + + def replacement(environ, start_response): + return wsgi_adapter(foo, environ, start_response) + + return replacement + + +def curl(reqs): + import pycurl + + m = pycurl.CurlMulti() + curls = [r.curl() for r in reqs] + io = {} + for c in curls: + fp = StringIO() + hdr = StringIO() + c.setopt(pycurl.WRITEFUNCTION, fp.write) + c.setopt(pycurl.HEADERFUNCTION, hdr.write) + io[id(c)] = fp, hdr + m.handles = curls + try: + while True: + ret, num_handles = m.perform() + if ret != pycurl.E_CALL_MULTI_PERFORM: + break + finally: + m.close() + + for req, c in zip(reqs, curls): + fp, hdr = io[id(c)] + hdr.seek(0) + status = hdr.readline().rstrip() + headers = [] + name, values = None, None + + # XXX We don't currently handle bogus header data. + + for line in hdr.readlines(): + if not line[0].isspace(): + if name: + headers.append((name, ' '.join(values))) + name, value = line.strip().split(':', 1) + value = [value] + else: + values.append(line.strip()) + if name: + headers.append((name, ' '.join(values))) + + resp = c.ResponseClass(fp.getvalue(), status, headers, request=req) + + +route_re = re.compile(r''' + \{ # exact character "{" + (\w*) # "config" or variable (restricted to a-z, 0-9, _) + (?:([:~])([^}]+))? 
# optional :type or ~regex part + \} # exact character "}" + ''', re.VERBOSE) + +predefined_regexps = { + 'uuid': r'[a-f0-9][a-f0-9-]{31,35}', + 'int': r'\d+', + 'host': r'[a-z0-9][a-z0-9\-\.]*', + } + +def compile_route(route): + fp = StringIO() + last_pos = 0 + for match in route_re.finditer(route): + #print "matches: ", match.groups() + fp.write(re.escape(route[last_pos:match.start()])) + var_name = match.group(1) + sep = match.group(2) + expr = match.group(3) + if var_name == 'config': + expr = re.escape(str(config.get(var_name))) + else: + if expr: + if sep == ':': + expr = predefined_regexps[expr] + # otherwise, treat what follows '~' as a regexp + else: + expr = '[^/]+' + if var_name != '': + expr = '(?P<%s>%s)' % (var_name, expr) + else: + expr = '(%s)' % (expr,) + fp.write(expr) + last_pos = match.end() + fp.write(re.escape(route[last_pos:])) + compiled_route = '^%s$' % fp.getvalue() + #print route, "->", compiled_route + return compiled_route + +class Router(object): + '''WSGI routing class. Parses a URL and hands off a request to + some other WSGI application. If no suitable application is found, + responds with a 404.''' + + def __init__(self): + self._new_routes = [] + self._routes = [] + self._paths = [] + + def add(self, route, app, methods=None): + self._new_routes.append((route, app, methods)) + + def _create_routes(self): + for route, app, methods in self._new_routes: + self._paths.append(route) + self._routes.append( + (re.compile(compile_route(route)), + app, + methods and dict.fromkeys(methods))) + self._new_routes = [] + + def __call__(self, environ, start_response): + # load up the config from the config file. Only needs to be + # done once per interpreter. This is the entry point of all + # siesta applications, so this is where we trap it. 
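Pulling the routing pieces together, here is a sketch of an llsd_class application mounted on a Router (HelloApp and the /hello/{name} route are invented for this example; like the router test later in this diff, it assumes the indra config can be located when the Router handles its first request):

    from indra.ipc import siesta

    class HelloApp(object):
        def handle_get(self, req, name):
            return {'greeting': 'hello ' + name}

    router = siesta.Router()
    router.add('/hello/{name}', siesta.llsd_class(HelloApp), methods=['GET'])

    resp = siesta.Request.blank('/hello/world').get_response(router)
    print resp.status                  # '200 OK'; body is an llsd+xml map with the greeting
    print siesta.Request.blank('/nope').get_response(router).status_int   # 404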
+ _conf = config.get_config() + if _conf is None: + import os.path + fname = os.path.join( + environ.get('ll.config_dir', '/local/linden/etc'), + 'indra.xml') + config.load(fname) + + # proceed with handling the request + self._create_routes() + path_info = environ['PATH_INFO'] + request_method = environ['REQUEST_METHOD'] + allowed = [] + for regex, app, methods in self._routes: + m = regex.match(path_info) + if m: + #print "groupdict:",m.groupdict() + if not methods or request_method in methods: + environ['paste.urlvars'] = m.groupdict() + return app(environ, start_response) + else: + allowed += methods + if allowed: + allowed = dict.fromkeys(allows).keys() + allowed.sort() + resp = exc.HTTPMethodNotAllowed( + headers={'Allow': ', '.join(allowed)}) + else: + resp = exc.HTTPNotFound() + return resp(environ, start_response) diff --git a/indra/lib/python/indra/ipc/siesta_test.py b/indra/lib/python/indra/ipc/siesta_test.py new file mode 100644 index 0000000000..177ea710d1 --- /dev/null +++ b/indra/lib/python/indra/ipc/siesta_test.py @@ -0,0 +1,214 @@ +from indra.base import llsd, lluuid +from indra.ipc import siesta +import datetime, math, unittest +from webob import exc + + +class ClassApp(object): + def handle_get(self, req): + pass + + def handle_post(self, req): + return req.llsd + + +def callable_app(req): + if req.method == 'UNDERPANTS': + raise exc.HTTPMethodNotAllowed() + elif req.method == 'GET': + return None + return req.llsd + + +class TestBase: + def test_basic_get(self): + req = siesta.Request.blank('/') + self.assertEquals(req.get_response(self.server).body, + llsd.format_xml(None)) + + def test_bad_method(self): + req = siesta.Request.blank('/') + req.environ['REQUEST_METHOD'] = 'UNDERPANTS' + self.assertEquals(req.get_response(self.server).status_int, + exc.HTTPMethodNotAllowed.code) + + json_safe = { + 'none': None, + 'bool_true': True, + 'bool_false': False, + 'int_zero': 0, + 'int_max': 2147483647, + 'int_min': -2147483648, + 'long_zero': 0, + 'long_max': 2147483647L, + 'long_min': -2147483648L, + 'float_zero': 0, + 'float': math.pi, + 'float_huge': 3.14159265358979323846e299, + 'str_empty': '', + 'str': 'foo', + u'unic\u1e51de_empty': u'', + u'unic\u1e51de': u'\u1e4exx\u10480', + } + json_safe['array'] = json_safe.values() + json_safe['tuple'] = tuple(json_safe.values()) + json_safe['dict'] = json_safe.copy() + + json_unsafe = { + 'uuid_empty': lluuid.UUID(), + 'uuid_full': lluuid.UUID('dc61ab0530200d7554d23510559102c1a98aab1b'), + 'binary_empty': llsd.binary(), + 'binary': llsd.binary('f\0\xff'), + 'uri_empty': llsd.uri(), + 'uri': llsd.uri('http://www.secondlife.com/'), + 'datetime_empty': datetime.datetime(1970,1,1), + 'datetime': datetime.datetime(1999,9,9,9,9,9), + } + json_unsafe.update(json_safe) + json_unsafe['array'] = json_unsafe.values() + json_unsafe['tuple'] = tuple(json_unsafe.values()) + json_unsafe['dict'] = json_unsafe.copy() + json_unsafe['iter'] = iter(json_unsafe.values()) + + def _test_client_content_type_good(self, content_type, ll): + def run(ll): + req = siesta.Request.blank('/') + req.environ['REQUEST_METHOD'] = 'POST' + req.content_type = content_type + req.llsd = ll + req.accept = content_type + resp = req.get_response(self.server) + self.assertEquals(resp.status_int, 200) + return req, resp + + if False and isinstance(ll, dict): + def fixup(v): + if isinstance(v, float): + return '%.5f' % v + if isinstance(v, long): + return int(v) + if isinstance(v, (llsd.binary, llsd.uri)): + return v + if isinstance(v, (tuple, list)): + return [fixup(i) for 
i in v] + if isinstance(v, dict): + return dict([(k, fixup(i)) for k, i in v.iteritems()]) + return v + for k, v in ll.iteritems(): + l = [k, v] + req, resp = run(l) + self.assertEquals(fixup(resp.llsd), fixup(l)) + + run(ll) + + def test_client_content_type_json_good(self): + self._test_client_content_type_good('application/json', self.json_safe) + + def test_client_content_type_llsd_xml_good(self): + self._test_client_content_type_good('application/llsd+xml', + self.json_unsafe) + + def test_client_content_type_llsd_notation_good(self): + self._test_client_content_type_good('application/llsd+notation', + self.json_unsafe) + + def test_client_content_type_llsd_binary_good(self): + self._test_client_content_type_good('application/llsd+binary', + self.json_unsafe) + + def test_client_content_type_xml_good(self): + self._test_client_content_type_good('application/xml', + self.json_unsafe) + + def _test_client_content_type_bad(self, content_type): + req = siesta.Request.blank('/') + req.environ['REQUEST_METHOD'] = 'POST' + req.body = '\0invalid nonsense under all encodings' + req.content_type = content_type + self.assertEquals(req.get_response(self.server).status_int, + exc.HTTPBadRequest.code) + + def test_client_content_type_json_bad(self): + self._test_client_content_type_bad('application/json') + + def test_client_content_type_llsd_xml_bad(self): + self._test_client_content_type_bad('application/llsd+xml') + + def test_client_content_type_llsd_notation_bad(self): + self._test_client_content_type_bad('application/llsd+notation') + + def test_client_content_type_llsd_binary_bad(self): + self._test_client_content_type_bad('application/llsd+binary') + + def test_client_content_type_xml_bad(self): + self._test_client_content_type_bad('application/xml') + + def test_client_content_type_bad(self): + req = siesta.Request.blank('/') + req.environ['REQUEST_METHOD'] = 'POST' + req.body = 'XXX' + req.content_type = 'application/nonsense' + self.assertEquals(req.get_response(self.server).status_int, + exc.HTTPUnsupportedMediaType.code) + + def test_request_default_content_type(self): + req = siesta.Request.blank('/') + self.assertEquals(req.content_type, req.default_content_type) + + def test_request_default_accept(self): + req = siesta.Request.blank('/') + from webob import acceptparse + self.assertEquals(str(req.accept).replace(' ', ''), + req.default_accept.replace(' ', '')) + + def test_request_llsd_auto_body(self): + req = siesta.Request.blank('/') + req.llsd = {'a': 2} + self.assertEquals(req.body, '<?xml version="1.0" ?><llsd><map>' + '<key>a</key><integer>2</integer></map></llsd>') + + def test_request_llsd_mod_body_changes_llsd(self): + req = siesta.Request.blank('/') + req.llsd = {'a': 2} + req.body = '<?xml version="1.0" ?><llsd><integer>1337</integer></llsd>' + self.assertEquals(req.llsd, 1337) + + def test_request_bad_llsd_fails(self): + def crashme(ctype): + def boom(): + class foo(object): pass + req = siesta.Request.blank('/') + req.content_type = ctype + req.llsd = foo() + for mime_type in siesta.llsd_parsers: + self.assertRaises(TypeError, crashme(mime_type)) + + +class ClassServer(TestBase, unittest.TestCase): + def __init__(self, *args, **kwargs): + unittest.TestCase.__init__(self, *args, **kwargs) + self.server = siesta.llsd_class(ClassApp) + + +class CallableServer(TestBase, unittest.TestCase): + def __init__(self, *args, **kwargs): + unittest.TestCase.__init__(self, *args, **kwargs) + self.server = siesta.llsd_callable(callable_app) + + +class RouterServer(unittest.TestCase): + 
def test_router(self): + def foo(req, quux): + print quux + + r = siesta.Router() + r.add('/foo/{quux:int}', siesta.llsd_callable(foo), methods=['GET']) + req = siesta.Request.blank('/foo/33') + req.get_response(r) + + req = siesta.Request.blank('/foo/bar') + self.assertEquals(req.get_response(r).status_int, + exc.HTTPNotFound.code) + +if __name__ == '__main__': + unittest.main() diff --git a/indra/lib/python/indra/ipc/tokenstream.py b/indra/lib/python/indra/ipc/tokenstream.py index 37896d30d5..b96f26d3ff 100644 --- a/indra/lib/python/indra/ipc/tokenstream.py +++ b/indra/lib/python/indra/ipc/tokenstream.py @@ -4,7 +4,7 @@ $LicenseInfo:firstyear=2007&license=mit$ -Copyright (c) 2007, Linden Research, Inc. +Copyright (c) 2007-2009, Linden Research, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/indra/lib/python/indra/ipc/webdav.py b/indra/lib/python/indra/ipc/webdav.py index 66e55ca426..98b8499b6a 100644 --- a/indra/lib/python/indra/ipc/webdav.py +++ b/indra/lib/python/indra/ipc/webdav.py @@ -4,7 +4,7 @@ $LicenseInfo:firstyear=2007&license=mit$ -Copyright (c) 2007, Linden Research, Inc. +Copyright (c) 2007-2009, Linden Research, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/indra/lib/python/indra/ipc/xml_rpc.py b/indra/lib/python/indra/ipc/xml_rpc.py index dc8f0aac4b..47536c10c3 100644 --- a/indra/lib/python/indra/ipc/xml_rpc.py +++ b/indra/lib/python/indra/ipc/xml_rpc.py @@ -4,7 +4,7 @@ $LicenseInfo:firstyear=2006&license=mit$ -Copyright (c) 2006-2007, Linden Research, Inc. +Copyright (c) 2006-2009, Linden Research, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/indra/lib/python/indra/util/__init__.py b/indra/lib/python/indra/util/__init__.py index 3eda1849ce..b004e5804f 100644 --- a/indra/lib/python/indra/util/__init__.py +++ b/indra/lib/python/indra/util/__init__.py @@ -4,7 +4,7 @@ $LicenseInfo:firstyear=2006&license=mit$ -Copyright (c) 2006-2007, Linden Research, Inc. +Copyright (c) 2006-2009, Linden Research, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/indra/lib/python/indra/util/fastest_elementtree.py b/indra/lib/python/indra/util/fastest_elementtree.py new file mode 100644 index 0000000000..4fcf662dd9 --- /dev/null +++ b/indra/lib/python/indra/util/fastest_elementtree.py @@ -0,0 +1,64 @@ +"""\ +@file fastest_elementtree.py +@brief Concealing some gnarly import logic in here. This should export the interface of elementtree. + +$LicenseInfo:firstyear=2008&license=mit$ + +Copyright (c) 2008-2009, Linden Research, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +$/LicenseInfo$ +""" + +# The parsing exception raised by the underlying library depends +# on the ElementTree implementation we're using, so we provide an +# alias here. +# +# Use ElementTreeError as the exception type for catching parsing +# errors. + + +# Using cElementTree might cause some unforeseen problems, so here's a +# convenient off switch. +use_celementree = True + +try: + if not use_celementree: + raise ImportError() + # Python 2.3 and 2.4. + from cElementTree import * + ElementTreeError = SyntaxError +except ImportError: + try: + if not use_celementree: + raise ImportError() + # Python 2.5 and above. + from xml.etree.cElementTree import * + ElementTreeError = SyntaxError + except ImportError: + # Pure Python code. + try: + # Python 2.3 and 2.4. + from elementtree.ElementTree import * + except ImportError: + # Python 2.5 and above. + from xml.etree.ElementTree import * + + # The pure Python ElementTree module uses Expat for parsing. + from xml.parsers.expat import ExpatError as ElementTreeError diff --git a/indra/lib/python/indra/util/helpformatter.py b/indra/lib/python/indra/util/helpformatter.py index c4ff27f616..ba5c9b67d1 100644 --- a/indra/lib/python/indra/util/helpformatter.py +++ b/indra/lib/python/indra/util/helpformatter.py @@ -5,7 +5,7 @@ $LicenseInfo:firstyear=2007&license=mit$ -Copyright (c) 2007, Linden Research, Inc. +Copyright (c) 2007-2009, Linden Research, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/indra/lib/python/indra/util/iterators.py b/indra/lib/python/indra/util/iterators.py new file mode 100644 index 0000000000..9013fa6303 --- /dev/null +++ b/indra/lib/python/indra/util/iterators.py @@ -0,0 +1,63 @@ +"""\ +@file iterators.py +@brief Useful general-purpose iterators. + +$LicenseInfo:firstyear=2008&license=mit$ + +Copyright (c) 2008-2009, Linden Research, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
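For what it's worth, this is how callers elsewhere in the tree are expected to consume the module above: the import * re-exports the ElementTree API of whichever backend won, and ElementTreeError aliases that backend's parse exception (example mine):

    from indra.util.fastest_elementtree import fromstring, ElementTreeError

    try:
        root = fromstring('<llsd><map><key>a</key><integer>2</integer></map></llsd>')
        print root[0][1].text          # '2'
    except ElementTreeError, err:
        print 'parse failed:', err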
+$/LicenseInfo$ +""" + +from __future__ import nested_scopes + +def iter_chunks(rows, aggregate_size=100): + """ + Given an iterable set of items (@p rows), produces lists of up to @p + aggregate_size items at a time, for example: + + iter_chunks([1,2,3,4,5,6,7,8,9,10], 3) + + Values for @p aggregate_size < 1 will raise ValueError. + + Will return a generator that produces, in the following order: + - [1, 2, 3] + - [4, 5, 6] + - [7, 8, 9] + - [10] + """ + if aggregate_size < 1: + raise ValueError() + + def iter_chunks_inner(): + row_iter = iter(rows) + done = False + agg = [] + while not done: + try: + row = row_iter.next() + agg.append(row) + except StopIteration: + done = True + if agg and (len(agg) >= aggregate_size or done): + yield agg + agg = [] + + return iter_chunks_inner() diff --git a/indra/lib/python/indra/util/iterators_test.py b/indra/lib/python/indra/util/iterators_test.py new file mode 100755 index 0000000000..66928c8e7d --- /dev/null +++ b/indra/lib/python/indra/util/iterators_test.py @@ -0,0 +1,72 @@ +"""\ +@file iterators_test.py +@brief Test cases for iterators module. + +$LicenseInfo:firstyear=2008&license=mit$ + +Copyright (c) 2008-2009, Linden Research, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
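A quick usage sketch of iter_chunks, matching the docstring above (example mine):

    from indra.util.iterators import iter_chunks

    for chunk in iter_chunks(range(1, 11), 3):
        print chunk                    # [1, 2, 3], then [4, 5, 6], [7, 8, 9], [10]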
+$/LicenseInfo$ +""" + +import unittest + +from indra.util.iterators import iter_chunks + +class TestIterChunks(unittest.TestCase): + """Unittests for iter_chunks""" + def test_bad_agg_size(self): + rows = [1,2,3,4] + self.assertRaises(ValueError, iter_chunks, rows, 0) + self.assertRaises(ValueError, iter_chunks, rows, -1) + + try: + for i in iter_chunks(rows, 0): + pass + except ValueError: + pass + else: + self.fail() + + try: + result = list(iter_chunks(rows, 0)) + except ValueError: + pass + else: + self.fail() + def test_empty(self): + rows = [] + result = list(iter_chunks(rows)) + self.assertEqual(result, []) + def test_small(self): + rows = [[1]] + result = list(iter_chunks(rows, 2)) + self.assertEqual(result, [[[1]]]) + def test_size(self): + rows = [[1],[2]] + result = list(iter_chunks(rows, 2)) + self.assertEqual(result, [[[1],[2]]]) + def test_multi_agg(self): + rows = [[1],[2],[3],[4],[5]] + result = list(iter_chunks(rows, 2)) + self.assertEqual(result, [[[1],[2]],[[3],[4]],[[5]]]) + +if __name__ == "__main__": + unittest.main() diff --git a/indra/lib/python/indra/util/llmanifest.py b/indra/lib/python/indra/util/llmanifest.py index 814e1c9f95..c33a03034a 100644 --- a/indra/lib/python/indra/util/llmanifest.py +++ b/indra/lib/python/indra/util/llmanifest.py @@ -5,7 +5,7 @@ $LicenseInfo:firstyear=2007&license=mit$ -Copyright (c) 2007, Linden Research, Inc. +Copyright (c) 2007-2009, Linden Research, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -34,18 +34,18 @@ import fnmatch import getopt import glob import os -import os.path import re import shutil import sys import tarfile import errno +import subprocess def path_ancestors(path): - path = os.path.normpath(path) + drive, path = os.path.splitdrive(os.path.normpath(path)) result = [] - while len(path) > 0: - result.append(path) + while len(path) > 0 and path != os.path.sep: + result.append(drive+path) path, sub = os.path.split(path) return result @@ -57,13 +57,13 @@ def proper_windows_path(path, current_platform = sys.platform): drive_letter = None rel = None match = re.match("/cygdrive/([a-z])/(.*)", path) - if(not match): + if not match: match = re.match('([a-zA-Z]):\\\(.*)', path) - if(not match): + if not match: return None # not an absolute path drive_letter = match.group(1) rel = match.group(2) - if(current_platform == "cygwin"): + if current_platform == "cygwin": return "/cygdrive/" + drive_letter.lower() + '/' + rel.replace('\\', '/') else: return drive_letter.upper() + ':\\' + rel.replace('/', '\\') @@ -94,10 +94,11 @@ def get_channel(srctree): for p in paths: if os.path.exists(p): contents = open(p, 'r').read() - channel = re.search("LL_CHANNEL\s=\s\"([\w\s]+)\"", contents).group(1) + channel = re.search("LL_CHANNEL\s=\s\"(.+)\";\s*$", contents, flags = re.M).group(1) return channel +DEFAULT_SRCTREE = os.path.dirname(sys.argv[0]) DEFAULT_CHANNEL = 'Second Life Release' ARGUMENTS=[ @@ -118,10 +119,12 @@ ARGUMENTS=[ Example use: %(name)s --arch=i686 On Linux this would try to use Linux_i686Manifest.""", default=""), + dict(name='build', description='Build directory.', default=DEFAULT_SRCTREE), + dict(name='buildtype', description='Build type (i.e. Debug, Release, RelWithDebInfo).', default=None), dict(name='configuration', - description="""The build configuration used. 
Only used on OS X for - now, but it could be used for other platforms as well.""", - default="Universal"), + description="""The build configuration used.""", + default="Release"), + dict(name='dest', description='Destination directory.', default=DEFAULT_SRCTREE), dict(name='grid', description="""Which grid the client will try to connect to. Even though it's not strictly a grid, 'firstlook' is also an acceptable @@ -144,6 +147,15 @@ ARGUMENTS=[ description="""The current platform, to be used for looking up which manifest class to run.""", default=get_default_platform), + dict(name='source', + description='Source directory.', + default=DEFAULT_SRCTREE), + dict(name='artwork', description='Artwork directory.', default=DEFAULT_SRCTREE), + dict(name='touch', + description="""File to touch when action is finished. Touch file will + contain the name of the final package in a form suitable + for use by a .bat file.""", + default=None), dict(name='version', description="""This specifies the version of Second Life that is being packaged up.""", @@ -167,63 +179,75 @@ def usage(srctree=""): default, arg['description'] % nd) -def main(argv=None, srctree='.', dsttree='./dst'): - if(argv == None): - argv = sys.argv - +def main(): option_names = [arg['name'] + '=' for arg in ARGUMENTS] option_names.append('help') - options, remainder = getopt.getopt(argv[1:], "", option_names) - if len(remainder) >= 1: - dsttree = remainder[0] - - print "Source tree:", srctree - print "Destination tree:", dsttree + options, remainder = getopt.getopt(sys.argv[1:], "", option_names) # convert options to a hash - args = {} + args = {'source': DEFAULT_SRCTREE, + 'artwork': DEFAULT_SRCTREE, + 'build': DEFAULT_SRCTREE, + 'dest': DEFAULT_SRCTREE } for opt in options: args[opt[0].replace("--", "")] = opt[1] + for k in 'artwork build dest source'.split(): + args[k] = os.path.normpath(args[k]) + + print "Source tree:", args['source'] + print "Artwork tree:", args['artwork'] + print "Build tree:", args['build'] + print "Destination tree:", args['dest'] + # early out for help - if args.has_key('help'): + if 'help' in args: # *TODO: it is a huge hack to pass around the srctree like this - usage(srctree) + usage(args['source']) return # defaults for arg in ARGUMENTS: - if not args.has_key(arg['name']): + if arg['name'] not in args: default = arg['default'] if hasattr(default, '__call__'): - default = default(srctree) + default = default(args['source']) if default is not None: args[arg['name']] = default # fix up version - if args.has_key('version') and type(args['version']) == str: + if isinstance(args.get('version'), str): args['version'] = args['version'].split('.') # default and agni are default if args['grid'] in ['default', 'agni']: args['grid'] = '' - if args.has_key('actions'): + if 'actions' in args: args['actions'] = args['actions'].split() # debugging for opt in args: print "Option:", opt, "=", args[opt] - wm = LLManifest.for_platform(args['platform'], args.get('arch'))(srctree, dsttree, args) + wm = LLManifest.for_platform(args['platform'], args.get('arch'))(args) wm.do(*args['actions']) + + # Write out the package file in this format, so that it can easily be called + # and used in a .bat file - yeah, it sucks, but this is the simplest... 
+ touch = args.get('touch') + if touch: + fp = open(touch, 'w') + fp.write('set package_file=%s\n' % wm.package_file) + fp.close() + print 'touched', touch return 0 class LLManifestRegistry(type): def __init__(cls, name, bases, dct): super(LLManifestRegistry, cls).__init__(name, bases, dct) match = re.match("(\w+)Manifest", name) - if(match): + if match: cls.manifests[match.group(1).lower()] = cls class LLManifest(object): @@ -235,15 +259,18 @@ class LLManifest(object): return self.manifests[platform.lower()] for_platform = classmethod(for_platform) - def __init__(self, srctree, dsttree, args): + def __init__(self, args): super(LLManifest, self).__init__() self.args = args self.file_list = [] self.excludes = [] self.actions = [] - self.src_prefix = [srctree] - self.dst_prefix = [dsttree] + self.src_prefix = [args['source']] + self.artwork_prefix = [args['artwork']] + self.build_prefix = [args['build']] + self.dst_prefix = [args['dest']] self.created_paths = [] + self.package_name = "Unknown" def default_grid(self): return self.args.get('grid', None) == '' @@ -260,16 +287,20 @@ class LLManifest(object): in the file list by path().""" self.excludes.append(glob) - def prefix(self, src='', dst=None): + def prefix(self, src='', build=None, dst=None): """ Pushes a prefix onto the stack. Until end_prefix is called, all relevant method calls (esp. to path()) will prefix paths with the entire prefix stack. Source and destination prefixes can be different, though if only one is provided they are both equal. To specify a no-op, use an empty string, not None.""" - if(dst == None): + if dst is None: dst = src + if build is None: + build = src self.src_prefix.append(src) + self.artwork_prefix.append(src) + self.build_prefix.append(build) self.dst_prefix.append(dst) return True # so that you can wrap it in an if to get indentation @@ -281,14 +312,24 @@ class LLManifest(object): exception is raised.""" # as an error-prevention mechanism, check the prefix and see if it matches the source or destination prefix. If not, improper nesting may have occurred. src = self.src_prefix.pop() + artwork = self.artwork_prefix.pop() + build = self.build_prefix.pop() dst = self.dst_prefix.pop() - if descr and not(src == descr or dst == descr): + if descr and not(src == descr or build == descr or dst == descr): raise ValueError, "End prefix '" + descr + "' didn't match '" +src+ "' or '" +dst + "'" def get_src_prefix(self): """ Returns the current source prefix.""" return os.path.join(*self.src_prefix) + def get_artwork_prefix(self): + """ Returns the current artwork prefix.""" + return os.path.join(*self.artwork_prefix) + + def get_build_prefix(self): + """ Returns the current build prefix.""" + return os.path.join(*self.build_prefix) + def get_dst_prefix(self): """ Returns the current destination prefix.""" return os.path.join(*self.dst_prefix) @@ -298,6 +339,11 @@ class LLManifest(object): relative to the source directory.""" return os.path.join(self.get_src_prefix(), relpath) + def build_path_of(self, relpath): + """Returns the full path to a file or directory specified + relative to the build directory.""" + return os.path.join(self.get_build_prefix(), relpath) + def dst_path_of(self, relpath): """Returns the full path to a file or directory specified relative to the destination directory.""" @@ -321,21 +367,24 @@ class LLManifest(object): def run_command(self, command): """ Runs an external command, and returns the output. Raises - an exception if the command reurns a nonzero status code. 
For - debugging/informational purpoases, prints out the command's + an exception if the command returns a nonzero status code. For + debugging/informational purposes, prints out the command's output as it is received.""" print "Running command:", command - fd = os.popen(command, 'r') + sys.stdout.flush() + child = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + shell=True) lines = [] while True: - lines.append(fd.readline()) - if(lines[-1] == ''): + lines.append(child.stdout.readline()) + if lines[-1] == '': break else: print lines[-1], output = ''.join(lines) - status = fd.close() - if(status): + child.stdout.close() + status = child.wait() + if status: raise RuntimeError( "Command %s returned non-zero status (%s) \noutput:\n%s" % (command, status, output) ) @@ -356,7 +405,7 @@ class LLManifest(object): f.close() def replace_in(self, src, dst=None, searchdict={}): - if(dst == None): + if dst == None: dst = src # read src f = open(self.src_path_of(src), "rbU") @@ -369,11 +418,11 @@ class LLManifest(object): self.created_paths.append(dst) def copy_action(self, src, dst): - if(src and (os.path.exists(src) or os.path.islink(src))): + if src and (os.path.exists(src) or os.path.islink(src)): # ensure that destination path exists self.cmakedirs(os.path.dirname(dst)) self.created_paths.append(dst) - if(not os.path.isdir(src)): + if not os.path.isdir(src): self.ccopy(src,dst) else: # src is a dir @@ -408,7 +457,7 @@ class LLManifest(object): print "Cleaning up " + c def process_file(self, src, dst): - if(self.includes(src, dst)): + if self.includes(src, dst): # print src, "=>", dst for action in self.actions: methodname = action + "_action" @@ -416,26 +465,29 @@ class LLManifest(object): if method is not None: method(src, dst) self.file_list.append([src, dst]) + return 1 else: - print "Excluding: ", src, dst - + sys.stdout.write(" (excluding %r, %r)" % (src, dst)) + sys.stdout.flush() + return 0 def process_directory(self, src, dst): - if(not self.includes(src, dst)): - print "Excluding: ", src, dst - return + if not self.includes(src, dst): + sys.stdout.write(" (excluding %r, %r)" % (src, dst)) + sys.stdout.flush() + return 0 names = os.listdir(src) self.cmakedirs(dst) errors = [] + count = 0 for name in names: srcname = os.path.join(src, name) dstname = os.path.join(dst, name) if os.path.isdir(srcname): - self.process_directory(srcname, dstname) + count += self.process_directory(srcname, dstname) else: - self.process_file(srcname, dstname) - - + count += self.process_file(srcname, dstname) + return count def includes(self, src, dst): if src: @@ -446,9 +498,9 @@ class LLManifest(object): def remove(self, *paths): for path in paths: - if(os.path.exists(path)): + if os.path.exists(path): print "Removing path", path - if(os.path.isdir(path)): + if os.path.isdir(path): shutil.rmtree(path) else: os.remove(path) @@ -457,17 +509,17 @@ class LLManifest(object): """ Copy a single file or symlink. Uses filecmp to skip copying for existing files.""" if os.path.islink(src): linkto = os.readlink(src) - if(os.path.islink(dst) or os.path.exists(dst)): + if os.path.islink(dst) or os.path.exists(dst): os.remove(dst) # because symlinking over an existing link fails os.symlink(linkto, dst) else: # Don't recopy file if it's up-to-date. # If we seem to be not not overwriting files that have been # updated, set the last arg to False, but it will take longer. 
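As an aside, the rewritten run_command above is an instance of a general subprocess streaming pattern; here it is in isolation (a standalone sketch, not from the patch): fold stderr into stdout, echo output line by line as it arrives, then raise on a nonzero exit status.

    import subprocess, sys

    def run_and_echo(command):
        child = subprocess.Popen(command, shell=True,
                                 stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        lines = []
        while True:
            line = child.stdout.readline()
            if line == '':
                break
            sys.stdout.write(line)     # echo as we go, like run_command does
            lines.append(line)
        child.stdout.close()
        status = child.wait()
        if status:
            raise RuntimeError('%r exited with status %s' % (command, status))
        return ''.join(lines)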
- if(os.path.exists(dst) and filecmp.cmp(src, dst, True)): + if os.path.exists(dst) and filecmp.cmp(src, dst, True): return # only copy if it's not excluded - if(self.includes(src, dst)): + if self.includes(src, dst): try: os.unlink(dst) except OSError, err: @@ -481,7 +533,7 @@ class LLManifest(object): feature that the destination directory can exist. It is so dumb that Python doesn't come with this. Also it implements the excludes functionality.""" - if(not self.includes(src, dst)): + if not self.includes(src, dst): return names = os.listdir(src) self.cmakedirs(dst) @@ -512,7 +564,7 @@ class LLManifest(object): def find_existing_file(self, *list): for f in list: - if(os.path.exists(f)): + if os.path.exists(f): return f # didn't find it, return last item in list if len(list) > 0: @@ -535,62 +587,64 @@ class LLManifest(object): def wildcard_regex(self, src_glob, dst_glob): - # print "regex_pair:", src_glob, dst_glob src_re = re.escape(src_glob) - src_re = src_re.replace('\*', '([-a-zA-Z0-9._ ]+)') + src_re = src_re.replace('\*', '([-a-zA-Z0-9._ ]*)') dst_temp = dst_glob i = 1 - while(dst_temp.count("*") > 0): + while dst_temp.count("*") > 0: dst_temp = dst_temp.replace('*', '\g<' + str(i) + '>', 1) i = i+1 - # print "regex_result:", src_re, dst_temp return re.compile(src_re), dst_temp def check_file_exists(self, path): - if(not os.path.exists(path) and not os.path.islink(path)): + if not os.path.exists(path) and not os.path.islink(path): raise RuntimeError("Path %s doesn't exist" % ( os.path.normpath(os.path.join(os.getcwd(), path)),)) wildcard_pattern = re.compile('\*') def expand_globs(self, src, dst): - def fw_slash(str): - return str.replace('\\', '/') - def os_slash(str): - return str.replace('/', os.path.sep) - dst = fw_slash(dst) - src = fw_slash(src) src_list = glob.glob(src) - src_re, d_template = self.wildcard_regex(src, dst) + src_re, d_template = self.wildcard_regex(src.replace('\\', '/'), + dst.replace('\\', '/')) for s in src_list: - s = fw_slash(s) - d = src_re.sub(d_template, s) - #print "s:",s, "d_t", d_template, "dst", dst, "d", d - yield os_slash(s), os_slash(d) + d = src_re.sub(d_template, s.replace('\\', '/')) + yield os.path.normpath(s), os.path.normpath(d) def path(self, src, dst=None): - print "Processing", src, "=>", dst + sys.stdout.write("Processing %s => %s ... 
" % (src, dst)) + sys.stdout.flush() if src == None: raise RuntimeError("No source file, dst is " + dst) if dst == None: dst = src dst = os.path.join(self.get_dst_prefix(), dst) - src = os.path.join(self.get_src_prefix(), src) - # expand globs - if(self.wildcard_pattern.search(src)): - for s,d in self.expand_globs(src, dst): - self.process_file(s, d) - else: - # if we're specifying a single path (not a glob), - # we should error out if it doesn't exist - self.check_file_exists(src) - # if it's a directory, recurse through it - if(os.path.isdir(src)): - self.process_directory(src, dst) + def try_path(src): + # expand globs + count = 0 + if self.wildcard_pattern.search(src): + for s,d in self.expand_globs(src, dst): + assert(s != d) + count += self.process_file(s, d) else: - self.process_file(src, dst) - + # if we're specifying a single path (not a glob), + # we should error out if it doesn't exist + self.check_file_exists(src) + # if it's a directory, recurse through it + if os.path.isdir(src): + count += self.process_directory(src, dst) + else: + count += self.process_file(src, dst) + return count + try: + count = try_path(os.path.join(self.get_src_prefix(), src)) + except RuntimeError: + try: + count = try_path(os.path.join(self.get_artwork_prefix(), src)) + except RuntimeError: + count = try_path(os.path.join(self.get_build_prefix(), src)) + print "%d files" % count def do(self, *actions): self.actions = actions diff --git a/indra/lib/python/indra/util/llperformance.py b/indra/lib/python/indra/util/llperformance.py new file mode 100755 index 0000000000..7c52730b5e --- /dev/null +++ b/indra/lib/python/indra/util/llperformance.py @@ -0,0 +1,158 @@ +#!/usr/bin/python + +# ------------------------------------------------ +# Sim metrics utility functions. + +import glob, os, time, sys, stat, exceptions + +from indra.base import llsd + +gBlockMap = {} #Map of performance metric data with function hierarchy information. +gCurrentStatPath = "" + +gIsLoggingEnabled=False + +class LLPerfStat: + def __init__(self,key): + self.mTotalTime = 0 + self.mNumRuns = 0 + self.mName=key + self.mTimeStamp = int(time.time()*1000) + self.mUTCTime = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) + + def __str__(self): + return "%f" % self.mTotalTime + + def start(self): + self.mStartTime = int(time.time() * 1000000) + self.mNumRuns += 1 + + def stop(self): + execution_time = int(time.time() * 1000000) - self.mStartTime + self.mTotalTime += execution_time + + def get_map(self): + results={} + results['name']=self.mName + results['utc_time']=self.mUTCTime + results['timestamp']=self.mTimeStamp + results['us']=self.mTotalTime + results['count']=self.mNumRuns + return results + +class PerfError(exceptions.Exception): + def __init__(self): + return + + def __Str__(self): + print "","Unfinished LLPerfBlock" + +class LLPerfBlock: + def __init__( self, key ): + global gBlockMap + global gCurrentStatPath + global gIsLoggingEnabled + + #Check to see if we're running metrics right now. + if gIsLoggingEnabled: + self.mRunning = True #Mark myself as running. 
+ + self.mPreviousStatPath = gCurrentStatPath + gCurrentStatPath += "/" + key + if gCurrentStatPath not in gBlockMap: + gBlockMap[gCurrentStatPath] = LLPerfStat(key) + + self.mStat = gBlockMap[gCurrentStatPath] + self.mStat.start() + + def finish( self ): + global gBlockMap + global gIsLoggingEnabled + + if gIsLoggingEnabled: + self.mStat.stop() + self.mRunning = False + gCurrentStatPath = self.mPreviousStatPath + +# def __del__( self ): +# if self.mRunning: +# #SPATTERS FIXME +# raise PerfError + +class LLPerformance: + #-------------------------------------------------- + # Determine whether or not we want to log statistics + + def __init__( self, process_name = "python" ): + self.process_name = process_name + self.init_testing() + self.mTimeStamp = int(time.time()*1000) + self.mUTCTime = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) + + def init_testing( self ): + global gIsLoggingEnabled + + host_performance_file = "/dev/shm/simperf/simperf_proc_config.llsd" + + #If file exists, open + if os.path.exists(host_performance_file): + file = open (host_performance_file,'r') + + #Read serialized LLSD from file. + body = llsd.parse(file.read()) + + #Calculate time since file last modified. + stats = os.stat(host_performance_file) + now = time.time() + mod = stats[stat.ST_MTIME] + age = now - mod + + if age < ( body['duration'] ): + gIsLoggingEnabled = True + + + def get ( self ): + global gIsLoggingEnabled + return gIsLoggingEnabled + + #def output(self,ptr,path): + # if 'stats' in ptr: + # stats = ptr['stats'] + # self.mOutputPtr[path] = stats.get_map() + + # if 'children' in ptr: + # children=ptr['children'] + + # curptr = self.mOutputPtr + # curchildren={} + # curptr['children'] = curchildren + + # for key in children: + # curchildren[key]={} + # self.mOutputPtr = curchildren[key] + # self.output(children[key],path + '/' + key) + + def done(self): + global gBlockMap + + if not self.get(): + return + + output_name = "/dev/shm/simperf/%s_proc.%d.llsd" % (self.process_name, os.getpid()) + output_file = open(output_name, 'w') + process_info = { + "name" : self.process_name, + "pid" : os.getpid(), + "ppid" : os.getppid(), + "timestamp" : self.mTimeStamp, + "utc_time" : self.mUTCTime, + } + output_file.write(llsd.format_notation(process_info)) + output_file.write('\n') + + for key in gBlockMap.keys(): + gBlockMap[key] = gBlockMap[key].get_map() + output_file.write(llsd.format_notation(gBlockMap)) + output_file.write('\n') + output_file.close() + diff --git a/indra/lib/python/indra/util/llsubprocess.py b/indra/lib/python/indra/util/llsubprocess.py index b6082de74a..7e0e115d14 100644 --- a/indra/lib/python/indra/util/llsubprocess.py +++ b/indra/lib/python/indra/util/llsubprocess.py @@ -6,7 +6,7 @@ $LicenseInfo:firstyear=2007&license=mit$ -Copyright (c) 2007, Linden Research, Inc. +Copyright (c) 2007-2009, Linden Research, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -90,6 +90,17 @@ all the output, and get the result. child.tochild.close() result = child.poll() if result != -1: + # At this point, the child process has exited and result + # is the return value from the process. Between the time + # we called select() and poll() the process may have + # exited so read all the data left on the child process + # stdout and stderr. 
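For orientation, a minimal usage sketch of the profiling helpers above (the process and block names are made up; if the /dev/shm/simperf config file is absent, logging stays disabled and these calls are effectively no-ops):

    from indra.util.llperformance import LLPerfBlock, LLPerformance

    perf = LLPerformance('example_proc')
    block = LLPerfBlock('busy_loop')
    total = sum(range(100000))         # stand-in for real work being measured
    block.finish()
    perf.done()                        # when logging is enabled, writes
                                       # /dev/shm/simperf/example_proc_proc.<pid>.llsd in LLSD notation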
+ last = child.fromchild.read() + if last: + out.append(last) + last = child.childerr.read() + if last: + err.append(last) child.tochild.close() child.fromchild.close() child.childerr.close() diff --git a/indra/lib/python/indra/util/llversion.py b/indra/lib/python/indra/util/llversion.py index 5e699d58ba..2718a85f41 100644 --- a/indra/lib/python/indra/util/llversion.py +++ b/indra/lib/python/indra/util/llversion.py @@ -1,11 +1,11 @@ """@file llversion.py @brief Utility for parsing llcommon/llversion${server}.h for the version string and channel string - Utility that parses svn info for branch and revision + Utility that parses hg or svn info for branch and revision $LicenseInfo:firstyear=2006&license=mit$ -Copyright (c) 2006-2007, Linden Research, Inc. +Copyright (c) 2006-2009, Linden Research, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -79,8 +79,8 @@ def get_svn_status_matching(regular_expression): status, output = commands.getstatusoutput('svn info %s' % get_src_root()) m = regular_expression.search(output) if not m: - print "Failed to parse svn info output, resultfollows:" - print output + print >> sys.stderr, "Failed to parse svn info output, result follows:" + print >> sys.stderr, output raise Exception, "No matching svn status in "+src_root return m.group(1) @@ -92,4 +92,35 @@ def get_svn_revision(): last_rev_re = re.compile('Last Changed Rev: (\d+)') return get_svn_status_matching(last_rev_re) - +def get_hg_repo(): + status, output = commands.getstatusoutput('hg showconfig paths.default') + if status: + print >> sys.stderr, output + sys.exit(1) + if not output: + print >> sys.stderr, 'ERROR: cannot find repo we cloned from' + sys.exit(1) + return output + +def get_hg_changeset(): + # The right thing to do: + # status, output = commands.getstatusoutput('hg id -i') + # if status: + # print >> sys.stderr, output + # sys.exit(1) + + # The temporary hack: + status, output = commands.getstatusoutput('hg parents --template "{rev}"') + if status: + print >> sys.stderr, output + sys.exit(1) + lines = output.splitlines() + if len(lines) > 1: + print >> sys.stderr, 'ERROR: working directory has %d parents' % len(lines) + return lines[0] + +def using_svn(): + return os.path.isdir(os.path.join(get_src_root(), '.svn')) + +def using_hg(): + return os.path.isdir(os.path.join(get_src_root(), '.hg')) diff --git a/indra/lib/python/indra/util/named_query.py b/indra/lib/python/indra/util/named_query.py index 471998388d..6bf956107d 100644 --- a/indra/lib/python/indra/util/named_query.py +++ b/indra/lib/python/indra/util/named_query.py @@ -6,7 +6,7 @@ $LicenseInfo:firstyear=2007&license=mit$ -Copyright (c) 2007, Linden Research, Inc. +Copyright (c) 2007-2009, Linden Research, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -36,17 +36,13 @@ import os.path import re import time -#import sys # *TODO: remove. only used in testing. -#import pprint # *TODO: remove. only used in testing. 
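A sketch of how a build script might pick a revision using the new hg helpers together with the existing svn ones (example mine; it assumes it runs from inside an hg or svn checkout of the tree, since the helpers shell out to hg/svn):

    from indra.util import llversion

    if llversion.using_hg():
        revision = llversion.get_hg_changeset()
    elif llversion.using_svn():
        revision = llversion.get_svn_revision()
    else:
        revision = '0'                 # fallback for plain tarballs (my assumption)
    print 'building from revision', revision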
- -try: - set = set -except NameError: - from sets import Set as set - from indra.base import llsd from indra.base import config +DEBUG = False +NQ_FILE_SUFFIX = config.get('named-query-file-suffix', '.nq') +NQ_FILE_SUFFIX_LEN = len(NQ_FILE_SUFFIX) + _g_named_manager = None def _init_g_named_manager(sql_dir = None): @@ -55,17 +51,29 @@ def _init_g_named_manager(sql_dir = None): This function is intended entirely for testing purposes, because it's tricky to control the config from inside a test.""" + global NQ_FILE_SUFFIX + NQ_FILE_SUFFIX = config.get('named-query-file-suffix', '.nq') + global NQ_FILE_SUFFIX_LEN + NQ_FILE_SUFFIX_LEN = len(NQ_FILE_SUFFIX) + if sql_dir is None: sql_dir = config.get('named-query-base-dir') + + # extra fallback directory in case config doesn't return what we want + if sql_dir is None: + sql_dir = os.path.abspath( + os.path.join( + os.path.realpath(os.path.dirname(__file__)), "..", "..", "..", "..", "web", "dataservice", "sql")) + global _g_named_manager _g_named_manager = NamedQueryManager( os.path.abspath(os.path.realpath(sql_dir))) -def get(name): +def get(name, schema = None): "Get the named query object to be used to perform queries" if _g_named_manager is None: _init_g_named_manager() - return _g_named_manager.get(name) + return _g_named_manager.get(name).for_schema(schema) def sql(connection, name, params): # use module-global NamedQuery object to perform default substitution @@ -95,9 +103,13 @@ class NamedQuery(object): def __init__(self, name, filename): """ Construct a NamedQuery object. The name argument is an arbitrary name as a handle for the query, and the filename is - a path to a file containing an llsd named query document.""" + a path to a file or a file-like object containing an llsd named + query document.""" self._stat_interval_seconds = 5 # 5 seconds self._name = name + if (filename is not None and isinstance(filename, (str, unicode)) + and NQ_FILE_SUFFIX != filename[-NQ_FILE_SUFFIX_LEN:]): + filename = filename + NQ_FILE_SUFFIX self._location = filename self._alternative = dict() self._last_mod_time = 0 @@ -111,8 +123,8 @@ class NamedQuery(object): def get_modtime(self): """ Returns the mtime (last modified time) of the named query - file, if such exists.""" - if self._location: + filename. For file-like objects, expect a modtime of 0""" + if self._location and isinstance(self._location, (str, unicode)): return os.path.getmtime(self._location) return 0 @@ -120,7 +132,12 @@ class NamedQuery(object): """ Loads and parses the named query file into self. Does nothing if self.location is nonexistant.""" if self._location: - self._reference_contents(llsd.parse(open(self._location).read())) + if isinstance(self._location, (str, unicode)): + contents = llsd.parse(open(self._location).read()) + else: + # we probably have a file-like object. Godspeed! + contents = llsd.parse(self._location.read()) + self._reference_contents(contents) # Check for alternative implementations try: for name, alt in self._contents['alternative'].items(): @@ -170,7 +187,15 @@ class NamedQuery(object): style. It also has to look for %:name% and :name% and ready them for use in LIKE statements""" if sql: - #print >>sys.stderr, "sql:",sql + # This first sub is to properly escape any % signs that + # are meant to be literally passed through to mysql in the + # query. It leaves any %'s that are used for + # like-expressions. 
+ expr = re.compile("(?<=[^a-zA-Z0-9_-])%(?=[^:])") + sql = expr.sub('%%', sql) + + # This should tackle the rest of the %'s in the query, by + # converting them to LIKE clauses. expr = re.compile("(%?):([a-zA-Z][a-zA-Z0-9_-]*)%") sql = expr.sub(self._prepare_like, sql) expr = re.compile("#:([a-zA-Z][a-zA-Z0-9_-]*)") @@ -259,7 +284,10 @@ class NamedQuery(object): So, we need a vendor (or extention) for LIKE_STRING. Anyone want to write it?""" - utf8_value = unicode(value, "utf-8") + if isinstance(value, unicode): + utf8_value = value + else: + utf8_value = unicode(value, "utf-8") esc_list = [] remove_chars = set(u"%_") for glyph in utf8_value: @@ -296,6 +324,8 @@ class NamedQuery(object): def for_schema(self, db_name): "Look trough the alternates and return the correct query" + if db_name is None: + return self try: return self._alternative[db_name] except KeyError, e: @@ -320,20 +350,21 @@ class NamedQuery(object): cursor = connection.cursor(MySQLdb.cursors.DictCursor) else: cursor = connection.cursor() - - statement = self.sql(connection, params) - #print "SQL:", statement - rows = cursor.execute(statement) - + + full_query, params = self._construct_sql(params) + if DEBUG: + print "SQL:", self.sql(connection, params) + rows = cursor.execute(full_query, params) + # *NOTE: the expect_rows argument is a very cheesy way to get some # validation on the result set. If you want to add more expectation - # logic, do something more object-oriented and flexible. Or use an ORM. + # logic, do something more object-oriented and flexible. Or use an ORM. if(self._return_as_map): expect_rows = 1 if expect_rows is not None and rows != expect_rows: cursor.close() - raise ExpectationFailed("Statement expected %s rows, got %s. Sql: %s" % ( - expect_rows, rows, statement)) + raise ExpectationFailed("Statement expected %s rows, got %s. Sql: '%s' %s" % ( + expect_rows, rows, full_query, params)) # convert to dicts manually if we're not using a dictcursor if use_dictcursor: @@ -359,17 +390,14 @@ class NamedQuery(object): return result_set[0] return result_set - def sql(self, connection, params): - """ Generates an SQL statement from the named query document - and a dictionary of parameters. - - """ + def _construct_sql(self, params): + """ Returns a query string and a dictionary of parameters, + suitable for directly passing to the execute() method.""" self.refresh() # build the query from the options available and the params base_query = [] base_query.append(self._base_query) - #print >>sys.stderr, "base_query:",base_query for opt, extra_where in self._options.items(): if type(extra_where) in (dict, list, tuple): if opt in params: @@ -379,7 +407,6 @@ class NamedQuery(object): base_query.append(extra_where) if self._query_suffix: base_query.append(self._query_suffix) - #print >>sys.stderr, "base_query:",base_query full_query = '\n'.join(base_query) # Go through the query and rewrite all of the ones with the @@ -411,10 +438,23 @@ class NamedQuery(object): new_params[self._build_integer_key(key)] = int(params[key]) params.update(new_params) + return full_query, params + + def sql(self, connection, params): + """ Generates an SQL statement from the named query document + and a dictionary of parameters. + + *NOTE: Only use for debugging, because it uses the + non-standard MySQLdb 'literal' method. + """ + if not DEBUG: + import warnings + warnings.warn("Don't use named_query.sql() when not debugging. 
Used on %s" % self._location) # do substitution using the mysql (non-standard) 'literal' # function to do the escaping. - sql = full_query % connection.literal(params) - return sql + full_query, params = self._construct_sql(params) + return full_query % connection.literal(params) + def refresh(self): """ Refresh self from the file on the filesystem. diff --git a/indra/lib/python/indra/util/shutil2.py b/indra/lib/python/indra/util/shutil2.py index 3acb44bf6f..9e2e7a6ded 100644 --- a/indra/lib/python/indra/util/shutil2.py +++ b/indra/lib/python/indra/util/shutil2.py @@ -4,7 +4,7 @@ $LicenseInfo:firstyear=2007&license=mit$ -Copyright (c) 2007, Linden Research, Inc. +Copyright (c) 2007-2009, Linden Research, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/indra/lib/python/indra/util/simperf_host_xml_parser.py b/indra/lib/python/indra/util/simperf_host_xml_parser.py new file mode 100755 index 0000000000..672c1050c2 --- /dev/null +++ b/indra/lib/python/indra/util/simperf_host_xml_parser.py @@ -0,0 +1,338 @@ +#!/usr/bin/env python +"""\ +@file simperf_host_xml_parser.py +@brief Digest collector's XML dump and convert to simple dict/list structure + +$LicenseInfo:firstyear=2008&license=mit$ + +Copyright (c) 2008-2009, Linden Research, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +$/LicenseInfo$ +""" + +import sys, os, getopt, time +import simplejson +from xml import sax + + +def usage(): + print "Usage:" + print sys.argv[0] + " [options]" + print " Convert RRD's XML dump to JSON. Script to convert the simperf_host_collector-" + print " generated RRD dump into JSON. Steps include converting selected named" + print " fields from GAUGE type to COUNTER type by computing delta with preceding" + print " values. Top-level named fields are:" + print + print " lastupdate Time (javascript timestamp) of last data sample" + print " step Time in seconds between samples" + print " ds Data specification (name/type) for each column" + print " database Table of data samples, one time step per row" + print + print "Options:" + print " -i, --in Input settings filename. (Default: stdin)" + print " -o, --out Output settings filename. (Default: stdout)" + print " -h, --help Print this message and exit." 
+ print + print "Example: %s -i rrddump.xml -o rrddump.json" % sys.argv[0] + print + print "Interfaces:" + print " class SimPerfHostXMLParser() # SAX content handler" + print " def simperf_host_xml_fixup(parser) # post-parse value fixup" + +class SimPerfHostXMLParser(sax.handler.ContentHandler): + + def __init__(self): + pass + + def startDocument(self): + self.rrd_last_update = 0 # public + self.rrd_step = 0 # public + self.rrd_ds = [] # public + self.rrd_records = [] # public + self._rrd_level = 0 + self._rrd_parse_state = 0 + self._rrd_chars = "" + self._rrd_capture = False + self._rrd_ds_val = {} + self._rrd_data_row = [] + self._rrd_data_row_has_nan = False + + def endDocument(self): + pass + + # Nasty little ad-hoc state machine to extract the elements that are + # necessary from the 'rrdtool dump' XML output. The same element + # name '<ds>' is used for two different data sets so we need to pay + # some attention to the actual structure to get the ones we want + # and ignore the ones we don't. + + def startElement(self, name, attrs): + self._rrd_level = self._rrd_level + 1 + self._rrd_capture = False + if self._rrd_level == 1: + if name == "rrd" and self._rrd_parse_state == 0: + self._rrd_parse_state = 1 # In <rrd> + self._rrd_capture = True + self._rrd_chars = "" + elif self._rrd_level == 2: + if self._rrd_parse_state == 1: + if name == "lastupdate": + self._rrd_parse_state = 2 # In <rrd><lastupdate> + self._rrd_capture = True + self._rrd_chars = "" + elif name == "step": + self._rrd_parse_state = 3 # In <rrd><step> + self._rrd_capture = True + self._rrd_chars = "" + elif name == "ds": + self._rrd_parse_state = 4 # In <rrd><ds> + self._rrd_ds_val = {} + self._rrd_chars = "" + elif name == "rra": + self._rrd_parse_state = 5 # In <rrd><rra> + elif self._rrd_level == 3: + if self._rrd_parse_state == 4: + if name == "name": + self._rrd_parse_state = 6 # In <rrd><ds><name> + self._rrd_capture = True + self._rrd_chars = "" + elif name == "type": + self._rrd_parse_state = 7 # In <rrd><ds><type> + self._rrd_capture = True + self._rrd_chars = "" + elif self._rrd_parse_state == 5: + if name == "database": + self._rrd_parse_state = 8 # In <rrd><rra><database> + elif self._rrd_level == 4: + if self._rrd_parse_state == 8: + if name == "row": + self._rrd_parse_state = 9 # In <rrd><rra><database><row> + self._rrd_data_row = [] + self._rrd_data_row_has_nan = False + elif self._rrd_level == 5: + if self._rrd_parse_state == 9: + if name == "v": + self._rrd_parse_state = 10 # In <rrd><rra><database><row><v> + self._rrd_capture = True + self._rrd_chars = "" + + def endElement(self, name): + self._rrd_capture = False + if self._rrd_parse_state == 10: + self._rrd_capture = self._rrd_level == 6 + if self._rrd_level == 5: + if self._rrd_chars == "NaN": + self._rrd_data_row_has_nan = True + else: + self._rrd_data_row.append(self._rrd_chars) + self._rrd_parse_state = 9 # In <rrd><rra><database><row> + elif self._rrd_parse_state == 9: + if self._rrd_level == 4: + if not self._rrd_data_row_has_nan: + self.rrd_records.append(self._rrd_data_row) + self._rrd_parse_state = 8 # In <rrd><rra><database> + elif self._rrd_parse_state == 8: + if self._rrd_level == 3: + self._rrd_parse_state = 5 # In <rrd><rra> + elif self._rrd_parse_state == 7: + if self._rrd_level == 3: + self._rrd_ds_val["type"] = self._rrd_chars + self._rrd_parse_state = 4 # In <rrd><ds> + elif self._rrd_parse_state == 6: + if self._rrd_level == 3: + self._rrd_ds_val["name"] = self._rrd_chars + self._rrd_parse_state = 4 # In <rrd><ds> + elif 
self._rrd_parse_state == 5: + if self._rrd_level == 2: + self._rrd_parse_state = 1 # In <rrd> + elif self._rrd_parse_state == 4: + if self._rrd_level == 2: + self.rrd_ds.append(self._rrd_ds_val) + self._rrd_parse_state = 1 # In <rrd> + elif self._rrd_parse_state == 3: + if self._rrd_level == 2: + self.rrd_step = long(self._rrd_chars) + self._rrd_parse_state = 1 # In <rrd> + elif self._rrd_parse_state == 2: + if self._rrd_level == 2: + self.rrd_last_update = long(self._rrd_chars) + self._rrd_parse_state = 1 # In <rrd> + elif self._rrd_parse_state == 1: + if self._rrd_level == 1: + self._rrd_parse_state = 0 # At top + + if self._rrd_level: + self._rrd_level = self._rrd_level - 1 + + def characters(self, content): + if self._rrd_capture: + self._rrd_chars = self._rrd_chars + content.strip() + +def _make_numeric(value): + try: + value = float(value) + except: + value = "" + return value + +def simperf_host_xml_fixup(parser, filter_start_time = None, filter_end_time = None): + # Fixup for GAUGE fields that are really COUNTS. They + # were forced to GAUGE to try to disable rrdtool's + # data interpolation/extrapolation for non-uniform time + # samples. + fixup_tags = [ "cpu_user", + "cpu_nice", + "cpu_sys", + "cpu_idle", + "cpu_waitio", + "cpu_intr", + # "file_active", + # "file_free", + # "inode_active", + # "inode_free", + "netif_in_kb", + "netif_in_pkts", + "netif_in_errs", + "netif_in_drop", + "netif_out_kb", + "netif_out_pkts", + "netif_out_errs", + "netif_out_drop", + "vm_page_in", + "vm_page_out", + "vm_swap_in", + "vm_swap_out", + #"vm_mem_total", + #"vm_mem_used", + #"vm_mem_active", + #"vm_mem_inactive", + #"vm_mem_free", + #"vm_mem_buffer", + #"vm_swap_cache", + #"vm_swap_total", + #"vm_swap_used", + #"vm_swap_free", + "cpu_interrupts", + "cpu_switches", + "cpu_forks" ] + + col_count = len(parser.rrd_ds) + row_count = len(parser.rrd_records) + + # Process the last row separately, just to make all values numeric. + for j in range(col_count): + parser.rrd_records[row_count - 1][j] = _make_numeric(parser.rrd_records[row_count - 1][j]) + + # Process all other row/columns. + last_different_row = row_count - 1 + current_row = row_count - 2 + while current_row >= 0: + # Check for a different value than the previous row. If everything is the same + # then this is probably just a filler/bogus entry. + is_different = False + for j in range(col_count): + parser.rrd_records[current_row][j] = _make_numeric(parser.rrd_records[current_row][j]) + if parser.rrd_records[current_row][j] != parser.rrd_records[last_different_row][j]: + # We're good. This is a different row. + is_different = True + + if not is_different: + # This is a filler/bogus entry. Just ignore it. + for j in range(col_count): + parser.rrd_records[current_row][j] = float('nan') + else: + # Some tags need to be converted into deltas. + for j in range(col_count): + if parser.rrd_ds[j]["name"] in fixup_tags: + parser.rrd_records[last_different_row][j] = \ + parser.rrd_records[last_different_row][j] - parser.rrd_records[current_row][j] + last_different_row = current_row + + current_row -= 1 + + # Set fixup_tags in the first row to 'nan' since they aren't useful anymore. + for j in range(col_count): + if parser.rrd_ds[j]["name"] in fixup_tags: + parser.rrd_records[0][j] = float('nan') + + # Add a timestamp to each row and to the catalog. Format and name + # chosen to match other simulator logging (hopefully). 
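+    # The first sample time is back-computed from the RRD's last update and
+    # the step size; each surviving row then gets a millisecond (javascript
+    # style) timestamp prepended, and a matching "javascript_timestamp"
+    # column is added to the data specification.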
+ start_time = parser.rrd_last_update - (parser.rrd_step * (row_count - 1)) + # Build a filtered list of rrd_records if we are limited to a time range. + filter_records = False + if filter_start_time is not None or filter_end_time is not None: + filter_records = True + filtered_rrd_records = [] + if filter_start_time is None: + filter_start_time = start_time * 1000 + if filter_end_time is None: + filter_end_time = parser.rrd_last_update * 1000 + + for i in range(row_count): + record_timestamp = (start_time + (i * parser.rrd_step)) * 1000 + parser.rrd_records[i].insert(0, record_timestamp) + if filter_records: + if filter_start_time <= record_timestamp and record_timestamp <= filter_end_time: + filtered_rrd_records.append(parser.rrd_records[i]) + + if filter_records: + parser.rrd_records = filtered_rrd_records + + parser.rrd_ds.insert(0, {"type": "GAUGE", "name": "javascript_timestamp"}) + + +def main(argv=None): + opts, args = getopt.getopt(sys.argv[1:], "i:o:h", ["in=", "out=", "help"]) + input_file = sys.stdin + output_file = sys.stdout + for o, a in opts: + if o in ("-i", "--in"): + input_file = open(a, 'r') + if o in ("-o", "--out"): + output_file = open(a, 'w') + if o in ("-h", "--help"): + usage() + sys.exit(0) + + # Using the SAX parser as it is at least 4X faster and far, far + # smaller on this dataset than the DOM-based interface in xml.dom.minidom. + # With SAX and a 5.4MB xml file, this requires about seven seconds of + # wall-clock time and 32MB VSZ. With the DOM interface, about 22 seconds + # and over 270MB VSZ. + + handler = SimPerfHostXMLParser() + sax.parse(input_file, handler) + if input_file != sys.stdin: + input_file.close() + + # Various format fixups: string-to-num, gauge-to-counts, add + # a time stamp, etc. + simperf_host_xml_fixup(handler) + + # Create JSONable dict with interesting data and format/print it + print >>output_file, simplejson.dumps({ "step" : handler.rrd_step, + "lastupdate": handler.rrd_last_update * 1000, + "ds" : handler.rrd_ds, + "database" : handler.rrd_records }) + + return 0 + +if __name__ == "__main__": + sys.exit(main()) diff --git a/indra/lib/python/indra/util/simperf_oprof_interface.py b/indra/lib/python/indra/util/simperf_oprof_interface.py new file mode 100755 index 0000000000..547d2f9980 --- /dev/null +++ b/indra/lib/python/indra/util/simperf_oprof_interface.py @@ -0,0 +1,167 @@ +#!/usr/bin/env python +"""\ +@file simperf_oprof_interface.py +@brief Manage OProfile data collection on a host + +$LicenseInfo:firstyear=2008&license=mit$ + +Copyright (c) 2008-2009, Linden Research, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +$/LicenseInfo$ +""" + +import sys, os, getopt +import simplejson + + +def usage(): + print "Usage:" + print sys.argv[0] + " [options]" + print " Digest the OProfile report forms that come out of the" + print " simperf_oprof_ctl program's -r/--report command. The result" + print " is an array of dictionaires with the following keys:" + print + print " symbol Name of sampled, calling, or called procedure" + print " file Executable or library where symbol resides" + print " percentage Percentage contribution to profile, calls or called" + print " samples Sample count" + print " calls Methods called by the method in question (full only)" + print " called_by Methods calling the method (full only)" + print + print " For 'full' reports the two keys 'calls' and 'called_by' are" + print " themselves arrays of dictionaries based on the first four keys." + print + print "Return Codes:" + print " None. Aggressively digests everything. Will likely mung results" + print " if a program or library has whitespace in its name." + print + print "Options:" + print " -i, --in Input settings filename. (Default: stdin)" + print " -o, --out Output settings filename. (Default: stdout)" + print " -h, --help Print this message and exit." + print + print "Interfaces:" + print " class SimPerfOProfileInterface()" + +class SimPerfOProfileInterface: + def __init__(self): + self.isBrief = True # public + self.isValid = False # public + self.result = [] # public + + def parse(self, input): + in_samples = False + for line in input: + if in_samples: + if line[0:6] == "------": + self.isBrief = False + self._parseFull(input) + else: + self._parseBrief(input, line) + self.isValid = True + return + try: + hd1, remain = line.split(None, 1) + if hd1 == "samples": + in_samples = True + except ValueError: + pass + + def _parseBrief(self, input, line1): + try: + fld1, fld2, fld3, fld4 = line1.split(None, 3) + self.result.append({"samples" : fld1, + "percentage" : fld2, + "file" : fld3, + "symbol" : fld4.strip("\n")}) + except ValueError: + pass + for line in input: + try: + fld1, fld2, fld3, fld4 = line.split(None, 3) + self.result.append({"samples" : fld1, + "percentage" : fld2, + "file" : fld3, + "symbol" : fld4.strip("\n")}) + except ValueError: + pass + + def _parseFull(self, input): + state = 0 # In 'called_by' section + calls = [] + called_by = [] + current = {} + for line in input: + if line[0:6] == "------": + if len(current): + current["calls"] = calls + current["called_by"] = called_by + self.result.append(current) + state = 0 + calls = [] + called_by = [] + current = {} + else: + try: + fld1, fld2, fld3, fld4 = line.split(None, 3) + tmp = {"samples" : fld1, + "percentage" : fld2, + "file" : fld3, + "symbol" : fld4.strip("\n")} + except ValueError: + continue + if line[0] != " ": + current = tmp + state = 1 # In 'calls' section + elif state == 0: + called_by.append(tmp) + else: + calls.append(tmp) + if len(current): + current["calls"] = calls + current["called_by"] = called_by + self.result.append(current) + + +def main(argv=None): + opts, args = getopt.getopt(sys.argv[1:], "i:o:h", ["in=", "out=", "help"]) + input_file = sys.stdin + output_file = sys.stdout + for o, a in opts: + if o in ("-i", "--in"): + input_file = open(a, 'r') + if o in ("-o", "--out"): + 
output_file = open(a, 'w') + if o in ("-h", "--help"): + usage() + sys.exit(0) + + oprof = SimPerfOProfileInterface() + oprof.parse(input_file) + if input_file != sys.stdin: + input_file.close() + + # Create JSONable dict with interesting data and format/print it + print >>output_file, simplejson.dumps(oprof.result) + + return 0 + +if __name__ == "__main__": + sys.exit(main()) diff --git a/indra/lib/python/indra/util/simperf_proc_interface.py b/indra/lib/python/indra/util/simperf_proc_interface.py new file mode 100755 index 0000000000..da6304a274 --- /dev/null +++ b/indra/lib/python/indra/util/simperf_proc_interface.py @@ -0,0 +1,191 @@ +#!/usr/bin/python +"""\ +@file simperf_proc_interface.py +@brief Utility to extract log messages from *.<pid>.llsd files containing performance statistics. + +$LicenseInfo:firstyear=2008&license=mit$ + +Copyright (c) 2008-2009, Linden Research, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +$/LicenseInfo$ +""" + +# ---------------------------------------------------- +# Utility to extract log messages from *.<pid>.llsd +# files that contain performance statistics. + +# ---------------------------------------------------- +import sys, os + +if os.path.exists("setup-path.py"): + execfile("setup-path.py") + +from indra.base import llsd + +DEFAULT_PATH="/dev/shm/simperf/" + + +# ---------------------------------------------------- +# Pull out the stats and return a single document +def parse_logfile(filename, target_column=None, verbose=False): + full_doc = [] + # Open source temp log file. Let exceptions percolate up. + sourcefile = open( filename,'r') + + if verbose: + print "Reading " + filename + + # Parse and output all lines from the temp file + for line in sourcefile.xreadlines(): + partial_doc = llsd.parse(line) + if partial_doc is not None: + if target_column is None: + full_doc.append(partial_doc) + else: + trim_doc = { target_column: partial_doc[target_column] } + if target_column != "fps": + trim_doc[ 'fps' ] = partial_doc[ 'fps' ] + trim_doc[ '/total_time' ] = partial_doc[ '/total_time' ] + trim_doc[ 'utc_time' ] = partial_doc[ 'utc_time' ] + full_doc.append(trim_doc) + + sourcefile.close() + return full_doc + +# Extract just the meta info line, and the timestamp of the first/last frame entry. +def parse_logfile_info(filename, verbose=False): + # Open source temp log file. Let exceptions percolate up. 
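+    # Strategy: the first line is the meta info record and the first frame
+    # line supplies start_time; end_time comes from seeking backwards from
+    # the end of the file in 1 KB steps until a parseable frame is found.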
+ sourcefile = open(filename, 'rU') # U is to open with Universal newline support + + if verbose: + print "Reading " + filename + + # The first line is the meta info line. + info_line = sourcefile.readline() + if not info_line: + sourcefile.close() + return None + + # The rest of the lines are frames. Read the first and last to get the time range. + info = llsd.parse( info_line ) + info['start_time'] = None + info['end_time'] = None + first_frame = sourcefile.readline() + if first_frame: + try: + info['start_time'] = int(llsd.parse(first_frame)['timestamp']) + except: + pass + + # Read the file backwards to find the last two lines. + sourcefile.seek(0, 2) + file_size = sourcefile.tell() + offset = 1024 + num_attempts = 0 + end_time = None + if file_size < offset: + offset = file_size + while 1: + sourcefile.seek(-1*offset, 2) + read_str = sourcefile.read(offset) + # Remove newline at the end + if read_str[offset - 1] == '\n': + read_str = read_str[0:-1] + lines = read_str.split('\n') + full_line = None + if len(lines) > 2: # Got two line + try: + end_time = llsd.parse(lines[-1])['timestamp'] + except: + # We couldn't parse this line. Try once more. + try: + end_time = llsd.parse(lines[-2])['timestamp'] + except: + # Nope. Just move on. + pass + break + if len(read_str) == file_size: # Reached the beginning + break + offset += 1024 + + info['end_time'] = int(end_time) + + sourcefile.close() + return info + + +def parse_proc_filename(filename): + try: + name_as_list = filename.split(".") + cur_stat_type = name_as_list[0].split("_")[0] + cur_pid = name_as_list[1] + except IndexError, ValueError: + return (None, None) + return (cur_pid, cur_stat_type) + +# ---------------------------------------------------- +def get_simstats_list(path=None): + """ Return stats (pid, type) listed in <type>_proc.<pid>.llsd """ + if path is None: + path = DEFAULT_PATH + simstats_list = [] + for file_name in os.listdir(path): + if file_name.endswith(".llsd") and file_name != "simperf_proc_config.llsd": + simstats_info = parse_logfile_info(path + file_name) + if simstats_info is not None: + simstats_list.append(simstats_info) + return simstats_list + +def get_log_info_list(pid=None, stat_type=None, path=None, target_column=None, verbose=False): + """ Return data from all llsd files matching the pid and stat type """ + if path is None: + path = DEFAULT_PATH + log_info_list = {} + for file_name in os.listdir ( path ): + if file_name.endswith(".llsd") and file_name != "simperf_proc_config.llsd": + (cur_pid, cur_stat_type) = parse_proc_filename(file_name) + if cur_pid is None: + continue + if pid is not None and pid != cur_pid: + continue + if stat_type is not None and stat_type != cur_stat_type: + continue + log_info_list[cur_pid] = parse_logfile(path + file_name, target_column, verbose) + return log_info_list + +def delete_simstats_files(pid=None, stat_type=None, path=None): + """ Delete *.<pid>.llsd files """ + if path is None: + path = DEFAULT_PATH + del_list = [] + for file_name in os.listdir(path): + if file_name.endswith(".llsd") and file_name != "simperf_proc_config.llsd": + (cur_pid, cur_stat_type) = parse_proc_filename(file_name) + if cur_pid is None: + continue + if pid is not None and pid != cur_pid: + continue + if stat_type is not None and stat_type != cur_stat_type: + continue + del_list.append(cur_pid) + # Allow delete related exceptions to percolate up if this fails. 
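+            # del_list collects the pid for each matching file; it is
+            # returned once every matching file has been unlinked below.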
+ os.unlink(os.path.join(DEFAULT_PATH, file_name)) + return del_list + diff --git a/indra/lib/python/indra/util/term.py b/indra/lib/python/indra/util/term.py new file mode 100644 index 0000000000..8c316a1f12 --- /dev/null +++ b/indra/lib/python/indra/util/term.py @@ -0,0 +1,222 @@ +''' +@file term.py +@brief a better shutil.copytree replacement + +$LicenseInfo:firstyear=2007&license=mit$ + +Copyright (c) 2007-2009, Linden Research, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +$/LicenseInfo$ +''' + +#http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/475116 + +import sys, re + +class TerminalController: + """ + A class that can be used to portably generate formatted output to + a terminal. + + `TerminalController` defines a set of instance variables whose + values are initialized to the control sequence necessary to + perform a given action. These can be simply included in normal + output to the terminal: + + >>> term = TerminalController() + >>> print 'This is '+term.GREEN+'green'+term.NORMAL + + Alternatively, the `render()` method can used, which replaces + '${action}' with the string required to perform 'action': + + >>> term = TerminalController() + >>> print term.render('This is ${GREEN}green${NORMAL}') + + If the terminal doesn't support a given action, then the value of + the corresponding instance variable will be set to ''. As a + result, the above code will still work on terminals that do not + support color, except that their output will not be colored. + Also, this means that you can test whether the terminal supports a + given action by simply testing the truth value of the + corresponding instance variable: + + >>> term = TerminalController() + >>> if term.CLEAR_SCREEN: + ... print 'This terminal supports clearning the screen.' + + Finally, if the width and height of the terminal are known, then + they will be stored in the `COLS` and `LINES` attributes. + """ + # Cursor movement: + BOL = '' #: Move the cursor to the beginning of the line + UP = '' #: Move the cursor up one line + DOWN = '' #: Move the cursor down one line + LEFT = '' #: Move the cursor left one char + RIGHT = '' #: Move the cursor right one char + + # Deletion: + CLEAR_SCREEN = '' #: Clear the screen and move to home position + CLEAR_EOL = '' #: Clear to the end of the line. + CLEAR_BOL = '' #: Clear to the beginning of the line. 
+ CLEAR_EOS = '' #: Clear to the end of the screen + + # Output modes: + BOLD = '' #: Turn on bold mode + BLINK = '' #: Turn on blink mode + DIM = '' #: Turn on half-bright mode + REVERSE = '' #: Turn on reverse-video mode + NORMAL = '' #: Turn off all modes + + # Cursor display: + HIDE_CURSOR = '' #: Make the cursor invisible + SHOW_CURSOR = '' #: Make the cursor visible + + # Terminal size: + COLS = None #: Width of the terminal (None for unknown) + LINES = None #: Height of the terminal (None for unknown) + + # Foreground colors: + BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = '' + + # Background colors: + BG_BLACK = BG_BLUE = BG_GREEN = BG_CYAN = '' + BG_RED = BG_MAGENTA = BG_YELLOW = BG_WHITE = '' + + _STRING_CAPABILITIES = """ + BOL=cr UP=cuu1 DOWN=cud1 LEFT=cub1 RIGHT=cuf1 + CLEAR_SCREEN=clear CLEAR_EOL=el CLEAR_BOL=el1 CLEAR_EOS=ed BOLD=bold + BLINK=blink DIM=dim REVERSE=rev UNDERLINE=smul NORMAL=sgr0 + HIDE_CURSOR=cinvis SHOW_CURSOR=cnorm""".split() + _COLORS = """BLACK BLUE GREEN CYAN RED MAGENTA YELLOW WHITE""".split() + _ANSICOLORS = "BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE".split() + + def __init__(self, term_stream=sys.stdout): + """ + Create a `TerminalController` and initialize its attributes + with appropriate values for the current terminal. + `term_stream` is the stream that will be used for terminal + output; if this stream is not a tty, then the terminal is + assumed to be a dumb terminal (i.e., have no capabilities). + """ + # Curses isn't available on all platforms + try: import curses + except: return + + # If the stream isn't a tty, then assume it has no capabilities. + if not term_stream.isatty(): return + + # Check the terminal type. If we fail, then assume that the + # terminal has no capabilities. + try: curses.setupterm() + except: return + + # Look up numeric capabilities. + self.COLS = curses.tigetnum('cols') + self.LINES = curses.tigetnum('lines') + + # Look up string capabilities. + for capability in self._STRING_CAPABILITIES: + (attrib, cap_name) = capability.split('=') + setattr(self, attrib, self._tigetstr(cap_name) or '') + + # Colors + set_fg = self._tigetstr('setf') + if set_fg: + for i,color in zip(range(len(self._COLORS)), self._COLORS): + setattr(self, color, curses.tparm(set_fg, i) or '') + set_fg_ansi = self._tigetstr('setaf') + if set_fg_ansi: + for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS): + setattr(self, color, curses.tparm(set_fg_ansi, i) or '') + set_bg = self._tigetstr('setb') + if set_bg: + for i,color in zip(range(len(self._COLORS)), self._COLORS): + setattr(self, 'BG_'+color, curses.tparm(set_bg, i) or '') + set_bg_ansi = self._tigetstr('setab') + if set_bg_ansi: + for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS): + setattr(self, 'BG_'+color, curses.tparm(set_bg_ansi, i) or '') + + def _tigetstr(self, cap_name): + # String capabilities can include "delays" of the form "$<2>". + # For any modern terminal, we should be able to just ignore + # these, so strip them out. + import curses + cap = curses.tigetstr(cap_name) or '' + return re.sub(r'\$<\d+>[/*]?', '', cap) + + def render(self, template): + """ + Replace each $-substitutions in the given template string with + the corresponding terminal control string (if it's defined) or + '' (if it's not). 
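+
+        For example (illustrative; the rendered escape codes depend on what
+        the current terminal supports):
+
+        >>> term = TerminalController()
+        >>> print term.render('${BOLD}bold${NORMAL} and ${GREEN}green${NORMAL}')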
+ """ + return re.sub(r'\$\$|\${\w+}', self._render_sub, template) + + def _render_sub(self, match): + s = match.group() + if s == '$$': return s + else: return getattr(self, s[2:-1]) + +####################################################################### +# Example use case: progress bar +####################################################################### + +class ProgressBar: + """ + A 3-line progress bar, which looks like:: + + Header + 20% [===========----------------------------------] + progress message + + The progress bar is colored, if the terminal supports color + output; and adjusts to the width of the terminal. + """ + BAR = '%3d%% ${GREEN}[${BOLD}%s%s${NORMAL}${GREEN}]${NORMAL}\n' + HEADER = '${BOLD}${CYAN}%s${NORMAL}\n\n' + + def __init__(self, term, header): + self.term = term + if not (self.term.CLEAR_EOL and self.term.UP and self.term.BOL): + raise ValueError("Terminal isn't capable enough -- you " + "should use a simpler progress dispaly.") + self.width = self.term.COLS or 75 + self.bar = term.render(self.BAR) + self.header = self.term.render(self.HEADER % header.center(self.width)) + self.cleared = 1 #: true if we haven't drawn the bar yet. + self.update(0, '') + + def update(self, percent, message): + if self.cleared: + sys.stdout.write(self.header) + self.cleared = 0 + n = int((self.width-10)*percent) + sys.stdout.write( + self.term.BOL + self.term.UP + self.term.CLEAR_EOL + + (self.bar % (100*percent, '='*n, '-'*(self.width-10-n))) + + self.term.CLEAR_EOL + message.center(self.width)) + + def clear(self): + if not self.cleared: + sys.stdout.write(self.term.BOL + self.term.CLEAR_EOL + + self.term.UP + self.term.CLEAR_EOL + + self.term.UP + self.term.CLEAR_EOL) + self.cleared = 1 diff --git a/indra/lib/python/indra/util/test_win32_manifest.py b/indra/lib/python/indra/util/test_win32_manifest.py new file mode 100644 index 0000000000..786521c068 --- /dev/null +++ b/indra/lib/python/indra/util/test_win32_manifest.py @@ -0,0 +1,143 @@ +#!/usr/bin/env python +# @file test_win32_manifest.py +# @brief Test an assembly binding version and uniqueness in a windows dll or exe. +# +# $LicenseInfo:firstyear=2009&license=viewerlgpl$ +# Second Life Viewer Source Code +# Copyright (C) 2010, Linden Research, Inc. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; +# version 2.1 of the License only. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +# +# Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA +# $/LicenseInfo$ + +import sys, os +import tempfile +from xml.dom.minidom import parse + +class AssemblyTestException(Exception): + pass + +class NoManifestException(AssemblyTestException): + pass + +class MultipleBindingsException(AssemblyTestException): + pass + +class UnexpectedVersionException(AssemblyTestException): + pass + +class NoMatchingAssemblyException(AssemblyTestException): + pass + +def get_HKLM_registry_value(key_str, value_str): + import _winreg + reg = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE) + key = _winreg.OpenKey(reg, key_str) + value = _winreg.QueryValueEx(key, value_str)[0] + #print 'Found: %s' % value + return value + +def find_vc_dir(): + supported_versions = (r'8.0', r'9.0') + value_str = (r'ProductDir') + + for version in supported_versions: + key_str = (r'SOFTWARE\Microsoft\VisualStudio\%s\Setup\VC' % + version) + try: + return get_HKLM_registry_value(key_str, value_str) + except WindowsError, err: + x64_key_str = (r'SOFTWARE\Wow6432Node\Microsoft\VisualStudio\%s\Setup\VS' % + version) + try: + return get_HKLM_registry_value(x64_key_str, value_str) + except: + print >> sys.stderr, "Didn't find MS VC version %s " % version + + raise + +def find_mt_path(): + vc_dir = find_vc_dir() + mt_path = '\"%sbin\\mt.exe\"' % vc_dir + return mt_path + +def test_assembly_binding(src_filename, assembly_name, assembly_ver): + print "checking %s dependency %s..." % (src_filename, assembly_name) + + (tmp_file_fd, tmp_file_name) = tempfile.mkstemp(suffix='.xml') + tmp_file = os.fdopen(tmp_file_fd) + tmp_file.close() + + mt_path = find_mt_path() + resource_id = "" + if os.path.splitext(src_filename)[1].lower() == ".dll": + resource_id = ";#2" + system_call = '%s -nologo -inputresource:%s%s -out:%s > NUL' % (mt_path, src_filename, resource_id, tmp_file_name) + print "Executing: %s" % system_call + mt_result = os.system(system_call) + if mt_result == 31: + print "No manifest found in %s" % src_filename + raise NoManifestException() + + manifest_dom = parse(tmp_file_name) + nodes = manifest_dom.getElementsByTagName('assemblyIdentity') + + versions = list() + for node in nodes: + if node.getAttribute('name') == assembly_name: + versions.append(node.getAttribute('version')) + + if len(versions) == 0: + print "No matching assemblies found in %s" % src_filename + raise NoMatchingAssemblyException() + + elif len(versions) > 1: + print "Multiple bindings to %s found:" % assembly_name + print versions + print + raise MultipleBindingsException(versions) + + elif versions[0] != assembly_ver: + print "Unexpected version found for %s:" % assembly_name + print "Wanted %s, found %s" % (assembly_ver, versions[0]) + print + raise UnexpectedVersionException(assembly_ver, versions[0]) + + os.remove(tmp_file_name) + + print "SUCCESS: %s OK!" % src_filename + print + +if __name__ == '__main__': + + print + print "Running test_win32_manifest.py..." 
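+    # The three positional arguments map straight onto
+    # test_assembly_binding(src_filename, assembly_name, assembly_ver);
+    # if any are missing, the usage message below is printed and the
+    # original exception is re-raised.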
+ + usage = 'test_win32_manfest <srcFileName> <assemblyName> <assemblyVersion>' + + try: + src_filename = sys.argv[1] + assembly_name = sys.argv[2] + assembly_ver = sys.argv[3] + except: + print "Usage:" + print usage + print + raise + + test_assembly_binding(src_filename, assembly_name, assembly_ver) + + diff --git a/indra/lib/python/uuid.py b/indra/lib/python/uuid.py index 48dac84377..0bc21a35f8 100644 --- a/indra/lib/python/uuid.py +++ b/indra/lib/python/uuid.py @@ -446,8 +446,14 @@ def uuid1(node=None, clock_seq=None): def uuid3(namespace, name): """Generate a UUID from the MD5 hash of a namespace UUID and a name.""" - import md5 - hash = md5.md5(namespace.bytes + name).digest() + try: + # Python 2.6 + from hashlib import md5 + except ImportError: + # Python 2.5 and earlier + from md5 import new as md5 + + hash = md5(namespace.bytes + name).digest() return UUID(bytes=hash[:16], version=3) def uuid4(): |
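A minimal, self-contained sketch of the md5 import fallback used by uuid3()
in the hunk above; the namespace bytes and name below are placeholders, not
values taken from the source.

    try:
        # Python 2.6
        from hashlib import md5
    except ImportError:
        # Python 2.5 and earlier
        from md5 import new as md5

    # Hash 16 placeholder namespace bytes plus a name, as uuid3() does.
    digest = md5('0123456789abcdef' + 'example-name').digest()
    assert len(digest) == 16  # uuid3() keeps the first 16 bytes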