author    Don Kjer <don@lindenlab.com>  2008-10-09 18:07:46 +0000
committer Don Kjer <don@lindenlab.com>  2008-10-09 18:07:46 +0000
commit    4ff16b735f59326514ad92ec38e3261cd996e05c (patch)
tree      170416c912dc272e7e171f156494946e05444e55 /indra/lib
parent    b807e3df990e6fad25cd0bca94d2959dac042b13 (diff)
QAR-907: svn merge -r 98908:98910 svn+ssh://svn/svn/linden/qa/sim-metrics/sim-metrics2-release-merge-98903 into release
Diffstat (limited to 'indra/lib')
-rwxr-xr-x  indra/lib/python/indra/util/llperformance.py            158
-rwxr-xr-x  indra/lib/python/indra/util/simperf_host_xml_parser.py  338
-rwxr-xr-x  indra/lib/python/indra/util/simperf_oprof_interface.py  160
-rwxr-xr-x  indra/lib/python/indra/util/simperf_proc_interface.py   164
4 files changed, 820 insertions, 0 deletions
diff --git a/indra/lib/python/indra/util/llperformance.py b/indra/lib/python/indra/util/llperformance.py
new file mode 100755
index 0000000000..7c52730b5e
--- /dev/null
+++ b/indra/lib/python/indra/util/llperformance.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python
+
+# ------------------------------------------------
+# Sim metrics utility functions.
+
+import glob, os, time, sys, stat, exceptions
+
+from indra.base import llsd
+
+gBlockMap = {} #Map of performance metric data with function hierarchy information.
+gCurrentStatPath = ""
+
+gIsLoggingEnabled=False
+
+class LLPerfStat:
+ def __init__(self,key):
+ self.mTotalTime = 0
+ self.mNumRuns = 0
+ self.mName=key
+ self.mTimeStamp = int(time.time()*1000)
+ self.mUTCTime = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
+
+ def __str__(self):
+ return "%f" % self.mTotalTime
+
+ def start(self):
+ self.mStartTime = int(time.time() * 1000000)
+ self.mNumRuns += 1
+
+ def stop(self):
+ execution_time = int(time.time() * 1000000) - self.mStartTime
+ self.mTotalTime += execution_time
+
+ def get_map(self):
+ results={}
+ results['name']=self.mName
+ results['utc_time']=self.mUTCTime
+ results['timestamp']=self.mTimeStamp
+ results['us']=self.mTotalTime
+ results['count']=self.mNumRuns
+ return results
+
+class PerfError(exceptions.Exception):
+ def __init__(self):
+ return
+
+    def __str__(self):
+        return "Unfinished LLPerfBlock"
+
+class LLPerfBlock:
+ def __init__( self, key ):
+ global gBlockMap
+ global gCurrentStatPath
+ global gIsLoggingEnabled
+
+ #Check to see if we're running metrics right now.
+ if gIsLoggingEnabled:
+ self.mRunning = True #Mark myself as running.
+
+ self.mPreviousStatPath = gCurrentStatPath
+ gCurrentStatPath += "/" + key
+ if gCurrentStatPath not in gBlockMap:
+ gBlockMap[gCurrentStatPath] = LLPerfStat(key)
+
+ self.mStat = gBlockMap[gCurrentStatPath]
+ self.mStat.start()
+
+ def finish( self ):
+        global gBlockMap
+        global gCurrentStatPath
+        global gIsLoggingEnabled
+
+ if gIsLoggingEnabled:
+ self.mStat.stop()
+ self.mRunning = False
+ gCurrentStatPath = self.mPreviousStatPath
+
+# def __del__( self ):
+# if self.mRunning:
+# #SPATTERS FIXME
+# raise PerfError
+
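+# A minimal usage sketch (illustrative; logging must have been enabled via
+# the simperf_proc_config.llsd file read by LLPerformance below):
+#
+#   block = LLPerfBlock("handle_request")
+#   do_work()                      # elapsed time accumulates in gBlockMap
+#   block.finish()                 # stops the timer, pops gCurrentStatPath
+#
+# Nested blocks build slash-separated paths like "/parent/child", which is
+# how the function hierarchy is recorded.
+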
+class LLPerformance:
+ #--------------------------------------------------
+ # Determine whether or not we want to log statistics
+
+ def __init__( self, process_name = "python" ):
+ self.process_name = process_name
+ self.init_testing()
+ self.mTimeStamp = int(time.time()*1000)
+ self.mUTCTime = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
+
+ def init_testing( self ):
+ global gIsLoggingEnabled
+
+ host_performance_file = "/dev/shm/simperf/simperf_proc_config.llsd"
+
+ #If file exists, open
+        if os.path.exists(host_performance_file):
+            config_file = open(host_performance_file, 'r')
+
+            #Read serialized LLSD from file.
+            body = llsd.parse(config_file.read())
+            config_file.close()
+
+ #Calculate time since file last modified.
+ stats = os.stat(host_performance_file)
+ now = time.time()
+ mod = stats[stat.ST_MTIME]
+ age = now - mod
+
+ if age < ( body['duration'] ):
+ gIsLoggingEnabled = True
+
+
+ def get ( self ):
+ global gIsLoggingEnabled
+ return gIsLoggingEnabled
+
+ #def output(self,ptr,path):
+ # if 'stats' in ptr:
+ # stats = ptr['stats']
+ # self.mOutputPtr[path] = stats.get_map()
+
+ # if 'children' in ptr:
+ # children=ptr['children']
+
+ # curptr = self.mOutputPtr
+ # curchildren={}
+ # curptr['children'] = curchildren
+
+ # for key in children:
+ # curchildren[key]={}
+ # self.mOutputPtr = curchildren[key]
+ # self.output(children[key],path + '/' + key)
+
+ def done(self):
+ global gBlockMap
+
+ if not self.get():
+ return
+
+ output_name = "/dev/shm/simperf/%s_proc.%d.llsd" % (self.process_name, os.getpid())
+ output_file = open(output_name, 'w')
+ process_info = {
+ "name" : self.process_name,
+ "pid" : os.getpid(),
+ "ppid" : os.getppid(),
+ "timestamp" : self.mTimeStamp,
+ "utc_time" : self.mUTCTime,
+ }
+ output_file.write(llsd.format_notation(process_info))
+ output_file.write('\n')
+
+ for key in gBlockMap.keys():
+ gBlockMap[key] = gBlockMap[key].get_map()
+ output_file.write(llsd.format_notation(gBlockMap))
+ output_file.write('\n')
+ output_file.close()
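+
+# Typical lifecycle (a sketch; the process name is illustrative):
+#
+#   perf = LLPerformance("my_process")   # reads simperf_proc_config.llsd
+#   if perf.get():                       # True when logging is enabled
+#       pass                             # run code wrapped in LLPerfBlock
+#   perf.done()    # writes /dev/shm/simperf/my_process_proc.<pid>.llsd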
+
diff --git a/indra/lib/python/indra/util/simperf_host_xml_parser.py b/indra/lib/python/indra/util/simperf_host_xml_parser.py
new file mode 100755
index 0000000000..b6084151c9
--- /dev/null
+++ b/indra/lib/python/indra/util/simperf_host_xml_parser.py
@@ -0,0 +1,338 @@
+#!/usr/bin/env python
+"""\
+@file simperf_host_xml_parser.py
+@brief Digest collector's XML dump and convert to simple dict/list structure
+
+$LicenseInfo:firstyear=2008&license=mit$
+
+Copyright (c) 2008, Linden Research, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+$/LicenseInfo$
+"""
+
+import sys, os, getopt, time
+import simplejson
+from xml import sax
+
+
+def usage():
+ print "Usage:"
+ print sys.argv[0] + " [options]"
+    print "  Convert the RRD XML dump generated by simperf_host_collector into"
+    print "  JSON.  Selected named fields are converted from GAUGE type to"
+    print "  COUNTER type by computing deltas with preceding values.  Top-level"
+    print "  named fields are:"
+ print
+ print " lastupdate Time (javascript timestamp) of last data sample"
+ print " step Time in seconds between samples"
+ print " ds Data specification (name/type) for each column"
+ print " database Table of data samples, one time step per row"
+ print
+ print "Options:"
+    print "  -i, --in      Input XML filename. (Default: stdin)"
+    print "  -o, --out     Output JSON filename. (Default: stdout)"
+ print " -h, --help Print this message and exit."
+ print
+ print "Example: %s -i rrddump.xml -o rrddump.json" % sys.argv[0]
+ print
+ print "Interfaces:"
+ print " class SimPerfHostXMLParser() # SAX content handler"
+ print " def simperf_host_xml_fixup(parser) # post-parse value fixup"
+
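+
+# Example output shape (values are illustrative; see main() below):
+#   { "lastupdate": 1223575666000,
+#     "step": 1,
+#     "ds": [ {"type": "GAUGE", "name": "javascript_timestamp"}, ... ],
+#     "database": [ [1223575665000, 0.25, ...], ... ] }
+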
+class SimPerfHostXMLParser(sax.handler.ContentHandler):
+
+ def __init__(self):
+ pass
+
+ def startDocument(self):
+ self.rrd_last_update = 0 # public
+ self.rrd_step = 0 # public
+ self.rrd_ds = [] # public
+ self.rrd_records = [] # public
+ self._rrd_level = 0
+ self._rrd_parse_state = 0
+ self._rrd_chars = ""
+ self._rrd_capture = False
+ self._rrd_ds_val = {}
+ self._rrd_data_row = []
+ self._rrd_data_row_has_nan = False
+
+ def endDocument(self):
+ pass
+
+ # Nasty little ad-hoc state machine to extract the elements that are
+ # necessary from the 'rrdtool dump' XML output. The same element
+ # name '<ds>' is used for two different data sets so we need to pay
+ # some attention to the actual structure to get the ones we want
+ # and ignore the ones we don't.
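+    #
+    # Parse states used below:
+    #    0  at top                 6  in <rrd><ds><name>
+    #    1  in <rrd>               7  in <rrd><ds><type>
+    #    2  in <rrd><lastupdate>   8  in <rrd><rra><database>
+    #    3  in <rrd><step>         9  in <rrd><rra><database><row>
+    #    4  in <rrd><ds>          10  in <rrd><rra><database><row><v>
+    #    5  in <rrd><rra>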
+
+ def startElement(self, name, attrs):
+ self._rrd_level = self._rrd_level + 1
+ self._rrd_capture = False
+ if self._rrd_level == 1:
+ if name == "rrd" and self._rrd_parse_state == 0:
+ self._rrd_parse_state = 1 # In <rrd>
+ self._rrd_capture = True
+ self._rrd_chars = ""
+ elif self._rrd_level == 2:
+ if self._rrd_parse_state == 1:
+ if name == "lastupdate":
+ self._rrd_parse_state = 2 # In <rrd><lastupdate>
+ self._rrd_capture = True
+ self._rrd_chars = ""
+ elif name == "step":
+ self._rrd_parse_state = 3 # In <rrd><step>
+ self._rrd_capture = True
+ self._rrd_chars = ""
+ elif name == "ds":
+ self._rrd_parse_state = 4 # In <rrd><ds>
+ self._rrd_ds_val = {}
+ self._rrd_chars = ""
+ elif name == "rra":
+ self._rrd_parse_state = 5 # In <rrd><rra>
+ elif self._rrd_level == 3:
+ if self._rrd_parse_state == 4:
+ if name == "name":
+ self._rrd_parse_state = 6 # In <rrd><ds><name>
+ self._rrd_capture = True
+ self._rrd_chars = ""
+ elif name == "type":
+ self._rrd_parse_state = 7 # In <rrd><ds><type>
+ self._rrd_capture = True
+ self._rrd_chars = ""
+ elif self._rrd_parse_state == 5:
+ if name == "database":
+ self._rrd_parse_state = 8 # In <rrd><rra><database>
+ elif self._rrd_level == 4:
+ if self._rrd_parse_state == 8:
+ if name == "row":
+ self._rrd_parse_state = 9 # In <rrd><rra><database><row>
+ self._rrd_data_row = []
+ self._rrd_data_row_has_nan = False
+ elif self._rrd_level == 5:
+ if self._rrd_parse_state == 9:
+ if name == "v":
+ self._rrd_parse_state = 10 # In <rrd><rra><database><row><v>
+ self._rrd_capture = True
+ self._rrd_chars = ""
+
+ def endElement(self, name):
+ self._rrd_capture = False
+ if self._rrd_parse_state == 10:
+ self._rrd_capture = self._rrd_level == 6
+ if self._rrd_level == 5:
+ if self._rrd_chars == "NaN":
+ self._rrd_data_row_has_nan = True
+ else:
+ self._rrd_data_row.append(self._rrd_chars)
+ self._rrd_parse_state = 9 # In <rrd><rra><database><row>
+ elif self._rrd_parse_state == 9:
+ if self._rrd_level == 4:
+ if not self._rrd_data_row_has_nan:
+ self.rrd_records.append(self._rrd_data_row)
+ self._rrd_parse_state = 8 # In <rrd><rra><database>
+ elif self._rrd_parse_state == 8:
+ if self._rrd_level == 3:
+ self._rrd_parse_state = 5 # In <rrd><rra>
+ elif self._rrd_parse_state == 7:
+ if self._rrd_level == 3:
+ self._rrd_ds_val["type"] = self._rrd_chars
+ self._rrd_parse_state = 4 # In <rrd><ds>
+ elif self._rrd_parse_state == 6:
+ if self._rrd_level == 3:
+ self._rrd_ds_val["name"] = self._rrd_chars
+ self._rrd_parse_state = 4 # In <rrd><ds>
+ elif self._rrd_parse_state == 5:
+ if self._rrd_level == 2:
+ self._rrd_parse_state = 1 # In <rrd>
+ elif self._rrd_parse_state == 4:
+ if self._rrd_level == 2:
+ self.rrd_ds.append(self._rrd_ds_val)
+ self._rrd_parse_state = 1 # In <rrd>
+ elif self._rrd_parse_state == 3:
+ if self._rrd_level == 2:
+ self.rrd_step = long(self._rrd_chars)
+ self._rrd_parse_state = 1 # In <rrd>
+ elif self._rrd_parse_state == 2:
+ if self._rrd_level == 2:
+ self.rrd_last_update = long(self._rrd_chars)
+ self._rrd_parse_state = 1 # In <rrd>
+ elif self._rrd_parse_state == 1:
+ if self._rrd_level == 1:
+ self._rrd_parse_state = 0 # At top
+
+ if self._rrd_level:
+ self._rrd_level = self._rrd_level - 1
+
+ def characters(self, content):
+ if self._rrd_capture:
+ self._rrd_chars = self._rrd_chars + content.strip()
+
+def _make_numeric(value):
+    try:
+        value = float(value)
+    except (ValueError, TypeError):
+        value = ""
+    return value
+
+def simperf_host_xml_fixup(parser, filter_start_time = None, filter_end_time = None):
+ # Fixup for GAUGE fields that are really COUNTS. They
+ # were forced to GAUGE to try to disable rrdtool's
+ # data interpolation/extrapolation for non-uniform time
+ # samples.
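+    #
+    # For example (values illustrative): a cpu_user column holding the
+    # cumulative readings [100, 250, 400] in successive rows becomes
+    # [NaN, 150, 150]: each row is replaced by its delta from the
+    # previous distinct row, and the first row is invalidated below.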
+ fixup_tags = [ "cpu_user",
+ "cpu_nice",
+ "cpu_sys",
+ "cpu_idle",
+ "cpu_waitio",
+ "cpu_intr",
+ # "file_active",
+ # "file_free",
+ # "inode_active",
+ # "inode_free",
+ "netif_in_kb",
+ "netif_in_pkts",
+ "netif_in_errs",
+ "netif_in_drop",
+ "netif_out_kb",
+ "netif_out_pkts",
+ "netif_out_errs",
+ "netif_out_drop",
+ "vm_page_in",
+ "vm_page_out",
+ "vm_swap_in",
+ "vm_swap_out",
+ #"vm_mem_total",
+ #"vm_mem_used",
+ #"vm_mem_active",
+ #"vm_mem_inactive",
+ #"vm_mem_free",
+ #"vm_mem_buffer",
+ #"vm_swap_cache",
+ #"vm_swap_total",
+ #"vm_swap_used",
+ #"vm_swap_free",
+ "cpu_interrupts",
+ "cpu_switches",
+ "cpu_forks" ]
+
+ col_count = len(parser.rrd_ds)
+ row_count = len(parser.rrd_records)
+
+ # Process the last row separately, just to make all values numeric.
+ for j in range(col_count):
+ parser.rrd_records[row_count - 1][j] = _make_numeric(parser.rrd_records[row_count - 1][j])
+
+ # Process all other row/columns.
+ last_different_row = row_count - 1
+ current_row = row_count - 2
+ while current_row >= 0:
+ # Check for a different value than the previous row. If everything is the same
+ # then this is probably just a filler/bogus entry.
+ is_different = False
+ for j in range(col_count):
+ parser.rrd_records[current_row][j] = _make_numeric(parser.rrd_records[current_row][j])
+ if parser.rrd_records[current_row][j] != parser.rrd_records[last_different_row][j]:
+ # We're good. This is a different row.
+ is_different = True
+
+ if not is_different:
+ # This is a filler/bogus entry. Just ignore it.
+ for j in range(col_count):
+ parser.rrd_records[current_row][j] = float('nan')
+ else:
+ # Some tags need to be converted into deltas.
+ for j in range(col_count):
+ if parser.rrd_ds[j]["name"] in fixup_tags:
+ parser.rrd_records[last_different_row][j] = \
+ parser.rrd_records[last_different_row][j] - parser.rrd_records[current_row][j]
+ last_different_row = current_row
+
+ current_row -= 1
+
+ # Set fixup_tags in the first row to 'nan' since they aren't useful anymore.
+ for j in range(col_count):
+ if parser.rrd_ds[j]["name"] in fixup_tags:
+ parser.rrd_records[0][j] = float('nan')
+
+ # Add a timestamp to each row and to the catalog. Format and name
+ # chosen to match other simulator logging (hopefully).
+ start_time = parser.rrd_last_update - (parser.rrd_step * (row_count - 1))
+ # Build a filtered list of rrd_records if we are limited to a time range.
+ filter_records = False
+ if filter_start_time is not None or filter_end_time is not None:
+ filter_records = True
+ filtered_rrd_records = []
+ if filter_start_time is None:
+ filter_start_time = start_time * 1000
+ if filter_end_time is None:
+ filter_end_time = parser.rrd_last_update * 1000
+
+ for i in range(row_count):
+ record_timestamp = (start_time + (i * parser.rrd_step)) * 1000
+ parser.rrd_records[i].insert(0, record_timestamp)
+ if filter_records:
+ if filter_start_time <= record_timestamp and record_timestamp <= filter_end_time:
+ filtered_rrd_records.append(parser.rrd_records[i])
+
+ if filter_records:
+ parser.rrd_records = filtered_rrd_records
+
+ parser.rrd_ds.insert(0, {"type": "GAUGE", "name": "javascript_timestamp"})
+
+
+def main(argv=None):
+ opts, args = getopt.getopt(sys.argv[1:], "i:o:h", ["in=", "out=", "help"])
+ input_file = sys.stdin
+ output_file = sys.stdout
+ for o, a in opts:
+ if o in ("-i", "--in"):
+ input_file = open(a, 'r')
+ if o in ("-o", "--out"):
+ output_file = open(a, 'w')
+ if o in ("-h", "--help"):
+ usage()
+ sys.exit(0)
+
+ # Using the SAX parser as it is at least 4X faster and far, far
+ # smaller on this dataset than the DOM-based interface in xml.dom.minidom.
+ # With SAX and a 5.4MB xml file, this requires about seven seconds of
+ # wall-clock time and 32MB VSZ. With the DOM interface, about 22 seconds
+ # and over 270MB VSZ.
+
+ handler = SimPerfHostXMLParser()
+ sax.parse(input_file, handler)
+ if input_file != sys.stdin:
+ input_file.close()
+
+ # Various format fixups: string-to-num, gauge-to-counts, add
+ # a time stamp, etc.
+ simperf_host_xml_fixup(handler)
+
+ # Create JSONable dict with interesting data and format/print it
+ print >>output_file, simplejson.dumps({ "step" : handler.rrd_step,
+ "lastupdate": handler.rrd_last_update * 1000,
+ "ds" : handler.rrd_ds,
+ "database" : handler.rrd_records })
+
+ return 0
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/indra/lib/python/indra/util/simperf_oprof_interface.py b/indra/lib/python/indra/util/simperf_oprof_interface.py
new file mode 100755
index 0000000000..a7e9a4cb32
--- /dev/null
+++ b/indra/lib/python/indra/util/simperf_oprof_interface.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python
+"""\
+@file simperf_oprof_interface.py
+@brief Manage OProfile data collection on a host
+
+$LicenseInfo:firstyear=2008&license=internal$
+
+Copyright (c) 2008, Linden Research, Inc.
+
+The following source code is PROPRIETARY AND CONFIDENTIAL. Use of
+this source code is governed by the Linden Lab Source Code Disclosure
+Agreement ("Agreement") previously entered between you and Linden
+Lab. By accessing, using, copying, modifying or distributing this
+software, you acknowledge that you have been informed of your
+obligations under the Agreement and agree to abide by those obligations.
+
+ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO
+WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY,
+COMPLETENESS OR PERFORMANCE.
+$/LicenseInfo$
+"""
+
+import sys, os, getopt
+import simplejson
+
+
+def usage():
+ print "Usage:"
+ print sys.argv[0] + " [options]"
+ print " Digest the OProfile report forms that come out of the"
+ print " simperf_oprof_ctl program's -r/--report command. The result"
+    print "  is an array of dictionaries with the following keys:"
+ print
+ print " symbol Name of sampled, calling, or called procedure"
+ print " file Executable or library where symbol resides"
+ print " percentage Percentage contribution to profile, calls or called"
+ print " samples Sample count"
+ print " calls Methods called by the method in question (full only)"
+ print " called_by Methods calling the method (full only)"
+ print
+ print " For 'full' reports the two keys 'calls' and 'called_by' are"
+ print " themselves arrays of dictionaries based on the first four keys."
+ print
+ print "Return Codes:"
+ print " None. Aggressively digests everything. Will likely mung results"
+ print " if a program or library has whitespace in its name."
+ print
+ print "Options:"
+    print "  -i, --in      Input report filename. (Default: stdin)"
+    print "  -o, --out     Output JSON filename. (Default: stdout)"
+ print " -h, --help Print this message and exit."
+ print
+ print "Interfaces:"
+ print " class SimPerfOProfileInterface()"
+
+class SimPerfOProfileInterface:
+ def __init__(self):
+ self.isBrief = True # public
+ self.isValid = False # public
+ self.result = [] # public
+
+ def parse(self, input):
+ in_samples = False
+ for line in input:
+ if in_samples:
+ if line[0:6] == "------":
+ self.isBrief = False
+ self._parseFull(input)
+ else:
+ self._parseBrief(input, line)
+ self.isValid = True
+ return
+ try:
+ hd1, remain = line.split(None, 1)
+ if hd1 == "samples":
+ in_samples = True
+ except ValueError:
+ pass
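+
+    # The report is expected to look roughly like this (values are
+    # hypothetical; only the leading "samples" token and the 4-column
+    # rows matter to the parser):
+    #
+    #   samples  %        image name   symbol name
+    #   152083   12.3456  simulator    LLFoo::bar()
+    #
+    # A separator of dashes ("------") after the header indicates the
+    # 'full' callgraph format handled by _parseFull().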
+
+ def _parseBrief(self, input, line1):
+ try:
+ fld1, fld2, fld3, fld4 = line1.split(None, 3)
+ self.result.append({"samples" : fld1,
+ "percentage" : fld2,
+ "file" : fld3,
+ "symbol" : fld4.strip("\n")})
+ except ValueError:
+ pass
+ for line in input:
+ try:
+ fld1, fld2, fld3, fld4 = line.split(None, 3)
+ self.result.append({"samples" : fld1,
+ "percentage" : fld2,
+ "file" : fld3,
+ "symbol" : fld4.strip("\n")})
+ except ValueError:
+ pass
+
+ def _parseFull(self, input):
+ state = 0 # In 'called_by' section
+ calls = []
+ called_by = []
+ current = {}
+ for line in input:
+ if line[0:6] == "------":
+ if len(current):
+ current["calls"] = calls
+ current["called_by"] = called_by
+ self.result.append(current)
+ state = 0
+ calls = []
+ called_by = []
+ current = {}
+ else:
+ try:
+ fld1, fld2, fld3, fld4 = line.split(None, 3)
+ tmp = {"samples" : fld1,
+ "percentage" : fld2,
+ "file" : fld3,
+ "symbol" : fld4.strip("\n")}
+ except ValueError:
+ continue
+ if line[0] != " ":
+ current = tmp
+ state = 1 # In 'calls' section
+ elif state == 0:
+ called_by.append(tmp)
+ else:
+ calls.append(tmp)
+ if len(current):
+ current["calls"] = calls
+ current["called_by"] = called_by
+ self.result.append(current)
+
+
+def main(argv=None):
+ opts, args = getopt.getopt(sys.argv[1:], "i:o:h", ["in=", "out=", "help"])
+ input_file = sys.stdin
+ output_file = sys.stdout
+ for o, a in opts:
+ if o in ("-i", "--in"):
+ input_file = open(a, 'r')
+ if o in ("-o", "--out"):
+ output_file = open(a, 'w')
+ if o in ("-h", "--help"):
+ usage()
+ sys.exit(0)
+
+ oprof = SimPerfOProfileInterface()
+ oprof.parse(input_file)
+ if input_file != sys.stdin:
+ input_file.close()
+
+ # Create JSONable dict with interesting data and format/print it
+ print >>output_file, simplejson.dumps(oprof.result)
+
+ return 0
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/indra/lib/python/indra/util/simperf_proc_interface.py b/indra/lib/python/indra/util/simperf_proc_interface.py
new file mode 100755
index 0000000000..62a63fa872
--- /dev/null
+++ b/indra/lib/python/indra/util/simperf_proc_interface.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+
+# ----------------------------------------------------
+# Utility to extract log messages from *.<pid>.llsd
+# files that contain performance statistics.
+
+# ----------------------------------------------------
+import sys, os
+
+if os.path.exists("setup-path.py"):
+ execfile("setup-path.py")
+
+from indra.base import llsd
+
+DEFAULT_PATH="/dev/shm/simperf/"
+
+
+# ----------------------------------------------------
+# Pull out the stats and return a single document
+def parse_logfile(filename, target_column=None, verbose=False):
+ full_doc = []
+ # Open source temp log file. Let exceptions percolate up.
+ sourcefile = open( filename,'r')
+
+ if verbose:
+ print "Reading " + filename
+
+ # Parse and output all lines from the temp file
+ for line in sourcefile.xreadlines():
+ partial_doc = llsd.parse(line)
+ if partial_doc is not None:
+ if target_column is None:
+ full_doc.append(partial_doc)
+ else:
+ trim_doc = { target_column: partial_doc[target_column] }
+ if target_column != "fps":
+ trim_doc[ 'fps' ] = partial_doc[ 'fps' ]
+ trim_doc[ '/total_time' ] = partial_doc[ '/total_time' ]
+ trim_doc[ 'utc_time' ] = partial_doc[ 'utc_time' ]
+ full_doc.append(trim_doc)
+
+ sourcefile.close()
+ return full_doc
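+
+# Example (hypothetical pid; done() in llperformance.py writes files named
+# <process>_proc.<pid>.llsd under DEFAULT_PATH):
+#   doc = parse_logfile(DEFAULT_PATH + "python_proc.1234.llsd",
+#                       target_column="fps", verbose=True)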
+
+# Extract just the meta info line, and the timestamp of the first/last frame entry.
+def parse_logfile_info(filename, verbose=False):
+ # Open source temp log file. Let exceptions percolate up.
+ sourcefile = open(filename, 'rU') # U is to open with Universal newline support
+
+ if verbose:
+ print "Reading " + filename
+
+ # The first line is the meta info line.
+ info_line = sourcefile.readline()
+ if not info_line:
+ sourcefile.close()
+ return None
+
+ # The rest of the lines are frames. Read the first and last to get the time range.
+ info = llsd.parse( info_line )
+ info['start_time'] = None
+ info['end_time'] = None
+ first_frame = sourcefile.readline()
+ if first_frame:
+ try:
+ info['start_time'] = int(llsd.parse(first_frame)['timestamp'])
+ except:
+ pass
+
+ # Read the file backwards to find the last two lines.
+ sourcefile.seek(0, 2)
+ file_size = sourcefile.tell()
+ offset = 1024
+ num_attempts = 0
+ end_time = None
+ if file_size < offset:
+ offset = file_size
+ while 1:
+ sourcefile.seek(-1*offset, 2)
+ read_str = sourcefile.read(offset)
+ # Remove newline at the end
+ if read_str[offset - 1] == '\n':
+ read_str = read_str[0:-1]
+ lines = read_str.split('\n')
+ full_line = None
+        if len(lines) > 2: # Got at least two complete lines
+ try:
+ end_time = llsd.parse(lines[-1])['timestamp']
+ except:
+ # We couldn't parse this line. Try once more.
+ try:
+ end_time = llsd.parse(lines[-2])['timestamp']
+ except:
+ # Nope. Just move on.
+ pass
+ break
+        if len(read_str) == file_size: # Reached the beginning
+            break
+        # Widen the window, but never seek past the start of the file.
+        offset = min(offset + 1024, file_size)
+
+    if end_time is not None:
+        info['end_time'] = int(end_time)
+
+ sourcefile.close()
+ return info
+
+
+def parse_proc_filename(filename):
+ try:
+ name_as_list = filename.split(".")
+ cur_stat_type = name_as_list[0].split("_")[0]
+ cur_pid = name_as_list[1]
+    except (IndexError, ValueError):
+ return (None, None)
+ return (cur_pid, cur_stat_type)
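+
+# e.g. parse_proc_filename("python_proc.1234.llsd") -> ("1234", "python")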
+
+# ----------------------------------------------------
+def get_simstats_list(path=None):
+ """ Return stats (pid, type) listed in <type>_proc.<pid>.llsd """
+ if path is None:
+ path = DEFAULT_PATH
+ simstats_list = []
+ for file_name in os.listdir(path):
+ if file_name.endswith(".llsd") and file_name != "simperf_proc_config.llsd":
+ simstats_info = parse_logfile_info(path + file_name)
+ if simstats_info is not None:
+ simstats_list.append(simstats_info)
+ return simstats_list
+
+def get_log_info_list(pid=None, stat_type=None, path=None, target_column=None, verbose=False):
+ """ Return data from all llsd files matching the pid and stat type """
+ if path is None:
+ path = DEFAULT_PATH
+ log_info_list = {}
+ for file_name in os.listdir ( path ):
+ if file_name.endswith(".llsd") and file_name != "simperf_proc_config.llsd":
+ (cur_pid, cur_stat_type) = parse_proc_filename(file_name)
+ if cur_pid is None:
+ continue
+ if pid is not None and pid != cur_pid:
+ continue
+ if stat_type is not None and stat_type != cur_stat_type:
+ continue
+ log_info_list[cur_pid] = parse_logfile(path + file_name, target_column, verbose)
+ return log_info_list
+
+def delete_simstats_files(pid=None, stat_type=None, path=None):
+ """ Delete *.<pid>.llsd files """
+ if path is None:
+ path = DEFAULT_PATH
+ del_list = []
+ for file_name in os.listdir(path):
+ if file_name.endswith(".llsd") and file_name != "simperf_proc_config.llsd":
+ (cur_pid, cur_stat_type) = parse_proc_filename(file_name)
+ if cur_pid is None:
+ continue
+ if pid is not None and pid != cur_pid:
+ continue
+ if stat_type is not None and stat_type != cur_stat_type:
+ continue
+ del_list.append(cur_pid)
+ # Allow delete related exceptions to percolate up if this fails.
+            os.unlink(os.path.join(path, file_name))
+ return del_list
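+
+# Typical module usage (a sketch; the pid value is illustrative):
+#   infos = get_simstats_list()                  # meta info for each log
+#   logs  = get_log_info_list(pid="1234", target_column="fps")
+#   delete_simstats_files(pid="1234")            # clean up afterwards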
+