Diffstat (limited to 'scripts')
-rw-r--r-- | scripts/code_tools/fix_whitespace.py        |  87
-rw-r--r-- | scripts/code_tools/fix_xml_indentations.py  | 125
-rw-r--r-- | scripts/code_tools/modified-strings.sh      |  14
-rw-r--r-- | scripts/code_tools/modified_strings.py      |  24
-rw-r--r-- | scripts/content_tools/anim_tool.py          |  26
-rw-r--r-- | scripts/content_tools/arche_tool.py         |   6
-rw-r--r-- | scripts/content_tools/dae_tool.py           |  10
-rw-r--r-- | scripts/content_tools/skel_tool.py          |  26
-rwxr-xr-x | scripts/messages/message_template.msg       |   5
-rwxr-xr-x | scripts/messages/message_template.msg.sha1  |   2
-rw-r--r-- | scripts/metrics/viewer_asset_logs.py        |   8
-rwxr-xr-x | scripts/metrics/viewerstats.py              |  16
-rw-r--r-- | scripts/perf/logsdir.py                     |  46
-rw-r--r-- | scripts/perf/profile_cmp.py                 | 104
-rw-r--r-- | scripts/perf/profile_csv.py                 |  60
-rw-r--r-- | scripts/perf/profile_pretty.py              |  40
-rwxr-xr-x | scripts/template_verifier.py                |  10
-rwxr-xr-x | scripts/templates/template-cpp.cpp          |   2
-rwxr-xr-x | scripts/templates/template-h.h              |   2
19 files changed, 542 insertions, 71 deletions
diff --git a/scripts/code_tools/fix_whitespace.py b/scripts/code_tools/fix_whitespace.py
new file mode 100644
index 0000000000..7a88265479
--- /dev/null
+++ b/scripts/code_tools/fix_whitespace.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+"""\
+
+This script replaces tab characters with spaces in source code files.
+
+$LicenseInfo:firstyear=2024&license=viewerlgpl$
+Second Life Viewer Source Code
+Copyright (C) 2024, Linden Research, Inc.
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation;
+version 2.1 of the License only.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Lesser General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with this library; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
+$/LicenseInfo$
+"""
+
+import argparse
+import os
+
+def convert_tabs_to_spaces(file_path, tab_stop):
+    """Convert tabs in a file to spaces, considering tab stops."""
+    with open(file_path, 'r') as file:
+        lines = file.readlines()
+
+    # Skip files with no tabs
+    if not any('\t' in line for line in lines):
+        return
+
+    new_lines = []
+    for line in lines:
+        # Remove trailing spaces
+        line = line.rstrip()
+        new_line = ''
+        column = 0  # Track the column index for calculating tab stops
+        for char in line:
+            if char == '\t':
+                # Calculate spaces needed to reach the next tab stop
+                spaces_needed = tab_stop - (column % tab_stop)
+                new_line += ' ' * spaces_needed
+                column += spaces_needed
+            else:
+                new_line += char
+                column += 1
+
+        new_lines.append(new_line + '\n')
+
+    with open(file_path, 'w', newline='\n') as file:
+        file.writelines(new_lines)
+
+def process_directory(directory, extensions, tab_stop):
+    """Recursively process files in directory, considering tab stops."""
+    extensions = tuple(extensions)
+    for root, dirs, files in os.walk(directory):
+        for file in files:
+            if file.endswith(extensions):
+                file_path = os.path.join(root, file)
+                print(f"Processing {file_path}")
+                convert_tabs_to_spaces(file_path, tab_stop)
+
+def main():
+    parser = argparse.ArgumentParser(description='Convert tabs to spaces in files, considering tab stops.')
+    parser.add_argument('-e', '--extensions', type=str, default='c,cpp,h,hpp,inl,py,glsl,cmake', help='Comma-separated list of file extensions to process (default: "c,cpp,h,hpp,inl,py,glsl,cmake")')
+    parser.add_argument('-t', '--tabstop', type=int, default=4, help='Tab stop size (default: 4)')
+    parser.add_argument('-d', '--directory', type=str, required=True, help='Directory to process')
+
+    args = parser.parse_args()
+
+    extensions = args.extensions.split(',')
+    # Add a dot prefix to each extension if not present
+    extensions = [ext if ext.startswith('.') else f".{ext}" for ext in extensions]
+
+    process_directory(args.directory, extensions, args.tabstop)
+    print("Processing completed.")
+
+if __name__ == "__main__":
+    main()
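The heart of fix_whitespace.py is the tab-stop arithmetic: a tab expands to however many spaces reach the next tab-stop column, not to a fixed count. A standalone sketch of that expansion (names mirror the new script; the asserts are illustrative):

```python
def expand_tabs(line, tab_stop=4):
    """Minimal re-statement of convert_tabs_to_spaces' inner loop."""
    out, column = '', 0
    for char in line:
        if char == '\t':
            # distance from the current column to the next tab stop
            spaces_needed = tab_stop - (column % tab_stop)
            out += ' ' * spaces_needed
            column += spaces_needed
        else:
            out += char
            column += 1
    return out

assert expand_tabs('\tx') == '    x'   # tab at column 0 -> 4 spaces
assert expand_tabs('a\tb') == 'a   b'  # tab at column 1 -> only 3 spaces
```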
diff --git a/scripts/code_tools/fix_xml_indentations.py b/scripts/code_tools/fix_xml_indentations.py
new file mode 100644
index 0000000000..e317e4f7f6
--- /dev/null
+++ b/scripts/code_tools/fix_xml_indentations.py
@@ -0,0 +1,125 @@
+#!/usr/bin/env python3
+"""\
+
+This script formats XML files in a given directory with options for indentation and space removal.
+
+$LicenseInfo:firstyear=2023&license=viewerlgpl$
+Second Life Viewer Source Code
+Copyright (C) 2023, Linden Research, Inc.
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation;
+version 2.1 of the License only.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Lesser General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with this library; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
+$/LicenseInfo$
+"""
+
+import os
+import sys
+import glob
+import io
+import xml.etree.ElementTree as ET
+
+def get_xml_declaration(file_path):
+    with open(file_path, 'r', encoding='utf-8') as file:
+        first_line = file.readline().strip()
+        if first_line.startswith('<?xml'):
+            return first_line
+    return None
+
+def parse_xml_file(file_path):
+    try:
+        tree = ET.parse(file_path)
+        return tree
+    except ET.ParseError as e:
+        print(f"Error parsing XML file {file_path}: {e}")
+        return None
+
+def indent(elem, level=0, indent_text=False, indent_tab=False):
+    indent_string = "\t" if indent_tab else "    "
+    i = "\n" + level * indent_string
+    if len(elem):
+        if not elem.text or not elem.text.strip():
+            elem.text = i + indent_string
+        if not elem.tail or not elem.tail.strip():
+            elem.tail = i
+        for elem in elem:
+            indent(elem, level + 1, indent_text, indent_tab)
+        if not elem.tail or not elem.tail.strip():
+            elem.tail = i
+    else:
+        if level and (not elem.tail or not elem.tail.strip()):
+            elem.tail = i
+        if indent_text and elem.text and not elem.text.isspace():
+            elem.text = "\n" + (level + 1) * indent_string + elem.text.strip() + "\n" + level * indent_string
+
+def save_xml(tree, file_path, xml_decl, indent_text=False, indent_tab=False, rm_space=False, rewrite_decl=False):
+    if tree is not None:
+        root = tree.getroot()
+        indent(root, indent_text=indent_text, indent_tab=indent_tab)
+        xml_string = ET.tostring(root, encoding='unicode')
+        if rm_space:
+            xml_string = xml_string.replace(' />', '/>')
+
+        xml_decl = (xml_decl if (xml_decl and not rewrite_decl)
+                    else '<?xml version="1.0" encoding="utf-8" standalone="yes" ?>')
+
+        try:
+            with io.open(file_path, 'wb') as file:
+                file.write(xml_decl.encode('utf-8'))
+                file.write('\n'.encode('utf-8'))
+                if xml_string:
+                    file.write(xml_string.encode('utf-8'))
+                    if not xml_string.endswith('\n'):
+                        file.write('\n'.encode('utf-8'))
+        except IOError as e:
+            print(f"Error saving file {file_path}: {e}")
+
+def process_directory(directory_path, indent_text=False, indent_tab=False, rm_space=False, rewrite_decl=False):
+    if not os.path.isdir(directory_path):
+        print(f"Directory not found: {directory_path}")
+        return
+
+    xml_files = glob.glob(os.path.join(directory_path, "*.xml"))
+    if not xml_files:
+        print(f"No XML files found in directory: {directory_path}")
+        return
+
+    for file_path in xml_files:
+        xml_decl = get_xml_declaration(file_path)
+        tree = parse_xml_file(file_path)
+        if tree is not None:
+            save_xml(tree, file_path, xml_decl, indent_text, indent_tab, rm_space, rewrite_decl)
+
+if __name__ == "__main__":
+    if len(sys.argv) < 2 or '--help' in sys.argv:
+        print("This script formats XML files in a given directory. Useful to fix XUI XMLs after processing by other tools.")
+        print("\nUsage:")
+        print("  python fix_xml_indentations.py <path/to/directory> [options]")
+        print("\nOptions:")
+        print("  --indent-text    Indents text within XML tags.")
+        print("  --indent-tab     Uses tabs instead of spaces for indentation.")
+        print("  --rm-space       Removes spaces in self-closing tags.")
+        print("  --rewrite_decl   Replaces the XML declaration line.")
+        print("\nCommon Usage:")
+        print("  To format XML files with text indentation, tab indentation, and removal of spaces in self-closing tags:")
+        print("  python fix_xml_indentations.py /path/to/xmls --indent-text --indent-tab --rm-space")
+        sys.exit(1)
+
+    directory_path = sys.argv[1]
+    indent_text = '--indent-text' in sys.argv
+    indent_tab = '--indent-tab' in sys.argv
+    rm_space = '--rm-space' in sys.argv
+    rewrite_decl = '--rewrite_decl' in sys.argv
+    process_directory(directory_path, indent_text, indent_tab, rm_space, rewrite_decl)
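Unlike a generic pretty-printer, the indent() helper above rewrites only the whitespace-bearing .text/.tail fields of each element, so element content and attributes are untouched. A small illustration on a hypothetical XUI fragment (assumes fix_xml_indentations.py is importable from the current directory):

```python
import xml.etree.ElementTree as ET
from fix_xml_indentations import indent  # helper defined above

root = ET.fromstring('<panel><button name="ok"/><text>Hi</text></panel>')
indent(root)  # defaults: 4-space indent, element text left inline
print(ET.tostring(root, encoding='unicode'))
# <panel>
#     <button name="ok" />
#     <text>Hi</text>
# </panel>
# (save_xml with --rm-space would later turn ' />' into '/>')
```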
diff --git a/scripts/code_tools/modified-strings.sh b/scripts/code_tools/modified-strings.sh
index 435dda3f5d..932b0a4bfc 100644
--- a/scripts/code_tools/modified-strings.sh
+++ b/scripts/code_tools/modified-strings.sh
@@ -38,11 +38,11 @@ do
         -h|--help)
             Action=USAGE
             ;;
-        
+
         -v|--verbose)
             Verbose=true
             ;;
-        
+
         ##
         ## Select the revision to compare against
         ##
@@ -79,7 +79,7 @@ do
             break
         fi
         ;;
-    esac  
+    esac
     shift # always consume 1
 done
@@ -107,10 +107,10 @@ then
    cat <<USAGE
Usage:
-    
+
    modified-strings.sh [ { -v | --verbose } ] [-r <revision>] [<path-to-xui>]

-   where 
+   where
        --verbose shows progress messages on stderr (the command takes a while, so this is reassuring)

        -r <revision> specifies a git revision (branch, tag, commit, or relative specifier)
@@ -124,9 +124,9 @@ Usage:
        the path of a file that has a string change (columns 2 and 3 are empty for lines with a filename)
    name
        the name attribute of a string or label whose value changed
-   English value 
+   English value
        the current value of the string or label whose value changed
-       for strings, newlines are changed to '\n' and tab characters are changed to '\t' 
+       for strings, newlines are changed to '\n' and tab characters are changed to '\t'

    There is also a column for each of the language directories following the English.
diff --git a/scripts/code_tools/modified_strings.py b/scripts/code_tools/modified_strings.py
index e7a9d239dc..c777fc8c0d 100644
--- a/scripts/code_tools/modified_strings.py
+++ b/scripts/code_tools/modified_strings.py
@@ -49,7 +49,7 @@ into google sheets.
 
 If the --rev revision already contains a translation for the text,
 it will be included in the spreadsheet for reference.
-    
+
 Normally you would want --rev_base to be the last revision to have
 translations added, and --rev to be the tip of the current project.
 You can find the last commit with translation work using "git log --grep INTL- | head"
@@ -162,6 +162,8 @@ def make_translation_table(mod_tree, base_tree, lang, args):
         filename = mod_blob.path
         if mod_blob.type == "tree": # directory, skip
             continue
+        if args.files and os.path.basename(filename) not in args.files:
+            continue # process only the specified files
         if args.verbose:
             print(filename)
@@ -240,7 +242,7 @@ def find_deletions(mod_tree, base_tree, lang, args, f):
         mod_filename = transl_filename.replace("/xui/{}/".format(lang), "/xui/{}/".format(args.base_lang))
         #print("checking",transl_filename,"against",mod_filename)
         try:
-            mod_blob = mod_tree[mod_filename] 
+            mod_blob = mod_tree[mod_filename]
         except:
             print("  delete file", transl_filename, file=f)
             continue
@@ -255,7 +257,7 @@ def find_deletions(mod_tree, base_tree, lang, args, f):
             if not elt_key in mod_dict:
                 if lines == 0:
                     print("  in file", transl_filename, file=f)
-                lines += 1 
+                lines += 1
                 print("    delete element", elt_key, file=f)
             else:
                 transl_elt = transl_dict[elt_key]
@@ -264,14 +266,14 @@ def find_deletions(mod_tree, base_tree, lang, args, f):
                     if not a in mod_elt.attrib:
                         if lines == 0:
                             print("  in file", transl_filename, file=f)
-                        lines += 1 
+                        lines += 1
                         print("    delete attribute", a, "from", elt_key, file=f)
                 if transl_elt.text and (not mod_elt.text):
                     if lines == 0:
                         print("  in file", transl_filename, file=f)
-                    lines += 1 
+                    lines += 1
                     print("    delete text from", elt_key, file=f)
-        
+
 def save_translation_file(per_lang_data, aux_data, outfile):
     langs = sorted(per_lang_data.keys())
@@ -308,12 +310,12 @@ def save_translation_file(per_lang_data, aux_data, outfile):
 
     # Reference info, not for translation
     for aux, data in list(aux_data.items()):
-        df = pd.DataFrame(data, columns = ["Key", "Value"]) 
+        df = pd.DataFrame(data, columns = ["Key", "Value"])
         df.to_excel(writer, index=False, sheet_name=aux)
         worksheet = writer.sheets[aux]
         worksheet.set_column('A:A', 50, bold_wrap_format)
         worksheet.set_column('B:B', 80, wrap_format)
-    
+
     print("Writing", outfile)
     writer.save()
@@ -325,9 +327,11 @@ if __name__ == "__main__":
     parser.add_argument("--deleted", action="store_true", default = False, help="show all translated entities which don't exist in english")
     parser.add_argument("--skip_spreadsheet", action="store_true", default = False, help="skip creating the translation spreadsheet")
     parser.add_argument("--rev", help="revision with modified strings, default HEAD", default="HEAD")
-    parser.add_argument("--rev_base", help="previous revision to compare against, default master", default="master")
+    parser.add_argument("--rev_base", help="previous revision to compare against, default main", default="main")
     parser.add_argument("--base_lang", help="base language, default en (normally leave unchanged - other values are only useful for testing)", default="en")
     parser.add_argument("--lang", help="target languages, or 'all_valid' or 'supported'; default is 'supported'", nargs="+", default = ["supported"])
+    parser.add_argument("--files", help='list of files to process', metavar='F', type=str, nargs='*')
+    parser.add_argument("--outfile", help='name of the output file', type=str, nargs='?', default="SL_Translations.xlsx")
 
     args = parser.parse_args()
 
     cwd = os.getcwd()
@@ -370,7 +374,7 @@ if __name__ == "__main__":
     print("Target language(s) are", ",".join(sorted(langs)))
     sys.stdout.flush()
 
-    outfile = "SL_Translations.xlsx"
+    outfile = args.outfile
     try:
         f = open(outfile,"a+")
         f.close()
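The new --files option filters on basename before any tree comparison, so full paths are unnecessary. A sketch of the filter's behavior (the file names here are hypothetical examples):

```python
import os

# Mirrors the check added to make_translation_table: only files whose
# basename appears in the --files list are processed; paths are ignored.
files_filter = ["strings.xml", "notifications.xml"]   # hypothetical --files
for filename in ["indra/newview/skins/default/xui/en/strings.xml",
                 "indra/newview/skins/default/xui/en/panel_login.xml"]:
    if files_filter and os.path.basename(filename) not in files_filter:
        continue  # panel_login.xml is skipped
    print("processing", filename)
```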
diff --git a/scripts/content_tools/anim_tool.py b/scripts/content_tools/anim_tool.py
index 4a0773951e..07159a8052 100644
--- a/scripts/content_tools/anim_tool.py
+++ b/scripts/content_tools/anim_tool.py
@@ -92,7 +92,7 @@ class FilePacker(object):
         # Now pad what's left of str out to 'size' with nul bytes.
         buf = str + ("\000" * (size-len(str)))
         self.buffer.write(buf)
-        
+
 class FileUnpacker(object):
     def __init__(self, filename):
         with open(filename,"rb") as f:
@@ -103,7 +103,7 @@ class FileUnpacker(object):
         result = struct.unpack_from(fmt, self.buffer, self.offset)
         self.offset += struct.calcsize(fmt)
         return result
-    
+
     def unpack_string(self, size=0):
         # Nonzero size means we must consider exactly the next 'size'
         # characters in self.buffer.
@@ -131,7 +131,7 @@ def F32_to_U16(val, lower, upper):
     # make sure that the value is positive and normalized to <0, 1>
     val -= lower;
     val /= (upper - lower);
-    
+
     # return the U16
     return int(math.floor(val*U16MAX))
@@ -149,7 +149,7 @@ def U16_to_F32(ival, lower, upper):
     # make sure that zeroes come through as zero
     if abs(val) < max_error:
         val = 0.0
-    return val;    
+    return val;
 
 class RotKey(object):
     def __init__(self, time, duration, rot):
@@ -185,7 +185,7 @@ class RotKey(object):
         fp.pack("<H",self.time_short)
         (x,y,z) = [F32_to_U16(v, -1.0, 1.0) for v in self.rotation]
         fp.pack("<HHH",x,y,z)
-        
+
 class PosKey(object):
     def __init__(self, time, duration, pos):
         """
@@ -216,7 +216,7 @@ class PosKey(object):
     def dump(self, f):
         print("    pos_key: t %.3f" % self.time,"pos ",",".join("%.3f" % f for f in self.position), file=f)
-        
+
     def pack(self, fp):
         fp.pack("<H",self.time_short)
         (x,y,z) = [F32_to_U16(v, -LL_MAX_PELVIS_OFFSET, LL_MAX_PELVIS_OFFSET) for v in self.position]
@@ -259,7 +259,7 @@ class Constraint(object):
         print("    ease_in_stop",self.ease_in_stop, file=f)
         print("    ease_out_start",self.ease_out_start, file=f)
         print("    ease_out_stop",self.ease_out_stop, file=f)
-        
+
 class Constraints(object):
     @staticmethod
     def unpack(duration, fup):
@@ -340,7 +340,7 @@ class RotationCurve(object):
         print("  num_rot_keys", len(self.keys), file=f)
         for k in self.keys:
             k.dump(f)
-            
+
 class JointInfo(object):
     def __init__(self, name, priority):
         self.joint_name = name
@@ -434,11 +434,11 @@ class Anim(object):
         # find that parent in list of joints, set its index in index list
 
         self.emote_name = fup.unpack_string()
-        
+
         (self.loop_in_point, self.loop_out_point, self.loop,
          self.ease_in_duration, self.ease_out_duration, self.hand_pose, num_joints) = \
             fup.unpack("@ffiffII")
-        
+
         self.joints = [JointInfo.unpack(self.duration, fup) for j in range(num_joints)]
 
         if self.verbose:
@@ -446,7 +446,7 @@ class Anim(object):
                 print("unpacked joint",joint_info.joint_name)
         self.constraints = Constraints.unpack(self.duration, fup)
         self.buffer = fup.buffer
-        
+
     def pack(self, fp):
         fp.pack("@HHhf", self.version, self.sub_version, self.base_priority, self.duration)
         fp.pack_string(self.emote_name, 0)
@@ -475,7 +475,7 @@ class Anim(object):
         for j in self.joints:
             j.dump(f)
         self.constraints.dump(f)
-        
+
     def write(self, filename):
         fp = FilePacker()
         self.pack(fp)
@@ -603,7 +603,7 @@ def main(*argv):
     # Use sys.argv[0] because (a) this script lives where it lives regardless
     # of what our caller passes and (b) we don't expect our caller to pass the
     # script name anyway.
-    pathname = os.path.dirname(sys.argv[0])        
+    pathname = os.path.dirname(sys.argv[0])
     # we're in scripts/content_tools; hop back to base of repository clone
     path_to_skel = os.path.join(os.path.abspath(pathname),os.pardir,os.pardir,
                                 "indra","newview","character")
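anim_tool packs keyframe floats into 16-bit integers over a known range (rotations over [-1, 1], positions over ±LL_MAX_PELVIS_OFFSET), so each component costs two bytes and the round-trip error is bounded by one quantization step. A sketch of that round trip (the clamp is an assumption; the hunks above show only the normalization):

```python
import math

U16MAX = 65535  # same constant anim_tool.py uses

def F32_to_U16(val, lower, upper):
    val = min(max(val, lower), upper)   # clamp (assumed, not shown in hunk)
    val -= lower                        # normalize to <0, 1>
    val /= (upper - lower)
    return int(math.floor(val * U16MAX))

def U16_to_F32(ival, lower, upper):
    return ival * (upper - lower) / U16MAX + lower

# One quantization step over [-1, 1] is 2/65535, about 3.1e-5.
rot = 0.123456
q = F32_to_U16(rot, -1.0, 1.0)
assert abs(U16_to_F32(q, -1.0, 1.0) - rot) <= 2.0 / U16MAX
```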
diff --git a/scripts/content_tools/arche_tool.py b/scripts/content_tools/arche_tool.py
index 677af62d2f..9979c61b21 100644
--- a/scripts/content_tools/arche_tool.py
+++ b/scripts/content_tools/arche_tool.py
@@ -78,7 +78,7 @@ def compare_trees(file_trees):
         compare_matched_nodes(key,items,summary)
     print("Summary:")
     print(summary)
-    
+
 def dump_appearance_params(tree):
     vals = []
     for e in tree.getroot().iter():
@@ -89,8 +89,8 @@ def dump_appearance_params(tree):
             #print e.get("id"), e.get("name"), e.get("group"), e.get("u8")
     if len(vals)==253:
         print(", ".join(vals))
-    
-        
+
+
 if __name__ == "__main__":
 
     parser = argparse.ArgumentParser(description="compare avatar XML archetype files")
diff --git a/scripts/content_tools/dae_tool.py b/scripts/content_tools/dae_tool.py
index 2454fafa46..5286c37de3 100644
--- a/scripts/content_tools/dae_tool.py
+++ b/scripts/content_tools/dae_tool.py
@@ -58,7 +58,7 @@ def mesh_lock_offsets(tree, joints):
                     floats[11] += 0.0001
                 matrix_node.text = " ".join([str(f) for f in floats])
                 print(joint_node.get("name"),matrix_node.tag,"text",matrix_node.text,len(floats),floats)
-                
+
 def mesh_random_offsets(tree, joints):
     print("mesh_random_offsets",tree,joints)
@@ -80,7 +80,7 @@ def mesh_random_offsets(tree, joints):
                     floats[11] += random.uniform(-1.0,1.0)
                 matrix_node.text = " ".join([str(f) for f in floats])
                 print(joint_node.get("name"),matrix_node.tag,"text",matrix_node.text,len(floats),floats)
-                
+
 if __name__ == "__main__":
 
     parser = argparse.ArgumentParser(description="process SL animations")
@@ -92,7 +92,7 @@ if __name__ == "__main__":
     parser.add_argument("--summary", action="store_true", help="print summary info about input file")
     args = parser.parse_args()
 
-    mesh = None 
+    mesh = None
     tree = None
 
     if args.infilename:
@@ -103,7 +103,7 @@
     if args.summary:
         print("summarizing",args.infilename)
         mesh_summary(mesh)
-        
+
     if args.lock_offsets:
         print("locking offsets for",args.lock_offsets)
         mesh_lock_offsets(tree, args.lock_offsets)
@@ -116,4 +116,4 @@ if __name__ == "__main__":
         print("writing",args.outfilename)
         f = open(args.outfilename,"w")
         print(etree.tostring(tree, pretty_print=True), file=f) #need update to get: , short_empty_elements=True)
-        
+
diff --git a/scripts/content_tools/skel_tool.py b/scripts/content_tools/skel_tool.py
index 696e4e2923..604d2b7db9 100644
--- a/scripts/content_tools/skel_tool.py
+++ b/scripts/content_tools/skel_tool.py
@@ -29,7 +29,7 @@ $/LicenseInfo$
 
 import argparse
 from lxml import etree
-  
+
 def get_joint_names(tree):
     joints = [element.get('name') for element in tree.getroot().iter() if element.tag in ['bone','collision_volume']]
     print("joints:",joints)
@@ -45,10 +45,10 @@ def get_aliases(tree):
             val = element.get('aliases')
             aliases[name] = val
     return aliases
-    
+
 def fix_name(element):
     pass
-    
+
 def enforce_precision_rules(element):
     pass
@@ -104,7 +104,7 @@ def enforce_symmetry(tree, element, field, fix=False):
 def get_element_by_name(tree,name):
     if tree is None:
         return None
-    matches = [elt for elt in tree.getroot().iter() if elt.get("name")==name] 
+    matches = [elt for elt in tree.getroot().iter() if elt.get("name")==name]
     if len(matches)==1:
         return matches[0]
     elif len(matches)>1:
@@ -117,7 +117,7 @@ def list_skel_tree(tree):
     for element in tree.getroot().iter():
         if element.tag == "bone":
             print(element.get("name"),"-",element.get("support"))
-    
+
== "bone": print(element.get("name"),"-",element.get("support")) - + def validate_child_order(tree, ogtree, fix=False): unfixable = 0 @@ -182,7 +182,7 @@ def validate_skel_tree(tree, ogtree, reftree, fix=False): print("validate_skel_tree") (num_bones,num_cvs) = (0,0) unfixable = 0 - defaults = {"connected": "false", + defaults = {"connected": "false", "group": "Face" } for element in tree.getroot().iter(): @@ -232,7 +232,7 @@ def validate_skel_tree(tree, ogtree, reftree, fix=False): if element.get("support")=="extended": if element.get("pos") != element.get("pivot"): print("extended joint",element.get("name"),"has mismatched pos, pivot") - + if element.tag == "linden_skeleton": num_bones = int(element.get("num_bones")) @@ -253,7 +253,7 @@ def validate_skel_tree(tree, ogtree, reftree, fix=False): if fix and (unfixable > 0): print("BAD FILE:", unfixable,"errs could not be fixed") - + def slider_info(ladtree,skeltree): for param in ladtree.iter("param"): @@ -287,7 +287,7 @@ def slider_info(ladtree,skeltree): print(" Offset MaxX", offset_max[0]) print(" Offset MaxY", offset_max[1]) print(" Offset MaxZ", offset_max[2]) - + # Check contents of avatar_lad file relative to a specified skeleton def validate_lad_tree(ladtree,skeltree,orig_ladtree): print("validate_lad_tree") @@ -344,7 +344,7 @@ def validate_lad_tree(ladtree,skeltree,orig_ladtree): expected_offset = tuple([bone_offset[0],-bone_offset[1],bone_offset[2]]) if left_offset != expected_offset: print("offset mismatch between",bone_name,"and",left_name,"in param",param.get("id","-1")) - + drivers = {} for driven_param in ladtree.iter("driven"): driver = driven_param.getparent().getparent() @@ -380,7 +380,7 @@ def validate_lad_tree(ladtree,skeltree,orig_ladtree): print("removed",set(orig_message_ids) - set(message_ids)) else: print("message ids OK") - + def remove_joint_by_name(tree, name): print("remove joint:",name) elt = get_element_by_name(tree,name) @@ -395,7 +395,7 @@ def remove_joint_by_name(tree, name): elt[:] = [] print("parent now:",[e.get("name") for e in list(parent)]) elt = get_element_by_name(tree,name) - + def compare_skel_trees(atree,btree): diffs = {} realdiffs = {} @@ -513,7 +513,7 @@ if __name__ == "__main__": if ladtree and tree and args.slider_info: slider_info(ladtree,tree) - + if args.outfilename: f = open(args.outfilename,"w") print(etree.tostring(tree, pretty_print=True), file=f) #need update to get: , short_empty_elements=True) diff --git a/scripts/messages/message_template.msg b/scripts/messages/message_template.msg index c019a76793..1450c111c2 100755 --- a/scripts/messages/message_template.msg +++ b/scripts/messages/message_template.msg @@ -3607,6 +3607,11 @@ version 2.0 AppearanceHover Variable { HoverHeight LLVector3 } } + { + AttachmentBlock Variable + { ID LLUUID } + { AttachmentPoint U8 } + } } // AvatarSitResponse - response to a request to sit on an object diff --git a/scripts/messages/message_template.msg.sha1 b/scripts/messages/message_template.msg.sha1 index 5ad85458e9..efa5f3cf48 100755 --- a/scripts/messages/message_template.msg.sha1 +++ b/scripts/messages/message_template.msg.sha1 @@ -1 +1 @@ -e3bd0529a647d938ab6d48f26d21dd52c07ebc6e
\ No newline at end of file
+d7915d67467e59287857630bd89bf9529d065199
\ No newline at end of file
diff --git a/scripts/metrics/viewer_asset_logs.py b/scripts/metrics/viewer_asset_logs.py
index bd996dff79..4fb9fd15b3 100644
--- a/scripts/metrics/viewer_asset_logs.py
+++ b/scripts/metrics/viewer_asset_logs.py
@@ -63,7 +63,7 @@ def update_stats(stats,rec):
             # handle fps record as special case
             pass
         else:
-            #print "field",field 
+            #print "field",field
             stats.setdefault(field,{})
             type_stats = stats.get(field)
             newcount = val["resp_count"]
@@ -75,9 +75,9 @@ def update_stats(stats,rec):
             type_stats["sum_bytes"] = type_stats.get("sum_bytes",0) + val["resp_count"] * val.get("resp_mean_bytes",0)
             type_stats["enqueued"] = type_stats.get("enqueued",0) + val["enqueued"]
             type_stats["dequeued"] = type_stats.get("dequeued",0) + val["dequeued"]
-            
-    
-            
+
+
+
 if __name__ == "__main__":
 
     parser = argparse.ArgumentParser(description="process metric xml files for viewer asset fetching")
diff --git a/scripts/metrics/viewerstats.py b/scripts/metrics/viewerstats.py
index e64343329c..41bc493aaa 100755
--- a/scripts/metrics/viewerstats.py
+++ b/scripts/metrics/viewerstats.py
@@ -144,7 +144,7 @@ def get_used_strings(root_dir):
             #if ext not in [".cpp", ".hpp", ".h", ".xml"]:
             #    skipped_ext.add(ext)
             #    continue
-            
+
             full_name = os.path.join(dir_name,fname)
 
             with open(full_name,"r") as f:
@@ -158,8 +158,8 @@ def get_used_strings(root_dir):
     print("skipped extensions", skipped_ext)
     print("got used_str", len(used_str))
     return used_str
-    
-    
+
+
 if __name__ == "__main__":
 
     parser = argparse.ArgumentParser(description="process tab-separated table containing viewerstats logs")
@@ -184,7 +184,7 @@ if __name__ == "__main__":
     if args.preferences:
         print("\nSETTINGS.XML")
         settings_sd = parse_settings_xml("settings.xml")
-        #for skey,svals in settings_sd.items(): 
+        #for skey,svals in settings_sd.items():
         #    print skey, "=>", svals
         (all_str,_,_,_) = show_stats_by_key(recs,["preferences","settings"],settings_sd)
         print()
@@ -211,16 +211,16 @@ if __name__ == "__main__":
             print("PREFIX_USED", len(prefix_used), ",".join(list(prefix_used)))
             print()
             unref_strings = unref_strings - prefix_used
-            
+
             print("\nUNREF_IN_CODE " + str(len(unref_strings)) + "\n")
             print("\n".join(list(unref_strings)))
             settings_str = read_raw_settings_xml("settings.xml")
             # Do this via direct string munging to generate minimal changeset
             settings_edited = remove_settings(settings_str,unref_strings)
             write_raw_settings_xml("settings.xml.edit",settings_edited)
-            
-            
-    
+
+
+
diff --git a/scripts/perf/logsdir.py b/scripts/perf/logsdir.py
new file mode 100644
index 0000000000..5ab45a28b6
--- /dev/null
+++ b/scripts/perf/logsdir.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+"""\
+@file   logsdir.py
+@author Nat Goodspeed
+@date   2024-09-12
+@brief  Locate the Second Life logs directory for the current user on the
+        current platform.
+
+$LicenseInfo:firstyear=2024&license=viewerlgpl$
+Copyright (c) 2024, Linden Research, Inc.
+$/LicenseInfo$
+"""
+
+import os
+from pathlib import Path
+import platform
+
+class Error(Exception):
+    pass
+
+# logic used by SLVersionChecker
+def logsdir():
+    app = 'SecondLife'
+    system = platform.system()
+    if (system == 'Darwin'):
+        base_dir = os.path.join(os.path.expanduser('~'),
+                                'Library','Application Support',app)
+    elif (system == 'Linux'):
+        base_dir = os.path.join(os.path.expanduser('~'),
+                                '.' + app.lower())
+    elif (system == 'Windows'):
+        appdata = os.getenv('APPDATA')
+        base_dir = os.path.join(appdata, app)
+    else:
+        raise ValueError("Unsupported platform '%s'" % system)
+
+    return os.path.join(base_dir, 'logs')
+
+def latest_file(dirpath, pattern):
+    files = Path(dirpath).glob(pattern)
+    sort = [(p.stat().st_mtime, p) for p in files if p.is_file()]
+    sort.sort(reverse=True)
+    try:
+        return sort[0][1]
+    except IndexError:
+        raise Error(f'No {pattern} files in {dirpath}')
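logsdir.latest_file() sorts the files matching a glob pattern by modification time, newest first, and raises the module's Error when nothing matches; the profile_* scripts below rely on it for their default input. A usage sketch (assumes the viewer has already written at least one frame profile):

```python
from logsdir import Error, latest_file, logsdir

try:
    # Same default the profile_* tools use when no filename is given.
    path = latest_file(logsdir(), 'profile.*.json')
    print('most recent frame profile:', path)
except Error as err:
    print(err)  # no profile.*.json files yet
```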
diff --git a/scripts/perf/profile_cmp.py b/scripts/perf/profile_cmp.py
new file mode 100644
index 0000000000..9dbfa3145b
--- /dev/null
+++ b/scripts/perf/profile_cmp.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python3
+"""\
+@file   profile_cmp.py
+@author Nat Goodspeed
+@date   2024-09-13
+@brief  Compare a frame profile stats file with a similar baseline file.
+
+$LicenseInfo:firstyear=2024&license=viewerlgpl$
+Copyright (c) 2024, Linden Research, Inc.
+$/LicenseInfo$
+"""
+
+from datetime import datetime
+import json
+from logsdir import Error, latest_file, logsdir
+from pathlib import Path
+import sys
+
+# variance that's ignorable
+DEFAULT_EPSILON = 0.03          # 3%
+
+def compare(baseline, test, epsilon=DEFAULT_EPSILON):
+    if Path(baseline).samefile(test):
+        print(f'{baseline} same as\n{test}\nAnalysis moot.')
+        return
+
+    with open(baseline) as inf:
+        bdata = json.load(inf)
+    with open(test) as inf:
+        tdata = json.load(inf)
+    print(f'baseline {baseline}\ntestfile {test}')
+
+    for k, tv in tdata['context'].items():
+        bv = bdata['context'].get(k)
+        if bv != tv:
+            print(f'baseline {k}={bv} vs.\ntestfile {k}={tv}')
+
+    btime = bdata['context'].get('time')
+    ttime = tdata['context'].get('time')
+    if btime and ttime:
+        print('testfile newer by',
+              datetime.fromisoformat(ttime) - datetime.fromisoformat(btime))
+
+    # The following ignores totals and unused shaders, except to the extent
+    # that some shaders were used in the baseline but not in the recent test
+    # or vice-versa. While the viewer considers that a shader has been used if
+    # 'binds' is nonzero, we exclude any whose 'time' is zero to avoid zero
+    # division.
+    bshaders = {s['name']: s for s in bdata['shaders'] if s['time'] and s['samples']}
+    tshaders = {s['name']: s for s in tdata['shaders'] if s['time']}
+
+    bothshaders = set(bshaders).intersection(tshaders)
+    deltas = []
+    for shader in bothshaders:
+        bshader = bshaders[shader]
+        tshader = tshaders[shader]
+        bthruput = bshader['samples']/bshader['time']
+        tthruput = tshader['samples']/tshader['time']
+        delta = (tthruput - bthruput)/bthruput
+        if abs(delta) > epsilon:
+            deltas.append((delta, shader, bthruput, tthruput))
+
+    # descending order of performance gain
+    deltas.sort(reverse=True)
+    print(f'{len(deltas)} shaders showed nontrivial performance differences '
+          '(millon samples/sec):')
+    namelen = max(len(s[1]) for s in deltas) if deltas else 0
+    for delta, shader, bthruput, tthruput in deltas:
+        print(f'  {shader.rjust(namelen)} {delta*100:6.1f}% '
+              f'{bthruput/1000000:8.2f} -> {tthruput/1000000:8.2f}')
+
+    tunused = set(bshaders).difference(tshaders)
+    print(f'{len(tunused)} baseline shaders not used in test:')
+    for s in tunused:
+        print(f'  {s}')
+    bunused = set(tshaders).difference(bshaders)
+    print(f'{len(bunused)} shaders newly used in test:')
+    for s in bunused:
+        print(f'  {s}')
+
+def main(*raw_args):
+    from argparse import ArgumentParser
+    parser = ArgumentParser(description="""
+%(prog)s compares a baseline JSON file from Develop -> Render Tests -> Frame
+Profile to another such file from a more recent test. It identifies shaders
+that have gained and lost in throughput.
+""")
+    parser.add_argument('-e', '--epsilon', type=float, default=int(DEFAULT_EPSILON*100),
+                        help="""percent variance considered ignorable (default %(default)s%%)""")
+    parser.add_argument('baseline',
+                        help="""baseline profile filename to compare against""")
+    parser.add_argument('test', nargs='?',
+                        help="""test profile filename to compare
+                        (default is most recent)""")
+    args = parser.parse_args(raw_args)
+    compare(args.baseline,
+            args.test or latest_file(logsdir(), 'profile.*.json'),
+            epsilon=(args.epsilon / 100.))
+
+if __name__ == "__main__":
+    try:
+        sys.exit(main(*sys.argv[1:]))
+    except (Error, OSError, json.JSONDecodeError) as err:
+        sys.exit(str(err))
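profile_cmp scores each shader by throughput (samples per unit time) and reports the change relative to the baseline, ignoring anything within epsilon. The arithmetic on hypothetical numbers:

```python
# Hypothetical per-shader stats; throughput = samples / time.
baseline = {'time': 2.0, 'samples': 8_000_000}   # 4.0M samples/sec
test     = {'time': 1.6, 'samples': 7_360_000}   # 4.6M samples/sec

bthruput = baseline['samples'] / baseline['time']
tthruput = test['samples'] / test['time']
delta = (tthruput - bthruput) / bthruput
print(f'{delta*100:+.1f}%')  # +15.0%, well past the default 3% epsilon
```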
diff --git a/scripts/perf/profile_csv.py b/scripts/perf/profile_csv.py
new file mode 100644
index 0000000000..7a6b2b338e
--- /dev/null
+++ b/scripts/perf/profile_csv.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python3
+"""\
+@file   profile_csv.py
+@author Nat Goodspeed
+@date   2024-09-12
+@brief  Convert a JSON file from Develop -> Render Tests -> Frame Profile to CSV
+
+$LicenseInfo:firstyear=2024&license=viewerlgpl$
+Copyright (c) 2024, Linden Research, Inc.
+$/LicenseInfo$
+"""
+
+import json
+from logsdir import Error, latest_file, logsdir
+import sys
+
+def convert(path, totals=True, unused=True, file=sys.stdout):
+    with open(path) as inf:
+        data = json.load(inf)
+    # print path to sys.stderr in case user is redirecting stdout
+    print(path, file=sys.stderr)
+
+    print('"name", "file1", "file2", "time", "binds", "samples", "triangles"', file=file)
+
+    if totals:
+        t = data['totals']
+        print(f'"totals", "", "", {t["time"]}, {t["binds"]}, {t["samples"]}, {t["triangles"]}',
+              file=file)
+
+    for sh in data['shaders']:
+        print(f'"{sh["name"]}", "{sh["files"][0]}", "{sh["files"][1]}", '
+              f'{sh["time"]}, {sh["binds"]}, {sh["samples"]}, {sh["triangles"]}', file=file)
+
+    if unused:
+        for u in data['unused']:
+            print(f'"{u}", "", "", 0, 0, 0, 0', file=file)
+
+def main(*raw_args):
+    from argparse import ArgumentParser
+    parser = ArgumentParser(description="""
+%(prog)s converts a JSON file from Develop -> Render Tests -> Frame Profile to
+a more-or-less equivalent CSV file. It expands the totals stats and unused
+shaders list to full shaders lines.
+""")
+    parser.add_argument('-t', '--totals', action='store_false', default=True,
+                        help="""omit totals from CSV file""")
+    parser.add_argument('-u', '--unused', action='store_false', default=True,
+                        help="""omit unused shaders from CSV file""")
+    parser.add_argument('path', nargs='?',
+                        help="""profile filename to convert (default is most recent)""")
+
+    args = parser.parse_args(raw_args)
+    convert(args.path or latest_file(logsdir(), 'profile.*.json'),
+            totals=args.totals, unused=args.unused)
+
+if __name__ == "__main__":
+    try:
+        sys.exit(main(*sys.argv[1:]))
+    except (Error, OSError, json.JSONDecodeError) as err:
+        sys.exit(str(err))
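convert() flattens all three sections of the profile JSON (totals, per-shader stats, unused shaders) into one uniform CSV schema. A sketch with a hypothetical minimal profile (assumes profile_csv.py is importable):

```python
import io, json
from profile_csv import convert

profile = {  # hypothetical minimal frame profile
    'totals':  {'time': 1.5, 'binds': 10, 'samples': 1000, 'triangles': 500},
    'shaders': [{'name': 'sky', 'files': ['sky.vert', 'sky.frag'],
                 'time': 0.5, 'binds': 4, 'samples': 400, 'triangles': 200}],
    'unused':  ['moon'],
}
with open('profile.demo.json', 'w') as f:
    json.dump(profile, f)

out = io.StringIO()
convert('profile.demo.json', file=out)
print(out.getvalue())
# "name", "file1", "file2", "time", "binds", "samples", "triangles"
# "totals", "", "", 1.5, 10, 1000, 500
# "sky", "sky.vert", "sky.frag", 0.5, 4, 400, 200
# "moon", "", "", 0, 0, 0, 0
```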
+""") + parser.add_argument('path', nargs='?', + help="""profile filename to pretty-print (default is most recent)""") + + args = parser.parse_args(raw_args) + pretty(args.path or latest_file(logsdir(), 'profile.*.json')) + +if __name__ == "__main__": + try: + sys.exit(main(*sys.argv[1:])) + except (Error, OSError, json.JSONDecodeError) as err: + sys.exit(str(err)) diff --git a/scripts/template_verifier.py b/scripts/template_verifier.py index ee8492db5e..65850f7a28 100755 --- a/scripts/template_verifier.py +++ b/scripts/template_verifier.py @@ -73,8 +73,8 @@ from indra.ipc import tokenstream from indra.ipc import llmessage def getstatusall(command): - """ Like commands.getstatusoutput, but returns stdout and - stderr separately(to get around "killed by signal 15" getting + """ Like commands.getstatusoutput, but returns stdout and + stderr separately(to get around "killed by signal 15" getting included as part of the file). Also, works on Windows.""" (input, out, err) = os.popen3(command, 't') status = input.close() # send no input to the command @@ -257,7 +257,7 @@ http://wiki.secondlife.com/wiki/Template_verifier.py elif len(args) == 1: master_url = None current_filename = args[0] - print("master:", options.master_url) + print("master:", options.master_url) print("current:", current_filename) current_url = 'file://%s' % current_filename # nothing specified, use defaults for everything @@ -269,7 +269,7 @@ http://wiki.secondlife.com/wiki/Template_verifier.py if master_url is None: master_url = options.master_url - + if current_url is None: current_filename = local_template_filename() print("master:", options.master_url) @@ -307,7 +307,7 @@ http://wiki.secondlife.com/wiki/Template_verifier.py print("Syntax-checking the local template ONLY, no compatibility check is being run.") print("Cause: %s\n\n" % e) return 0 - + acceptable, compat = compare( master_parsed, current_parsed, options.mode) diff --git a/scripts/templates/template-cpp.cpp b/scripts/templates/template-cpp.cpp index 35d8441c87..8ee04942bf 100755 --- a/scripts/templates/template-cpp.cpp +++ b/scripts/templates/template-cpp.cpp @@ -1,4 +1,4 @@ -/** +/** * @file #filename#.cpp * @brief Implementation of #filename# * @author #getpass.getuser()#@lindenlab.com diff --git a/scripts/templates/template-h.h b/scripts/templates/template-h.h index ce7b4ddc87..d7677c256b 100755 --- a/scripts/templates/template-h.h +++ b/scripts/templates/template-h.h @@ -1,4 +1,4 @@ -/** +/** * @file #filename#.h * @brief Header file for #filename# * @author #getpass.getuser()#@lindenlab.com |