#!/usr/bin/env python3
"""analyze-log - Generate a model from performance benchmark log files"""

import argparse
import dfatool.cli
import dfatool.plotter
import dfatool.utils
import dfatool.functions as df
from dfatool.model import AnalyticModel
from dfatool.validation import CrossValidator
from functools import reduce
import logging
import json
import sys
import re


def parse_logfile(filename):
    lf = dfatool.utils.Logfile()

    # Transparently handle xz-compressed benchmark logs
    if filename.endswith("xz"):
        import lzma

        with lzma.open(filename, "rt") as f:
            return lf.load(f)
    with open(filename, "r") as f:
        return lf.load(f)


def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__
    )
    dfatool.cli.add_standard_arguments(parser)
    parser.add_argument(
        "--plot-param",
        metavar="<name>:<attribute>:<parameter>[;<name>:<attribute>:<parameter>;...]",
        type=str,
        help="Plot measurements for <name> <attribute> by <parameter>. "
        "X axis is parameter value. "
        "Plots the model function as one solid line for each combination of non-<parameter> parameters. "
        "Also plots the corresponding measurements. "
        "If gplearn function is set, it is plotted using dashed lines.",
    )
    parser.add_argument(
        "--show-quality",
        choices=["table"],
        action="append",
        default=list(),
        help="table: show LUT, model, and static prediction error for each key and attribute.",
    )
    parser.add_argument(
        "--force-tree",
        action="store_true",
        help="Build decision tree without checking for analytic functions first",
    )
    parser.add_argument(
        "--export-model", metavar="FILE", type=str, help="Export JSON model to FILE"
    )
    parser.add_argument(
        "logfiles",
        nargs="+",
        type=str,
        help="Path to benchmark output (.txt or .txt.xz)",
    )
    args = parser.parse_args()
    dfatool.cli.sanity_check(args)

    if args.log_level:
        numeric_level = getattr(logging, args.log_level.upper(), None)
        if not isinstance(numeric_level, int):
            print(f"Invalid log level: {args.log_level}", file=sys.stderr)
            sys.exit(1)
        logging.basicConfig(level=numeric_level)

    if args.filter_param:
        args.filter_param = list(
            map(lambda x: x.split("="), args.filter_param.split(","))
        )
    else:
        args.filter_param = list()

    observations = reduce(lambda a, b: a + b, map(parse_logfile, args.logfiles))
    by_name, parameter_names = dfatool.utils.observations_to_by_name(observations)
    del observations

    if args.ignore_param:
        args.ignore_param = args.ignore_param.split(",")
        dfatool.utils.ignore_param(by_name, parameter_names, args.ignore_param)

    dfatool.utils.filter_aggregate_by_param(by_name, parameter_names, args.filter_param)

    if args.param_shift:
        param_shift = dfatool.cli.parse_param_shift(args.param_shift)
        dfatool.utils.shift_param_in_aggregate(by_name, parameter_names, param_shift)

    if args.normalize_nfp:
        norm = dfatool.cli.parse_nfp_normalization(args.normalize_nfp)
        dfatool.utils.normalize_nfp_in_aggregate(by_name, norm)

    function_override = dict()
    if args.function_override:
        for function_desc in args.function_override.split(";"):
            state_or_tran, attribute, *function_str = function_desc.split(" ")
            function_override[(state_or_tran, attribute)] = " ".join(function_str)

    model = AnalyticModel(
        by_name,
        parameter_names,
        force_tree=args.force_tree,
        function_override=function_override,
    )

    if args.info:
        dfatool.cli.print_info_by_name(model, by_name)

    if args.export_pgf_unparam:
        dfatool.cli.export_pgf_unparam(model, args.export_pgf_unparam)

    if args.export_json_unparam:
        dfatool.cli.export_json_unparam(model, args.export_json_unparam)
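    # Each --plot-unparam entry has the form "name:attribute:ylabel";
    # multiple entries are separated by ";" (see the parsing below).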
    if args.plot_unparam:
        for kv in args.plot_unparam.split(";"):
            state_or_trans, attribute, ylabel = kv.split(":")
            fname = "param_y_{}_{}.pdf".format(state_or_trans, attribute)
            dfatool.plotter.plot_y(
                model.by_name[state_or_trans][attribute],
                xlabel="measurement #",
                ylabel=ylabel,
                # output=fname,
                show=not args.non_interactive,
            )

    if args.boxplot_unparam:
        title = None
        if args.filter_param:
            title = "filter: " + ", ".join(
                map(lambda kv: f"{kv[0]}={kv[1]}", args.filter_param)
            )
        for name in model.names:
            attr_names = sorted(model.attributes(name))
            dfatool.plotter.boxplot(
                attr_names,
                [model.by_name[name][attr] for attr in attr_names],
                xlabel="Attribute",
                output=f"{args.boxplot_unparam}{name}.pdf",
                title=title,
                show=not args.non_interactive,
            )
            for attribute in attr_names:
                dfatool.plotter.boxplot(
                    [attribute],
                    [model.by_name[name][attribute]],
                    output=f"{args.boxplot_unparam}{name}-{attribute}.pdf",
                    title=title,
                    show=not args.non_interactive,
                )

    if args.boxplot_param:
        dfatool.cli.boxplot_param(args, model)

    if args.cross_validate:
        xv_method, xv_count = args.cross_validate.split(":")
        xv_count = int(xv_count)
        xv = CrossValidator(
            AnalyticModel,
            by_name,
            parameter_names,
            force_tree=args.force_tree,
        )
        xv.parameter_aware = args.parameter_aware_cross_validation
    else:
        xv_method = None

    static_model = model.get_static()

    try:
        lut_model = model.get_param_lut()
        lut_quality = model.assess(lut_model)
    except RuntimeError as e:
        if args.force_tree:
            # this is to be expected
            logging.debug(f"Skipping LUT model: {e}")
        else:
            logging.warning(f"Skipping LUT model: {e}")
        lut_model = None
        lut_quality = None

    param_model, param_info = model.get_fitted()

    if xv_method == "montecarlo":
        static_quality, _ = xv.montecarlo(
            lambda m: m.get_static(), xv_count, static=True
        )
        xv.export_filename = args.export_xv
        analytic_quality, _ = xv.montecarlo(lambda m: m.get_fitted()[0], xv_count)
    elif xv_method == "kfold":
        static_quality, _ = xv.kfold(lambda m: m.get_static(), xv_count, static=True)
        xv.export_filename = args.export_xv
        analytic_quality, _ = xv.kfold(lambda m: m.get_fitted()[0], xv_count)
    else:
        static_quality = model.assess(static_model)
        if args.export_raw_predictions:
            analytic_quality, raw_results = model.assess(param_model, return_raw=True)
            with open(args.export_raw_predictions, "w") as f:
                json.dump(raw_results, f, cls=dfatool.utils.NpEncoder)
        else:
            analytic_quality = model.assess(param_model)

    if "static" in args.show_model or "all" in args.show_model:
        print("--- static model ---")
        for name in sorted(model.names):
            for attribute in sorted(model.attributes(name)):
                dfatool.cli.print_static(
                    model,
                    static_model,
                    name,
                    attribute,
                    with_dependence="all" in args.show_model,
                )

    if "param" in args.show_model or "all" in args.show_model:
        print("--- param model ---")
        for name in sorted(model.names):
            for attribute in sorted(model.attributes(name)):
                info = param_info(name, attribute)
                dfatool.cli.print_model(
                    f"{name:10s} {attribute:15s}", info, model.parameters
                )

    if "table" in args.show_quality or "all" in args.show_quality:
        if xv_method is not None:
            print(
                f"Model error ({args.error_metric}) after cross validation ({xv_method}, {xv_count}):"
            )
        else:
            print(f"Model error ({args.error_metric}) on training data:")
        dfatool.cli.model_quality_table(
            lut=lut_quality,
            model=analytic_quality,
            static=static_quality,
            model_info=param_info,
            xv_method=xv_method,
            error_metric=args.error_metric,
        )

    if args.show_model_size:
        dfatool.cli.print_model_size(model)

    if args.export_model:
        print(f"Exporting model to {args.export_model}")
        json_model = model.to_json()
        with open(args.export_model, "w") as f:
            json.dump(
                json_model, f, indent=2, sort_keys=True, cls=dfatool.utils.NpEncoder
            )

    if args.export_dot:
        dfatool.cli.export_dot(model, args.export_dot)
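    # --export-dref bundles static, LUT, and parameterized model error metrics
    # via model.to_dref() and writes them with dfatool.cli.export_dataref().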
    if args.export_dref:
        dref = model.to_dref(static_quality, lut_quality, analytic_quality)
        dfatool.cli.export_dataref(
            args.export_dref, dref, precision=args.dref_precision
        )

    if args.plot_param:
        for kv in args.plot_param.split(";"):
            try:
                state_or_trans, attribute, param_name = kv.split(":")
            except ValueError:
                print(
                    "Usage: --plot-param='state_or_trans:attribute:param_name'",
                    file=sys.stderr,
                )
                sys.exit(1)
            dfatool.plotter.plot_param(
                model,
                state_or_trans,
                attribute,
                model.param_index(param_name),
                title=state_or_trans,
                ylabel=attribute,
                xlabel=param_name,
                output=f"{state_or_trans} {attribute} {param_name}.pdf",
                show=not args.non_interactive,
            )


if __name__ == "__main__":
    main()
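
# Example invocations (a sketch: log file names are hypothetical, the script
# name follows the docstring, and flags added by
# dfatool.cli.add_standard_arguments are not shown):
#
#   ./analyze-log --show-quality=table benchmark.txt
#   ./analyze-log --force-tree --export-model=model.json benchmark.txt.xz
#   ./analyze-log --plot-param='state_or_trans:attribute:param_name' benchmark.txt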