author    | Daniel Friesel <daniel.friesel@uos.de> | 2019-07-24 15:20:16 +0200
committer | Daniel Friesel <daniel.friesel@uos.de> | 2019-07-24 15:20:16 +0200
commit    | 27cb361e14e68a9fba5cfd6095f2b88862657fbe (patch)
tree      | eba609af4677bb3d9b796ee12f6374527d6b5d5c
parent    | 1d010bd59bc5cf100310b285c743ae27f921b681 (diff)
add timing analysis script
-rwxr-xr-x | bin/analyze-timing.py | 321
-rwxr-xr-x | lib/dfatool.py        |   3
2 files changed, 324 insertions, 0 deletions
diff --git a/bin/analyze-timing.py b/bin/analyze-timing.py
new file mode 100755
index 0000000..b0aaaca
--- /dev/null
+++ b/bin/analyze-timing.py
@@ -0,0 +1,321 @@
+#!/usr/bin/env python3
+"""
+analyze-timing -- generate analytic energy model from annotated OnboardTimerHarness traces.
+
+Usage:
+PYTHONPATH=lib bin/analyze-timing.py [options] <tracefiles ...>
+
+analyze-timing generates an analytic energy model (``AnalyticModel``) from one or more annotated
+traces generated by generate-dfa-benchmark using OnboardTimerHarness. By default, it does nothing else --
+use one of the --plot-* or --show-* options to examine the generated model.
+
+Options:
+--plot-unparam=<name>:<attribute>:<Y axis label>[;<name>:<attribute>:<label>;...]
+  Plot all measurements for <name> <attribute> without regard for parameter values.
+  X axis is measurement number/id.
+
+--plot-param=<name> <attribute> <parameter> [gplearn function][;<name> <attribute> <parameter> [function];...]
+  Plot measurements for <name> <attribute> by <parameter>.
+  X axis is parameter value.
+  Plots the model function as one solid line for each combination of non-<parameter>
+  parameters. Also plots the corresponding measurements.
+  If gplearn function is set, it is plotted using dashed lines.
+
+--show-models=<static|paramdetection|param|all|tex>
+  static: show static model values as well as parameter detection heuristic
+  paramdetection: show stddev of static/lut/fitted model
+  param: show parameterized model functions and regression variable values
+  all: all of the above
+  tex: print tex/pgfplots-compatible model data on stdout
+
+--show-quality=<table|summary|all|tex>
+  table: show static/fitted/lut SMAPE and MAE for each name and attribute
+  summary: show static/fitted/lut SMAPE and MAE for each attribute, averaged over all states/transitions
+  all: all of the above
+  tex: print tex/pgfplots-compatible model quality data on stdout
+
+--ignored-trace-indexes=<i1,i2,...>
+  Specify traces which should be ignored due to bogus data. 1 is the first
+  trace, 2 the second, and so on.
+
+--cross-validate=<method>:<count>
+  Perform cross validation when computing model quality.
+  Only works with --show-quality=table at the moment.
+  If <method> is "montecarlo": Randomly divide data into 2/3 training and 1/3
+  validation, <count> times. Reported model quality is the average of all
+  validation runs. Data is partitioned without regard for parameter values,
+  so a specific parameter combination may be present in both training and
+  validation sets or just one of them.
+
+--function-override=<name> <attribute> <function>[;<name> <attribute> <function>;...]
+  Manually specify the function to fit for <name> <attribute>. A function
+  specified this way bypasses parameter detection: it is always assigned,
+  even if the model seems to be independent of the parameters it references.
+
+--with-safe-functions
+  If set, include "safe" functions (safe_log, safe_inv, safe_sqrt) which are
+  also defined for cases such as safe_inv(0) or safe_sqrt(-1). This allows
+  a greater range of functions to be tried during fitting.
+
+--hwmodel=<hwmodel.json>
+  Load DFA hardware model from JSON.
+
+--export-energymodel=<model.json>
+  Export energy model. Requires --hwmodel.
+""" + +import getopt +import json +import plotter +import re +import sys +from dfatool import AnalyticModel, TimingData, pta_trace_to_aggregate +from dfatool import soft_cast_int, is_numeric, gplearn_to_function +from dfatool import CrossValidator + +opts = {} + +def print_model_quality(results): + for state_or_tran in results.keys(): + print() + for key, result in results[state_or_tran].items(): + if 'smape' in result: + print('{:20s} {:15s} {:.2f}% / {:.0f}'.format( + state_or_tran, key, result['smape'], result['mae'])) + else: + print('{:20s} {:15s} {:.0f}'.format( + state_or_tran, key, result['mae'])) + +def format_quality_measures(result): + if 'smape' in result: + return '{:6.2f}% / {:9.0f}'.format(result['smape'], result['mae']) + else: + return '{:6} {:9.0f}'.format('', result['mae']) + +def model_quality_table(result_lists, info_list): + for state_or_tran in result_lists[0]['by_name'].keys(): + for key in result_lists[0]['by_name'][state_or_tran].keys(): + buf = '{:20s} {:15s}'.format(state_or_tran, key) + for i, results in enumerate(result_lists): + info = info_list[i] + buf += ' ||| ' + if info == None or info(state_or_tran, key): + result = results['by_name'][state_or_tran][key] + buf += format_quality_measures(result) + else: + buf += '{:6}----{:9}'.format('', '') + print(buf) + +def model_summary_table(result_list): + buf = 'transition duration' + for results in result_list: + if len(buf): + buf += ' ||| ' + buf += format_quality_measures(results['duration_by_trace']) + print(buf) + buf = 'total energy ' + for results in result_list: + if len(buf): + buf += ' ||| ' + buf += format_quality_measures(results['energy_by_trace']) + print(buf) + buf = 'rel total energy ' + for results in result_list: + if len(buf): + buf += ' ||| ' + buf += format_quality_measures(results['rel_energy_by_trace']) + print(buf) + buf = 'state-only energy ' + for results in result_list: + if len(buf): + buf += ' ||| ' + buf += format_quality_measures(results['state_energy_by_trace']) + print(buf) + buf = 'transition timeout ' + for results in result_list: + if len(buf): + buf += ' ||| ' + buf += format_quality_measures(results['timeout_by_trace']) + print(buf) + + +def print_text_model_data(model, pm, pq, lm, lq, am, ai, aq): + print('') + print(r'key attribute $1 - \frac{\sigma_X}{...}$') + for state_or_tran in model.by_name.keys(): + for attribute in model.attributes(state_or_tran): + print('{} {} {:.8f}'.format(state_or_tran, attribute, model.stats.generic_param_dependence_ratio(state_or_tran, attribute))) + + print('') + print(r'key attribute parameter $1 - \frac{...}{...}$') + for state_or_tran in model.by_name.keys(): + for attribute in model.attributes(state_or_tran): + for param in model.parameters(): + print('{} {} {} {:.8f}'.format(state_or_tran, attribute, param, model.stats.param_dependence_ratio(state_or_tran, attribute, param))) + if state_or_tran in model._num_args: + for arg_index in range(model._num_args[state_or_tran]): + print('{} {} {:d} {:.8f}'.format(state_or_tran, attribute, arg_index, model.stats.arg_dependence_ratio(state_or_tran, attribute, arg_index))) + +if __name__ == '__main__': + + ignored_trace_indexes = [] + discard_outliers = None + safe_functions_enabled = False + function_override = {} + show_models = [] + show_quality = [] + hwmodel = None + energymodel_export_file = None + xv_method = None + xv_count = 10 + + try: + optspec = ( + 'plot-unparam= plot-param= show-models= show-quality= ' + 'ignored-trace-indexes= discard-outliers= function-override= ' + 
+            'cross-validate= '
+            'with-safe-functions hwmodel= export-energymodel='
+        )
+        raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(' '))
+
+        for option, parameter in raw_opts:
+            optname = re.sub(r'^--', '', option)
+            opts[optname] = parameter
+
+        if 'ignored-trace-indexes' in opts:
+            ignored_trace_indexes = list(map(int, opts['ignored-trace-indexes'].split(',')))
+            if 0 in ignored_trace_indexes:
+                print('[E] arguments to --ignored-trace-indexes start from 1')
+
+        if 'discard-outliers' in opts:
+            discard_outliers = float(opts['discard-outliers'])
+
+        if 'function-override' in opts:
+            for function_desc in opts['function-override'].split(';'):
+                state_or_tran, attribute, *function_str = function_desc.split(' ')
+                function_override[(state_or_tran, attribute)] = ' '.join(function_str)
+
+        if 'show-models' in opts:
+            show_models = opts['show-models'].split(',')
+
+        if 'show-quality' in opts:
+            show_quality = opts['show-quality'].split(',')
+
+        if 'cross-validate' in opts:
+            xv_method, xv_count = opts['cross-validate'].split(':')
+            xv_count = int(xv_count)
+
+        if 'with-safe-functions' in opts:
+            safe_functions_enabled = True
+
+        if 'hwmodel' in opts:
+            with open(opts['hwmodel'], 'r') as f:
+                hwmodel = json.load(f)
+
+    except getopt.GetoptError as err:
+        print(err)
+        sys.exit(2)
+
+    raw_data = TimingData(args)
+
+    preprocessed_data = raw_data.get_preprocessed_data()
+    by_name, parameters, arg_count = pta_trace_to_aggregate(preprocessed_data, ignored_trace_indexes)
+    model = AnalyticModel(by_name, parameters)
+
+    if xv_method:
+        xv = CrossValidator(AnalyticModel, by_name, parameters, arg_count)
+
+    if 'plot-unparam' in opts:
+        for kv in opts['plot-unparam'].split(';'):
+            state_or_trans, attribute, ylabel = kv.split(':')
+            fname = 'param_y_{}_{}.pdf'.format(state_or_trans, attribute)
+            plotter.plot_y(model.by_name[state_or_trans][attribute], xlabel = 'measurement #', ylabel = ylabel, output = fname)
+
+    if len(show_models):
+        print('--- simple static model ---')
+    static_model = model.get_static()
+    if 'static' in show_models or 'all' in show_models:
+        for trans in model.names:
+            print('{:10s}: {:.0f} µs'.format(trans, static_model(trans, 'duration')))
+
+    if xv_method == 'montecarlo':
+        static_quality = xv.montecarlo(lambda m: m.get_static(), xv_count)
+    else:
+        static_quality = model.assess(static_model)
+
+    if len(show_models):
+        print('--- LUT ---')
+    lut_model = model.get_param_lut()
+
+    if xv_method == 'montecarlo':
+        lut_quality = xv.montecarlo(lambda m: m.get_param_lut(fallback=True), xv_count)
+    else:
+        lut_quality = model.assess(lut_model)
+
+    if len(show_models):
+        print('--- param model ---')
+
+    param_model, param_info = model.get_fitted(safe_functions_enabled = safe_functions_enabled)
+
+    if 'paramdetection' in show_models or 'all' in show_models:
+        for transition in model.names:
+            for attribute in ['duration']:
+                info = param_info(transition, attribute)
+                print('{:10s} {:10s} non-param stddev {:f}'.format(
+                    transition, attribute, model.stats.stats[transition][attribute]['std_static']
+                ))
+                print('{:10s} {:10s} param-lut stddev {:f}'.format(
+                    transition, attribute, model.stats.stats[transition][attribute]['std_param_lut']
+                ))
+                for param in sorted(model.stats.stats[transition][attribute]['std_by_param'].keys()):
+                    print('{:10s} {:10s} {:10s} stddev {:f}'.format(
+                        transition, attribute, param, model.stats.stats[transition][attribute]['std_by_param'][param]
+                    ))
+                if info != None:
+                    for param_name in sorted(info['fit_result'].keys(), key=str):
+                        param_fit = info['fit_result'][param_name]['results']
+                        for function_type in sorted(param_fit.keys()):
+                            function_rmsd = param_fit[function_type]['rmsd']
+                            print('{:10s} {:10s} {:10s} mean {:10s} RMSD {:.0f}'.format(
+                                transition, attribute, str(param_name), function_type, function_rmsd
+                            ))
+
+    if 'param' in show_models or 'all' in show_models:
+        for trans in model.names:
+            for attribute in ['duration']:
+                if param_info(trans, attribute):
+                    print('{:10s}: {:10s}: {}'.format(trans, attribute, param_info(trans, attribute)['function']._model_str))
+                    print('{:10s} {:10s} {}'.format('', '', param_info(trans, attribute)['function']._regression_args))
+
+    if xv_method == 'montecarlo':
+        analytic_quality = xv.montecarlo(lambda m: m.get_fitted()[0], xv_count)
+    else:
+        analytic_quality = model.assess(param_model)
+
+    if 'tex' in show_models or 'tex' in show_quality:
+        print_text_model_data(model, static_model, static_quality, lut_model, lut_quality, param_model, param_info, analytic_quality)
+
+    if 'table' in show_quality or 'all' in show_quality:
+        model_quality_table([static_quality, analytic_quality, lut_quality], [None, param_info, None])
+    if 'summary' in show_quality or 'all' in show_quality:
+        model_summary_table([static_quality, analytic_quality, lut_quality])
+
+    if 'plot-param' in opts:
+        for kv in opts['plot-param'].split(';'):
+            state_or_trans, attribute, param_name, *function = kv.split(' ')
+            if len(function):
+                function = gplearn_to_function(' '.join(function))
+            else:
+                function = None
+            plotter.plot_param(model, state_or_trans, attribute, model.param_index(param_name), extra_function=function)
+
+    if 'export-energymodel' in opts:
+        if not hwmodel:
+            print('[E] --export-energymodel requires --hwmodel to be set')
+            sys.exit(1)
+        json_model = model.to_json()
+        with open(opts['export-energymodel'], 'w') as f:
+            json.dump(json_model, f, indent = 2, sort_keys = True)
+
+
+    sys.exit(0)
diff --git a/lib/dfatool.py b/lib/dfatool.py
index 8f08211..ffe5a3b 100755
--- a/lib/dfatool.py
+++ b/lib/dfatool.py
@@ -1250,6 +1250,9 @@ def pta_trace_to_aggregate(traces, ignore_trace_indexes = []):
                     arg_count[elem['name']] = len(elem['args'])
                 if elem['name'] != 'UNINITIALIZED':
                     _add_trace_data_to_aggregate(by_name, elem['name'], elem)
+    for elem in by_name.values():
+        for key in elem['attributes']:
+            elem[key] = np.array(elem[key])
     return by_name, parameter_names, arg_count
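
For orientation (not part of the commit itself), the new script is invoked as documented in its docstring; only options described there are used here, and <tracefiles ...> stands for OnboardTimerHarness traces produced by generate-dfa-benchmark:

PYTHONPATH=lib bin/analyze-timing.py --show-models=static --show-quality=table <tracefiles ...>
PYTHONPATH=lib bin/analyze-timing.py --show-quality=table --cross-validate=montecarlo:10 <tracefiles ...>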
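
The --cross-validate=montecarlo:<count> behaviour described in the docstring (randomly split into 2/3 training and 1/3 validation, repeat <count> times, average the reported error) can be summarised by the following minimal Python sketch. It only illustrates the partitioning idea, not the CrossValidator class from lib/dfatool.py; montecarlo_split, montecarlo_smape, and the example data are made up for this sketch.

import numpy as np

def montecarlo_split(n, rng):
    # Randomly partition indexes 0..n-1 into 2/3 training and 1/3 validation,
    # without regard for parameter values (as noted in the docstring).
    idx = rng.permutation(n)
    pivot = 2 * n // 3
    return idx[:pivot], idx[pivot:]

def montecarlo_smape(y, fit, count=10, seed=0):
    # Fit on the training partition, compute SMAPE (%) on the validation
    # partition, and average over <count> random splits.
    rng = np.random.default_rng(seed)
    smapes = []
    for _ in range(count):
        train, valid = montecarlo_split(len(y), rng)
        predicted = fit(y[train])
        err = np.abs(predicted - y[valid])
        smapes.append(np.mean(2 * err / (np.abs(predicted) + np.abs(y[valid]))) * 100)
    return np.mean(smapes)

# A "static model" in this sketch is simply the median of the training data:
durations = np.array([120., 122., 119., 350., 348., 351.])
print(montecarlo_smape(durations, np.median))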
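
The lib/dfatool.py hunk converts the per-attribute lists collected by pta_trace_to_aggregate into numpy arrays, so that vectorised statistics (mean, stddev, SMAPE, ...) can be applied to them directly. A small self-contained sketch of the effect; the 'send'/'duration' entry in by_name is illustrative, only the inner loop is taken from the commit:

import numpy as np

# Shape of the aggregate before the change: plain Python lists per attribute.
by_name = {
    'send': {'attributes': ['duration'], 'duration': [1240, 1262, 1255]},
}

# The loop added by the commit: each attribute list becomes a numpy array.
for elem in by_name.values():
    for key in elem['attributes']:
        elem[key] = np.array(elem[key])

print(by_name['send']['duration'].mean(), by_name['send']['duration'].std())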