Diffstat (limited to 'bin')
-rwxr-xr-x  bin/analyze-archive.py             635
-rwxr-xr-x  bin/analyze-timing.py              382
-rwxr-xr-x  bin/eval-accounting-overhead.py     18
-rwxr-xr-x  bin/eval-online-model-accuracy.py  171
-rwxr-xr-x  bin/eval-outlier-removal.py        187
-rwxr-xr-x  bin/eval-rel-energy.py             104
-rwxr-xr-x  bin/generate-dfa-benchmark.py      541
-rwxr-xr-x  bin/generate-dummy-class.py         14
-rwxr-xr-x  bin/gptest.py                       38
-rwxr-xr-x  bin/mim-vs-keysight.py             205
-rwxr-xr-x  bin/test_corrcoef.py               318
-rwxr-xr-x  bin/workload.py                     58
12 files changed, 1753 insertions, 918 deletions
diff --git a/bin/analyze-archive.py b/bin/analyze-archive.py
index ecfda51..d248d1b 100755
--- a/bin/analyze-archive.py
+++ b/bin/analyze-archive.py
@@ -114,122 +114,168 @@ def print_model_quality(results):
for state_or_tran in results.keys():
print()
for key, result in results[state_or_tran].items():
- if 'smape' in result:
- print('{:20s} {:15s} {:.2f}% / {:.0f}'.format(
- state_or_tran, key, result['smape'], result['mae']))
+ if "smape" in result:
+ print(
+ "{:20s} {:15s} {:.2f}% / {:.0f}".format(
+ state_or_tran, key, result["smape"], result["mae"]
+ )
+ )
else:
- print('{:20s} {:15s} {:.0f}'.format(
- state_or_tran, key, result['mae']))
+ print("{:20s} {:15s} {:.0f}".format(state_or_tran, key, result["mae"]))
def format_quality_measures(result):
- if 'smape' in result:
- return '{:6.2f}% / {:9.0f}'.format(result['smape'], result['mae'])
+ if "smape" in result:
+ return "{:6.2f}% / {:9.0f}".format(result["smape"], result["mae"])
else:
- return '{:6} {:9.0f}'.format('', result['mae'])
+ return "{:6} {:9.0f}".format("", result["mae"])
def model_quality_table(result_lists, info_list):
- for state_or_tran in result_lists[0]['by_name'].keys():
- for key in result_lists[0]['by_name'][state_or_tran].keys():
- buf = '{:20s} {:15s}'.format(state_or_tran, key)
+ for state_or_tran in result_lists[0]["by_name"].keys():
+ for key in result_lists[0]["by_name"][state_or_tran].keys():
+ buf = "{:20s} {:15s}".format(state_or_tran, key)
for i, results in enumerate(result_lists):
info = info_list[i]
- buf += ' ||| '
+ buf += " ||| "
if info is None or info(state_or_tran, key):
- result = results['by_name'][state_or_tran][key]
+ result = results["by_name"][state_or_tran][key]
buf += format_quality_measures(result)
else:
- buf += '{:6}----{:9}'.format('', '')
+ buf += "{:6}----{:9}".format("", "")
print(buf)
def model_summary_table(result_list):
- buf = 'transition duration'
+ buf = "transition duration"
for results in result_list:
if len(buf):
- buf += ' ||| '
- buf += format_quality_measures(results['duration_by_trace'])
+ buf += " ||| "
+ buf += format_quality_measures(results["duration_by_trace"])
print(buf)
- buf = 'total energy '
+ buf = "total energy "
for results in result_list:
if len(buf):
- buf += ' ||| '
- buf += format_quality_measures(results['energy_by_trace'])
+ buf += " ||| "
+ buf += format_quality_measures(results["energy_by_trace"])
print(buf)
- buf = 'rel total energy '
+ buf = "rel total energy "
for results in result_list:
if len(buf):
- buf += ' ||| '
- buf += format_quality_measures(results['rel_energy_by_trace'])
+ buf += " ||| "
+ buf += format_quality_measures(results["rel_energy_by_trace"])
print(buf)
- buf = 'state-only energy '
+ buf = "state-only energy "
for results in result_list:
if len(buf):
- buf += ' ||| '
- buf += format_quality_measures(results['state_energy_by_trace'])
+ buf += " ||| "
+ buf += format_quality_measures(results["state_energy_by_trace"])
print(buf)
- buf = 'transition timeout '
+ buf = "transition timeout "
for results in result_list:
if len(buf):
- buf += ' ||| '
- buf += format_quality_measures(results['timeout_by_trace'])
+ buf += " ||| "
+ buf += format_quality_measures(results["timeout_by_trace"])
print(buf)
def print_text_model_data(model, pm, pq, lm, lq, am, ai, aq):
- print('')
- print(r'key attribute $1 - \frac{\sigma_X}{...}$')
+ print("")
+ print(r"key attribute $1 - \frac{\sigma_X}{...}$")
for state_or_tran in model.by_name.keys():
for attribute in model.attributes(state_or_tran):
- print('{} {} {:.8f}'.format(state_or_tran, attribute, model.stats.generic_param_dependence_ratio(state_or_tran, attribute)))
-
- print('')
- print(r'key attribute parameter $1 - \frac{...}{...}$')
+ print(
+ "{} {} {:.8f}".format(
+ state_or_tran,
+ attribute,
+ model.stats.generic_param_dependence_ratio(
+ state_or_tran, attribute
+ ),
+ )
+ )
+
+ print("")
+ print(r"key attribute parameter $1 - \frac{...}{...}$")
for state_or_tran in model.by_name.keys():
for attribute in model.attributes(state_or_tran):
for param in model.parameters():
- print('{} {} {} {:.8f}'.format(state_or_tran, attribute, param, model.stats.param_dependence_ratio(state_or_tran, attribute, param)))
+ print(
+ "{} {} {} {:.8f}".format(
+ state_or_tran,
+ attribute,
+ param,
+ model.stats.param_dependence_ratio(
+ state_or_tran, attribute, param
+ ),
+ )
+ )
if state_or_tran in model._num_args:
for arg_index in range(model._num_args[state_or_tran]):
- print('{} {} {:d} {:.8f}'.format(state_or_tran, attribute, arg_index, model.stats.arg_dependence_ratio(state_or_tran, attribute, arg_index)))
+ print(
+ "{} {} {:d} {:.8f}".format(
+ state_or_tran,
+ attribute,
+ arg_index,
+ model.stats.arg_dependence_ratio(
+ state_or_tran, attribute, arg_index
+ ),
+ )
+ )
def print_html_model_data(model, pm, pq, lm, lq, am, ai, aq):
state_attributes = model.attributes(model.states()[0])
- print('<table><tr><th>state</th><th>' + '</th><th>'.join(state_attributes) + '</th></tr>')
+ print(
+ "<table><tr><th>state</th><th>"
+ + "</th><th>".join(state_attributes)
+ + "</th></tr>"
+ )
for state in model.states():
- print('<tr>', end='')
- print('<td>{}</td>'.format(state), end='')
+ print("<tr>", end="")
+ print("<td>{}</td>".format(state), end="")
for attribute in state_attributes:
- unit = ''
- if attribute == 'power':
- unit = 'µW'
- print('<td>{:.0f} {} ({:.1f}%)</td>'.format(pm(state, attribute), unit, pq['by_name'][state][attribute]['smape']), end='')
- print('</tr>')
- print('</table>')
+ unit = ""
+ if attribute == "power":
+ unit = "µW"
+ print(
+ "<td>{:.0f} {} ({:.1f}%)</td>".format(
+ pm(state, attribute), unit, pq["by_name"][state][attribute]["smape"]
+ ),
+ end="",
+ )
+ print("</tr>")
+ print("</table>")
trans_attributes = model.attributes(model.transitions()[0])
- if 'rel_energy_prev' in trans_attributes:
- trans_attributes.remove('rel_energy_next')
-
- print('<table><tr><th>transition</th><th>' + '</th><th>'.join(trans_attributes) + '</th></tr>')
+ if "rel_energy_prev" in trans_attributes:
+ trans_attributes.remove("rel_energy_next")
+
+ print(
+ "<table><tr><th>transition</th><th>"
+ + "</th><th>".join(trans_attributes)
+ + "</th></tr>"
+ )
for trans in model.transitions():
- print('<tr>', end='')
- print('<td>{}</td>'.format(trans), end='')
+ print("<tr>", end="")
+ print("<td>{}</td>".format(trans), end="")
for attribute in trans_attributes:
- unit = ''
- if attribute == 'duration':
- unit = 'µs'
- elif attribute in ['energy', 'rel_energy_prev']:
- unit = 'pJ'
- print('<td>{:.0f} {} ({:.1f}%)</td>'.format(pm(trans, attribute), unit, pq['by_name'][trans][attribute]['smape']), end='')
- print('</tr>')
- print('</table>')
-
-
-if __name__ == '__main__':
+ unit = ""
+ if attribute == "duration":
+ unit = "µs"
+ elif attribute in ["energy", "rel_energy_prev"]:
+ unit = "pJ"
+ print(
+ "<td>{:.0f} {} ({:.1f}%)</td>".format(
+ pm(trans, attribute), unit, pq["by_name"][trans][attribute]["smape"]
+ ),
+ end="",
+ )
+ print("</tr>")
+ print("</table>")
+
+
+if __name__ == "__main__":
ignored_trace_indexes = []
discard_outliers = None
@@ -245,264 +291,413 @@ if __name__ == '__main__':
try:
optspec = (
- 'plot-unparam= plot-param= plot-traces= param-info show-models= show-quality= '
- 'ignored-trace-indexes= discard-outliers= function-override= '
- 'export-traces= '
- 'filter-param= '
- 'cross-validate= '
- 'with-safe-functions hwmodel= export-energymodel='
+ "plot-unparam= plot-param= plot-traces= param-info show-models= show-quality= "
+ "ignored-trace-indexes= discard-outliers= function-override= "
+ "export-traces= "
+ "filter-param= "
+ "cross-validate= "
+ "with-safe-functions hwmodel= export-energymodel="
)
- raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(' '))
+ raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(" "))
for option, parameter in raw_opts:
- optname = re.sub(r'^--', '', option)
+ optname = re.sub(r"^--", "", option)
opts[optname] = parameter
- if 'ignored-trace-indexes' in opts:
- ignored_trace_indexes = list(map(int, opts['ignored-trace-indexes'].split(',')))
+ if "ignored-trace-indexes" in opts:
+ ignored_trace_indexes = list(
+ map(int, opts["ignored-trace-indexes"].split(","))
+ )
if 0 in ignored_trace_indexes:
- print('[E] arguments to --ignored-trace-indexes start from 1')
+ print("[E] arguments to --ignored-trace-indexes start from 1")
- if 'discard-outliers' in opts:
- discard_outliers = float(opts['discard-outliers'])
+ if "discard-outliers" in opts:
+ discard_outliers = float(opts["discard-outliers"])
- if 'function-override' in opts:
- for function_desc in opts['function-override'].split(';'):
- state_or_tran, attribute, *function_str = function_desc.split(' ')
- function_override[(state_or_tran, attribute)] = ' '.join(function_str)
+ if "function-override" in opts:
+ for function_desc in opts["function-override"].split(";"):
+ state_or_tran, attribute, *function_str = function_desc.split(" ")
+ function_override[(state_or_tran, attribute)] = " ".join(function_str)
- if 'show-models' in opts:
- show_models = opts['show-models'].split(',')
+ if "show-models" in opts:
+ show_models = opts["show-models"].split(",")
- if 'show-quality' in opts:
- show_quality = opts['show-quality'].split(',')
+ if "show-quality" in opts:
+ show_quality = opts["show-quality"].split(",")
- if 'cross-validate' in opts:
- xv_method, xv_count = opts['cross-validate'].split(':')
+ if "cross-validate" in opts:
+ xv_method, xv_count = opts["cross-validate"].split(":")
xv_count = int(xv_count)
- if 'filter-param' in opts:
- opts['filter-param'] = list(map(lambda x: x.split('='), opts['filter-param'].split(',')))
+ if "filter-param" in opts:
+ opts["filter-param"] = list(
+ map(lambda x: x.split("="), opts["filter-param"].split(","))
+ )
else:
- opts['filter-param'] = list()
+ opts["filter-param"] = list()
- if 'with-safe-functions' in opts:
+ if "with-safe-functions" in opts:
safe_functions_enabled = True
- if 'hwmodel' in opts:
- pta = PTA.from_file(opts['hwmodel'])
+ if "hwmodel" in opts:
+ pta = PTA.from_file(opts["hwmodel"])
except getopt.GetoptError as err:
print(err)
sys.exit(2)
- raw_data = RawData(args, with_traces=('export-traces' in opts or 'plot-traces' in opts))
+ raw_data = RawData(
+ args, with_traces=("export-traces" in opts or "plot-traces" in opts)
+ )
preprocessed_data = raw_data.get_preprocessed_data()
- if 'export-traces' in opts:
+ if "export-traces" in opts:
uw_per_sot = dict()
for trace in preprocessed_data:
- for state_or_transition in trace['trace']:
- name = state_or_transition['name']
+ for state_or_transition in trace["trace"]:
+ name = state_or_transition["name"]
if name not in uw_per_sot:
uw_per_sot[name] = list()
- for elem in state_or_transition['offline']:
- elem['uW'] = list(elem['uW'])
+ for elem in state_or_transition["offline"]:
+ elem["uW"] = list(elem["uW"])
uw_per_sot[name].append(state_or_transition)
for name, data in uw_per_sot.items():
target = f"{opts['export-traces']}/{name}.json"
- print(f'exporting {target} ...')
- with open(target, 'w') as f:
+ print(f"exporting {target} ...")
+ with open(target, "w") as f:
json.dump(data, f)
- if 'plot-traces' in opts:
+ if "plot-traces" in opts:
traces = list()
for trace in preprocessed_data:
- for state_or_transition in trace['trace']:
- if state_or_transition['name'] == opts['plot-traces']:
- traces.extend(map(lambda x: x['uW'], state_or_transition['offline']))
- plotter.plot_y(traces, xlabel='t [1e-5 s]', ylabel='P [uW]', title=opts['plot-traces'], family=True)
+ for state_or_transition in trace["trace"]:
+ if state_or_transition["name"] == opts["plot-traces"]:
+ traces.extend(
+ map(lambda x: x["uW"], state_or_transition["offline"])
+ )
+ plotter.plot_y(
+ traces,
+ xlabel="t [1e-5 s]",
+ ylabel="P [uW]",
+ title=opts["plot-traces"],
+ family=True,
+ )
- if raw_data.preprocessing_stats['num_valid'] == 0:
- print('No valid data available. Abort.')
+ if raw_data.preprocessing_stats["num_valid"] == 0:
+ print("No valid data available. Abort.")
sys.exit(2)
if pta is None and raw_data.pta is not None:
pta = PTA.from_json(raw_data.pta)
- by_name, parameters, arg_count = pta_trace_to_aggregate(preprocessed_data, ignored_trace_indexes)
+ by_name, parameters, arg_count = pta_trace_to_aggregate(
+ preprocessed_data, ignored_trace_indexes
+ )
- filter_aggregate_by_param(by_name, parameters, opts['filter-param'])
+ filter_aggregate_by_param(by_name, parameters, opts["filter-param"])
- model = PTAModel(by_name, parameters, arg_count,
- traces=preprocessed_data,
- discard_outliers=discard_outliers,
- function_override=function_override,
- pta=pta)
+ model = PTAModel(
+ by_name,
+ parameters,
+ arg_count,
+ traces=preprocessed_data,
+ discard_outliers=discard_outliers,
+ function_override=function_override,
+ pta=pta,
+ )
if xv_method:
xv = CrossValidator(PTAModel, by_name, parameters, arg_count)
- if 'param-info' in opts:
+ if "param-info" in opts:
for state in model.states():
- print('{}:'.format(state))
+ print("{}:".format(state))
for param in model.parameters():
- print(' {} = {}'.format(param, model.stats.distinct_values[state][param]))
+ print(
+ " {} = {}".format(
+ param, model.stats.distinct_values[state][param]
+ )
+ )
for transition in model.transitions():
- print('{}:'.format(transition))
+ print("{}:".format(transition))
for param in model.parameters():
- print(' {} = {}'.format(param, model.stats.distinct_values[transition][param]))
-
- if 'plot-unparam' in opts:
- for kv in opts['plot-unparam'].split(';'):
- state_or_trans, attribute, ylabel = kv.split(':')
- fname = 'param_y_{}_{}.pdf'.format(state_or_trans, attribute)
- plotter.plot_y(model.by_name[state_or_trans][attribute], xlabel='measurement #', ylabel=ylabel, output=fname)
+ print(
+ " {} = {}".format(
+ param, model.stats.distinct_values[transition][param]
+ )
+ )
+
+ if "plot-unparam" in opts:
+ for kv in opts["plot-unparam"].split(";"):
+ state_or_trans, attribute, ylabel = kv.split(":")
+ fname = "param_y_{}_{}.pdf".format(state_or_trans, attribute)
+ plotter.plot_y(
+ model.by_name[state_or_trans][attribute],
+ xlabel="measurement #",
+ ylabel=ylabel,
+ output=fname,
+ )
if len(show_models):
- print('--- simple static model ---')
+ print("--- simple static model ---")
static_model = model.get_static()
- if 'static' in show_models or 'all' in show_models:
+ if "static" in show_models or "all" in show_models:
for state in model.states():
- print('{:10s}: {:.0f} µW ({:.2f})'.format(
- state,
- static_model(state, 'power'),
- model.stats.generic_param_dependence_ratio(state, 'power')))
+ print(
+ "{:10s}: {:.0f} µW ({:.2f})".format(
+ state,
+ static_model(state, "power"),
+ model.stats.generic_param_dependence_ratio(state, "power"),
+ )
+ )
for param in model.parameters():
- print('{:10s} dependence on {:15s}: {:.2f}'.format(
- '',
- param,
- model.stats.param_dependence_ratio(state, 'power', param)))
- if model.stats.has_codependent_parameters(state, 'power', param):
- print('{:24s} co-dependencies: {:s}'.format('', ', '.join(model.stats.codependent_parameters(state, 'power', param))))
- for param_dict in model.stats.codependent_parameter_value_dicts(state, 'power', param):
- print('{:24s} parameter-aware for {}'.format('', param_dict))
+ print(
+ "{:10s} dependence on {:15s}: {:.2f}".format(
+ "",
+ param,
+ model.stats.param_dependence_ratio(state, "power", param),
+ )
+ )
+ if model.stats.has_codependent_parameters(state, "power", param):
+ print(
+ "{:24s} co-dependencies: {:s}".format(
+ "",
+ ", ".join(
+ model.stats.codependent_parameters(
+ state, "power", param
+ )
+ ),
+ )
+ )
+ for param_dict in model.stats.codependent_parameter_value_dicts(
+ state, "power", param
+ ):
+ print("{:24s} parameter-aware for {}".format("", param_dict))
for trans in model.transitions():
# Mean power is not a typical transition attribute, but may be present for debugging or analysis purposes
try:
- print('{:10s}: {:.0f} µW ({:.2f})'.format(
- trans,
- static_model(trans, 'power'),
- model.stats.generic_param_dependence_ratio(trans, 'power')))
+ print(
+ "{:10s}: {:.0f} µW ({:.2f})".format(
+ trans,
+ static_model(trans, "power"),
+ model.stats.generic_param_dependence_ratio(trans, "power"),
+ )
+ )
except KeyError:
pass
try:
- print('{:10s}: {:.0f} / {:.0f} / {:.0f} pJ ({:.2f} / {:.2f} / {:.2f})'.format(
- trans, static_model(trans, 'energy'),
- static_model(trans, 'rel_energy_prev'),
- static_model(trans, 'rel_energy_next'),
- model.stats.generic_param_dependence_ratio(trans, 'energy'),
- model.stats.generic_param_dependence_ratio(trans, 'rel_energy_prev'),
- model.stats.generic_param_dependence_ratio(trans, 'rel_energy_next')))
+ print(
+ "{:10s}: {:.0f} / {:.0f} / {:.0f} pJ ({:.2f} / {:.2f} / {:.2f})".format(
+ trans,
+ static_model(trans, "energy"),
+ static_model(trans, "rel_energy_prev"),
+ static_model(trans, "rel_energy_next"),
+ model.stats.generic_param_dependence_ratio(trans, "energy"),
+ model.stats.generic_param_dependence_ratio(
+ trans, "rel_energy_prev"
+ ),
+ model.stats.generic_param_dependence_ratio(
+ trans, "rel_energy_next"
+ ),
+ )
+ )
except KeyError:
- print('{:10s}: {:.0f} pJ ({:.2f})'.format(
- trans, static_model(trans, 'energy'),
- model.stats.generic_param_dependence_ratio(trans, 'energy')))
- print('{:10s}: {:.0f} µs'.format(trans, static_model(trans, 'duration')))
-
- if xv_method == 'montecarlo':
+ print(
+ "{:10s}: {:.0f} pJ ({:.2f})".format(
+ trans,
+ static_model(trans, "energy"),
+ model.stats.generic_param_dependence_ratio(trans, "energy"),
+ )
+ )
+ print("{:10s}: {:.0f} µs".format(trans, static_model(trans, "duration")))
+
+ if xv_method == "montecarlo":
static_quality = xv.montecarlo(lambda m: m.get_static(), xv_count)
else:
static_quality = model.assess(static_model)
if len(show_models):
- print('--- LUT ---')
+ print("--- LUT ---")
lut_model = model.get_param_lut()
- if xv_method == 'montecarlo':
+ if xv_method == "montecarlo":
lut_quality = xv.montecarlo(lambda m: m.get_param_lut(fallback=True), xv_count)
else:
lut_quality = model.assess(lut_model)
if len(show_models):
- print('--- param model ---')
+ print("--- param model ---")
- param_model, param_info = model.get_fitted(safe_functions_enabled=safe_functions_enabled)
+ param_model, param_info = model.get_fitted(
+ safe_functions_enabled=safe_functions_enabled
+ )
- if 'paramdetection' in show_models or 'all' in show_models:
+ if "paramdetection" in show_models or "all" in show_models:
for state in model.states_and_transitions():
for attribute in model.attributes(state):
info = param_info(state, attribute)
- print('{:10s} {:10s} non-param stddev {:f}'.format(
- state, attribute, model.stats.stats[state][attribute]['std_static']
- ))
- print('{:10s} {:10s} param-lut stddev {:f}'.format(
- state, attribute, model.stats.stats[state][attribute]['std_param_lut']
- ))
- for param in sorted(model.stats.stats[state][attribute]['std_by_param'].keys()):
- print('{:10s} {:10s} {:10s} stddev {:f}'.format(
- state, attribute, param, model.stats.stats[state][attribute]['std_by_param'][param]
- ))
+ print(
+ "{:10s} {:10s} non-param stddev {:f}".format(
+ state,
+ attribute,
+ model.stats.stats[state][attribute]["std_static"],
+ )
+ )
+ print(
+ "{:10s} {:10s} param-lut stddev {:f}".format(
+ state,
+ attribute,
+ model.stats.stats[state][attribute]["std_param_lut"],
+ )
+ )
+ for param in sorted(
+ model.stats.stats[state][attribute]["std_by_param"].keys()
+ ):
+ print(
+ "{:10s} {:10s} {:10s} stddev {:f}".format(
+ state,
+ attribute,
+ param,
+ model.stats.stats[state][attribute]["std_by_param"][param],
+ )
+ )
if info is not None:
- for param_name in sorted(info['fit_result'].keys(), key=str):
- param_fit = info['fit_result'][param_name]['results']
+ for param_name in sorted(info["fit_result"].keys(), key=str):
+ param_fit = info["fit_result"][param_name]["results"]
for function_type in sorted(param_fit.keys()):
- function_rmsd = param_fit[function_type]['rmsd']
- print('{:10s} {:10s} {:10s} mean {:10s} RMSD {:.0f}'.format(
- state, attribute, str(param_name), function_type, function_rmsd
- ))
-
- if 'param' in show_models or 'all' in show_models:
+ function_rmsd = param_fit[function_type]["rmsd"]
+ print(
+ "{:10s} {:10s} {:10s} mean {:10s} RMSD {:.0f}".format(
+ state,
+ attribute,
+ str(param_name),
+ function_type,
+ function_rmsd,
+ )
+ )
+
+ if "param" in show_models or "all" in show_models:
if not model.stats.can_be_fitted():
- print('[!] measurements have insufficient distinct numeric parameters for fitting. A parameter-aware model is not available.')
+ print(
+ "[!] measurements have insufficient distinct numeric parameters for fitting. A parameter-aware model is not available."
+ )
for state in model.states():
for attribute in model.attributes(state):
if param_info(state, attribute):
- print('{:10s}: {}'.format(state, param_info(state, attribute)['function']._model_str))
- print('{:10s} {}'.format('', param_info(state, attribute)['function']._regression_args))
+ print(
+ "{:10s}: {}".format(
+ state, param_info(state, attribute)["function"]._model_str
+ )
+ )
+ print(
+ "{:10s} {}".format(
+ "",
+ param_info(state, attribute)["function"]._regression_args,
+ )
+ )
for trans in model.transitions():
for attribute in model.attributes(trans):
if param_info(trans, attribute):
- print('{:10s}: {:10s}: {}'.format(trans, attribute, param_info(trans, attribute)['function']._model_str))
- print('{:10s} {:10s} {}'.format('', '', param_info(trans, attribute)['function']._regression_args))
-
- if xv_method == 'montecarlo':
+ print(
+ "{:10s}: {:10s}: {}".format(
+ trans,
+ attribute,
+ param_info(trans, attribute)["function"]._model_str,
+ )
+ )
+ print(
+ "{:10s} {:10s} {}".format(
+ "",
+ "",
+ param_info(trans, attribute)["function"]._regression_args,
+ )
+ )
+
+ if xv_method == "montecarlo":
analytic_quality = xv.montecarlo(lambda m: m.get_fitted()[0], xv_count)
else:
analytic_quality = model.assess(param_model)
- if 'tex' in show_models or 'tex' in show_quality:
- print_text_model_data(model, static_model, static_quality, lut_model, lut_quality, param_model, param_info, analytic_quality)
-
- if 'html' in show_models or 'html' in show_quality:
- print_html_model_data(model, static_model, static_quality, lut_model, lut_quality, param_model, param_info, analytic_quality)
-
- if 'table' in show_quality or 'all' in show_quality:
- model_quality_table([static_quality, analytic_quality, lut_quality], [None, param_info, None])
-
- if 'overall' in show_quality or 'all' in show_quality:
- print('overall static/param/lut MAE assuming equal state distribution:')
- print(' {:6.1f} / {:6.1f} / {:6.1f} µW'.format(
- model.assess_states(static_model),
- model.assess_states(param_model),
- model.assess_states(lut_model)))
- print('overall static/param/lut MAE assuming 95% STANDBY1:')
- distrib = {'STANDBY1': 0.95, 'POWERDOWN': 0.03, 'TX': 0.01, 'RX': 0.01}
- print(' {:6.1f} / {:6.1f} / {:6.1f} µW'.format(
- model.assess_states(static_model, distribution=distrib),
- model.assess_states(param_model, distribution=distrib),
- model.assess_states(lut_model, distribution=distrib)))
-
- if 'summary' in show_quality or 'all' in show_quality:
- model_summary_table([model.assess_on_traces(static_model), model.assess_on_traces(param_model), model.assess_on_traces(lut_model)])
-
- if 'plot-param' in opts:
- for kv in opts['plot-param'].split(';'):
- state_or_trans, attribute, param_name, *function = kv.split(' ')
+ if "tex" in show_models or "tex" in show_quality:
+ print_text_model_data(
+ model,
+ static_model,
+ static_quality,
+ lut_model,
+ lut_quality,
+ param_model,
+ param_info,
+ analytic_quality,
+ )
+
+ if "html" in show_models or "html" in show_quality:
+ print_html_model_data(
+ model,
+ static_model,
+ static_quality,
+ lut_model,
+ lut_quality,
+ param_model,
+ param_info,
+ analytic_quality,
+ )
+
+ if "table" in show_quality or "all" in show_quality:
+ model_quality_table(
+ [static_quality, analytic_quality, lut_quality], [None, param_info, None]
+ )
+
+ if "overall" in show_quality or "all" in show_quality:
+ print("overall static/param/lut MAE assuming equal state distribution:")
+ print(
+ " {:6.1f} / {:6.1f} / {:6.1f} µW".format(
+ model.assess_states(static_model),
+ model.assess_states(param_model),
+ model.assess_states(lut_model),
+ )
+ )
+ print("overall static/param/lut MAE assuming 95% STANDBY1:")
+ distrib = {"STANDBY1": 0.95, "POWERDOWN": 0.03, "TX": 0.01, "RX": 0.01}
+ print(
+ " {:6.1f} / {:6.1f} / {:6.1f} µW".format(
+ model.assess_states(static_model, distribution=distrib),
+ model.assess_states(param_model, distribution=distrib),
+ model.assess_states(lut_model, distribution=distrib),
+ )
+ )
+
+ if "summary" in show_quality or "all" in show_quality:
+ model_summary_table(
+ [
+ model.assess_on_traces(static_model),
+ model.assess_on_traces(param_model),
+ model.assess_on_traces(lut_model),
+ ]
+ )
+
+ if "plot-param" in opts:
+ for kv in opts["plot-param"].split(";"):
+ state_or_trans, attribute, param_name, *function = kv.split(" ")
if len(function):
- function = gplearn_to_function(' '.join(function))
+ function = gplearn_to_function(" ".join(function))
else:
function = None
- plotter.plot_param(model, state_or_trans, attribute, model.param_index(param_name), extra_function=function)
-
- if 'export-energymodel' in opts:
+ plotter.plot_param(
+ model,
+ state_or_trans,
+ attribute,
+ model.param_index(param_name),
+ extra_function=function,
+ )
+
+ if "export-energymodel" in opts:
if not pta:
- print('[E] --export-energymodel requires --hwmodel to be set')
+ print("[E] --export-energymodel requires --hwmodel to be set")
sys.exit(1)
json_model = model.to_json()
- with open(opts['export-energymodel'], 'w') as f:
+ with open(opts["export-energymodel"], "w") as f:
json.dump(json_model, f, indent=2, sort_keys=True)
sys.exit(0)
diff --git a/bin/analyze-timing.py b/bin/analyze-timing.py
index 6a458d9..e565c8f 100755
--- a/bin/analyze-timing.py
+++ b/bin/analyze-timing.py
@@ -91,55 +91,83 @@ def print_model_quality(results):
for state_or_tran in results.keys():
print()
for key, result in results[state_or_tran].items():
- if 'smape' in result:
- print('{:20s} {:15s} {:.2f}% / {:.0f}'.format(
- state_or_tran, key, result['smape'], result['mae']))
+ if "smape" in result:
+ print(
+ "{:20s} {:15s} {:.2f}% / {:.0f}".format(
+ state_or_tran, key, result["smape"], result["mae"]
+ )
+ )
else:
- print('{:20s} {:15s} {:.0f}'.format(
- state_or_tran, key, result['mae']))
+ print("{:20s} {:15s} {:.0f}".format(state_or_tran, key, result["mae"]))
def format_quality_measures(result):
- if 'smape' in result:
- return '{:6.2f}% / {:9.0f}'.format(result['smape'], result['mae'])
+ if "smape" in result:
+ return "{:6.2f}% / {:9.0f}".format(result["smape"], result["mae"])
else:
- return '{:6} {:9.0f}'.format('', result['mae'])
+ return "{:6} {:9.0f}".format("", result["mae"])
def model_quality_table(result_lists, info_list):
- for state_or_tran in result_lists[0]['by_name'].keys():
- for key in result_lists[0]['by_name'][state_or_tran].keys():
- buf = '{:20s} {:15s}'.format(state_or_tran, key)
+ for state_or_tran in result_lists[0]["by_name"].keys():
+ for key in result_lists[0]["by_name"][state_or_tran].keys():
+ buf = "{:20s} {:15s}".format(state_or_tran, key)
for i, results in enumerate(result_lists):
info = info_list[i]
- buf += ' ||| '
+ buf += " ||| "
if info is None or info(state_or_tran, key):
- result = results['by_name'][state_or_tran][key]
+ result = results["by_name"][state_or_tran][key]
buf += format_quality_measures(result)
else:
- buf += '{:6}----{:9}'.format('', '')
+ buf += "{:6}----{:9}".format("", "")
print(buf)
def print_text_model_data(model, pm, pq, lm, lq, am, ai, aq):
- print('')
- print(r'key attribute $1 - \frac{\sigma_X}{...}$')
+ print("")
+ print(r"key attribute $1 - \frac{\sigma_X}{...}$")
for state_or_tran in model.by_name.keys():
for attribute in model.attributes(state_or_tran):
- print('{} {} {:.8f}'.format(state_or_tran, attribute, model.stats.generic_param_dependence_ratio(state_or_tran, attribute)))
-
- print('')
- print(r'key attribute parameter $1 - \frac{...}{...}$')
+ print(
+ "{} {} {:.8f}".format(
+ state_or_tran,
+ attribute,
+ model.stats.generic_param_dependence_ratio(
+ state_or_tran, attribute
+ ),
+ )
+ )
+
+ print("")
+ print(r"key attribute parameter $1 - \frac{...}{...}$")
for state_or_tran in model.by_name.keys():
for attribute in model.attributes(state_or_tran):
for param in model.parameters():
- print('{} {} {} {:.8f}'.format(state_or_tran, attribute, param, model.stats.param_dependence_ratio(state_or_tran, attribute, param)))
+ print(
+ "{} {} {} {:.8f}".format(
+ state_or_tran,
+ attribute,
+ param,
+ model.stats.param_dependence_ratio(
+ state_or_tran, attribute, param
+ ),
+ )
+ )
if state_or_tran in model._num_args:
for arg_index in range(model._num_args[state_or_tran]):
- print('{} {} {:d} {:.8f}'.format(state_or_tran, attribute, arg_index, model.stats.arg_dependence_ratio(state_or_tran, attribute, arg_index)))
+ print(
+ "{} {} {:d} {:.8f}".format(
+ state_or_tran,
+ attribute,
+ arg_index,
+ model.stats.arg_dependence_ratio(
+ state_or_tran, attribute, arg_index
+ ),
+ )
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
ignored_trace_indexes = []
discard_outliers = None
@@ -154,56 +182,60 @@ if __name__ == '__main__':
try:
optspec = (
- 'plot-unparam= plot-param= show-models= show-quality= '
- 'ignored-trace-indexes= discard-outliers= function-override= '
- 'filter-param= '
- 'cross-validate= '
- 'corrcoef param-info '
- 'with-safe-functions hwmodel= export-energymodel='
+ "plot-unparam= plot-param= show-models= show-quality= "
+ "ignored-trace-indexes= discard-outliers= function-override= "
+ "filter-param= "
+ "cross-validate= "
+ "corrcoef param-info "
+ "with-safe-functions hwmodel= export-energymodel="
)
- raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(' '))
+ raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(" "))
for option, parameter in raw_opts:
- optname = re.sub(r'^--', '', option)
+ optname = re.sub(r"^--", "", option)
opts[optname] = parameter
- if 'ignored-trace-indexes' in opts:
- ignored_trace_indexes = list(map(int, opts['ignored-trace-indexes'].split(',')))
+ if "ignored-trace-indexes" in opts:
+ ignored_trace_indexes = list(
+ map(int, opts["ignored-trace-indexes"].split(","))
+ )
if 0 in ignored_trace_indexes:
- print('[E] arguments to --ignored-trace-indexes start from 1')
+ print("[E] arguments to --ignored-trace-indexes start from 1")
- if 'discard-outliers' in opts:
- discard_outliers = float(opts['discard-outliers'])
+ if "discard-outliers" in opts:
+ discard_outliers = float(opts["discard-outliers"])
- if 'function-override' in opts:
- for function_desc in opts['function-override'].split(';'):
- state_or_tran, attribute, *function_str = function_desc.split(' ')
- function_override[(state_or_tran, attribute)] = ' '.join(function_str)
+ if "function-override" in opts:
+ for function_desc in opts["function-override"].split(";"):
+ state_or_tran, attribute, *function_str = function_desc.split(" ")
+ function_override[(state_or_tran, attribute)] = " ".join(function_str)
- if 'show-models' in opts:
- show_models = opts['show-models'].split(',')
+ if "show-models" in opts:
+ show_models = opts["show-models"].split(",")
- if 'show-quality' in opts:
- show_quality = opts['show-quality'].split(',')
+ if "show-quality" in opts:
+ show_quality = opts["show-quality"].split(",")
- if 'cross-validate' in opts:
- xv_method, xv_count = opts['cross-validate'].split(':')
+ if "cross-validate" in opts:
+ xv_method, xv_count = opts["cross-validate"].split(":")
xv_count = int(xv_count)
- if 'with-safe-functions' in opts:
+ if "with-safe-functions" in opts:
safe_functions_enabled = True
- if 'hwmodel' in opts:
- with open(opts['hwmodel'], 'r') as f:
+ if "hwmodel" in opts:
+ with open(opts["hwmodel"], "r") as f:
hwmodel = json.load(f)
- if 'corrcoef' not in opts:
- opts['corrcoef'] = False
+ if "corrcoef" not in opts:
+ opts["corrcoef"] = False
- if 'filter-param' in opts:
- opts['filter-param'] = list(map(lambda x: x.split('='), opts['filter-param'].split(',')))
+ if "filter-param" in opts:
+ opts["filter-param"] = list(
+ map(lambda x: x.split("="), opts["filter-param"].split(","))
+ )
else:
- opts['filter-param'] = list()
+ opts["filter-param"] = list()
except getopt.GetoptError as err:
print(err)
@@ -212,44 +244,74 @@ if __name__ == '__main__':
raw_data = TimingData(args)
preprocessed_data = raw_data.get_preprocessed_data()
- by_name, parameters, arg_count = pta_trace_to_aggregate(preprocessed_data, ignored_trace_indexes)
+ by_name, parameters, arg_count = pta_trace_to_aggregate(
+ preprocessed_data, ignored_trace_indexes
+ )
prune_dependent_parameters(by_name, parameters)
- filter_aggregate_by_param(by_name, parameters, opts['filter-param'])
+ filter_aggregate_by_param(by_name, parameters, opts["filter-param"])
- model = AnalyticModel(by_name, parameters, arg_count, use_corrcoef=opts['corrcoef'], function_override=function_override)
+ model = AnalyticModel(
+ by_name,
+ parameters,
+ arg_count,
+ use_corrcoef=opts["corrcoef"],
+ function_override=function_override,
+ )
if xv_method:
xv = CrossValidator(AnalyticModel, by_name, parameters, arg_count)
- if 'param-info' in opts:
+ if "param-info" in opts:
for state in model.names:
- print('{}:'.format(state))
+ print("{}:".format(state))
for param in model.parameters:
- print(' {} = {}'.format(param, model.stats.distinct_values[state][param]))
-
- if 'plot-unparam' in opts:
- for kv in opts['plot-unparam'].split(';'):
- state_or_trans, attribute, ylabel = kv.split(':')
- fname = 'param_y_{}_{}.pdf'.format(state_or_trans, attribute)
- plotter.plot_y(model.by_name[state_or_trans][attribute], xlabel='measurement #', ylabel=ylabel)
+ print(
+ " {} = {}".format(
+ param, model.stats.distinct_values[state][param]
+ )
+ )
+
+ if "plot-unparam" in opts:
+ for kv in opts["plot-unparam"].split(";"):
+ state_or_trans, attribute, ylabel = kv.split(":")
+ fname = "param_y_{}_{}.pdf".format(state_or_trans, attribute)
+ plotter.plot_y(
+ model.by_name[state_or_trans][attribute],
+ xlabel="measurement #",
+ ylabel=ylabel,
+ )
if len(show_models):
- print('--- simple static model ---')
+ print("--- simple static model ---")
static_model = model.get_static()
- if 'static' in show_models or 'all' in show_models:
+ if "static" in show_models or "all" in show_models:
for trans in model.names:
- print('{:10s}: {:.0f} µs'.format(trans, static_model(trans, 'duration')))
+ print("{:10s}: {:.0f} µs".format(trans, static_model(trans, "duration")))
for param in model.parameters:
- print('{:10s} dependence on {:15s}: {:.2f}'.format(
- '',
- param,
- model.stats.param_dependence_ratio(trans, 'duration', param)))
- if model.stats.has_codependent_parameters(trans, 'duration', param):
- print('{:24s} co-dependencies: {:s}'.format('', ', '.join(model.stats.codependent_parameters(trans, 'duration', param))))
- for param_dict in model.stats.codependent_parameter_value_dicts(trans, 'duration', param):
- print('{:24s} parameter-aware for {}'.format('', param_dict))
+ print(
+ "{:10s} dependence on {:15s}: {:.2f}".format(
+ "",
+ param,
+ model.stats.param_dependence_ratio(trans, "duration", param),
+ )
+ )
+ if model.stats.has_codependent_parameters(trans, "duration", param):
+ print(
+ "{:24s} co-dependencies: {:s}".format(
+ "",
+ ", ".join(
+ model.stats.codependent_parameters(
+ trans, "duration", param
+ )
+ ),
+ )
+ )
+ for param_dict in model.stats.codependent_parameter_value_dicts(
+ trans, "duration", param
+ ):
+ print("{:24s} parameter-aware for {}".format("", param_dict))
# import numpy as np
# safe_div = np.vectorize(lambda x,y: 0. if x == 0 else 1 - x/y)
# ratio_by_value = safe_div(model.stats.stats['write']['duration']['lut_by_param_values']['max_retry_count'], model.stats.stats['write']['duration']['std_by_param_values']['max_retry_count'])
@@ -260,81 +322,153 @@ if __name__ == '__main__':
# and why is there a non-power-of-two number of True entries in the matrix? 3 of them is odd...
# print(dep_by_value)
- if xv_method == 'montecarlo':
+ if xv_method == "montecarlo":
static_quality = xv.montecarlo(lambda m: m.get_static(), xv_count)
else:
static_quality = model.assess(static_model)
if len(show_models):
- print('--- LUT ---')
+ print("--- LUT ---")
lut_model = model.get_param_lut()
- if xv_method == 'montecarlo':
+ if xv_method == "montecarlo":
lut_quality = xv.montecarlo(lambda m: m.get_param_lut(fallback=True), xv_count)
else:
lut_quality = model.assess(lut_model)
if len(show_models):
- print('--- param model ---')
+ print("--- param model ---")
- param_model, param_info = model.get_fitted(safe_functions_enabled=safe_functions_enabled)
+ param_model, param_info = model.get_fitted(
+ safe_functions_enabled=safe_functions_enabled
+ )
- if 'paramdetection' in show_models or 'all' in show_models:
+ if "paramdetection" in show_models or "all" in show_models:
for transition in model.names:
- for attribute in ['duration']:
+ for attribute in ["duration"]:
info = param_info(transition, attribute)
- print('{:10s} {:10s} non-param stddev {:f}'.format(
- transition, attribute, model.stats.stats[transition][attribute]['std_static']
- ))
- print('{:10s} {:10s} param-lut stddev {:f}'.format(
- transition, attribute, model.stats.stats[transition][attribute]['std_param_lut']
- ))
- for param in sorted(model.stats.stats[transition][attribute]['std_by_param'].keys()):
- print('{:10s} {:10s} {:10s} stddev {:f}'.format(
- transition, attribute, param, model.stats.stats[transition][attribute]['std_by_param'][param]
- ))
- print('{:10s} {:10s} dependence on {:15s}: {:.2f}'.format(
- transition, attribute, param, model.stats.param_dependence_ratio(transition, attribute, param)))
- for i, arg_stddev in enumerate(model.stats.stats[transition][attribute]['std_by_arg']):
- print('{:10s} {:10s} arg{:d} stddev {:f}'.format(
- transition, attribute, i, arg_stddev
- ))
- print('{:10s} {:10s} dependence on arg{:d}: {:.2f}'.format(
- transition, attribute, i, model.stats.arg_dependence_ratio(transition, attribute, i)))
+ print(
+ "{:10s} {:10s} non-param stddev {:f}".format(
+ transition,
+ attribute,
+ model.stats.stats[transition][attribute]["std_static"],
+ )
+ )
+ print(
+ "{:10s} {:10s} param-lut stddev {:f}".format(
+ transition,
+ attribute,
+ model.stats.stats[transition][attribute]["std_param_lut"],
+ )
+ )
+ for param in sorted(
+ model.stats.stats[transition][attribute]["std_by_param"].keys()
+ ):
+ print(
+ "{:10s} {:10s} {:10s} stddev {:f}".format(
+ transition,
+ attribute,
+ param,
+ model.stats.stats[transition][attribute]["std_by_param"][
+ param
+ ],
+ )
+ )
+ print(
+ "{:10s} {:10s} dependence on {:15s}: {:.2f}".format(
+ transition,
+ attribute,
+ param,
+ model.stats.param_dependence_ratio(
+ transition, attribute, param
+ ),
+ )
+ )
+ for i, arg_stddev in enumerate(
+ model.stats.stats[transition][attribute]["std_by_arg"]
+ ):
+ print(
+ "{:10s} {:10s} arg{:d} stddev {:f}".format(
+ transition, attribute, i, arg_stddev
+ )
+ )
+ print(
+ "{:10s} {:10s} dependence on arg{:d}: {:.2f}".format(
+ transition,
+ attribute,
+ i,
+ model.stats.arg_dependence_ratio(transition, attribute, i),
+ )
+ )
if info is not None:
- for param_name in sorted(info['fit_result'].keys(), key=str):
- param_fit = info['fit_result'][param_name]['results']
+ for param_name in sorted(info["fit_result"].keys(), key=str):
+ param_fit = info["fit_result"][param_name]["results"]
for function_type in sorted(param_fit.keys()):
- function_rmsd = param_fit[function_type]['rmsd']
- print('{:10s} {:10s} {:10s} mean {:10s} RMSD {:.0f}'.format(
- transition, attribute, str(param_name), function_type, function_rmsd
- ))
-
- if 'param' in show_models or 'all' in show_models:
+ function_rmsd = param_fit[function_type]["rmsd"]
+ print(
+ "{:10s} {:10s} {:10s} mean {:10s} RMSD {:.0f}".format(
+ transition,
+ attribute,
+ str(param_name),
+ function_type,
+ function_rmsd,
+ )
+ )
+
+ if "param" in show_models or "all" in show_models:
for trans in model.names:
- for attribute in ['duration']:
+ for attribute in ["duration"]:
if param_info(trans, attribute):
- print('{:10s}: {:10s}: {}'.format(trans, attribute, param_info(trans, attribute)['function']._model_str))
- print('{:10s} {:10s} {}'.format('', '', param_info(trans, attribute)['function']._regression_args))
-
- if xv_method == 'montecarlo':
+ print(
+ "{:10s}: {:10s}: {}".format(
+ trans,
+ attribute,
+ param_info(trans, attribute)["function"]._model_str,
+ )
+ )
+ print(
+ "{:10s} {:10s} {}".format(
+ "",
+ "",
+ param_info(trans, attribute)["function"]._regression_args,
+ )
+ )
+
+ if xv_method == "montecarlo":
analytic_quality = xv.montecarlo(lambda m: m.get_fitted()[0], xv_count)
else:
analytic_quality = model.assess(param_model)
- if 'tex' in show_models or 'tex' in show_quality:
- print_text_model_data(model, static_model, static_quality, lut_model, lut_quality, param_model, param_info, analytic_quality)
+ if "tex" in show_models or "tex" in show_quality:
+ print_text_model_data(
+ model,
+ static_model,
+ static_quality,
+ lut_model,
+ lut_quality,
+ param_model,
+ param_info,
+ analytic_quality,
+ )
- if 'table' in show_quality or 'all' in show_quality:
- model_quality_table([static_quality, analytic_quality, lut_quality], [None, param_info, None])
+ if "table" in show_quality or "all" in show_quality:
+ model_quality_table(
+ [static_quality, analytic_quality, lut_quality], [None, param_info, None]
+ )
- if 'plot-param' in opts:
- for kv in opts['plot-param'].split(';'):
- state_or_trans, attribute, param_name, *function = kv.split(' ')
+ if "plot-param" in opts:
+ for kv in opts["plot-param"].split(";"):
+ state_or_trans, attribute, param_name, *function = kv.split(" ")
if len(function):
- function = gplearn_to_function(' '.join(function))
+ function = gplearn_to_function(" ".join(function))
else:
function = None
- plotter.plot_param(model, state_or_trans, attribute, model.param_index(param_name), extra_function=function)
+ plotter.plot_param(
+ model,
+ state_or_trans,
+ attribute,
+ model.param_index(param_name),
+ extra_function=function,
+ )
sys.exit(0)
diff --git a/bin/eval-accounting-overhead.py b/bin/eval-accounting-overhead.py
index d4b1d2c..7ea0807 100755
--- a/bin/eval-accounting-overhead.py
+++ b/bin/eval-accounting-overhead.py
@@ -18,17 +18,21 @@ import json
import sys
for filename in sys.argv[1:]:
- with open(filename, 'r') as f:
+ with open(filename, "r") as f:
measurement = json.load(f)
raw_data = TimingData([filename])
preprocessed_data = raw_data.get_preprocessed_data()
by_name, parameters, arg_count = pta_trace_to_aggregate(preprocessed_data)
model = AnalyticModel(by_name, parameters, arg_count)
static_model = model.get_static()
- if 'setup' in model.names:
- transition_duration = static_model('setup', 'duration')
- elif 'init' in model.names:
- transition_duration = static_model('init', 'duration')
- get_energy_duration = static_model('getEnergy', 'duration')
+ if "setup" in model.names:
+ transition_duration = static_model("setup", "duration")
+ elif "init" in model.names:
+ transition_duration = static_model("init", "duration")
+ get_energy_duration = static_model("getEnergy", "duration")
- print('{:60s}: {:.0f} / {:.0f} µs'.format(measurement['opt']['accounting'], transition_duration, get_energy_duration))
+ print(
+ "{:60s}: {:.0f} / {:.0f} µs".format(
+ measurement["opt"]["accounting"], transition_duration, get_energy_duration
+ )
+ )
diff --git a/bin/eval-online-model-accuracy.py b/bin/eval-online-model-accuracy.py
index 21e7a1e..202ac28 100755
--- a/bin/eval-online-model-accuracy.py
+++ b/bin/eval-online-model-accuracy.py
@@ -33,72 +33,74 @@ import numpy as np
opt = dict()
-if __name__ == '__main__':
+if __name__ == "__main__":
try:
optspec = (
- 'accounting= '
- 'arch= '
- 'app= '
- 'depth= '
- 'dummy= '
- 'instance= '
- 'repeat= '
- 'run= '
- 'sleep= '
- 'timer-pin= '
- 'trace-filter= '
- 'timer-freq= '
- 'timer-type= '
- 'timestamp-type= '
- 'energy-type= '
- 'power-type= '
- 'timestamp-granularity= '
- 'energy-granularity= '
- 'power-granularity= '
+ "accounting= "
+ "arch= "
+ "app= "
+ "depth= "
+ "dummy= "
+ "instance= "
+ "repeat= "
+ "run= "
+ "sleep= "
+ "timer-pin= "
+ "trace-filter= "
+ "timer-freq= "
+ "timer-type= "
+ "timestamp-type= "
+ "energy-type= "
+ "power-type= "
+ "timestamp-granularity= "
+ "energy-granularity= "
+ "power-granularity= "
)
- raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(' '))
+ raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(" "))
opt_default = {
- 'depth': 3,
- 'sleep': 0,
- 'timer-freq': 1e6,
- 'timer-type': 'uint16_t',
- 'timestamp-type': 'uint16_t',
- 'energy-type': 'uint32_t',
- 'power-type': 'uint16_t',
- 'timestamp-granularity': 1e-6,
- 'power-granularity': 1e-6,
- 'energy-granularity': 1e-12,
+ "depth": 3,
+ "sleep": 0,
+ "timer-freq": 1e6,
+ "timer-type": "uint16_t",
+ "timestamp-type": "uint16_t",
+ "energy-type": "uint32_t",
+ "power-type": "uint16_t",
+ "timestamp-granularity": 1e-6,
+ "power-granularity": 1e-6,
+ "energy-granularity": 1e-12,
}
for option, parameter in raw_opts:
- optname = re.sub(r'^--', '', option)
+ optname = re.sub(r"^--", "", option)
opt[optname] = parameter
- for key in 'depth sleep'.split():
+ for key in "depth sleep".split():
if key in opt:
opt[key] = int(opt[key])
else:
opt[key] = opt_default[key]
- for key in 'timer-freq timestamp-granularity energy-granularity power-granularity'.split():
+ for (
+ key
+ ) in "timer-freq timestamp-granularity energy-granularity power-granularity".split():
if key in opt:
opt[key] = float(opt[key])
else:
opt[key] = opt_default[key]
- for key in 'timer-type timestamp-type energy-type power-type'.split():
+ for key in "timer-type timestamp-type energy-type power-type".split():
if key not in opt:
opt[key] = opt_default[key]
- if 'trace-filter' in opt:
+ if "trace-filter" in opt:
trace_filter = []
- for trace in opt['trace-filter'].split():
- trace_filter.append(trace.split(','))
- opt['trace-filter'] = trace_filter
+ for trace in opt["trace-filter"].split():
+ trace_filter.append(trace.split(","))
+ opt["trace-filter"] = trace_filter
else:
- opt['trace-filter'] = None
+ opt["trace-filter"] = None
except getopt.GetoptError as err:
print(err)
@@ -109,81 +111,120 @@ if __name__ == '__main__':
pta = PTA.from_file(modelfile)
enum = dict()
- if '.json' not in modelfile:
- with open(modelfile, 'r') as f:
+ if ".json" not in modelfile:
+ with open(modelfile, "r") as f:
driver_definition = yaml.safe_load(f)
- if 'dummygen' in driver_definition and 'enum' in driver_definition['dummygen']:
- enum = driver_definition['dummygen']['enum']
+ if "dummygen" in driver_definition and "enum" in driver_definition["dummygen"]:
+ enum = driver_definition["dummygen"]["enum"]
pta.set_random_energy_model()
- runs = list(pta.dfs(opt['depth'], with_arguments=True, with_parameters=True, trace_filter=opt['trace-filter'], sleep=opt['sleep']))
+ runs = list(
+ pta.dfs(
+ opt["depth"],
+ with_arguments=True,
+ with_parameters=True,
+ trace_filter=opt["trace-filter"],
+ sleep=opt["sleep"],
+ )
+ )
num_transitions = len(runs)
if len(runs) == 0:
- print('DFS returned no traces -- perhaps your trace-filter is too restrictive?', file=sys.stderr)
+ print(
+ "DFS returned no traces -- perhaps your trace-filter is too restrictive?",
+ file=sys.stderr,
+ )
sys.exit(1)
real_energies = list()
real_durations = list()
model_energies = list()
for run in runs:
- accounting_method = get_simulated_accountingmethod(opt['accounting'])(pta, opt['timer-freq'], opt['timer-type'], opt['timestamp-type'],
- opt['power-type'], opt['energy-type'])
- real_energy, real_duration, _, _ = pta.simulate(run, accounting=accounting_method)
+ accounting_method = get_simulated_accountingmethod(opt["accounting"])(
+ pta,
+ opt["timer-freq"],
+ opt["timer-type"],
+ opt["timestamp-type"],
+ opt["power-type"],
+ opt["energy-type"],
+ )
+ real_energy, real_duration, _, _ = pta.simulate(
+ run, accounting=accounting_method
+ )
model_energy = accounting_method.get_energy()
real_energies.append(real_energy)
real_durations.append(real_duration)
model_energies.append(model_energy)
measures = regression_measures(np.array(model_energies), np.array(real_energies))
- print('SMAPE {:.0f}%, MAE {}'.format(measures['smape'], measures['mae']))
+ print("SMAPE {:.0f}%, MAE {}".format(measures["smape"], measures["mae"]))
timer_freqs = [1e3, 2e3, 5e3, 1e4, 2e4, 5e4, 1e5, 2e5, 5e5, 1e6, 2e6, 5e6]
- timer_types = timestamp_types = power_types = energy_types = 'uint8_t uint16_t uint32_t uint64_t'.split()
+ timer_types = (
+ timestamp_types
+ ) = power_types = energy_types = "uint8_t uint16_t uint32_t uint64_t".split()
def config_weight(timer_freq, timer_type, ts_type, power_type, energy_type):
base_weight = 0
for var_type in timer_type, ts_type, power_type, energy_type:
- if var_type == 'uint8_t':
+ if var_type == "uint8_t":
base_weight += 1
- elif var_type == 'uint16_t':
+ elif var_type == "uint16_t":
base_weight += 2
- elif var_type == 'uint32_t':
+ elif var_type == "uint32_t":
base_weight += 4
- elif var_type == 'uint64_t':
+ elif var_type == "uint64_t":
base_weight += 8
return base_weight
# sys.exit(0)
mean_errors = list()
- for timer_freq, timer_type, ts_type, power_type, energy_type in itertools.product(timer_freqs, timer_types, timestamp_types, power_types, energy_types):
+ for timer_freq, timer_type, ts_type, power_type, energy_type in itertools.product(
+ timer_freqs, timer_types, timestamp_types, power_types, energy_types
+ ):
real_energies = list()
real_durations = list()
model_energies = list()
# duration in µs
# With a short duration (e.g. only [1e2]), uint32_t also performs well for energy; otherwise it does not (overflow)
for sleep_duration in [1e2, 1e3, 1e4, 1e5, 1e6]:
- runs = pta.dfs(opt['depth'], with_arguments=True, with_parameters=True, trace_filter=opt['trace-filter'], sleep=sleep_duration)
+ runs = pta.dfs(
+ opt["depth"],
+ with_arguments=True,
+ with_parameters=True,
+ trace_filter=opt["trace-filter"],
+ sleep=sleep_duration,
+ )
for run in runs:
- accounting_method = get_simulated_accountingmethod(opt['accounting'])(pta, timer_freq, timer_type, ts_type, power_type, energy_type)
- real_energy, real_duration, _, _ = pta.simulate(run, accounting=accounting_method)
+ accounting_method = get_simulated_accountingmethod(opt["accounting"])(
+ pta, timer_freq, timer_type, ts_type, power_type, energy_type
+ )
+ real_energy, real_duration, _, _ = pta.simulate(
+ run, accounting=accounting_method
+ )
model_energy = accounting_method.get_energy()
real_energies.append(real_energy)
real_durations.append(real_duration)
model_energies.append(model_energy)
- measures = regression_measures(np.array(model_energies), np.array(real_energies))
- mean_errors.append(((timer_freq, timer_type, ts_type, power_type, energy_type), config_weight(timer_freq, timer_type, ts_type, power_type, energy_type), measures))
+ measures = regression_measures(
+ np.array(model_energies), np.array(real_energies)
+ )
+ mean_errors.append(
+ (
+ (timer_freq, timer_type, ts_type, power_type, energy_type),
+ config_weight(timer_freq, timer_type, ts_type, power_type, energy_type),
+ measures,
+ )
+ )
mean_errors.sort(key=lambda x: x[1])
- mean_errors.sort(key=lambda x: x[2]['mae'])
+ mean_errors.sort(key=lambda x: x[2]["mae"])
for result in mean_errors:
config, weight, measures = result
- print('{} -> {:.0f}% / {}'.format(
- config,
- measures['smape'], measures['mae']))
+ print("{} -> {:.0f}% / {}".format(config, measures["smape"], measures["mae"]))
sys.exit(0)
diff --git a/bin/eval-outlier-removal.py b/bin/eval-outlier-removal.py
index 3a4aa28..d8b0e9d 100755
--- a/bin/eval-outlier-removal.py
+++ b/bin/eval-outlier-removal.py
@@ -7,63 +7,73 @@ from dfatool.dfatool import PTAModel, RawData, pta_trace_to_aggregate
opts = {}
+
def model_quality_table(result_lists, info_list):
- for state_or_tran in result_lists[0]['by_name'].keys():
- for key in result_lists[0]['by_name'][state_or_tran].keys():
- buf = '{:20s} {:15s}'.format(state_or_tran, key)
+ for state_or_tran in result_lists[0]["by_name"].keys():
+ for key in result_lists[0]["by_name"][state_or_tran].keys():
+ buf = "{:20s} {:15s}".format(state_or_tran, key)
for i, results in enumerate(result_lists):
- results = results['by_name']
+ results = results["by_name"]
info = info_list[i]
- buf += ' ||| '
+ buf += " ||| "
if info == None or info(state_or_tran, key):
result = results[state_or_tran][key]
- if 'smape' in result:
- buf += '{:6.2f}% / {:9.0f}'.format(result['smape'], result['mae'])
+ if "smape" in result:
+ buf += "{:6.2f}% / {:9.0f}".format(
+ result["smape"], result["mae"]
+ )
else:
- buf += '{:6} {:9.0f}'.format('', result['mae'])
+ buf += "{:6} {:9.0f}".format("", result["mae"])
else:
- buf += '{:6}----{:9}'.format('', '')
+ buf += "{:6}----{:9}".format("", "")
print(buf)
+
def combo_model_quality_table(result_lists, info_list):
- for state_or_tran in result_lists[0][0]['by_name'].keys():
- for key in result_lists[0][0]['by_name'][state_or_tran].keys():
+ for state_or_tran in result_lists[0][0]["by_name"].keys():
+ for key in result_lists[0][0]["by_name"][state_or_tran].keys():
for sub_result_lists in result_lists:
- buf = '{:20s} {:15s}'.format(state_or_tran, key)
+ buf = "{:20s} {:15s}".format(state_or_tran, key)
for i, results in enumerate(sub_result_lists):
- results = results['by_name']
+ results = results["by_name"]
info = info_list[i]
- buf += ' ||| '
+ buf += " ||| "
if info == None or info(state_or_tran, key):
result = results[state_or_tran][key]
- if 'smape' in result:
- buf += '{:6.2f}% / {:9.0f}'.format(result['smape'], result['mae'])
+ if "smape" in result:
+ buf += "{:6.2f}% / {:9.0f}".format(
+ result["smape"], result["mae"]
+ )
else:
- buf += '{:6} {:9.0f}'.format('', result['mae'])
+ buf += "{:6} {:9.0f}".format("", result["mae"])
else:
- buf += '{:6}----{:9}'.format('', '')
+ buf += "{:6}----{:9}".format("", "")
print(buf)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
ignored_trace_indexes = []
discard_outliers = None
try:
- raw_opts, args = getopt.getopt(sys.argv[1:], "",
- 'plot ignored-trace-indexes= discard-outliers='.split(' '))
+ raw_opts, args = getopt.getopt(
+ sys.argv[1:], "", "plot ignored-trace-indexes= discard-outliers=".split(" ")
+ )
for option, parameter in raw_opts:
- optname = re.sub(r'^--', '', option)
+ optname = re.sub(r"^--", "", option)
opts[optname] = parameter
- if 'ignored-trace-indexes' in opts:
- ignored_trace_indexes = list(map(int, opts['ignored-trace-indexes'].split(',')))
+ if "ignored-trace-indexes" in opts:
+ ignored_trace_indexes = list(
+ map(int, opts["ignored-trace-indexes"].split(","))
+ )
if 0 in ignored_trace_indexes:
- print('[E] arguments to --ignored-trace-indexes start from 1')
+ print("[E] arguments to --ignored-trace-indexes start from 1")
- if 'discard-outliers' in opts:
- discard_outliers = float(opts['discard-outliers'])
+ if "discard-outliers" in opts:
+ discard_outliers = float(opts["discard-outliers"])
except getopt.GetoptError as err:
print(err)
@@ -72,19 +82,29 @@ if __name__ == '__main__':
raw_data = RawData(args)
preprocessed_data = raw_data.get_preprocessed_data()
- by_name, parameters, arg_count = pta_trace_to_aggregate(preprocessed_data, ignored_trace_indexes)
- m1 = PTAModel(by_name, parameters, arg_count,
- traces = preprocessed_data,
- ignore_trace_indexes = ignored_trace_indexes)
- m2 = PTAModel(by_name, parameters, arg_count,
- traces = preprocessed_data,
- ignore_trace_indexes = ignored_trace_indexes,
- discard_outliers = discard_outliers)
-
- print('--- simple static model ---')
+ by_name, parameters, arg_count = pta_trace_to_aggregate(
+ preprocessed_data, ignored_trace_indexes
+ )
+ m1 = PTAModel(
+ by_name,
+ parameters,
+ arg_count,
+ traces=preprocessed_data,
+ ignore_trace_indexes=ignored_trace_indexes,
+ )
+ m2 = PTAModel(
+ by_name,
+ parameters,
+ arg_count,
+ traces=preprocessed_data,
+ ignore_trace_indexes=ignored_trace_indexes,
+ discard_outliers=discard_outliers,
+ )
+
+ print("--- simple static model ---")
static_m1 = m1.get_static()
static_m2 = m2.get_static()
- #for state in model.states():
+ # for state in model.states():
# print('{:10s}: {:.0f} µW ({:.2f})'.format(
# state,
# static_model(state, 'power'),
@@ -94,7 +114,7 @@ if __name__ == '__main__':
# '',
# param,
# model.param_dependence_ratio(state, 'power', param)))
- #for trans in model.transitions():
+ # for trans in model.transitions():
# print('{:10s}: {:.0f} / {:.0f} / {:.0f} pJ ({:.2f} / {:.2f} / {:.2f})'.format(
# trans, static_model(trans, 'energy'),
# static_model(trans, 'rel_energy_prev'),
@@ -107,36 +127,84 @@ if __name__ == '__main__':
static_q2 = m2.assess(static_m2)
static_q12 = m1.assess(static_m2)
- print('--- LUT ---')
+ print("--- LUT ---")
lut_m1 = m1.get_param_lut()
lut_m2 = m2.get_param_lut()
lut_q1 = m1.assess(lut_m1)
lut_q2 = m2.assess(lut_m2)
lut_q12 = m1.assess(lut_m2)
- print('--- param model ---')
+ print("--- param model ---")
param_m1, param_i1 = m1.get_fitted()
for state in m1.states():
- for attribute in ['power']:
+ for attribute in ["power"]:
if param_i1(state, attribute):
- print('{:10s}: {}'.format(state, param_i1(state, attribute)['function']._model_str))
- print('{:10s} {}'.format('', param_i1(state, attribute)['function']._regression_args))
+ print(
+ "{:10s}: {}".format(
+ state, param_i1(state, attribute)["function"]._model_str
+ )
+ )
+ print(
+ "{:10s} {}".format(
+ "", param_i1(state, attribute)["function"]._regression_args
+ )
+ )
for trans in m1.transitions():
- for attribute in ['energy', 'rel_energy_prev', 'rel_energy_next', 'duration', 'timeout']:
+ for attribute in [
+ "energy",
+ "rel_energy_prev",
+ "rel_energy_next",
+ "duration",
+ "timeout",
+ ]:
if param_i1(trans, attribute):
- print('{:10s}: {:10s}: {}'.format(trans, attribute, param_i1(trans, attribute)['function']._model_str))
- print('{:10s} {:10s} {}'.format('', '', param_i1(trans, attribute)['function']._regression_args))
+ print(
+ "{:10s}: {:10s}: {}".format(
+ trans,
+ attribute,
+ param_i1(trans, attribute)["function"]._model_str,
+ )
+ )
+ print(
+ "{:10s} {:10s} {}".format(
+ "", "", param_i1(trans, attribute)["function"]._regression_args
+ )
+ )
param_m2, param_i2 = m2.get_fitted()
for state in m2.states():
- for attribute in ['power']:
+ for attribute in ["power"]:
if param_i2(state, attribute):
- print('{:10s}: {}'.format(state, param_i2(state, attribute)['function']._model_str))
- print('{:10s} {}'.format('', param_i2(state, attribute)['function']._regression_args))
+ print(
+ "{:10s}: {}".format(
+ state, param_i2(state, attribute)["function"]._model_str
+ )
+ )
+ print(
+ "{:10s} {}".format(
+ "", param_i2(state, attribute)["function"]._regression_args
+ )
+ )
for trans in m2.transitions():
- for attribute in ['energy', 'rel_energy_prev', 'rel_energy_next', 'duration', 'timeout']:
+ for attribute in [
+ "energy",
+ "rel_energy_prev",
+ "rel_energy_next",
+ "duration",
+ "timeout",
+ ]:
if param_i2(trans, attribute):
- print('{:10s}: {:10s}: {}'.format(trans, attribute, param_i2(trans, attribute)['function']._model_str))
- print('{:10s} {:10s} {}'.format('', '', param_i2(trans, attribute)['function']._regression_args))
+ print(
+ "{:10s}: {:10s}: {}".format(
+ trans,
+ attribute,
+ param_i2(trans, attribute)["function"]._model_str,
+ )
+ )
+ print(
+ "{:10s} {:10s} {}".format(
+ "", "", param_i2(trans, attribute)["function"]._regression_args
+ )
+ )
analytic_q1 = m1.assess(param_m1)
analytic_q2 = m2.assess(param_m2)
@@ -144,10 +212,13 @@ if __name__ == '__main__':
model_quality_table([static_q1, analytic_q1, lut_q1], [None, param_i1, None])
model_quality_table([static_q2, analytic_q2, lut_q2], [None, param_i2, None])
model_quality_table([static_q12, analytic_q12, lut_q12], [None, param_i2, None])
- combo_model_quality_table([
- [static_q1, analytic_q1, lut_q1],
- [static_q2, analytic_q2, lut_q2],
- [static_q12, analytic_q12, lut_q12]],
- [None, param_i1, None])
+ combo_model_quality_table(
+ [
+ [static_q1, analytic_q1, lut_q1],
+ [static_q2, analytic_q2, lut_q2],
+ [static_q12, analytic_q12, lut_q12],
+ ],
+ [None, param_i1, None],
+ )
sys.exit(0)
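
Note on the option handling reformatted above: all bin/ scripts share the same getopt idiom -- long GNU-style options only, with the leading "--" stripped and the values collected into a dict. A minimal self-contained sketch of that pattern (same names as in the script; the exit path is abbreviated):

import getopt
import re
import sys

opts = {}

if __name__ == "__main__":
    try:
        raw_opts, args = getopt.getopt(
            sys.argv[1:], "", "plot ignored-trace-indexes= discard-outliers=".split(" ")
        )
        for option, parameter in raw_opts:
            # "--discard-outliers=0.1" arrives as ("--discard-outliers", "0.1")
            optname = re.sub(r"^--", "", option)
            opts[optname] = parameter
    except getopt.GetoptError as err:
        print(err)
        sys.exit(2)
    # "--ignored-trace-indexes=2,3" -> [2, 3]
    if "ignored-trace-indexes" in opts:
        print(list(map(int, opts["ignored-trace-indexes"].split(","))))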
diff --git a/bin/eval-rel-energy.py b/bin/eval-rel-energy.py
index 6ae35e0..44db226 100755
--- a/bin/eval-rel-energy.py
+++ b/bin/eval-rel-energy.py
@@ -11,15 +11,15 @@ opts = {}
def get_file_groups(args):
groups = []
index_low = 0
- while ':' in args[index_low:]:
- index_high = args[index_low:].index(':') + index_low
- groups.append(args[index_low: index_high])
+ while ":" in args[index_low:]:
+ index_high = args[index_low:].index(":") + index_low
+ groups.append(args[index_low:index_high])
index_low = index_high + 1
groups.append(args[index_low:])
return groups
-if __name__ == '__main__':
+if __name__ == "__main__":
ignored_trace_indexes = []
discard_outliers = None
@@ -30,36 +30,40 @@ if __name__ == '__main__':
try:
optspec = (
- 'plot-unparam= plot-param= show-models= show-quality= '
- 'ignored-trace-indexes= discard-outliers= function-override= '
- 'with-safe-functions'
+ "plot-unparam= plot-param= show-models= show-quality= "
+ "ignored-trace-indexes= discard-outliers= function-override= "
+ "with-safe-functions"
)
- raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(' '))
+ raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(" "))
for option, parameter in raw_opts:
- optname = re.sub(r'^--', '', option)
+ optname = re.sub(r"^--", "", option)
opts[optname] = parameter
- if 'ignored-trace-indexes' in opts:
- ignored_trace_indexes = list(map(int, opts['ignored-trace-indexes'].split(',')))
+ if "ignored-trace-indexes" in opts:
+ ignored_trace_indexes = list(
+ map(int, opts["ignored-trace-indexes"].split(","))
+ )
if 0 in ignored_trace_indexes:
- print('[E] arguments to --ignored-trace-indexes start from 1')
+ print("[E] arguments to --ignored-trace-indexes start from 1")
- if 'discard-outliers' in opts:
- discard_outliers = float(opts['discard-outliers'])
+ if "discard-outliers" in opts:
+ discard_outliers = float(opts["discard-outliers"])
- if 'function-override' in opts:
- for function_desc in opts['function-override'].split(';'):
- state_or_tran, attribute, *function_str = function_desc.split(' ')
- function_override[(state_or_tran, attribute)] = ' '.join(function_str)
+ if "function-override" in opts:
+ for function_desc in opts["function-override"].split(";"):
+ state_or_tran, attribute, *function_str = function_desc.split(" ")
+ function_override[(state_or_tran, attribute)] = " ".join(
+ function_str
+ )
- if 'show-models' in opts:
- show_models = opts['show-models'].split(',')
+ if "show-models" in opts:
+ show_models = opts["show-models"].split(",")
- if 'show-quality' in opts:
- show_quality = opts['show-quality'].split(',')
+ if "show-quality" in opts:
+ show_quality = opts["show-quality"].split(",")
- if 'with-safe-functions' in opts:
+ if "with-safe-functions" in opts:
safe_functions_enabled = True
except getopt.GetoptError as err:
@@ -70,36 +74,50 @@ if __name__ == '__main__':
score_relative = 0
for file_group in get_file_groups(args):
- print('')
- print('{}:'.format(' '.join(file_group)))
+ print("")
+ print("{}:".format(" ".join(file_group)))
raw_data = RawData(file_group)
preprocessed_data = raw_data.get_preprocessed_data(verbose=False)
- by_name, parameters, arg_count = pta_trace_to_aggregate(preprocessed_data, ignored_trace_indexes)
- model = PTAModel(by_name, parameters, arg_count,
- traces=preprocessed_data,
- ignore_trace_indexes=ignored_trace_indexes,
- discard_outliers=discard_outliers,
- function_override=function_override,
- verbose=False)
+ by_name, parameters, arg_count = pta_trace_to_aggregate(
+ preprocessed_data, ignored_trace_indexes
+ )
+ model = PTAModel(
+ by_name,
+ parameters,
+ arg_count,
+ traces=preprocessed_data,
+ ignore_trace_indexes=ignored_trace_indexes,
+ discard_outliers=discard_outliers,
+ function_override=function_override,
+ verbose=False,
+ )
lut_quality = model.assess(model.get_param_lut())
for trans in model.transitions():
- absolute_quality = lut_quality['by_name'][trans]['energy']
- relative_quality = lut_quality['by_name'][trans]['rel_energy_prev']
- if absolute_quality['mae'] < relative_quality['mae']:
- best = 'absolute'
+ absolute_quality = lut_quality["by_name"][trans]["energy"]
+ relative_quality = lut_quality["by_name"][trans]["rel_energy_prev"]
+ if absolute_quality["mae"] < relative_quality["mae"]:
+ best = "absolute"
score_absolute += 1
else:
- best = 'relative'
+ best = "relative"
score_relative += 1
- print('{:20s}: {:s} (diff {:.0f} / {:.2f}%, abs {:.0f} / {:.2f}%, rel {:.0f} / {:.2f}%)'.format(
- trans, best,
- abs(absolute_quality['mae'] - relative_quality['mae']),
- abs(absolute_quality['mae'] - relative_quality['mae']) * 100 / max(absolute_quality['mae'], relative_quality['mae']),
- absolute_quality['mae'], absolute_quality['smape'],
- relative_quality['mae'], relative_quality['smape']))
+ print(
+ "{:20s}: {:s} (diff {:.0f} / {:.2f}%, abs {:.0f} / {:.2f}%, rel {:.0f} / {:.2f}%)".format(
+ trans,
+ best,
+ abs(absolute_quality["mae"] - relative_quality["mae"]),
+ abs(absolute_quality["mae"] - relative_quality["mae"])
+ * 100
+ / max(absolute_quality["mae"], relative_quality["mae"]),
+ absolute_quality["mae"],
+ absolute_quality["smape"],
+ relative_quality["mae"],
+ relative_quality["smape"],
+ )
+ )
sys.exit(0)
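
The get_file_groups helper above determines which measurement files are evaluated together: arguments are split into groups at ":" separators, and each group is scored as one measurement series. Behaviour sketch with the function as shown in the hunk:

def get_file_groups(args):
    groups = []
    index_low = 0
    while ":" in args[index_low:]:
        index_high = args[index_low:].index(":") + index_low
        groups.append(args[index_low:index_high])
        index_low = index_high + 1
    groups.append(args[index_low:])
    return groups

# two groups: ["a.tar", "b.tar"] and ["c.tar"]
assert get_file_groups(["a.tar", "b.tar", ":", "c.tar"]) == [
    ["a.tar", "b.tar"],
    ["c.tar"],
]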
diff --git a/bin/generate-dfa-benchmark.py b/bin/generate-dfa-benchmark.py
index fedd12b..478b221 100755
--- a/bin/generate-dfa-benchmark.py
+++ b/bin/generate-dfa-benchmark.py
@@ -98,22 +98,29 @@ from dfatool.utils import flatten
opt = dict()
-def benchmark_from_runs(pta: PTA, runs: list, harness: OnboardTimerHarness, benchmark_id: int = 0, dummy=False, repeat=0) -> io.StringIO:
+def benchmark_from_runs(
+ pta: PTA,
+ runs: list,
+ harness: OnboardTimerHarness,
+ benchmark_id: int = 0,
+ dummy=False,
+ repeat=0,
+) -> io.StringIO:
outbuf = io.StringIO()
outbuf.write('#include "arch.h"\n')
if dummy:
outbuf.write('#include "driver/dummy.h"\n')
- elif 'includes' in pta.codegen:
- for include in pta.codegen['includes']:
+ elif "includes" in pta.codegen:
+ for include in pta.codegen["includes"]:
outbuf.write('#include "{}"\n'.format(include))
outbuf.write(harness.global_code())
- outbuf.write('int main(void)\n')
- outbuf.write('{\n')
+ outbuf.write("int main(void)\n")
+ outbuf.write("{\n")
- for driver in ('arch', 'gpio', 'kout'):
- outbuf.write('{}.setup();\n'.format(driver))
+ for driver in ("arch", "gpio", "kout"):
+ outbuf.write("{}.setup();\n".format(driver))
# There is a race condition between flashing the code and starting the UART log.
# When starting the log before flashing, output from a previous benchmark may cause bogus data to be added.
@@ -125,37 +132,37 @@ def benchmark_from_runs(pta: PTA, runs: list, harness: OnboardTimerHarness, benc
# For energytrace, the device is connected to VCC and set up before
 # the initialization delay -- this puts it into a well-defined state and
# decreases pre-sync power consumption
- if 'energytrace' not in opt:
- if 'mimosa' in opt:
- outbuf.write('arch.delay_ms(12000);\n')
+ if "energytrace" not in opt:
+ if "mimosa" in opt:
+ outbuf.write("arch.delay_ms(12000);\n")
else:
- outbuf.write('arch.delay_ms(2000);\n')
+ outbuf.write("arch.delay_ms(2000);\n")
# Output some newlines to ensure the parser can determine the start of the first real output line
- outbuf.write('kout << endl << endl;\n')
+ outbuf.write("kout << endl << endl;\n")
- if 'setup' in pta.codegen:
- for call in pta.codegen['setup']:
+ if "setup" in pta.codegen:
+ for call in pta.codegen["setup"]:
outbuf.write(call)
- if 'energytrace' in opt:
- outbuf.write('for (unsigned char i = 0; i < 10; i++) {\n')
- outbuf.write('arch.sleep_ms(250);\n}\n')
+ if "energytrace" in opt:
+ outbuf.write("for (unsigned char i = 0; i < 10; i++) {\n")
+ outbuf.write("arch.sleep_ms(250);\n}\n")
# Output some newlines to ensure the parser can determine the start of the first real output line
- outbuf.write('kout << endl << endl;\n')
+ outbuf.write("kout << endl << endl;\n")
if repeat:
- outbuf.write('unsigned char i = 0;\n')
- outbuf.write('while (i++ < {}) {{\n'.format(repeat))
+ outbuf.write("unsigned char i = 0;\n")
+ outbuf.write("while (i++ < {}) {{\n".format(repeat))
else:
- outbuf.write('while (1) {\n')
+ outbuf.write("while (1) {\n")
outbuf.write(harness.start_benchmark())
- class_prefix = ''
- if 'instance' in opt:
- class_prefix = '{}.'.format(opt['instance'])
- elif 'instance' in pta.codegen:
- class_prefix = '{}.'.format(pta.codegen['instance'])
+ class_prefix = ""
+ if "instance" in opt:
+ class_prefix = "{}.".format(opt["instance"])
+ elif "instance" in pta.codegen:
+ class_prefix = "{}.".format(pta.codegen["instance"])
num_transitions = 0
num_traces = 0
@@ -167,56 +174,105 @@ def benchmark_from_runs(pta: PTA, runs: list, harness: OnboardTimerHarness, benc
num_transitions += 1
harness.append_transition(transition.name, param, arguments)
harness.append_state(transition.destination.name, parameter.copy())
- outbuf.write('// {} -> {}\n'.format(transition.origin.name, transition.destination.name))
+ outbuf.write(
+ "// {} -> {}\n".format(
+ transition.origin.name, transition.destination.name
+ )
+ )
if transition.is_interrupt:
- outbuf.write('// wait for {} interrupt\n'.format(transition.name))
- transition_code = '// TODO add startTransition / stopTransition calls to interrupt routine'
+ outbuf.write("// wait for {} interrupt\n".format(transition.name))
+ transition_code = "// TODO add startTransition / stopTransition calls to interrupt routine"
else:
- transition_code = '{}{}({});'.format(class_prefix, transition.name, ', '.join(map(str, arguments)))
- outbuf.write(harness.pass_transition(pta.get_transition_id(transition), transition_code, transition=transition))
+ transition_code = "{}{}({});".format(
+ class_prefix, transition.name, ", ".join(map(str, arguments))
+ )
+ outbuf.write(
+ harness.pass_transition(
+ pta.get_transition_id(transition),
+ transition_code,
+ transition=transition,
+ )
+ )
param = parameter
- outbuf.write('// current parameters: {}\n'.format(', '.join(map(lambda kv: '{}={}'.format(*kv), param.items()))))
-
- if 'delay_after_ms' in transition.codegen:
- if 'energytrace' in opt:
- outbuf.write('arch.sleep_ms({:d}); // {} -- delay mandated by codegen.delay_after_ms\n'.format(transition.codegen['delay_after_ms'], transition.destination.name))
+ outbuf.write(
+ "// current parameters: {}\n".format(
+ ", ".join(map(lambda kv: "{}={}".format(*kv), param.items()))
+ )
+ )
+
+ if "delay_after_ms" in transition.codegen:
+ if "energytrace" in opt:
+ outbuf.write(
+ "arch.sleep_ms({:d}); // {} -- delay mandated by codegen.delay_after_ms\n".format(
+ transition.codegen["delay_after_ms"],
+ transition.destination.name,
+ )
+ )
else:
- outbuf.write('arch.delay_ms({:d}); // {} -- delay mandated by codegen.delay_after_ms\n'.format(transition.codegen['delay_after_ms'], transition.destination.name))
- elif opt['sleep']:
- if 'energytrace' in opt:
- outbuf.write('arch.sleep_ms({:d}); // {}\n'.format(opt['sleep'], transition.destination.name))
+ outbuf.write(
+ "arch.delay_ms({:d}); // {} -- delay mandated by codegen.delay_after_ms\n".format(
+ transition.codegen["delay_after_ms"],
+ transition.destination.name,
+ )
+ )
+ elif opt["sleep"]:
+ if "energytrace" in opt:
+ outbuf.write(
+ "arch.sleep_ms({:d}); // {}\n".format(
+ opt["sleep"], transition.destination.name
+ )
+ )
else:
- outbuf.write('arch.delay_ms({:d}); // {}\n'.format(opt['sleep'], transition.destination.name))
+ outbuf.write(
+ "arch.delay_ms({:d}); // {}\n".format(
+ opt["sleep"], transition.destination.name
+ )
+ )
outbuf.write(harness.stop_run(num_traces))
if dummy:
- outbuf.write('kout << "[Energy] " << {}getEnergy() << endl;\n'.format(class_prefix))
- outbuf.write('\n')
+ outbuf.write(
+ 'kout << "[Energy] " << {}getEnergy() << endl;\n'.format(class_prefix)
+ )
+ outbuf.write("\n")
num_traces += 1
outbuf.write(harness.stop_benchmark())
- outbuf.write('}\n')
+ outbuf.write("}\n")
# Ensure logging can be terminated after the specified number of measurements
outbuf.write(harness.start_benchmark())
- outbuf.write('while(1) { }\n')
- outbuf.write('return 0;\n')
- outbuf.write('}\n')
+ outbuf.write("while(1) { }\n")
+ outbuf.write("return 0;\n")
+ outbuf.write("}\n")
return outbuf
-def run_benchmark(application_file: str, pta: PTA, runs: list, arch: str, app: str, run_args: list, harness: object, sleep: int = 0, repeat: int = 0, run_offset: int = 0, runs_total: int = 0, dummy=False):
- if 'mimosa' in opt or 'energytrace' in opt:
+def run_benchmark(
+ application_file: str,
+ pta: PTA,
+ runs: list,
+ arch: str,
+ app: str,
+ run_args: list,
+ harness: object,
+ sleep: int = 0,
+ repeat: int = 0,
+ run_offset: int = 0,
+ runs_total: int = 0,
+ dummy=False,
+):
+ if "mimosa" in opt or "energytrace" in opt:
outbuf = benchmark_from_runs(pta, runs, harness, dummy=dummy, repeat=1)
else:
outbuf = benchmark_from_runs(pta, runs, harness, dummy=dummy, repeat=repeat)
- with open(application_file, 'w') as f:
+ with open(application_file, "w") as f:
f.write(outbuf.getvalue())
- print('[MAKE] building benchmark with {:d} runs'.format(len(runs)))
+ print("[MAKE] building benchmark with {:d} runs".format(len(runs)))
# assume an average of 10ms per transition. Mind the 10s start delay.
run_timeout = 10 + num_transitions * (sleep + 10) / 1000
@@ -241,23 +297,55 @@ def run_benchmark(application_file: str, pta: PTA, runs: list, arch: str, app: s
# This has been deliberately taken out of the except clause to avoid nested exception handlers
# (they lead to pretty interesting tracebacks which are probably more confusing than helpful)
if needs_split:
- print('[MAKE] benchmark code is too large, splitting up')
+ print("[MAKE] benchmark code is too large, splitting up")
mid = len(runs) // 2
# Previously prepared trace data is useless
harness.reset()
- results = run_benchmark(application_file, pta, runs[:mid], arch, app, run_args, harness.copy(), sleep, repeat, run_offset=run_offset, runs_total=runs_total, dummy=dummy)
- results.extend(run_benchmark(application_file, pta, runs[mid:], arch, app, run_args, harness.copy(), sleep, repeat, run_offset=run_offset + mid, runs_total=runs_total, dummy=dummy))
+ results = run_benchmark(
+ application_file,
+ pta,
+ runs[:mid],
+ arch,
+ app,
+ run_args,
+ harness.copy(),
+ sleep,
+ repeat,
+ run_offset=run_offset,
+ runs_total=runs_total,
+ dummy=dummy,
+ )
+ results.extend(
+ run_benchmark(
+ application_file,
+ pta,
+ runs[mid:],
+ arch,
+ app,
+ run_args,
+ harness.copy(),
+ sleep,
+ repeat,
+ run_offset=run_offset + mid,
+ runs_total=runs_total,
+ dummy=dummy,
+ )
+ )
return results
- if 'mimosa' in opt or 'energytrace' in opt:
+ if "mimosa" in opt or "energytrace" in opt:
files = list()
i = 0
- while i < opt['repeat']:
+ while i < opt["repeat"]:
runner.flash(arch, app, run_args)
- if 'mimosa' in opt:
- monitor = runner.get_monitor(arch, callback=harness.parser_cb, mimosa=opt['mimosa'])
- elif 'energytrace' in opt:
- monitor = runner.get_monitor(arch, callback=harness.parser_cb, energytrace=opt['energytrace'])
+ if "mimosa" in opt:
+ monitor = runner.get_monitor(
+ arch, callback=harness.parser_cb, mimosa=opt["mimosa"]
+ )
+ elif "energytrace" in opt:
+ monitor = runner.get_monitor(
+ arch, callback=harness.parser_cb, energytrace=opt["energytrace"]
+ )
sync_error = False
try:
@@ -266,17 +354,31 @@ def run_benchmark(application_file: str, pta: PTA, runs: list, arch: str, app: s
# possible race condition: if the benchmark completes at this
# exact point, it sets harness.done and unsets harness.synced.
# vvv
- if slept > 30 and slept < 40 and not harness.synced and not harness.done:
- print('[RUN] has been unsynced for more than 30 seconds, assuming error. Retrying.')
+ if (
+ slept > 30
+ and slept < 40
+ and not harness.synced
+ and not harness.done
+ ):
+ print(
+ "[RUN] has been unsynced for more than 30 seconds, assuming error. Retrying."
+ )
sync_error = True
break
if harness.abort:
- print('[RUN] harness encountered an error. Retrying')
+ print("[RUN] harness encountered an error. Retrying")
sync_error = True
break
time.sleep(5)
slept += 5
- print('[RUN] {:d}/{:d} ({:.0f}%), current benchmark at {:.0f}%'.format(run_offset, runs_total, run_offset * 100 / runs_total, slept * 100 / run_timeout))
+ print(
+ "[RUN] {:d}/{:d} ({:.0f}%), current benchmark at {:.0f}%".format(
+ run_offset,
+ runs_total,
+ run_offset * 100 / runs_total,
+ slept * 100 / run_timeout,
+ )
+ )
except KeyboardInterrupt:
pass
@@ -297,8 +399,8 @@ def run_benchmark(application_file: str, pta: PTA, runs: list, arch: str, app: s
runner.flash(arch, app, run_args)
monitor = runner.get_monitor(arch, callback=harness.parser_cb)
- if arch == 'posix':
- print('[RUN] Will run benchmark for {:.0f} seconds'.format(run_timeout))
+ if arch == "posix":
+ print("[RUN] Will run benchmark for {:.0f} seconds".format(run_timeout))
lines = monitor.run(int(run_timeout))
return [(runs, harness, lines, list())]
@@ -307,7 +409,14 @@ def run_benchmark(application_file: str, pta: PTA, runs: list, arch: str, app: s
while not harness.done:
time.sleep(5)
slept += 5
- print('[RUN] {:d}/{:d} ({:.0f}%), current benchmark at {:.0f}%'.format(run_offset, runs_total, run_offset * 100 / runs_total, slept * 100 / run_timeout))
+ print(
+ "[RUN] {:d}/{:d} ({:.0f}%), current benchmark at {:.0f}%".format(
+ run_offset,
+ runs_total,
+ run_offset * 100 / runs_total,
+ slept * 100 / run_timeout,
+ )
+ )
except KeyboardInterrupt:
pass
monitor.close()
@@ -315,85 +424,91 @@ def run_benchmark(application_file: str, pta: PTA, runs: list, arch: str, app: s
return [(runs, harness, monitor, list())]
-if __name__ == '__main__':
+if __name__ == "__main__":
try:
optspec = (
- 'accounting= '
- 'arch= '
- 'app= '
- 'data= '
- 'depth= '
- 'dummy= '
- 'energytrace= '
- 'instance= '
- 'mimosa= '
- 'repeat= '
- 'run= '
- 'sleep= '
- 'shrink '
- 'timing '
- 'timer-pin= '
- 'trace-filter= '
+ "accounting= "
+ "arch= "
+ "app= "
+ "data= "
+ "depth= "
+ "dummy= "
+ "energytrace= "
+ "instance= "
+ "mimosa= "
+ "repeat= "
+ "run= "
+ "sleep= "
+ "shrink "
+ "timing "
+ "timer-pin= "
+ "trace-filter= "
)
- raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(' '))
+ raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(" "))
for option, parameter in raw_opts:
- optname = re.sub(r'^--', '', option)
+ optname = re.sub(r"^--", "", option)
opt[optname] = parameter
- if 'app' not in opt:
- opt['app'] = 'aemr'
+ if "app" not in opt:
+ opt["app"] = "aemr"
- if 'depth' in opt:
- opt['depth'] = int(opt['depth'])
+ if "depth" in opt:
+ opt["depth"] = int(opt["depth"])
else:
- opt['depth'] = 3
+ opt["depth"] = 3
- if 'repeat' in opt:
- opt['repeat'] = int(opt['repeat'])
+ if "repeat" in opt:
+ opt["repeat"] = int(opt["repeat"])
else:
- opt['repeat'] = 0
+ opt["repeat"] = 0
- if 'sleep' in opt:
- opt['sleep'] = int(opt['sleep'])
+ if "sleep" in opt:
+ opt["sleep"] = int(opt["sleep"])
else:
- opt['sleep'] = 0
+ opt["sleep"] = 0
- if 'trace-filter' in opt:
+ if "trace-filter" in opt:
trace_filter = list()
- for trace in opt['trace-filter'].split():
- trace_filter.append(trace.split(','))
- opt['trace-filter'] = trace_filter
+ for trace in opt["trace-filter"].split():
+ trace_filter.append(trace.split(","))
+ opt["trace-filter"] = trace_filter
else:
- opt['trace-filter'] = None
+ opt["trace-filter"] = None
- if 'mimosa' in opt:
- if opt['mimosa'] == '':
- opt['mimosa'] = dict()
+ if "mimosa" in opt:
+ if opt["mimosa"] == "":
+ opt["mimosa"] = dict()
else:
- opt['mimosa'] = dict(map(lambda x: x.split('='), opt['mimosa'].split(',')))
- opt.pop('timing', None)
- if opt['repeat'] == 0:
- opt['repeat'] = 1
-
- if 'energytrace' in opt:
- if opt['energytrace'] == '':
- opt['energytrace'] = dict()
+ opt["mimosa"] = dict(
+ map(lambda x: x.split("="), opt["mimosa"].split(","))
+ )
+ opt.pop("timing", None)
+ if opt["repeat"] == 0:
+ opt["repeat"] = 1
+
+ if "energytrace" in opt:
+ if opt["energytrace"] == "":
+ opt["energytrace"] = dict()
else:
- opt['energytrace'] = dict(map(lambda x: x.split('='), opt['energytrace'].split(',')))
- opt.pop('timing', None)
- if opt['repeat'] == 0:
- opt['repeat'] = 1
-
- if 'data' not in opt:
- opt['data'] = '../data'
-
- if 'dummy' in opt:
- if opt['dummy'] == '':
- opt['dummy'] = dict()
+ opt["energytrace"] = dict(
+ map(lambda x: x.split("="), opt["energytrace"].split(","))
+ )
+ opt.pop("timing", None)
+ if opt["repeat"] == 0:
+ opt["repeat"] = 1
+
+ if "data" not in opt:
+ opt["data"] = "../data"
+
+ if "dummy" in opt:
+ if opt["dummy"] == "":
+ opt["dummy"] = dict()
else:
- opt['dummy'] = dict(map(lambda x: x.split('='), opt['dummy'].split(',')))
+ opt["dummy"] = dict(
+ map(lambda x: x.split("="), opt["dummy"].split(","))
+ )
except getopt.GetoptError as err:
print(err)
@@ -404,69 +519,96 @@ if __name__ == '__main__':
pta = PTA.from_file(modelfile)
run_flags = None
- if 'shrink' in opt:
+ if "shrink" in opt:
pta.shrink_argument_values()
- if 'timer-pin' in opt:
- timer_pin = opt['timer-pin']
+ if "timer-pin" in opt:
+ timer_pin = opt["timer-pin"]
else:
timer_pin = None
- if 'dummy' in opt:
+ if "dummy" in opt:
enum = dict()
- if '.json' not in modelfile:
- with open(modelfile, 'r') as f:
+ if ".json" not in modelfile:
+ with open(modelfile, "r") as f:
driver_definition = yaml.safe_load(f)
- if 'dummygen' in driver_definition and 'enum' in driver_definition['dummygen']:
- enum = driver_definition['dummygen']['enum']
-
- if 'class' in opt['dummy']:
- class_name = opt['dummy']['class']
+ if (
+ "dummygen" in driver_definition
+ and "enum" in driver_definition["dummygen"]
+ ):
+ enum = driver_definition["dummygen"]["enum"]
+
+ if "class" in opt["dummy"]:
+ class_name = opt["dummy"]["class"]
else:
- class_name = driver_definition['codegen']['class']
+ class_name = driver_definition["codegen"]["class"]
- run_flags = ['drivers=dummy']
+ run_flags = ["drivers=dummy"]
- repo = Repo('../multipass/build/repo.acp')
+ repo = Repo("../multipass/build/repo.acp")
- if 'accounting' in opt and 'getEnergy' not in map(lambda x: x.name, pta.transitions):
+ if "accounting" in opt and "getEnergy" not in map(
+ lambda x: x.name, pta.transitions
+ ):
for state in pta.get_state_names():
- pta.add_transition(state, state, 'getEnergy')
+ pta.add_transition(state, state, "getEnergy")
pta.set_random_energy_model()
- if 'accounting' in opt:
- if ',' in opt['accounting']:
- accounting_settings = opt['accounting'].split(',')
+ if "accounting" in opt:
+ if "," in opt["accounting"]:
+ accounting_settings = opt["accounting"].split(",")
accounting_name = accounting_settings[0]
- accounting_options = dict(map(lambda x: x.split('='), accounting_settings[1:]))
- accounting_object = get_accountingmethod(accounting_name)(class_name, pta, **accounting_options)
+ accounting_options = dict(
+ map(lambda x: x.split("="), accounting_settings[1:])
+ )
+ accounting_object = get_accountingmethod(accounting_name)(
+ class_name, pta, **accounting_options
+ )
else:
- accounting_object = get_accountingmethod(opt['accounting'])(class_name, pta)
+ accounting_object = get_accountingmethod(opt["accounting"])(
+ class_name, pta
+ )
else:
accounting_object = None
- drv = MultipassDriver(class_name, pta, repo.class_by_name[class_name], enum=enum, accounting=accounting_object)
- with open('../multipass/src/driver/dummy.cc', 'w') as f:
+ drv = MultipassDriver(
+ class_name,
+ pta,
+ repo.class_by_name[class_name],
+ enum=enum,
+ accounting=accounting_object,
+ )
+ with open("../multipass/src/driver/dummy.cc", "w") as f:
f.write(drv.impl)
- with open('../multipass/include/driver/dummy.h', 'w') as f:
+ with open("../multipass/include/driver/dummy.h", "w") as f:
f.write(drv.header)
- if '.json' not in modelfile:
- with open(modelfile, 'r') as f:
+ if ".json" not in modelfile:
+ with open(modelfile, "r") as f:
driver_definition = yaml.safe_load(f)
- if 'codegen' in driver_definition and 'flags' in driver_definition['codegen']:
+ if "codegen" in driver_definition and "flags" in driver_definition["codegen"]:
if run_flags is None:
- run_flags = driver_definition['codegen']['flags']
+ run_flags = driver_definition["codegen"]["flags"]
if run_flags is None:
- run_flags = opt['run'].split()
-
- runs = list(pta.dfs(opt['depth'], with_arguments=True, with_parameters=True, trace_filter=opt['trace-filter']))
+ run_flags = opt["run"].split()
+
+ runs = list(
+ pta.dfs(
+ opt["depth"],
+ with_arguments=True,
+ with_parameters=True,
+ trace_filter=opt["trace-filter"],
+ )
+ )
num_transitions = len(runs)
if len(runs) == 0:
- print('DFS returned no traces -- perhaps your trace-filter is too restrictive?', file=sys.stderr)
+ print(
+ "DFS returned no traces -- perhaps your trace-filter is too restrictive?",
+ file=sys.stderr,
+ )
sys.exit(1)
need_return_values = False
@@ -479,45 +621,78 @@ if __name__ == '__main__':
# # getEnergy() returns energy data. Log it.
# need_return_values = True
- if 'mimosa' in opt:
- harness = TransitionHarness(gpio_pin=timer_pin, pta=pta, log_return_values=need_return_values, repeat=1, post_transition_delay_us=20)
- elif 'energytrace' in opt:
- harness = OnboardTimerHarness(gpio_pin=timer_pin, gpio_mode='bar', pta=pta, counter_limits=runner.get_counter_limits_us(opt['arch']), log_return_values=need_return_values, repeat=1)
- elif 'timing' in opt:
- harness = OnboardTimerHarness(gpio_pin=timer_pin, pta=pta, counter_limits=runner.get_counter_limits_us(opt['arch']), log_return_values=need_return_values, repeat=opt['repeat'])
+ if "mimosa" in opt:
+ harness = TransitionHarness(
+ gpio_pin=timer_pin,
+ pta=pta,
+ log_return_values=need_return_values,
+ repeat=1,
+ post_transition_delay_us=20,
+ )
+ elif "energytrace" in opt:
+ harness = OnboardTimerHarness(
+ gpio_pin=timer_pin,
+ gpio_mode="bar",
+ pta=pta,
+ counter_limits=runner.get_counter_limits_us(opt["arch"]),
+ log_return_values=need_return_values,
+ repeat=1,
+ )
+ elif "timing" in opt:
+ harness = OnboardTimerHarness(
+ gpio_pin=timer_pin,
+ pta=pta,
+ counter_limits=runner.get_counter_limits_us(opt["arch"]),
+ log_return_values=need_return_values,
+ repeat=opt["repeat"],
+ )
if len(args) > 1:
- results = run_benchmark(args[1], pta, runs, opt['arch'], opt['app'], run_flags, harness, opt['sleep'], opt['repeat'], runs_total=len(runs), dummy='dummy' in opt)
+ results = run_benchmark(
+ args[1],
+ pta,
+ runs,
+ opt["arch"],
+ opt["app"],
+ run_flags,
+ harness,
+ opt["sleep"],
+ opt["repeat"],
+ runs_total=len(runs),
+ dummy="dummy" in opt,
+ )
json_out = {
- 'opt': opt,
- 'pta': pta.to_json(),
- 'traces': list(map(lambda x: x[1].traces, results)),
- 'raw_output': list(map(lambda x: x[2].get_lines(), results)),
- 'files': list(map(lambda x: x[3], results)),
- 'configs': list(map(lambda x: x[2].get_config(), results)),
+ "opt": opt,
+ "pta": pta.to_json(),
+ "traces": list(map(lambda x: x[1].traces, results)),
+ "raw_output": list(map(lambda x: x[2].get_lines(), results)),
+ "files": list(map(lambda x: x[3], results)),
+ "configs": list(map(lambda x: x[2].get_config(), results)),
}
- extra_files = flatten(json_out['files'])
- if 'instance' in pta.codegen:
- output_prefix = opt['data'] + time.strftime('/%Y%m%d-%H%M%S-') + pta.codegen['instance']
+ extra_files = flatten(json_out["files"])
+ if "instance" in pta.codegen:
+ output_prefix = (
+ opt["data"] + time.strftime("/%Y%m%d-%H%M%S-") + pta.codegen["instance"]
+ )
else:
- output_prefix = opt['data'] + time.strftime('/%Y%m%d-%H%M%S-ptalog')
+ output_prefix = opt["data"] + time.strftime("/%Y%m%d-%H%M%S-ptalog")
if len(extra_files):
- with open('ptalog.json', 'w') as f:
+ with open("ptalog.json", "w") as f:
json.dump(json_out, f)
- with tarfile.open('{}.tar'.format(output_prefix), 'w') as tar:
- tar.add('ptalog.json')
+ with tarfile.open("{}.tar".format(output_prefix), "w") as tar:
+ tar.add("ptalog.json")
for extra_file in extra_files:
tar.add(extra_file)
- print(' --> {}.tar'.format(output_prefix))
- os.remove('ptalog.json')
+ print(" --> {}.tar".format(output_prefix))
+ os.remove("ptalog.json")
for extra_file in extra_files:
os.remove(extra_file)
else:
- with open('{}.json'.format(output_prefix), 'w') as f:
+ with open("{}.json".format(output_prefix), "w") as f:
json.dump(json_out, f)
- print(' --> {}.json'.format(output_prefix))
+ print(" --> {}.json".format(output_prefix))
else:
- outbuf = benchmark_from_runs(pta, runs, harness, repeat=opt['repeat'])
+ outbuf = benchmark_from_runs(pta, runs, harness, repeat=opt["repeat"])
print(outbuf.getvalue())
sys.exit(0)
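
The "benchmark code is too large" branch reformatted above halves the run list and recurses on both halves, threading run_offset through so that progress output keeps referring to absolute run numbers. A standalone sketch of that splitting strategy, with fits() as a hypothetical stand-in for the flash-size check:

def split_runs(runs, fits, offset=0):
    # recursively halve until each chunk satisfies fits(); mirrors the
    # run_benchmark recursion, minus flashing and monitoring
    if fits(runs):
        return [(offset, runs)]
    mid = len(runs) // 2
    return split_runs(runs[:mid], fits, offset) + split_runs(
        runs[mid:], fits, offset + mid
    )

# example: treat anything longer than two runs as "too large"
print(split_runs(list(range(5)), lambda r: len(r) <= 2))
# [(0, [0, 1]), (2, [2]), (3, [3, 4])]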
diff --git a/bin/generate-dummy-class.py b/bin/generate-dummy-class.py
index d86339b..9cfa7fb 100755
--- a/bin/generate-dummy-class.py
+++ b/bin/generate-dummy-class.py
@@ -5,19 +5,19 @@ from dfatool.codegen import MultipassDriver
from dfatool.automata import PTA
import yaml
-with open('../multipass/model/driver/nrf24l01.dfa', 'r') as f:
+with open("../multipass/model/driver/nrf24l01.dfa", "r") as f:
driver_definition = yaml.safe_load(f)
pta = PTA.from_yaml(driver_definition)
-repo = Repo('../multipass/build/repo.acp')
+repo = Repo("../multipass/build/repo.acp")
enum = dict()
-if 'dummygen' in driver_definition and 'enum' in driver_definition['dummygen']:
- enum = driver_definition['dummygen']['enum']
+if "dummygen" in driver_definition and "enum" in driver_definition["dummygen"]:
+ enum = driver_definition["dummygen"]["enum"]
-drv = MultipassDriver('Nrf24l01', pta, repo.class_by_name['Nrf24l01'], enum=enum)
+drv = MultipassDriver("Nrf24l01", pta, repo.class_by_name["Nrf24l01"], enum=enum)
-with open('../multipass/src/driver/dummy.cc', 'w') as f:
+with open("../multipass/src/driver/dummy.cc", "w") as f:
f.write(drv.impl)
-with open('../multipass/include/driver/dummy.h', 'w') as f:
+with open("../multipass/include/driver/dummy.h", "w") as f:
f.write(drv.header)
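
generate-dummy-class.py reads exactly two things from the driver definition: the PTA and an optional dummygen.enum mapping. A hypothetical definition showing only the keys the script accesses (the enum contents here are invented for illustration):

import yaml

driver_definition = yaml.safe_load(
    "codegen:\n"
    "  class: Nrf24l01\n"
    "dummygen:\n"
    "  enum:\n"
    "    tx_power: [TX_POWER_LOW, TX_POWER_HIGH]\n"
)

enum = dict()
if "dummygen" in driver_definition and "enum" in driver_definition["dummygen"]:
    enum = driver_definition["dummygen"]["enum"]
print(enum)  # {'tx_power': ['TX_POWER_LOW', 'TX_POWER_HIGH']}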
diff --git a/bin/gptest.py b/bin/gptest.py
index 869328e..82b4575 100755
--- a/bin/gptest.py
+++ b/bin/gptest.py
@@ -2,10 +2,16 @@
import sys
import numpy as np
-from dfatool.dfatool import PTAModel, RawData, regression_measures, pta_trace_to_aggregate
+from dfatool.dfatool import (
+ PTAModel,
+ RawData,
+ regression_measures,
+ pta_trace_to_aggregate,
+)
from gplearn.genetic import SymbolicRegressor
from multiprocessing import Pool
+
def splitidx_srs(length):
shuffled = np.random.permutation(np.arange(length))
border = int(length * float(2) / 3)
@@ -13,16 +19,17 @@ def splitidx_srs(length):
validation = shuffled[border:]
return (training, validation)
+
def _gp_fit(arg):
param = arg[0]
X = arg[1]
Y = arg[2]
est_gp = SymbolicRegressor(
- population_size = param[0],
- generations = 450,
- parsimony_coefficient = param[1],
- function_set = param[2].split(' '),
- const_range = (-param[3], param[3])
+ population_size=param[0],
+ generations=450,
+ parsimony_coefficient=param[1],
+ function_set=param[2].split(" "),
+ const_range=(-param[3], param[3]),
)
training, validation = splitidx_srs(len(Y))
@@ -33,22 +40,27 @@ def _gp_fit(arg):
try:
est_gp.fit(X_train, Y_train)
- return (param, str(est_gp._program), est_gp._program.raw_fitness_, regression_measures(est_gp.predict(X_validation), Y_validation))
+ return (
+ param,
+ str(est_gp._program),
+ est_gp._program.raw_fitness_,
+ regression_measures(est_gp.predict(X_validation), Y_validation),
+ )
except Exception as e:
- return (param, 'Exception: {}'.format(str(e)), 999999999)
+        return (param, "Exception: {}".format(str(e)), 999999999, {"mae": float("nan")})  # 4-tuple like the success path, so res[3] stays valid
-if __name__ == '__main__':
+if __name__ == "__main__":
population_size = [100, 500, 1000, 2000, 5000, 10000]
parsimony_coefficient = [0.1, 0.5, 0.1, 1]
- function_set = ['add mul', 'add mul sub div', 'add mul sub div sqrt log inv']
+ function_set = ["add mul", "add mul sub div", "add mul sub div sqrt log inv"]
const_lim = [100000, 50000, 10000, 1000, 500, 10, 1]
filenames = sys.argv[4:]
raw_data = RawData(filenames)
preprocessed_data = raw_data.get_preprocessed_data()
by_name, parameters, arg_count = pta_trace_to_aggregate(preprocessed_data)
- model = PTAModel(by_name, parameters, arg_count, traces = preprocessed_data)
+ model = PTAModel(by_name, parameters, arg_count, traces=preprocessed_data)
by_param = model.by_param
@@ -61,14 +73,12 @@ if __name__ == '__main__':
X = [[] for i in range(dimension)]
Y = []
-
for key, val in by_param.items():
if key[0] == state_or_tran and len(key[1]) == dimension:
Y.extend(val[model_attribute])
for i in range(dimension):
X[i].extend([float(key[1][i])] * len(val[model_attribute]))
-
X = np.array(X)
Y = np.array(Y)
@@ -85,4 +95,4 @@ if __name__ == '__main__':
results = pool.map(_gp_fit, paramqueue)
for res in sorted(results, key=lambda r: r[2]):
- print('{} {:.0f} ({:.0f})\n{}'.format(res[0], res[3]['mae'], res[2], res[1]))
+ print("{} {:.0f} ({:.0f})\n{}".format(res[0], res[3]["mae"], res[2], res[1]))
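
gptest.py grid-searches SymbolicRegressor hyperparameters and scores each candidate on a held-out third of the samples. The split helper from the hunk above, in isolation:

import numpy as np

def splitidx_srs(length):
    # random permutation; first two thirds for training, rest for validation
    shuffled = np.random.permutation(np.arange(length))
    border = int(length * float(2) / 3)
    return (shuffled[:border], shuffled[border:])

training, validation = splitidx_srs(9)
assert len(training) == 6 and len(validation) == 3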
diff --git a/bin/mim-vs-keysight.py b/bin/mim-vs-keysight.py
index d1ff4e6..c214f2f 100755
--- a/bin/mim-vs-keysight.py
+++ b/bin/mim-vs-keysight.py
@@ -18,6 +18,7 @@ ks = KeysightCSV()
charges, triggers = mim.load_data(mimfile)
timestamps, currents = ks.load_data(csvfile)
+
def calfunc330(charge):
if charge < 140.210488888889:
return 0
@@ -26,6 +27,7 @@ def calfunc330(charge):
else:
return float(charge) * 0.0897304193584184 + -47.2437278033012 + 36.358862
+
def calfunc82(charge):
if charge < 126.993600:
return 0
@@ -43,24 +45,25 @@ def calfunc33(charge):
else:
return charge * 0.884357 + -112.500777 + 36.358862
+
calfuncs = {
- 33 : calfunc33,
- 82 : calfunc82,
- 330 : calfunc330,
+ 33: calfunc33,
+ 82: calfunc82,
+ 330: calfunc330,
}
vcalfunc = np.vectorize(calfuncs[int(shunt)], otypes=[np.float64])
-#plt.plot(np.arange(0, 1000, 0.01), vcalfunc(np.arange(0, 1000, 0.01)))
-#plt.xlabel('Rohdatenwert')
-#plt.ylabel('Strom [µA]')
-#plt.show()
-#sys.exit(0)
+# plt.plot(np.arange(0, 1000, 0.01), vcalfunc(np.arange(0, 1000, 0.01)))
+# plt.xlabel('Rohdatenwert')
+# plt.ylabel('Strom [µA]')
+# plt.show()
+# sys.exit(0)
-mim_x = np.arange(len(charges)-199) * 1e-5
+mim_x = np.arange(len(charges) - 199) * 1e-5
mim_y = running_mean(mim.charge_to_current_nocal(charges), 200) * 1e-6
cal_y = running_mean(vcalfunc(charges), 200) * 1e-6
-ks_x = timestamps[:len(timestamps)-9]
+ks_x = timestamps[: len(timestamps) - 9]
ks_y = running_mean(currents, 10)
# look for synchronization opportunity in first 5 seconds
@@ -97,12 +100,12 @@ plt.plot([mim_x[mim_center]], [mim_y[mim_center]], "yo")
plt.plot([mim_x[mim_start]], [mim_y[mim_start]], "yo")
plt.plot([mim_x[mim_end]], [mim_y[mim_end]], "yo")
#
-mimhandle, = plt.plot(mim_x, mim_y, "r-", label='MIMOSA')
-#calhandle, = plt.plot(mim_x, cal_y, "g-", label='MIMOSA (autocal)')
-kshandle, = plt.plot(ks_x, ks_y, "b-", label='Keysight')
-#plt.legend(handles=[mimhandle, calhandle, kshandle])
-plt.xlabel('Zeit [s]')
-plt.ylabel('Strom [A]')
+(mimhandle,) = plt.plot(mim_x, mim_y, "r-", label="MIMOSA")
+# calhandle, = plt.plot(mim_x, cal_y, "g-", label='MIMOSA (autocal)')
+(kshandle,) = plt.plot(ks_x, ks_y, "b-", label="Keysight")
+# plt.legend(handles=[mimhandle, calhandle, kshandle])
+plt.xlabel("Zeit [s]")
+plt.ylabel("Strom [A]")
plt.grid(True)
ks_steps_up = []
@@ -112,11 +115,21 @@ mim_steps_down = []
skip = 0
for i, gradient in enumerate(np.gradient(ks_y, 10000)):
- if gradient > 0.5e-9 and i - skip > 200 and ks_x[i] < mim_x[mim_center] and ks_x[i] > 5:
+ if (
+ gradient > 0.5e-9
+ and i - skip > 200
+ and ks_x[i] < mim_x[mim_center]
+ and ks_x[i] > 5
+ ):
plt.plot([ks_x[i]], [ks_y[i]], "go")
ks_steps_up.append(i)
skip = i
- elif gradient < -0.5e-9 and i - skip > 200 and ks_x[i] > mim_x[mim_center] and ks_x[i] < mim_x[mim_end]:
+ elif (
+ gradient < -0.5e-9
+ and i - skip > 200
+ and ks_x[i] > mim_x[mim_center]
+ and ks_x[i] < mim_x[mim_end]
+ ):
plt.plot([ks_x[i]], [ks_y[i]], "g*")
ks_steps_down.append(i)
skip = i
@@ -141,21 +154,31 @@ cal_values = []
ks_values = []
for i in range(1, len(ks_steps_up)):
- mim_values.append(np.mean(mim_y[mim_steps_up[i-1]:mim_steps_up[i]]))
- cal_values.append(np.mean(cal_y[mim_steps_up[i-1]:mim_steps_up[i]]))
- ks_values.append(np.mean(ks_y[ks_steps_up[i-1]:ks_steps_up[i]]))
- print("step %d avg %5.3f vs %5.3f vs %5.3f mA" %
- (i, np.mean(ks_y[ks_steps_up[i-1]:ks_steps_up[i]]) * 1e3,
- np.mean(mim_y[mim_steps_up[i-1]:mim_steps_up[i]]) * 1e3,
- np.mean(cal_y[mim_steps_up[i-1]:mim_steps_up[i]]) * 1e3))
+ mim_values.append(np.mean(mim_y[mim_steps_up[i - 1] : mim_steps_up[i]]))
+ cal_values.append(np.mean(cal_y[mim_steps_up[i - 1] : mim_steps_up[i]]))
+ ks_values.append(np.mean(ks_y[ks_steps_up[i - 1] : ks_steps_up[i]]))
+ print(
+ "step %d avg %5.3f vs %5.3f vs %5.3f mA"
+ % (
+ i,
+ np.mean(ks_y[ks_steps_up[i - 1] : ks_steps_up[i]]) * 1e3,
+ np.mean(mim_y[mim_steps_up[i - 1] : mim_steps_up[i]]) * 1e3,
+ np.mean(cal_y[mim_steps_up[i - 1] : mim_steps_up[i]]) * 1e3,
+ )
+ )
for i in range(1, len(ks_steps_down)):
- mim_values.append(np.mean(mim_y[mim_steps_down[i-1]:mim_steps_down[i]]))
- cal_values.append(np.mean(cal_y[mim_steps_down[i-1]:mim_steps_down[i]]))
- ks_values.append(np.mean(ks_y[ks_steps_down[i-1]:ks_steps_down[i]]))
- print("step %d avg %5.3f vs %5.3f vs %5.3f mA" %
- (i, np.mean(ks_y[ks_steps_down[i-1]:ks_steps_down[i]]) * 1e3,
- np.mean(mim_y[mim_steps_down[i-1]:mim_steps_down[i]]) * 1e3,
- np.mean(cal_y[mim_steps_down[i-1]:mim_steps_down[i]]) * 1e3))
+ mim_values.append(np.mean(mim_y[mim_steps_down[i - 1] : mim_steps_down[i]]))
+ cal_values.append(np.mean(cal_y[mim_steps_down[i - 1] : mim_steps_down[i]]))
+ ks_values.append(np.mean(ks_y[ks_steps_down[i - 1] : ks_steps_down[i]]))
+ print(
+ "step %d avg %5.3f vs %5.3f vs %5.3f mA"
+ % (
+ i,
+ np.mean(ks_y[ks_steps_down[i - 1] : ks_steps_down[i]]) * 1e3,
+ np.mean(mim_y[mim_steps_down[i - 1] : mim_steps_down[i]]) * 1e3,
+ np.mean(cal_y[mim_steps_down[i - 1] : mim_steps_down[i]]) * 1e3,
+ )
+ )
mim_values = np.array(mim_values)
cal_values = np.array(cal_values)
@@ -163,60 +186,114 @@ ks_values = np.array(ks_values)
plt.show()
-plt.hist(ks_y[ks_steps_up[48]:ks_steps_up[49]] * 1e3, 100, normed=0, facecolor='blue', alpha=0.8)
-plt.xlabel('mA Keysight')
-plt.ylabel('#')
+plt.hist(
+ ks_y[ks_steps_up[48] : ks_steps_up[49]] * 1e3,
+ 100,
+ normed=0,
+ facecolor="blue",
+ alpha=0.8,
+)
+plt.xlabel("mA Keysight")
+plt.ylabel("#")
plt.grid(True)
plt.show()
-plt.hist(mim_y[mim_steps_up[48]:mim_steps_up[49]] * 1e3, 100, normed=0, facecolor='blue', alpha=0.8)
-plt.xlabel('mA MimosaGUI')
-plt.ylabel('#')
+plt.hist(
+ mim_y[mim_steps_up[48] : mim_steps_up[49]] * 1e3,
+ 100,
+ normed=0,
+ facecolor="blue",
+ alpha=0.8,
+)
+plt.xlabel("mA MimosaGUI")
+plt.ylabel("#")
plt.grid(True)
plt.show()
-mimhandle, = plt.plot(ks_values * 1e3, mim_values * 1e3, "ro", label='Unkalibriert', markersize=4)
-calhandle, = plt.plot(ks_values * 1e3, cal_values * 1e3, "bs", label='Kalibriert', markersize=4)
+(mimhandle,) = plt.plot(
+ ks_values * 1e3, mim_values * 1e3, "ro", label="Unkalibriert", markersize=4
+)
+(calhandle,) = plt.plot(
+ ks_values * 1e3, cal_values * 1e3, "bs", label="Kalibriert", markersize=4
+)
plt.legend(handles=[mimhandle, calhandle])
-plt.xlabel('mA Keysight')
-plt.ylabel('mA MIMOSA')
+plt.xlabel("mA Keysight")
+plt.ylabel("mA MIMOSA")
plt.grid(True)
plt.show()
-mimhandle, = plt.plot(ks_values * 1e3, (mim_values - ks_values) * 1e3, "ro", label='Unkalibriert', markersize=4)
-calhandle, = plt.plot(ks_values * 1e3, (cal_values - ks_values) * 1e3, "bs", label='Kalibriert', markersize=4)
+(mimhandle,) = plt.plot(
+ ks_values * 1e3,
+ (mim_values - ks_values) * 1e3,
+ "ro",
+ label="Unkalibriert",
+ markersize=4,
+)
+(calhandle,) = plt.plot(
+ ks_values * 1e3,
+ (cal_values - ks_values) * 1e3,
+ "bs",
+ label="Kalibriert",
+ markersize=4,
+)
plt.legend(handles=[mimhandle, calhandle])
-plt.xlabel('Sollstrom [mA]')
-plt.ylabel('Messfehler MIMOSA [mA]')
+plt.xlabel("Sollstrom [mA]")
+plt.ylabel("Messfehler MIMOSA [mA]")
plt.grid(True)
plt.show()
-mimhandle, = plt.plot(ks_values * 1e3, (mim_values - ks_values) * 1e3, "r--", label='Unkalibriert')
-calhandle, = plt.plot(ks_values * 1e3, (cal_values - ks_values) * 1e3, "b-", label='Kalibriert')
+(mimhandle,) = plt.plot(
+ ks_values * 1e3, (mim_values - ks_values) * 1e3, "r--", label="Unkalibriert"
+)
+(calhandle,) = plt.plot(
+ ks_values * 1e3, (cal_values - ks_values) * 1e3, "b-", label="Kalibriert"
+)
plt.legend(handles=[mimhandle, calhandle])
-plt.xlabel('Sollstrom [mA]')
-plt.ylabel('Messfehler MIMOSA [mA]')
+plt.xlabel("Sollstrom [mA]")
+plt.ylabel("Messfehler MIMOSA [mA]")
plt.grid(True)
plt.show()
-mimhandle, = plt.plot(ks_values * 1e3, (mim_values - ks_values) / ks_values * 100, "ro", label='Unkalibriert', markersize=4)
-calhandle, = plt.plot(ks_values * 1e3, (cal_values - ks_values) / ks_values * 100, "bs", label='Kalibriert', markersize=4)
+(mimhandle,) = plt.plot(
+ ks_values * 1e3,
+ (mim_values - ks_values) / ks_values * 100,
+ "ro",
+ label="Unkalibriert",
+ markersize=4,
+)
+(calhandle,) = plt.plot(
+ ks_values * 1e3,
+ (cal_values - ks_values) / ks_values * 100,
+ "bs",
+ label="Kalibriert",
+ markersize=4,
+)
plt.legend(handles=[mimhandle, calhandle])
-plt.xlabel('Sollstrom [mA]')
-plt.ylabel('Messfehler MIMOSA [%]')
+plt.xlabel("Sollstrom [mA]")
+plt.ylabel("Messfehler MIMOSA [%]")
plt.grid(True)
plt.show()
-mimhandle, = plt.plot(ks_values * 1e3, (mim_values - ks_values) / ks_values * 100, "r--", label='Unkalibriert')
-calhandle, = plt.plot(ks_values * 1e3, (cal_values - ks_values) / ks_values * 100, "b-", label='Kalibriert')
+(mimhandle,) = plt.plot(
+ ks_values * 1e3,
+ (mim_values - ks_values) / ks_values * 100,
+ "r--",
+ label="Unkalibriert",
+)
+(calhandle,) = plt.plot(
+ ks_values * 1e3,
+ (cal_values - ks_values) / ks_values * 100,
+ "b-",
+ label="Kalibriert",
+)
plt.legend(handles=[mimhandle, calhandle])
-plt.xlabel('Sollstrom [mA]')
-plt.ylabel('Messfehler MIMOSA [%]')
+plt.xlabel("Sollstrom [mA]")
+plt.ylabel("Messfehler MIMOSA [%]")
plt.grid(True)
plt.show()
-#mimhandle, = plt.plot(mim_x, np.gradient(mim_y, 10000), "r-", label='MIMOSA')
-#kshandle, = plt.plot(ks_x, np.gradient(ks_y, 10000), "b-", label='Keysight')
-#plt.legend(handles=[mimhandle, kshandle])
-#plt.xlabel('Zeit [s]')
-#plt.ylabel('Strom [A]')
-#plt.show()
+# mimhandle, = plt.plot(mim_x, np.gradient(mim_y, 10000), "r-", label='MIMOSA')
+# kshandle, = plt.plot(ks_x, np.gradient(ks_y, 10000), "b-", label='Keysight')
+# plt.legend(handles=[mimhandle, kshandle])
+# plt.xlabel('Zeit [s]')
+# plt.ylabel('Strom [A]')
+# plt.show()
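
running_mean is imported from dfatool and not shown in this diff; the index arithmetic above implies a smoothing window that shortens the series by N - 1 samples, which is why mim_x drops 199 points for a 200-sample window and ks_x drops 9 for a 10-sample window. A common cumulative-sum implementation with exactly that property (an assumption about the helper, not its verbatim source):

import numpy as np

def running_mean(x, N):
    # mean over a sliding window of N samples; output length is len(x) - N + 1
    cumsum = np.cumsum(np.insert(x, 0, 0))
    return (cumsum[N:] - cumsum[:-N]) / N

y = running_mean(np.arange(10, dtype=float), 2)
assert len(y) == 9 and y[0] == 0.5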
diff --git a/bin/test_corrcoef.py b/bin/test_corrcoef.py
index 835e086..e389d01 100755
--- a/bin/test_corrcoef.py
+++ b/bin/test_corrcoef.py
@@ -9,76 +9,105 @@ from dfatool.dfatool import gplearn_to_function
opts = {}
+
def print_model_quality(results):
for state_or_tran in results.keys():
print()
for key, result in results[state_or_tran].items():
- if 'smape' in result:
- print('{:20s} {:15s} {:.2f}% / {:.0f}'.format(
- state_or_tran, key, result['smape'], result['mae']))
+ if "smape" in result:
+ print(
+ "{:20s} {:15s} {:.2f}% / {:.0f}".format(
+ state_or_tran, key, result["smape"], result["mae"]
+ )
+ )
else:
- print('{:20s} {:15s} {:.0f}'.format(
- state_or_tran, key, result['mae']))
+ print("{:20s} {:15s} {:.0f}".format(state_or_tran, key, result["mae"]))
+
def format_quality_measures(result):
- if 'smape' in result:
- return '{:6.2f}% / {:9.0f}'.format(result['smape'], result['mae'])
+ if "smape" in result:
+ return "{:6.2f}% / {:9.0f}".format(result["smape"], result["mae"])
else:
- return '{:6} {:9.0f}'.format('', result['mae'])
+ return "{:6} {:9.0f}".format("", result["mae"])
+
def model_quality_table(result_lists, info_list):
- for state_or_tran in result_lists[0]['by_name'].keys():
- for key in result_lists[0]['by_name'][state_or_tran].keys():
- buf = '{:20s} {:15s}'.format(state_or_tran, key)
+ for state_or_tran in result_lists[0]["by_name"].keys():
+ for key in result_lists[0]["by_name"][state_or_tran].keys():
+ buf = "{:20s} {:15s}".format(state_or_tran, key)
for i, results in enumerate(result_lists):
info = info_list[i]
- buf += ' ||| '
+ buf += " ||| "
                if info is None or info(state_or_tran, key):
- result = results['by_name'][state_or_tran][key]
+ result = results["by_name"][state_or_tran][key]
buf += format_quality_measures(result)
else:
- buf += '{:6}----{:9}'.format('', '')
+ buf += "{:6}----{:9}".format("", "")
print(buf)
+
def model_summary_table(result_list):
- buf = 'transition duration'
+ buf = "transition duration"
for results in result_list:
if len(buf):
- buf += ' ||| '
- buf += format_quality_measures(results['duration_by_trace'])
+ buf += " ||| "
+ buf += format_quality_measures(results["duration_by_trace"])
print(buf)
- buf = 'total energy '
+ buf = "total energy "
for results in result_list:
if len(buf):
- buf += ' ||| '
- buf += format_quality_measures(results['energy_by_trace'])
+ buf += " ||| "
+ buf += format_quality_measures(results["energy_by_trace"])
print(buf)
- buf = 'transition timeout '
+ buf = "transition timeout "
for results in result_list:
if len(buf):
- buf += ' ||| '
- buf += format_quality_measures(results['timeout_by_trace'])
+ buf += " ||| "
+ buf += format_quality_measures(results["timeout_by_trace"])
print(buf)
def print_text_model_data(model, pm, pq, lm, lq, am, ai, aq):
- print('')
- print(r'key attribute $1 - \frac{\sigma_X}{...}$')
+ print("")
+ print(r"key attribute $1 - \frac{\sigma_X}{...}$")
for state_or_tran in model.by_name.keys():
- for attribute in model.by_name[state_or_tran]['attributes']:
- print('{} {} {:.8f}'.format(state_or_tran, attribute, model.generic_param_dependence_ratio(state_or_tran, attribute)))
-
- print('')
- print(r'key attribute parameter $1 - \frac{...}{...}$')
+ for attribute in model.by_name[state_or_tran]["attributes"]:
+ print(
+ "{} {} {:.8f}".format(
+ state_or_tran,
+ attribute,
+ model.generic_param_dependence_ratio(state_or_tran, attribute),
+ )
+ )
+
+ print("")
+ print(r"key attribute parameter $1 - \frac{...}{...}$")
for state_or_tran in model.by_name.keys():
- for attribute in model.by_name[state_or_tran]['attributes']:
+ for attribute in model.by_name[state_or_tran]["attributes"]:
for param in model.parameters():
- print('{} {} {} {:.8f}'.format(state_or_tran, attribute, param, model.param_dependence_ratio(state_or_tran, attribute, param)))
+ print(
+ "{} {} {} {:.8f}".format(
+ state_or_tran,
+ attribute,
+ param,
+ model.param_dependence_ratio(state_or_tran, attribute, param),
+ )
+ )
if state_or_tran in model._num_args:
for arg_index in range(model._num_args[state_or_tran]):
- print('{} {} {:d} {:.8f}'.format(state_or_tran, attribute, arg_index, model.arg_dependence_ratio(state_or_tran, attribute, arg_index)))
+ print(
+ "{} {} {:d} {:.8f}".format(
+ state_or_tran,
+ attribute,
+ arg_index,
+ model.arg_dependence_ratio(
+ state_or_tran, attribute, arg_index
+ ),
+ )
+ )
+
-if __name__ == '__main__':
+if __name__ == "__main__":
ignored_trace_indexes = None
discard_outliers = None
@@ -89,36 +118,40 @@ if __name__ == '__main__':
try:
optspec = (
- 'plot-unparam= plot-param= show-models= show-quality= '
- 'ignored-trace-indexes= discard-outliers= function-override= '
- 'with-safe-functions'
+ "plot-unparam= plot-param= show-models= show-quality= "
+ "ignored-trace-indexes= discard-outliers= function-override= "
+ "with-safe-functions"
)
- raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(' '))
+ raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(" "))
for option, parameter in raw_opts:
- optname = re.sub(r'^--', '', option)
+ optname = re.sub(r"^--", "", option)
opts[optname] = parameter
- if 'ignored-trace-indexes' in opts:
- ignored_trace_indexes = list(map(int, opts['ignored-trace-indexes'].split(',')))
+ if "ignored-trace-indexes" in opts:
+ ignored_trace_indexes = list(
+ map(int, opts["ignored-trace-indexes"].split(","))
+ )
if 0 in ignored_trace_indexes:
- print('[E] arguments to --ignored-trace-indexes start from 1')
+ print("[E] arguments to --ignored-trace-indexes start from 1")
- if 'discard-outliers' in opts:
- discard_outliers = float(opts['discard-outliers'])
+ if "discard-outliers" in opts:
+ discard_outliers = float(opts["discard-outliers"])
- if 'function-override' in opts:
- for function_desc in opts['function-override'].split(';'):
- state_or_tran, attribute, *function_str = function_desc.split(' ')
- function_override[(state_or_tran, attribute)] = ' '.join(function_str)
+ if "function-override" in opts:
+ for function_desc in opts["function-override"].split(";"):
+ state_or_tran, attribute, *function_str = function_desc.split(" ")
+ function_override[(state_or_tran, attribute)] = " ".join(
+ function_str
+ )
- if 'show-models' in opts:
- show_models = opts['show-models'].split(',')
+ if "show-models" in opts:
+ show_models = opts["show-models"].split(",")
- if 'show-quality' in opts:
- show_quality = opts['show-quality'].split(',')
+ if "show-quality" in opts:
+ show_quality = opts["show-quality"].split(",")
- if 'with-safe-functions' in opts:
+ if "with-safe-functions" in opts:
safe_functions_enabled = True
except getopt.GetoptError as err:
@@ -131,105 +164,154 @@ if __name__ == '__main__':
by_name, parameters, arg_count = pta_trace_to_aggregate(preprocessed_data)
ref_model = PTAModel(
- by_name, parameters, arg_count,
- traces = preprocessed_data,
- ignore_trace_indexes = ignored_trace_indexes,
- discard_outliers = discard_outliers,
- function_override = function_override,
- use_corrcoef = False)
+ by_name,
+ parameters,
+ arg_count,
+ traces=preprocessed_data,
+ ignore_trace_indexes=ignored_trace_indexes,
+ discard_outliers=discard_outliers,
+ function_override=function_override,
+ use_corrcoef=False,
+ )
model = PTAModel(
- by_name, parameters, arg_count,
- traces = preprocessed_data,
- ignore_trace_indexes = ignored_trace_indexes,
- discard_outliers = discard_outliers,
- function_override = function_override,
- use_corrcoef = True)
-
-
- if 'plot-unparam' in opts:
- for kv in opts['plot-unparam'].split(';'):
- state_or_trans, attribute = kv.split(' ')
+ by_name,
+ parameters,
+ arg_count,
+ traces=preprocessed_data,
+ ignore_trace_indexes=ignored_trace_indexes,
+ discard_outliers=discard_outliers,
+ function_override=function_override,
+ use_corrcoef=True,
+ )
+
+ if "plot-unparam" in opts:
+ for kv in opts["plot-unparam"].split(";"):
+ state_or_trans, attribute = kv.split(" ")
plotter.plot_y(model.by_name[state_or_trans][attribute])
if len(show_models):
- print('--- simple static model ---')
+ print("--- simple static model ---")
static_model = model.get_static()
ref_static_model = ref_model.get_static()
- if 'static' in show_models or 'all' in show_models:
+ if "static" in show_models or "all" in show_models:
for state in model.states():
- print('{:10s}: {:.0f} µW ({:.2f})'.format(
- state,
- static_model(state, 'power'),
- model.generic_param_dependence_ratio(state, 'power')))
+ print(
+ "{:10s}: {:.0f} µW ({:.2f})".format(
+ state,
+ static_model(state, "power"),
+ model.generic_param_dependence_ratio(state, "power"),
+ )
+ )
for param in model.parameters():
- print('{:10s} dependence on {:15s}: {:.2f}'.format(
- '',
- param,
- model.param_dependence_ratio(state, 'power', param)))
+ print(
+ "{:10s} dependence on {:15s}: {:.2f}".format(
+ "", param, model.param_dependence_ratio(state, "power", param)
+ )
+ )
for trans in model.transitions():
- print('{:10s}: {:.0f} / {:.0f} / {:.0f} pJ ({:.2f} / {:.2f} / {:.2f})'.format(
- trans, static_model(trans, 'energy'),
- static_model(trans, 'rel_energy_prev'),
- static_model(trans, 'rel_energy_next'),
- model.generic_param_dependence_ratio(trans, 'energy'),
- model.generic_param_dependence_ratio(trans, 'rel_energy_prev'),
- model.generic_param_dependence_ratio(trans, 'rel_energy_next')))
- print('{:10s}: {:.0f} µs'.format(trans, static_model(trans, 'duration')))
+ print(
+ "{:10s}: {:.0f} / {:.0f} / {:.0f} pJ ({:.2f} / {:.2f} / {:.2f})".format(
+ trans,
+ static_model(trans, "energy"),
+ static_model(trans, "rel_energy_prev"),
+ static_model(trans, "rel_energy_next"),
+ model.generic_param_dependence_ratio(trans, "energy"),
+ model.generic_param_dependence_ratio(trans, "rel_energy_prev"),
+ model.generic_param_dependence_ratio(trans, "rel_energy_next"),
+ )
+ )
+ print("{:10s}: {:.0f} µs".format(trans, static_model(trans, "duration")))
static_quality = model.assess(static_model)
ref_static_quality = ref_model.assess(ref_static_model)
if len(show_models):
- print('--- LUT ---')
+ print("--- LUT ---")
lut_model = model.get_param_lut()
lut_quality = model.assess(lut_model)
ref_lut_model = ref_model.get_param_lut()
ref_lut_quality = ref_model.assess(ref_lut_model)
if len(show_models):
- print('--- param model ---')
- param_model, param_info = model.get_fitted(safe_functions_enabled = safe_functions_enabled)
- ref_param_model, ref_param_info = ref_model.get_fitted(safe_functions_enabled = safe_functions_enabled)
- print('')
- print('')
- print('state_or_trans attribute param stddev_ratio corrcoef')
+ print("--- param model ---")
+ param_model, param_info = model.get_fitted(
+ safe_functions_enabled=safe_functions_enabled
+ )
+ ref_param_model, ref_param_info = ref_model.get_fitted(
+ safe_functions_enabled=safe_functions_enabled
+ )
+ print("")
+ print("")
+ print("state_or_trans attribute param stddev_ratio corrcoef")
for state in model.states():
for attribute in model.attributes(state):
for param in model.parameters():
- print('{:10s} {:10s} {:10s} {:f} {:f}'.format(state, attribute, param,
- ref_model.param_dependence_ratio(state, attribute, param),
- model.param_dependence_ratio(state, attribute, param)))
+ print(
+ "{:10s} {:10s} {:10s} {:f} {:f}".format(
+ state,
+ attribute,
+ param,
+ ref_model.param_dependence_ratio(state, attribute, param),
+ model.param_dependence_ratio(state, attribute, param),
+ )
+ )
for trans in model.transitions():
for attribute in model.attributes(trans):
for param in model.parameters():
- print('{:10s} {:10s} {:10s} {:f} {:f}'.format(trans, attribute, param,
- ref_model.param_dependence_ratio(trans, attribute, param),
- model.param_dependence_ratio(trans, attribute, param)))
- print('')
- print('')
+ print(
+ "{:10s} {:10s} {:10s} {:f} {:f}".format(
+ trans,
+ attribute,
+ param,
+ ref_model.param_dependence_ratio(trans, attribute, param),
+ model.param_dependence_ratio(trans, attribute, param),
+ )
+ )
+ print("")
+ print("")
analytic_quality = model.assess(param_model)
ref_analytic_quality = ref_model.assess(ref_param_model)
- if 'tex' in show_models or 'tex' in show_quality:
- print_text_model_data(model, static_model, static_quality, lut_model, lut_quality, param_model, param_info, analytic_quality)
+ if "tex" in show_models or "tex" in show_quality:
+ print_text_model_data(
+ model,
+ static_model,
+ static_quality,
+ lut_model,
+ lut_quality,
+ param_model,
+ param_info,
+ analytic_quality,
+ )
- if 'table' in show_quality or 'all' in show_quality:
- print('corrcoef:')
- model_quality_table([static_quality, analytic_quality, lut_quality], [None, param_info, None])
- print('heuristic:')
- model_quality_table([ref_static_quality, ref_analytic_quality, ref_lut_quality], [None, ref_param_info, None])
- if 'summary' in show_quality or 'all' in show_quality:
- print('corrcoef:')
+ if "table" in show_quality or "all" in show_quality:
+ print("corrcoef:")
+ model_quality_table(
+ [static_quality, analytic_quality, lut_quality], [None, param_info, None]
+ )
+ print("heuristic:")
+ model_quality_table(
+ [ref_static_quality, ref_analytic_quality, ref_lut_quality],
+ [None, ref_param_info, None],
+ )
+ if "summary" in show_quality or "all" in show_quality:
+ print("corrcoef:")
model_summary_table([static_quality, analytic_quality, lut_quality])
- print('heuristic:')
+ print("heuristic:")
model_summary_table([ref_static_quality, ref_analytic_quality, ref_lut_quality])
- if 'plot-param' in opts:
- for kv in opts['plot-param'].split(';'):
- state_or_trans, attribute, param_name, *function = kv.split(' ')
+ if "plot-param" in opts:
+ for kv in opts["plot-param"].split(";"):
+ state_or_trans, attribute, param_name, *function = kv.split(" ")
if len(function):
- function = gplearn_to_function(' '.join(function))
+ function = gplearn_to_function(" ".join(function))
else:
function = None
- plotter.plot_param(model, state_or_trans, attribute, model.param_index(param_name), extra_function=function)
+ plotter.plot_param(
+ model,
+ state_or_trans,
+ attribute,
+ model.param_index(param_name),
+ extra_function=function,
+ )
sys.exit(0)
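
The "plot-unparam" and "plot-param" options handled above share a simple format: ';'-separated entries whose fields are split on single spaces, with "plot-param" optionally carrying a trailing gplearn expression. A minimal sketch of that parsing, using hypothetical state/attribute/parameter names ("TX", "power", "txpower"); only the split logic is taken from the code above:

    # Hypothetical example of the "plot-param" option format parsed above.
    # Each ';'-separated entry is:
    #   "<state-or-transition> <attribute> <param> [gplearn expression]"
    opts = {"plot-param": "TX power txpower;RX power datarate mul(X0,X0)"}

    for kv in opts["plot-param"].split(";"):
        state_or_trans, attribute, param_name, *function = kv.split(" ")
        # Trailing tokens, if any, form a gplearn expression that the script
        # would join and convert via gplearn_to_function() before plotting.
        print(state_or_trans, attribute, param_name, " ".join(function) or None)
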
diff --git a/bin/workload.py b/bin/workload.py
index 6199394..19a7378 100755
--- a/bin/workload.py
+++ b/bin/workload.py
@@ -13,30 +13,50 @@ loop_names = set()
def simulate_word(timedword):
- prev_state = 'UNINITIALIZED'
+ prev_state = "UNINITIALIZED"
prev_param = None
ret = dict()
for trace_part in timedword:
- print('Trace Part {}'.format(trace_part))
+ print("Trace Part {}".format(trace_part))
if type(trace_part) is TimedWord:
- result = pta.simulate(trace_part, orig_state=prev_state, orig_param=prev_param)
+ result = pta.simulate(
+ trace_part, orig_state=prev_state, orig_param=prev_param
+ )
elif type(trace_part) is Workload:
- result = pta.simulate(trace_part.word, orig_state=prev_state, orig_param=prev_param)
+ result = pta.simulate(
+ trace_part.word, orig_state=prev_state, orig_param=prev_param
+ )
if prev_state != result.end_state:
- print('Warning: loop starts in state {}, but terminates in {}'.format(prev_state, result.end_state.name))
+ print(
+ "Warning: loop starts in state {}, but terminates in {}".format(
+ prev_state, result.end_state.name
+ )
+ )
if prev_param != result.parameters:
- print('Warning: loop starts with parameters {}, but terminates with {}'.format(prev_param, result.parameters))
+ print(
+ "Warning: loop starts with parameters {}, but terminates with {}".format(
+ prev_param, result.parameters
+ )
+ )
ret[trace_part.name] = result
loop_names.add(trace_part.name)
- print(' Duration: ' + human_readable(result.duration, 's'))
+ print(" Duration: " + human_readable(result.duration, "s"))
if result.duration_mae:
- print(u' ± {} / {:.0f}%'.format(human_readable(result.duration_mae, 's'), result.duration_mape))
- print(' Energy: ' + human_readable(result.energy, 'J'))
+ print(
+ u" ± {} / {:.0f}%".format(
+ human_readable(result.duration_mae, "s"), result.duration_mape
+ )
+ )
+ print(" Energy: " + human_readable(result.energy, "J"))
if result.energy_mae:
- print(u' ± {} / {:.0f}%'.format(human_readable(result.energy_mae, 'J'), result.energy_mape))
- print(' Mean Power: ' + human_readable(result.mean_power, 'W'))
- print('')
+ print(
+ u" ± {} / {:.0f}%".format(
+ human_readable(result.energy_mae, "J"), result.energy_mape
+ )
+ )
+ print(" Mean Power: " + human_readable(result.mean_power, "W"))
+ print("")
prev_state = result.end_state
prev_param = result.parameters
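
simulate_word() threads prev_state and prev_param through successive trace parts so that each pta.simulate() call continues where the previous one ended, and it warns when a workload loop does not return to its initial state or parameter assignment. A minimal sketch of that consistency check, with a hypothetical SimResult standing in for the object pta.simulate() returns; note that the first iteration, starting from "UNINITIALIZED", triggers both warnings by construction:

    # Sketch of the state/parameter hand-off checked above. SimResult is a
    # hypothetical stand-in for the result object returned by pta.simulate().
    from collections import namedtuple

    SimResult = namedtuple("SimResult", "end_state parameters")

    prev_state = "UNINITIALIZED"
    prev_param = None
    for result in [SimResult("IDLE", {"txpower": 10}), SimResult("IDLE", {"txpower": 10})]:
        if prev_state != result.end_state:
            print("Warning: loop starts in state {}, but terminates in {}".format(prev_state, result.end_state))
        if prev_param != result.parameters:
            print("Warning: loop starts with parameters {}, but terminates with {}".format(prev_param, result.parameters))
        prev_state = result.end_state
        prev_param = result.parameters
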
@@ -49,7 +69,7 @@ for i in range(len(args) // 2):
ptafiles.append(ptafile)
pta = PTA.from_file(ptafile)
timedword = TimedSequence(raw_word)
- print('Input: {}\n'.format(timedword))
+ print("Input: {}\n".format(timedword))
loops[ptafile] = simulate_word(timedword)
for loop_name in sorted(loop_names):
@@ -59,6 +79,14 @@ for loop_name in sorted(loop_names):
if loop_name in loops[ptafile]:
result_set.append(loops[ptafile][loop_name])
total_power += loops[ptafile][loop_name].mean_power
- print('{}: total mean power is {}'.format(loop_name, human_readable(total_power, 'W')))
+ print(
+ "{}: total mean power is {}".format(loop_name, human_readable(total_power, "W"))
+ )
for i, result in enumerate(result_set):
- print(' {:.0f}% {} (period: {})'.format(result.mean_power * 100 / total_power, ptafiles[i], human_readable(result.duration, 's')))
+ print(
+ " {:.0f}% {} (period: {})".format(
+ result.mean_power * 100 / total_power,
+ ptafiles[i],
+ human_readable(result.duration, "s"),
+ )
+ )
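
The final loop sums each named workload loop's mean power over all PTA files and reports every file's share of that total. A self-contained sketch of the arithmetic, with SimResult as a hypothetical stand-in for the simulation results stored in loops[ptafile][loop_name]:

    # Sketch of the mean-power aggregation printed above; values are made up.
    from collections import namedtuple

    SimResult = namedtuple("SimResult", "mean_power duration")  # W, s

    result_set = [SimResult(0.003, 1.2), SimResult(0.001, 0.8)]
    total_power = sum(r.mean_power for r in result_set)

    for result in result_set:
        # Each result's percentage share of the summed mean power.
        print("{:.0f}% (period: {:.1f} s)".format(
            result.mean_power * 100 / total_power, result.duration))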