From d38c3a191c44806d87584b7bd6052072820bcea3 Mon Sep 17 00:00:00 2001
From: Daniel Friesel
Date: Thu, 15 Aug 2019 11:49:05 +0200
Subject: Fix generate-dfa-benchmark leaving out benchmark parts when
 splitting runs

---
 bin/analyze-timing.py         | 12 +++++++-----
 bin/generate-dfa-benchmark.py |  7 ++++---
 2 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/bin/analyze-timing.py b/bin/analyze-timing.py
index 7e8174d..465932b 100755
--- a/bin/analyze-timing.py
+++ b/bin/analyze-timing.py
@@ -63,7 +63,7 @@ Options:
 --export-energymodel=<model.json>
     Export energy model. Requires --hwmodel.
 
---filter-param=<parameter name>=<parameter value>
+--filter-param=<parameter name>=<parameter value>[,<parameter name>=<parameter value>...]
     Only consider measurements where <parameter name> is <parameter value>
     All other measurements (including those where it is None, that is, has
     not been set yet) are discarded. Note that this may remove entire
@@ -192,7 +192,9 @@ if __name__ == '__main__':
             opts['corrcoef'] = False
 
         if 'filter-param' in opts:
-            opts['filter-param'] = opts['filter-param'].split('=')
+            opts['filter-param'] = list(map(lambda x: x.split('='), opts['filter-param'].split(',')))
+        else:
+            opts['filter-param'] = list()
 
     except getopt.GetoptError as err:
         print(err)
@@ -203,9 +205,9 @@ if __name__ == '__main__':
     preprocessed_data = raw_data.get_preprocessed_data()
     by_name, parameters, arg_count = pta_trace_to_aggregate(preprocessed_data, ignored_trace_indexes)
 
-    if 'filter-param' in opts:
-        param_index = parameters.index(opts['filter-param'][0])
-        param_value = soft_cast_int(opts['filter-param'][1])
+    for param_name_and_value in opts['filter-param']:
+        param_index = parameters.index(param_name_and_value[0])
+        param_value = soft_cast_int(param_name_and_value[1])
         names_to_remove = set()
         for name in by_name.keys():
             indices_to_keep = list(map(lambda x: x[param_index] == param_value, by_name[name]['param']))
diff --git a/bin/generate-dfa-benchmark.py b/bin/generate-dfa-benchmark.py
index 4fb8df4..9919d68 100755
--- a/bin/generate-dfa-benchmark.py
+++ b/bin/generate-dfa-benchmark.py
@@ -136,9 +136,10 @@ def run_benchmark(application_file: str, pta: PTA, runs: list, arch: str, app: s
     if needs_split:
         print('[MAKE] benchmark code is too large, splitting up')
         mid = len(runs) // 2
-        results = run_benchmark(application_file, pta, runs[:mid], arch, app, run_args, harness, sleep, repeat, run_offset = run_offset, runs_total = runs_total)
+        # Previously prepared trace data is useless
         harness.reset()
-        results.extend(run_benchmark(application_file, pta, runs[mid:], arch, app, run_args, harness, sleep, repeat, run_offset = run_offset + mid, runs_total = runs_total))
+        results = run_benchmark(application_file, pta, runs[:mid], arch, app, run_args, harness.copy(), sleep, repeat, run_offset = run_offset, runs_total = runs_total)
+        results.extend(run_benchmark(application_file, pta, runs[mid:], arch, app, run_args, harness.copy(), sleep, repeat, run_offset = run_offset + mid, runs_total = runs_total))
         return results
 
     runner.flash(arch, app, run_args)
@@ -242,7 +243,7 @@ if __name__ == '__main__':
     json_out = {
         'opt' : opt,
         'pta' : pta.to_json(),
-        'traces' : harness.traces,
+        'traces' : list(map(lambda x: x[1].traces, results)),
         'raw_output' : list(map(lambda x: x[2], results)),
     }
     with open(time.strftime('ptalog-%Y%m%d-%H%M%S.json'), 'w') as f:
-- 
cgit v1.2.3
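
The generate-dfa-benchmark.py hunks above change the split path to give each half of an oversized benchmark its own harness.copy() and to collect traces from each result tuple instead of from the one shared (and reset) harness. A minimal sketch of that pattern, assuming a simplified Harness class and run_benchmark signature that are illustrative stand-ins rather than dfatool's actual API:

# Minimal, self-contained sketch of the split-and-copy pattern in the patch above.
# Harness and run_benchmark are simplified stand-ins, not dfatool's actual API;
# batch sizes and trace contents are made up for illustration.

class Harness:
    def __init__(self, traces=None):
        self.traces = traces if traces is not None else []

    def copy(self):
        # Each half of a split benchmark gets its own harness, so the traces
        # prepared for one half cannot overwrite those of the other.
        return Harness(list(self.traces))

    def reset(self):
        # Trace data prepared before the split is useless and is discarded.
        self.traces = []


def run_benchmark(runs, harness, run_offset=0, max_runs=4):
    """Return a list of (runs, harness) tuples, splitting oversized batches."""
    if len(runs) > max_runs:
        mid = len(runs) // 2
        harness.reset()
        # Recurse with a fresh harness copy per half, as the patched
        # generate-dfa-benchmark.py does.
        results = run_benchmark(runs[:mid], harness.copy(), run_offset)
        results.extend(run_benchmark(runs[mid:], harness.copy(), run_offset + mid))
        return results
    # "Execute" the batch: record one trace entry per run.
    harness.traces.extend('trace for run %d' % (run_offset + i) for i in range(len(runs)))
    return [(runs, harness)]


if __name__ == '__main__':
    results = run_benchmark(list(range(10)), Harness())
    # Collect traces from each result's own harness rather than from a single
    # shared harness; this mirrors the json_out['traces'] change in the patch.
    print([h.traces for _, h in results])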