Diffstat (limited to 'bin/generate-dfa-benchmark.py')
-rwxr-xr-x  bin/generate-dfa-benchmark.py  235
1 file changed, 153 insertions, 82 deletions
diff --git a/bin/generate-dfa-benchmark.py b/bin/generate-dfa-benchmark.py
index b9790ed..8e3c227 100755
--- a/bin/generate-dfa-benchmark.py
+++ b/bin/generate-dfa-benchmark.py
@@ -15,6 +15,10 @@ Options:
--depth=<depth> (default: 3)
Maximum number of function calls per run
+--repeat=<count> (default: 0)
+ Repeat benchmark runs <count> times. When 0, benchmark runs are repeated
+ indefinitely and must be explicitly terminated with Ctrl+C / SIGINT
+
--instance=<name>
Override the name of the class instance used for benchmarking
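For illustration, a bounded benchmark run could be invoked along these lines (argument order and the placeholder names are assumptions based on the code below, not part of this commit):

    ./bin/generate-dfa-benchmark.py --depth=3 --sleep=50 --repeat=2 --app=<app> <pta file> <output .cc file>

With the default --repeat=0, the same invocation keeps running until it is interrupted with Ctrl+C.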
@@ -44,6 +48,132 @@ def trace_matches_filter(trace: list, trace_filter: list) -> bool:
return True
return False
+def benchmark_from_runs(pta: PTA, runs: list, harness: object, benchmark_id: int = 0) -> io.StringIO:
+ outbuf = io.StringIO()
+
+ outbuf.write('#include "arch.h"\n')
+ if 'includes' in pta.codegen:
+ for include in pta.codegen['includes']:
+ outbuf.write('#include "{}"\n'.format(include))
+ outbuf.write(harness.global_code())
+
+ outbuf.write('int main(void)\n')
+ outbuf.write('{\n')
+ for driver in ('arch', 'gpio', 'kout'):
+ outbuf.write('{}.setup();\n'.format(driver))
+ if 'setup' in pta.codegen:
+ for call in pta.codegen['setup']:
+ outbuf.write(call)
+
+ # There is a race condition between flashing the code and starting the UART log.
+ # When starting the log before flashing, output from a previous benchmark may cause bogus data to be added.
+ # When flashing first and then starting the log, the first log lines may be lost.
+ # To work around this, we flash first, then start the log, and use this delay statement to ensure that no output is lost.
+ # This is also useful to facilitate MIMOSA calibration after flashing.
+ outbuf.write('arch.delay_ms(10000);\n')
+
+ outbuf.write('while (1) {\n')
+ outbuf.write(harness.start_benchmark())
+
+ class_prefix = ''
+ if 'instance' in opt:
+ class_prefix = '{}.'.format(opt['instance'])
+ elif 'instance' in pta.codegen:
+ class_prefix = '{}.'.format(pta.codegen['instance'])
+
+ num_transitions = 0
+ num_traces = 0
+ for run in runs:
+ outbuf.write(harness.start_run())
+ harness.start_trace()
+ param = pta.get_initial_param_dict()
+ for transition, arguments, parameter in run:
+ num_transitions += 1
+ harness.append_transition(transition.name, param, arguments)
+ harness.append_state(transition.destination.name, parameter)
+ outbuf.write('// {} -> {}\n'.format(transition.origin.name, transition.destination.name))
+ if transition.is_interrupt:
+ outbuf.write('// wait for {} interrupt\n'.format(transition.name))
+ transition_code = '// TODO add startTransition / stopTransition calls to interrupt routine'
+ else:
+ transition_code = '{}{}({});'.format(class_prefix, transition.name, ', '.join(map(str, arguments)))
+ outbuf.write(harness.pass_transition(pta.get_transition_id(transition), transition_code, transition = transition))
+
+ param = parameter
+
+ outbuf.write('// current parameters: {}\n'.format(', '.join(map(lambda kv: '{}={}'.format(*kv), param.items()))))
+
+ if opt['sleep']:
+ outbuf.write('arch.delay_ms({:d}); // {}\n'.format(opt['sleep'], transition.destination.name))
+
+ outbuf.write(harness.stop_run(num_traces))
+ outbuf.write('\n')
+ num_traces += 1
+
+ outbuf.write(harness.stop_benchmark())
+ outbuf.write('}\n')
+ outbuf.write('return 0;\n')
+ outbuf.write('}\n')
+
+ return outbuf
+
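The function above only renders the benchmark source; writing and building it is left to the caller. A minimal sketch of standalone use, mirroring what run_benchmark() below does (pta, runs, harness and application_file are assumed to exist in the caller's scope):

    outbuf = benchmark_from_runs(pta, runs, harness)
    with open(application_file, 'w') as f:  # path of the generated benchmark source file
        f.write(outbuf.getvalue())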
+def run_benchmark(application_file: str, pta: PTA, runs: list, arch: str, app: str, run_args: list, harness: object, sleep: int = 0, repeat: int = 0, run_offset: int = 0, runs_total: int = 0):
+ outbuf = benchmark_from_runs(pta, runs, harness)
+ with open(application_file, 'w') as f:
+ f.write(outbuf.getvalue())
+ print('[MAKE] building benchmark with {:d} runs'.format(len(runs)))
+
+ # Assume an average of 10 ms per transition plus the configured sleep time; mind the 10 s start delay.
+ # num_transitions is a module-level value set in the __main__ block below.
+ run_timeout = 10 + num_transitions * (sleep+10) / 1000
+
+ if repeat:
+ run_timeout *= repeat
+
+ needs_split = False
+ try:
+ runner.build(arch, app, run_args)
+ except RuntimeError:
+ if len(runs) > 50:
+ # Application is too large -> split up runs
+ needs_split = True
+ else:
+ # Unknown error
+ raise
+
+ # This check is deliberately placed outside the except clause to avoid nested exception handlers
+ # (they produce chained tracebacks that tend to be more confusing than helpful).
+ if needs_split:
+ print('[MAKE] benchmark code is too large, splitting up')
+ mid = len(runs) // 2
+ results = run_benchmark(application_file, pta, runs[:mid], arch, app, run_args, harness, sleep, repeat, run_offset = run_offset, runs_total = runs_total)
+ harness.reset()
+ results.extend(run_benchmark(application_file, pta, runs[mid:], arch, app, run_args, harness, sleep, repeat, run_offset = run_offset + mid, runs_total = runs_total))
+ return results
+
+ runner.flash(arch, app, run_args)
+ monitor = runner.get_monitor(arch, callback = harness.parser_cb)
+
+ if arch == 'posix':
+ print('[RUN] Will run benchmark for {:.0f} seconds'.format(run_timeout))
+ lines = monitor.run(int(run_timeout))
+ return [(runs, harness, lines)]
+
+ # TODO If there are too many transitions, split the benchmark into several programs
+ # and run them one after another up to X % completion (220 %), automatically
+ # rebuilding in between, etc.
+ try:
+ slept = 0
+ while repeat == 0 or slept / run_timeout < 1:
+ time.sleep(5)
+ slept += 5
+ print('[RUN] {:d}/{:d} ({:.0f}%), current benchmark at {:.0f}%'.format(run_offset, runs_total, run_offset * 100 / runs_total, slept * 100 / run_timeout))
+ except KeyboardInterrupt:
+ pass
+ lines = monitor.get_lines()
+ monitor.close()
+
+ return [(runs, harness, lines)]
+
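As a rough sanity check of the run_timeout estimate used above, a worked example with made-up numbers (not taken from this commit):

    # 500 transitions, sleep = 20 ms, repeat = 3 -- all values hypothetical
    run_timeout = 10 + 500 * (20 + 10) / 1000   # 10 s start delay + 15 s of transitions = 25 s
    run_timeout *= 3                            # 75 s in total with --repeat=3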
if __name__ == '__main__':
@@ -53,6 +183,7 @@ if __name__ == '__main__':
'app= '
'depth= '
'instance= '
+ 'repeat= '
'run= '
'sleep= '
'timer-pin= '
@@ -69,8 +200,15 @@ if __name__ == '__main__':
else:
opt['depth'] = 3
+ if 'repeat' in opt:
+ opt['repeat'] = int(opt['repeat'])
+ else:
+ opt['repeat'] = 0
+
if 'sleep' in opt:
opt['sleep'] = int(opt['sleep'])
+ else:
+ opt['sleep'] = 0
if 'trace-filter' in opt:
trace_filter = []
@@ -95,100 +233,33 @@ if __name__ == '__main__':
else:
timer_pin = 'GPIO::p1_0'
- harness = OnboardTimerHarness(timer_pin)
-
- outbuf = io.StringIO()
-
- outbuf.write('#include "arch.h"\n')
- if 'includes' in pta.codegen:
- for include in pta.codegen['includes']:
- outbuf.write('#include "{}"\n'.format(include))
- outbuf.write(harness.global_code())
-
- outbuf.write('int main(void)\n')
- outbuf.write('{\n')
- for driver in ('arch', 'gpio', 'kout'):
- outbuf.write('{}.setup();\n'.format(driver))
- if 'setup' in pta.codegen:
- for call in pta.codegen['setup']:
- outbuf.write(call)
-
- outbuf.write('while (1) {\n')
- outbuf.write(harness.start_benchmark())
-
- class_prefix = ''
- if 'instance' in opt:
- class_prefix = '{}.'.format(opt['instance'])
- elif 'instance' in pta.codegen:
- class_prefix = '{}.'.format(pta.codegen['instance'])
+ runs = list()
- num_transitions = 0
- num_traces = 0
for run in pta.dfs(opt['depth'], with_arguments = True, with_parameters = True):
if 'trace-filter' in opt and not trace_matches_filter(run, opt['trace-filter']):
continue
- outbuf.write(harness.start_run())
- harness.start_trace()
- param = pta.get_initial_param_dict()
- for transition, arguments, parameter in run:
- num_transitions += 1
- harness.append_state(transition.origin.name, param)
- harness.append_transition(transition.name, param, arguments)
- param = transition.get_params_after_transition(param, arguments)
- outbuf.write('// {} -> {}\n'.format(transition.origin.name, transition.destination.name))
- if transition.is_interrupt:
- outbuf.write('// wait for {} interrupt\n'.format(transition.name))
- transition_code = '// TODO add startTransition / stopTransition calls to interrupt routine'
- else:
- transition_code = '{}{}({});'.format(class_prefix, transition.name, ', '.join(map(str, arguments)))
- outbuf.write(harness.pass_transition(pta.get_transition_id(transition), transition_code, transition = transition, parameter = parameter))
-
- param = parameter
-
- if 'sleep' in opt:
- outbuf.write('arch.delay_ms({:d});\n'.format(opt['sleep']))
+ runs.append(run)
- outbuf.write(harness.stop_run(num_traces))
- outbuf.write('\n')
- num_traces += 1
+ num_transitions = len(runs) # referenced by run_benchmark when estimating run_timeout
- if num_transitions == 0:
+ if len(runs) == 0:
print('DFS returned no traces -- perhaps your trace-filter is too restrictive?', file=sys.stderr)
sys.exit(1)
- outbuf.write(harness.stop_benchmark())
- outbuf.write('}\n')
- outbuf.write('return 0;\n')
- outbuf.write('}\n')
+ harness = OnboardTimerHarness(gpio_pin = timer_pin, pta = pta, counter_limits = runner.get_counter_limits_us(opt['arch']))
if len(args) > 1:
- with open(args[1], 'w') as f:
- f.write(outbuf.getvalue())
+ results = run_benchmark(args[1], pta, runs, opt['arch'], opt['app'], opt['run'].split(), harness, opt['sleep'], opt['repeat'], runs_total = len(runs))
+ json_out = {
+ 'opt' : opt,
+ 'pta' : pta.to_json(),
+ 'traces' : harness.traces,
+ 'raw_output' : list(map(lambda x: x[2], results)),
+ }
+ with open(time.strftime('ptalog-%Y%m%d-%H%M%S.json'), 'w') as f:
+ json.dump(json_out, f)
else:
+ outbuf = benchmark_from_runs(pta, runs, harness)
print(outbuf.getvalue())
- if 'run' in opt:
- if 'sleep' in opt:
- run_timeout = num_transitions * opt['sleep'] / 1000
- else:
- run_timeout = num_transitions * 10 / 1000
- monitor = runner.get_monitor(opt['arch'], callback = harness.parser_cb)
- runner.build(opt['arch'], opt['app'], opt['run'].split())
- runner.flash(opt['arch'], opt['app'], opt['run'].split())
- if opt['arch'] != 'posix':
- try:
- slept = 0
- while True:
- time.sleep(5)
- slept += 5
- print('[MON] approx. {:.0f}% done'.format(slept * 100 / run_timeout))
- except KeyboardInterrupt:
- pass
- lines = monitor.get_lines()
- monitor.close()
- else:
- print('[MON] Will run benchmark for {:.0f} seconds'.format(2 * run_timeout))
- lines = monitor.run(int(2 * run_timeout))
- print(lines)
-
sys.exit(0)
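As a closing illustration, the ptalog JSON written above can be inspected with nothing more than the standard library; a minimal sketch (the file name is an example, only the keys defined in json_out are assumed):

    import json

    with open('ptalog-20190402-120000.json') as f:
        log = json.load(f)

    print('keys:', sorted(log.keys()))                  # opt, pta, raw_output, traces
    print('benchmark splits:', len(log['raw_output']))  # one entry per (possibly split) benchmark binary
    print('options used:', log['opt'])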