summary | refs | log | tree | commit | diff
path: root/bin
diff options
context:
space:
mode:
author    jfalkenhagen <jfalkenhagen@uos.de>  2020-08-09 15:13:59 +0200
committer jfalkenhagen <jfalkenhagen@uos.de>  2020-08-09 15:13:59 +0200
commit    8acccebc6d9bc0423c3da011b645ee379b71a417 (patch)
tree      5e68cbc959fc8def610e10ae2a6d5fbb4767d1ff /bin
parent    61fb6094a33c4855c763f1925e61aec90294daa3 (diff)
parent    1d9bbd0ec9fdc2ed63630137602207260b13285b (diff)
Merge branch 'master' into janis
Diffstat (limited to 'bin')
-rwxr-xr-x  bin/analyze-archive.py          4
-rwxr-xr-x  bin/generate-dfa-benchmark.py  33
2 files changed, 19 insertions, 18 deletions
diff --git a/bin/analyze-archive.py b/bin/analyze-archive.py
index 8311f5c..aa266ed 100755
--- a/bin/analyze-archive.py
+++ b/bin/analyze-archive.py
@@ -150,8 +150,8 @@ def model_quality_table(result_lists, info_list):
"key",
"attribute",
"static".center(19),
- "LUT".center(19),
"parameterized".center(19),
+ "LUT".center(19),
)
)
for state_or_tran in result_lists[0]["by_name"].keys():
@@ -434,7 +434,7 @@ if __name__ == "__main__":
)
sys.exit(2)
- if len(traces) > 20:
+ if len(traces) > 40:
print(f"""Truncating plot to 40 of {len(traces)} traces (random sample)""")
traces = random.sample(traces, 40)
diff --git a/bin/generate-dfa-benchmark.py b/bin/generate-dfa-benchmark.py
index 2c53d9f..e6c3001 100755
--- a/bin/generate-dfa-benchmark.py
+++ b/bin/generate-dfa-benchmark.py
@@ -224,7 +224,7 @@ def benchmark_from_runs(
elif opt["sleep"]:
if "energytrace" in opt:
outbuf.write(f"// -> {transition.destination.name}\n")
- outbuf.write(runner.sleep_ms(opt["sleep"], opt["arch"]))
+ outbuf.write(target.sleep_ms(opt["sleep"]))
else:
outbuf.write(f"// -> {transition.destination.name}\n")
outbuf.write("arch.delay_ms({:d});\n".format(opt["sleep"]))
@@ -283,7 +283,7 @@ def run_benchmark(
needs_split = True
else:
try:
- runner.build(arch, app, run_args)
+ target.build(app, run_args)
except RuntimeError:
if len(runs) > 50:
# Application is too large -> split up runs
@@ -336,14 +336,14 @@ def run_benchmark(
i = 0
while i < opt["repeat"]:
print(f"""[RUN] flashing benchmark {i+1}/{opt["repeat"]}""")
- runner.flash(arch, app, run_args)
+ target.flash(app, run_args)
if "mimosa" in opt:
- monitor = runner.get_monitor(
- arch, callback=harness.parser_cb, mimosa=opt["mimosa"]
+ monitor = target.get_monitor(
+ callback=harness.parser_cb, mimosa=opt["mimosa"]
)
elif "energytrace" in opt:
- monitor = runner.get_monitor(
- arch, callback=harness.parser_cb, energytrace=opt["energytrace"]
+ monitor = target.get_monitor(
+ callback=harness.parser_cb, energytrace=opt["energytrace"]
)
sync_error = False
@@ -394,8 +394,8 @@ def run_benchmark(
return [(runs, harness, monitor, files)]
else:
- runner.flash(arch, app, run_args)
- monitor = runner.get_monitor(arch, callback=harness.parser_cb)
+ target.flash(app, run_args)
+ monitor = target.get_monitor(callback=harness.parser_cb)
if arch == "posix":
print("[RUN] Will run benchmark for {:.0f} seconds".format(run_timeout))
@@ -512,6 +512,11 @@ if __name__ == "__main__":
print(err)
sys.exit(2)
+ if "msp430fr" in opt["arch"]:
+ target = runner.Arch(opt["arch"], ["cpu_freq=8000000"])
+ else:
+ target = runner.Arch(opt["arch"])
+
modelfile = args[0]
pta = PTA.from_file(modelfile)
@@ -588,11 +593,7 @@ if __name__ == "__main__":
if "codegen" in driver_definition and "flags" in driver_definition["codegen"]:
if run_flags is None:
run_flags = driver_definition["codegen"]["flags"]
- if run_flags is None:
- run_flags = opt["run"].split()
-
- if "msp430fr" in opt["arch"]:
- run_flags.append("cpu_freq=8000000")
+ run_flags.extend(opt["run"].split())
runs = list(
pta.dfs(
@@ -639,7 +640,7 @@ if __name__ == "__main__":
gpio_pin=timer_pin,
gpio_mode=gpio_mode,
pta=pta,
- counter_limits=runner.get_counter_limits_us(opt["arch"]),
+ counter_limits=target.get_counter_limits_us(run_flags),
log_return_values=need_return_values,
repeat=1,
)
@@ -647,7 +648,7 @@ if __name__ == "__main__":
harness = OnboardTimerHarness(
gpio_pin=timer_pin,
pta=pta,
- counter_limits=runner.get_counter_limits_us(opt["arch"]),
+ counter_limits=target.get_counter_limits_us(run_flags),
log_return_values=need_return_values,
repeat=opt["repeat"],
)