author     Daniel Friesel <derf@finalrewind.org>  2019-03-05 16:10:49 +0100
committer  Daniel Friesel <derf@finalrewind.org>  2019-03-05 16:10:49 +0100
commit     cc31a043f21c16986d7b33eabb05cfc34d6e0390 (patch)
tree       024529d92341a8b3096095e3043dc204082f17db
parent     cf7e68c388bd1ef0e9e2ee64b5193e09be16b6da (diff)
working benchmark generation
-rwxr-xr-x  bin/generate-dfa-benchmark.py  38
-rwxr-xr-x  lib/automata.py                12
-rwxr-xr-x  lib/dfatool.py                 20
-rw-r--r--  lib/harness.py                  6
4 files changed, 51 insertions, 25 deletions
diff --git a/bin/generate-dfa-benchmark.py b/bin/generate-dfa-benchmark.py
index ce67313..76b9de4 100755
--- a/bin/generate-dfa-benchmark.py
+++ b/bin/generate-dfa-benchmark.py
@@ -1,4 +1,26 @@
#!/usr/bin/env python3
+"""
+Generate a driver/library benchmark based on DFA/PTA traces.
+
+Usage:
+PYTHONPATH=lib bin/generate-dfa-benchmark.py [options] <pta/dfa definition>
+
+generate-dfa-benchmark reads in a DFA definition and generates runs
+(i.e., all words accepted by the DFA up to a configurable length). Each symbol
+corresponds to a function call. If arguments are specified in the DFA
+definition, each symbol corresponds to a function call with a specific set of
+arguments (so all argument combinations are present in the generated runs).
+
+Options:
+--depth=<depth> (default: 3)
+ Maximum number of function calls per run
+
+--instance=<name>
+ Override the name of the class instance used for benchmarking
+
+--sleep=<ms> (default: 0)
+ How long to sleep between function calls.
+"""
import getopt
import json
@@ -6,7 +28,7 @@ import re
import sys
import yaml
from automata import PTA
-from harness import TransitionHarness
+from harness import OnboardTimerHarness
opt = {}
@@ -44,11 +66,12 @@ if __name__ == '__main__':
else:
pta = PTA.from_yaml(yaml.safe_load(f))
- harness = TransitionHarness('GPIO::p1_0')
+ harness = OnboardTimerHarness('GPIO::p1_0')
print('#include "arch.h"')
- if pta.header:
- print('#include "{}"'.format(pta.header))
+ if 'includes' in pta.codegen:
+ for include in pta.codegen['includes']:
+ print('#include "{}"'.format(include))
print(harness.global_code())
print('void loop(void)')
@@ -59,8 +82,8 @@ if __name__ == '__main__':
class_prefix = ''
if 'instance' in opt:
class_prefix = '{}.'.format(opt['instance'])
- elif pta.instance:
- class_prefix = '{}.'.format(pta.instance)
+    elif 'instance' in pta.codegen:
+ class_prefix = '{}.'.format(pta.codegen['instance'])
for run in pta.dfs(opt['depth'], with_arguments = True):
print(harness.start_run())
@@ -85,6 +108,9 @@ if __name__ == '__main__':
print('{')
for driver in ('arch', 'gpio', 'kout'):
print('{}.setup();'.format(driver))
+ if 'setup' in pta.codegen:
+ for call in pta.codegen['setup']:
+ print(call)
print('arch.idle_loop();')
print('return 0;')
print('}')
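
For context, the benchmark generator now reads its code-generation hints from a single codegen dictionary. A minimal sketch of how those keys are consumed, assuming the key names ('includes', 'instance', 'setup') from the hunks above; the nrf24 driver values are invented for illustration:

```python
# Hypothetical contents of pta.codegen as parsed from the YAML 'codegen' block;
# the nrf24 driver names are made up for illustration.
codegen = {
    'includes': ['driver/nrf24l01.h'],
    'instance': 'nrf24',
    'setup': ['nrf24.setup();'],
}

# Same pattern as in the generator above: emit one #include per entry.
for include in codegen.get('includes', []):
    print('#include "{}"'.format(include))

# Prefix generated calls with the instance name, if one is configured.
class_prefix = ''
if 'instance' in codegen:
    class_prefix = '{}.'.format(codegen['instance'])

# Extra setup calls are printed verbatim into main() before the idle loop.
for call in codegen.get('setup', []):
    print(call)
```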
diff --git a/lib/automata.py b/lib/automata.py
index df8363f..94b3717 100755
--- a/lib/automata.py
+++ b/lib/automata.py
@@ -259,7 +259,7 @@ class PTA:
def __init__(self, state_names: list = [],
accepting_states: list = None,
parameters: list = [], initial_param_values: list = None,
- instance: str = None, header: str = None):
+ codegen: dict = {}):
"""
Return a new PTA object.
@@ -275,8 +275,7 @@ class PTA:
self.state = dict([[state_name, State(state_name)] for state_name in state_names])
self.accepting_states = accepting_states.copy() if accepting_states else None
self.parameters = parameters.copy()
- self.instance = instance
- self.header = header
+ self.codegen = codegen
if initial_param_values:
self.initial_param_values = initial_param_values.copy()
else:
@@ -385,11 +384,8 @@ class PTA:
if 'states' in yaml_input:
kwargs['state_names'] = yaml_input['states']
- if 'instance' in yaml_input:
- kwargs['instance'] = yaml_input['instance']
-
- if 'header' in yaml_input:
- kwargs['header'] = yaml_input['header']
+ if 'codegen' in yaml_input:
+ kwargs['codegen'] = yaml_input['codegen']
pta = cls(**kwargs)
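
The PTA side of the same change, reduced to a stub that shows only the keyword handling visible in this hunk (the real class takes more arguments and builds states and transitions). One caveat worth noting: the empty-dict default for codegen is shared across instances unless it is copied.

```python
class PTA:
    """Reduced stub of lib/automata.py's PTA; only the codegen plumbing is shown."""

    def __init__(self, state_names: list = [], codegen: dict = {}):
        # dict(codegen) avoids aliasing the shared mutable default argument.
        self.codegen = dict(codegen)
        self.state_names = list(state_names)

    @classmethod
    def from_yaml(cls, yaml_input: dict):
        kwargs = {}
        if 'states' in yaml_input:
            kwargs['state_names'] = yaml_input['states']
        if 'codegen' in yaml_input:
            kwargs['codegen'] = yaml_input['codegen']
        return cls(**kwargs)

pta = PTA.from_yaml({'states': ['IDLE', 'TX'], 'codegen': {'instance': 'radio'}})
assert pta.codegen['instance'] == 'radio'
```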
diff --git a/lib/dfatool.py b/lib/dfatool.py
index 38e140d..a089c1d 100755
--- a/lib/dfatool.py
+++ b/lib/dfatool.py
@@ -19,7 +19,7 @@ from utils import *
arg_support_enabled = True
-def running_mean(x, N):
+def running_mean(x: np.ndarray, N: int) -> np.ndarray:
"""
Compute running average.
@@ -44,7 +44,7 @@ def soft_cast_int(n):
except ValueError:
return n
-def vprint(verbose, string):
+def vprint(verbose: bool, string: str):
"""
Print string if verbose.
@@ -69,7 +69,7 @@ def vprint(verbose, string):
return x / y
return 1.
-def gplearn_to_function(function_str):
+def gplearn_to_function(function_str: str):
"""
Convert gplearn-style function string to Python function.
@@ -109,7 +109,7 @@ def gplearn_to_function(function_str):
print(eval_str)
return eval(eval_str, eval_globals)
-def _elem_param_and_arg_list(elem):
+def _elem_param_and_arg_list(elem: dict):
param_dict = elem['parameter']
paramkeys = sorted(param_dict.keys())
paramvalue = [soft_cast_int(param_dict[x]) for x in paramkeys]
@@ -117,10 +117,10 @@ def _elem_param_and_arg_list(elem):
paramvalue.extend(map(soft_cast_int, elem['args']))
return paramvalue
-def _arg_name(arg_index):
+def _arg_name(arg_index: int) -> str:
return '~arg{:02}'.format(arg_index)
-def append_if_set(aggregate, data, key):
+def append_if_set(aggregate: dict, data: dict, key: str):
"""Append data[key] to aggregate if key in data."""
if key in data:
aggregate.append(data[key])
@@ -131,7 +131,7 @@ def mean_or_none(arr):
return np.mean(arr)
return -1
-def aggregate_measures(aggregate, actual):
+def aggregate_measures(aggregate: float, actual: list) -> dict:
"""
Calculate error measures for model value on data list.
@@ -145,7 +145,7 @@ def aggregate_measures(aggregate, actual):
aggregate_array = np.array([aggregate] * len(actual))
return regression_measures(aggregate_array, np.array(actual))
-def regression_measures(predicted, actual):
+def regression_measures(predicted: np.ndarray, actual: np.ndarray):
"""
Calculate error measures by comparing model values to reference values.
@@ -204,7 +204,7 @@ class KeysightCSV:
"""Create a new KeysightCSV object."""
pass
- def load_data(self, filename):
+ def load_data(self, filename: str):
"""
Load log data from filename, return timestamps and currents.
@@ -225,7 +225,7 @@ class KeysightCSV:
currents[i] = float(row[2]) * -1
return timestamps, currents
-def by_name_to_by_param(by_name):
+def by_name_to_by_param(by_name: dict):
"""
Convert aggregation by name to aggregation by name and parameter values.
"""
diff --git a/lib/harness.py b/lib/harness.py
index 3f8d93c..67a9f7d 100644
--- a/lib/harness.py
+++ b/lib/harness.py
@@ -22,7 +22,11 @@ class OnboardTimerHarness:
return ret
def start_benchmark(self):
- return 'ptalog.startBenchmark(0);\n'
+ ret = 'counter.start();\n'
+ ret += 'counter.stop();\n'
+ ret += 'ptalog.passNop(counter);\n'
+ ret += 'ptalog.startBenchmark(0);\n'
+ return ret
def start_run(self):
return 'ptalog.reset();\n'
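
The new start_benchmark preamble starts and stops the counter once with nothing in between and hands the result to ptalog.passNop, presumably so the timer's own overhead can be accounted for before the actual transitions are measured. A reduced Python sketch of how the generator stitches these snippets together (the class is stubbed; only the methods from the hunk above are shown):

```python
class OnboardTimerHarness:
    """Reduced stub of lib/harness.py's OnboardTimerHarness."""

    def __init__(self, gpio_pin: str = 'GPIO::p1_0'):
        # Pin name as used by the caller in bin/generate-dfa-benchmark.py.
        self.gpio_pin = gpio_pin

    def start_benchmark(self) -> str:
        # One empty counter start/stop pass; passNop records the overhead.
        ret = 'counter.start();\n'
        ret += 'counter.stop();\n'
        ret += 'ptalog.passNop(counter);\n'
        ret += 'ptalog.startBenchmark(0);\n'
        return ret

    def start_run(self) -> str:
        return 'ptalog.reset();\n'

harness = OnboardTimerHarness('GPIO::p1_0')
# The generator prints these C++ snippets into the emitted loop() body.
print(harness.start_benchmark() + harness.start_run())
```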