author    Daniel Friesel <daniel.friesel@uos.de>  2019-08-15 11:49:05 +0200
committer Daniel Friesel <daniel.friesel@uos.de>  2019-08-15 11:49:05 +0200
commit    d38c3a191c44806d87584b7bd6052072820bcea3 (patch)
tree      2f280d7612212a6d9b8d76f36956255aeff75d69
parent    45952580a0acd645d032509908d0a82f72bd9c74 (diff)
Fix generate-dfa-benchmark leaving out benchmark parts when splitting runs
-rw-r--r--  .gitlab-ci.yml                  2
-rwxr-xr-x  bin/analyze-timing.py          12
-rwxr-xr-x  bin/generate-dfa-benchmark.py   7
-rwxr-xr-x  lib/dfatool.py                  2
-rw-r--r--  lib/harness.py                 25
-rwxr-xr-x  test/test_timingharness.py      6
6 files changed, 39 insertions, 15 deletions
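
The bug fixed here: run_benchmark() splits oversized benchmarks recursively, and the old code called harness.reset() on the shared harness object between the two recursive calls, wiping the traces the first half had just recorded. The fix resets once up front and hands each half its own harness copy. A minimal sketch of the pattern, using a simplified stand-in class rather than the actual dfatool harness API:

    class Harness:
        def __init__(self):
            self.traces = []

        def reset(self):
            self.traces = []

        def copy(self):
            new = Harness()
            new.traces = self.traces.copy()
            return new

    def run_benchmark(runs, harness):
        if len(runs) > 2:  # stand-in for "benchmark code is too large"
            mid = len(runs) // 2
            # buggy order was: run first half on the shared harness,
            # then harness.reset(), losing the first half's traces.
            harness.reset()
            results = run_benchmark(runs[:mid], harness.copy())
            results.extend(run_benchmark(runs[mid:], harness.copy()))
            return results
        harness.traces.extend(runs)  # pretend each run yields one trace
        return [(runs, harness, 'raw output')]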
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 6299918..35a4a99 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -17,7 +17,7 @@ run_tests:
- wget -qO test-data/20170116_145420_sharpLS013B4DN.tar https://lib.finalrewind.org/energy-models/20170116_145420_sharpLS013B4DN.tar
- wget -qO test-data/20170116_151348_sharpLS013B4DN.tar https://lib.finalrewind.org/energy-models/20170116_151348_sharpLS013B4DN.tar
- wget -qO test-data/20170220_164723_RF24_int_A.tar https://lib.finalrewind.org/energy-models/20170220_164723_RF24_int_A.tar
- - wget -qO test-data/20190726_150423_nRF24_no-rx.json https://lib.finalrewind.org/energy-models/20190726_150423_nRF24_no-rx.json
+ - wget -qO test-data/20190815_111745_nRF24_no-rx.json https://lib.finalrewind.org/energy-models/test-data/20190815_111745_nRF24_no-rx.json
- PYTHONPATH=lib pytest-3 --cov=lib
- python3-coverage html
artifacts:
diff --git a/bin/analyze-timing.py b/bin/analyze-timing.py
index 7e8174d..465932b 100755
--- a/bin/analyze-timing.py
+++ b/bin/analyze-timing.py
@@ -63,7 +63,7 @@ Options:
--export-energymodel=<model.json>
Export energy model. Requires --hwmodel.
---filter-param=<parameter name>=<parameter value>
+--filter-param=<parameter name>=<parameter value>[,<parameter name>=<parameter value>...]
Only consider measurements where <parameter name> is <parameter value>
All other measurements (including those where it is None, that is, has
not been set yet) are discarded. Note that this may remove entire
@@ -192,7 +192,9 @@ if __name__ == '__main__':
opts['corrcoef'] = False
if 'filter-param' in opts:
- opts['filter-param'] = opts['filter-param'].split('=')
+ opts['filter-param'] = list(map(lambda x: x.split('='), opts['filter-param'].split(',')))
+ else:
+ opts['filter-param'] = list()
except getopt.GetoptError as err:
print(err)
@@ -203,9 +205,9 @@ if __name__ == '__main__':
preprocessed_data = raw_data.get_preprocessed_data()
by_name, parameters, arg_count = pta_trace_to_aggregate(preprocessed_data, ignored_trace_indexes)
- if 'filter-param' in opts:
- param_index = parameters.index(opts['filter-param'][0])
- param_value = soft_cast_int(opts['filter-param'][1])
+ for param_name_and_value in opts['filter-param']:
+ param_index = parameters.index(param_name_and_value[0])
+ param_value = soft_cast_int(param_name_and_value[1])
names_to_remove = set()
for name in by_name.keys():
indices_to_keep = list(map(lambda x: x[param_index] == param_value, by_name[name]['param']))
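
--filter-param now accepts several comma-separated name=value pairs instead of a single one; parsing is just a split on ',' followed by a split on '='. An illustration of the resulting structure (parameter names made up for the example):

    opt_value = 'channel=5,txpower=10'
    filters = list(map(lambda x: x.split('='), opt_value.split(',')))
    # filters == [['channel', '5'], ['txpower', '10']]
    # each pair is applied in turn; measurements whose parameter value
    # differs (or is still None) are dropped from by_name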
diff --git a/bin/generate-dfa-benchmark.py b/bin/generate-dfa-benchmark.py
index 4fb8df4..9919d68 100755
--- a/bin/generate-dfa-benchmark.py
+++ b/bin/generate-dfa-benchmark.py
@@ -136,9 +136,10 @@ def run_benchmark(application_file: str, pta: PTA, runs: list, arch: str, app: s
if needs_split:
print('[MAKE] benchmark code is too large, splitting up')
mid = len(runs) // 2
- results = run_benchmark(application_file, pta, runs[:mid], arch, app, run_args, harness, sleep, repeat, run_offset = run_offset, runs_total = runs_total)
+ # Previously prepared trace data is useless
harness.reset()
- results.extend(run_benchmark(application_file, pta, runs[mid:], arch, app, run_args, harness, sleep, repeat, run_offset = run_offset + mid, runs_total = runs_total))
+ results = run_benchmark(application_file, pta, runs[:mid], arch, app, run_args, harness.copy(), sleep, repeat, run_offset = run_offset, runs_total = runs_total)
+ results.extend(run_benchmark(application_file, pta, runs[mid:], arch, app, run_args, harness.copy(), sleep, repeat, run_offset = run_offset + mid, runs_total = runs_total))
return results
runner.flash(arch, app, run_args)
@@ -242,7 +243,7 @@ if __name__ == '__main__':
json_out = {
'opt' : opt,
'pta' : pta.to_json(),
- 'traces' : harness.traces,
+ 'traces' : list(map(lambda x: x[1].traces, results)),
'raw_output' : list(map(lambda x: x[2], results)),
}
with open(time.strftime('ptalog-%Y%m%d-%H%M%S.json'), 'w') as f:
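
Judging from the two map() calls, each results entry is a tuple whose second element is the per-part harness copy and whose third element is the raw benchmark output, so the JSON log now stores one trace list per benchmark part instead of the single (and, before this fix, partially reset) harness.traces. A runnable sketch under that assumption, with made-up trace contents:

    class H:  # hypothetical stand-in for the per-part harness copies
        def __init__(self, traces):
            self.traces = traces

    results = [(['run0'], H([{'id': 0}]), 'raw a'),
               (['run1'], H([{'id': 1}]), 'raw b')]
    traces = list(map(lambda x: x[1].traces, results))
    # traces == [[{'id': 0}], [{'id': 1}]]: one trace list per part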
diff --git a/lib/dfatool.py b/lib/dfatool.py
index e318e81..528eabc 100755
--- a/lib/dfatool.py
+++ b/lib/dfatool.py
@@ -537,7 +537,7 @@ class TimingData:
for filename in self.filenames:
with open(filename, 'r') as f:
log_data = json.load(f)
- self.traces_by_fileno.append(log_data['traces'])
+ self.traces_by_fileno.extend(log_data['traces'])
self._concatenate_analyzed_traces()
def get_preprocessed_data(self, verbose = True):
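
The switch from append() to extend() follows from the generate-dfa-benchmark change above: 'traces' in the log file is now a list of per-part trace lists, so each part must become its own traces_by_fileno entry; append() would nest the whole file's list as a single element. A minimal illustration with made-up contents:

    log_traces = [['part 1 traces'], ['part 2 traces']]  # hypothetical

    by_fileno = []
    by_fileno.append(log_traces)
    # -> [[['part 1 traces'], ['part 2 traces']]]  (wrong: one nested entry)

    by_fileno = []
    by_fileno.extend(log_traces)
    # -> [['part 1 traces'], ['part 2 traces']]    (one entry per part)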
diff --git a/lib/harness.py b/lib/harness.py
index 020bc72..af74977 100644
--- a/lib/harness.py
+++ b/lib/harness.py
@@ -11,15 +11,30 @@ import re
# generated otherwise and it should also work with AnalyticModel (which does
# not have states)
class TransitionHarness:
+ """Foo."""
def __init__(self, gpio_pin = None, pta = None, log_return_values = False):
+ """
+ Create a new TransitionHarness
+
+ :param gpio_pin: multipass GPIO Pin used for transition synchronization, e.g. `GPIO::p1_0`. Optional.
+ The GPIO output is high iff a transition is executing
+ :param pta: PTA object
+ :param log_return_values: Log return values of transition function calls?
+ """
self.gpio_pin = gpio_pin
self.pta = pta
self.log_return_values = log_return_values
self.reset()
+ def copy(self):
+ new_object = __class__(gpio_pin = self.gpio_pin, pta = self.pta, log_return_values = self.log_return_values)
+ new_object.traces = self.traces.copy()
+ new_object.trace_id = self.trace_id
+ return new_object
+
def reset(self):
self.traces = []
- self.trace_id = 1
+ self.trace_id = 0
self.synced = False
def global_code(self):
@@ -102,12 +117,18 @@ class TransitionHarness:
pass
class OnboardTimerHarness(TransitionHarness):
+ """Bar."""
def __init__(self, counter_limits, **kwargs):
super().__init__(**kwargs)
- self.trace_id = 0
self.trace_length = 0
self.one_cycle_in_us, self.one_overflow_in_us, self.counter_max_overflow = counter_limits
+ def copy(self):
+ new_harness = __class__((self.one_cycle_in_us, self.one_overflow_in_us, self.counter_max_overflow), gpio_pin = self.gpio_pin, pta = self.pta, log_return_values = self.log_return_values)
+ new_harness.traces = self.traces.copy()
+ new_harness.trace_id = self.trace_id
+ return new_harness
+
def global_code(self):
ret = '#include "driver/counter.h"\n'
ret += '#define PTALOG_TIMING\n'
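
Note that both copy() implementations duplicate only the traces list itself: list.copy() is shallow, so any trace dicts inside remain shared between original and copy. That appears sufficient here, since reset() empties the list before the copies are made and each copy then only appends its own traces. The distinction in brief:

    a = [{'id': 0}]
    b = a.copy()           # new list, same dict objects inside
    b.append({'id': 1})    # appending affects only b
    assert a == [{'id': 0}]
    b[0]['id'] = 99        # mutating a shared element affects both
    assert a[0]['id'] == 99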
diff --git a/test/test_timingharness.py b/test/test_timingharness.py
index ac19a29..6479f0a 100755
--- a/test/test_timingharness.py
+++ b/test/test_timingharness.py
@@ -5,7 +5,7 @@ import unittest
class TestModels(unittest.TestCase):
def test_model_singlefile_rf24(self):
- raw_data = TimingData(['test-data/20190726_150423_nRF24_no-rx.json'])
+ raw_data = TimingData(['test-data/20190815_111745_nRF24_no-rx.json'])
preprocessed_data = raw_data.get_preprocessed_data(verbose = False)
by_name, parameters, arg_count = pta_trace_to_aggregate(preprocessed_data)
model = AnalyticModel(by_name, parameters, arg_count, verbose = False)
@@ -14,7 +14,7 @@ class TestModels(unittest.TestCase):
self.assertAlmostEqual(static_model('setPALevel', 'duration'), 146, places=0)
self.assertAlmostEqual(static_model('setRetries', 'duration'), 73, places=0)
self.assertAlmostEqual(static_model('setup', 'duration'), 6533, places=0)
- self.assertAlmostEqual(static_model('write', 'duration'), 12400, places=0)
+ self.assertAlmostEqual(static_model('write', 'duration'), 12634, places=0)
for transition in 'setPALevel setRetries setup write'.split(' '):
self.assertAlmostEqual(model.stats.param_dependence_ratio(transition, 'duration', 'channel'), 0, places=2)
@@ -25,7 +25,7 @@ class TestModels(unittest.TestCase):
self.assertEqual(param_info('setup', 'duration'), None)
self.assertEqual(param_info('write', 'duration')['function']._model_str, '0 + regression_arg(0) + regression_arg(1) * parameter(max_retry_count) + regression_arg(2) * parameter(retry_delay) + regression_arg(3) * parameter(max_retry_count) * parameter(retry_delay)')
- self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[0], 1016, places=0)
+ self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[0], 1163, places=0)
self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[1], 464, places=0)
self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[2], 1, places=0)
self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[3], 1, places=0)
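
The _model_str asserted above corresponds to duration = beta0 + beta1 * max_retry_count + beta2 * retry_delay + beta3 * max_retry_count * retry_delay, with the fitted coefficients checked right after it. A plug-in sanity check using the rounded values (illustrative only: the fitted coefficients are not exactly integral, and the unit is presumably microseconds, matching one_cycle_in_us above):

    beta = (1163, 464, 1, 1)

    def write_duration_us(max_retry_count, retry_delay):
        return (beta[0]
                + beta[1] * max_retry_count
                + beta[2] * retry_delay
                + beta[3] * max_retry_count * retry_delay)

    # e.g. write_duration_us(5, 1000) == 1163 + 2320 + 1000 + 5000 == 9483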