summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--  .gitlab-ci.yml              1
-rwxr-xr-x  bin/analyze-timing.py       2
-rwxr-xr-x  lib/dfatool.py             35
-rwxr-xr-x  test/test_timingharness.py 29
4 files changed, 56 insertions(+), 11 deletions(-)
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 5d19d51..8e3ef97 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -19,6 +19,7 @@ run_tests:
- wget -qO test-data/20170220_164723_RF24_int_A.tar https://lib.finalrewind.org/energy-models/20170220_164723_RF24_int_A.tar
- wget -qO test-data/20190815_103347_nRF24_no-rx.json https://lib.finalrewind.org/energy-models/20190815_103347_nRF24_no-rx.json
- wget -qO test-data/20190815_111745_nRF24_no-rx.json https://lib.finalrewind.org/energy-models/20190815_111745_nRF24_no-rx.json
+ - wget -qO test-data/20190815_122531_nRF24_no-rx.json https://lib.finalrewind.org/energy-models/20190815_122531_nRF24_no-rx.json
- PYTHONPATH=lib pytest-3 --cov=lib
- python3-coverage html
artifacts:
diff --git a/bin/analyze-timing.py b/bin/analyze-timing.py
index 1c27533..39a915f 100755
--- a/bin/analyze-timing.py
+++ b/bin/analyze-timing.py
@@ -222,7 +222,7 @@ if __name__ == '__main__':
for name in names_to_remove:
by_name.pop(name)
- model = AnalyticModel(by_name, parameters, arg_count, use_corrcoef = opts['corrcoef'])
+ model = AnalyticModel(by_name, parameters, arg_count, use_corrcoef = opts['corrcoef'], function_override = function_override)
if xv_method:
xv = CrossValidator(AnalyticModel, by_name, parameters, arg_count)
diff --git a/lib/dfatool.py b/lib/dfatool.py
index 8990aed..95e76e7 100755
--- a/lib/dfatool.py
+++ b/lib/dfatool.py
@@ -1139,13 +1139,12 @@ class AnalyticModel:
assess -- calculate model quality
"""
- def __init__(self, by_name, parameters, arg_count = None, verbose = True, use_corrcoef = False):
+ def __init__(self, by_name, parameters, arg_count = None, function_override = dict(), verbose = True, use_corrcoef = False):
"""
Create a new AnalyticModel and compute parameter statistics.
- parameters:
- `by_name`: measurements aggregated by (function/state/...) name. Layout:
- dictionary with one key per name ('send', 'TX', ...) or
+ :param by_name: measurements aggregated by (function/state/...) name.
+ Layout: dictionary with one key per name ('send', 'TX', ...) or
one key per name and parameter combination
(('send', (1, 2)), ('send', (2, 3)), ('TX', (1, 2)), ('TX', (2, 3)), ...).
@@ -1167,16 +1166,23 @@ class AnalyticModel:
'param' : [[1, 0], [1, 0], [2, 0]]
# foo_count-^ ^-irrelevant
}
- `parameters`: List of parameter names
- `verbose`: Print debug/info output while generating the model?
- use_corrcoef -- use correlation coefficient instead of stddev comparison
- to detect whether a model attribute depends on a parameter
+ :param parameters: List of parameter names
+ :param function_override: dict of overrides for automatic parameter function generation.
+ If (state or transition name, model attribute) is present in function_override,
+ the corresponding text string is the function used for analytic (parameter-aware/fitted)
+ modeling of this attribute. It is passed to AnalyticFunction, see
+ there for the required format. Note that this happens regardless of
+ parameter dependency detection: The provided analytic function will be assigned
+ even if it seems like the model attribute is static / parameter-independent.
+ :param verbose: Print debug/info output while generating the model?
+ :param use_corrcoef: use correlation coefficient instead of stddev comparison to detect whether a model attribute depends on a parameter
"""
self.cache = dict()
self.by_name = by_name
self.by_param = by_name_to_by_param(by_name)
self.names = sorted(by_name.keys())
self.parameters = sorted(parameters)
+ self.function_override = function_override.copy()
self.verbose = verbose
self._use_corrcoef = use_corrcoef
self._num_args = arg_count
@@ -1292,7 +1298,16 @@ class AnalyticModel:
for attribute in self.by_name[name]['attributes']:
fit_result = get_fit_result(paramfit.results, name, attribute, self.verbose)
- if len(fit_result.keys()):
+ if (name, attribute) in self.function_override:
+ function_str = self.function_override[(name, attribute)]
+ x = AnalyticFunction(function_str, self.parameters, num_args)
+ x.fit(self.by_param, name, attribute)
+ if x.fit_success:
+ param_model[name][attribute] = {
+ 'fit_result': fit_result,
+ 'function' : x
+ }
+ elif len(fit_result.keys()):
x = analytic.function_powerset(fit_result, self.parameters, num_args)
x.fit(self.by_param, name, attribute)
@@ -1516,7 +1531,7 @@ class PTAModel:
self.cache = {}
np.seterr('raise')
self._outlier_threshold = discard_outliers
- self.function_override = function_override
+ self.function_override = function_override.copy()
self.verbose = verbose
self.hwmodel = hwmodel
self.ignore_trace_indexes = ignore_trace_indexes
diff --git a/test/test_timingharness.py b/test/test_timingharness.py
index b5937ad..5fb5fb1 100755
--- a/test/test_timingharness.py
+++ b/test/test_timingharness.py
@@ -49,6 +49,7 @@ class TestModels(unittest.TestCase):
self.assertAlmostEqual(model.stats.param_dependence_ratio(transition, 'duration', 'channel'), 0, places=2)
param_model, param_info = model.get_fitted()
+ self.assertEqual(param_info('getObserveTx', 'duration'), None)
self.assertEqual(param_info('setPALevel', 'duration'), None)
self.assertEqual(param_info('setRetries', 'duration'), None)
self.assertEqual(param_info('setup', 'duration'), None)
@@ -59,6 +60,34 @@ class TestModels(unittest.TestCase):
self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[2], 1, places=0)
self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[3], 1, places=0)
+ def test_function_override(self):
+ raw_data = TimingData(['test-data/20190815_122531_nRF24_no-rx.json'])
+ preprocessed_data = raw_data.get_preprocessed_data(verbose = False)
+ by_name, parameters, arg_count = pta_trace_to_aggregate(preprocessed_data)
+ model = AnalyticModel(by_name, parameters, arg_count, verbose = False, function_override={('write', 'duration'): '(parameter(auto_ack!) * (regression_arg(0) + regression_arg(1) * parameter(max_retry_count) + regression_arg(2) * parameter(retry_delay) + regression_arg(3) * parameter(max_retry_count) * parameter(retry_delay))) + ((1 - parameter(auto_ack!)) * regression_arg(4))'})
+ self.assertEqual(model.names, 'setAutoAck setPALevel setRetries setup write'.split(' '))
+ static_model = model.get_static()
+ self.assertAlmostEqual(static_model('setAutoAck', 'duration'), 72, places=0)
+ self.assertAlmostEqual(static_model('setPALevel', 'duration'), 146, places=0)
+ self.assertAlmostEqual(static_model('setRetries', 'duration'), 73, places=0)
+ self.assertAlmostEqual(static_model('setup', 'duration'), 6533, places=0)
+ self.assertAlmostEqual(static_model('write', 'duration'), 1181, places=0)
+
+ for transition in 'setAutoAck setPALevel setRetries setup write'.split(' '):
+ self.assertAlmostEqual(model.stats.param_dependence_ratio(transition, 'duration', 'channel'), 0, places=2)
+
+ param_model, param_info = model.get_fitted()
+ self.assertEqual(param_info('setAutoAck', 'duration'), None)
+ self.assertEqual(param_info('setPALevel', 'duration'), None)
+ self.assertEqual(param_info('setRetries', 'duration'), None)
+ self.assertEqual(param_info('setup', 'duration'), None)
+ self.assertEqual(param_info('write', 'duration')['function']._model_str, '(parameter(auto_ack!) * (regression_arg(0) + regression_arg(1) * parameter(max_retry_count) + regression_arg(2) * parameter(retry_delay) + regression_arg(3) * parameter(max_retry_count) * parameter(retry_delay))) + ((1 - parameter(auto_ack!)) * regression_arg(4))')
+
+ self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[0], 1162, places=0)
+ self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[1], 464, places=0)
+ self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[2], 1, places=0)
+ self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[3], 1, places=0)
+ self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[4], 1086, places=0)
if __name__ == '__main__':
unittest.main()