commit:    adaa03cf0247b065e6b3863cf16ad88ee24f5169 (patch)
author:    Daniel Friesel <daniel.friesel@uos.de>  2020-07-03 12:26:50 +0200
committer: Daniel Friesel <daniel.friesel@uos.de>  2020-07-03 12:26:50 +0200
tree:      82ee685bc24872d233965b0dbee1c58f0bc61d7e
parent:    2b9aa06f7ca63eb58a4fe9abde9880fada1773e0 (diff)

AnalyticFunction: Remove _ prefix from public attributes
Diffstat:
 bin/analyze-archive.py      (-rwxr-xr-x) |  8
 bin/analyze-timing.py       (-rwxr-xr-x) |  4
 bin/eval-outlier-removal.py (-rwxr-xr-x) | 16
 lib/automata.py             (-rwxr-xr-x) |  6
 lib/functions.py            (-rw-r--r--) | 20
 test/test_parameters.py     (-rwxr-xr-x) |  2
 test/test_ptamodel.py       (-rwxr-xr-x) | 18
 test/test_timingharness.py  (-rwxr-xr-x) | 32

8 files changed, 53 insertions(+), 53 deletions(-)
diff --git a/bin/analyze-archive.py b/bin/analyze-archive.py index bf3ab64..e23fb9e 100755 --- a/bin/analyze-archive.py +++ b/bin/analyze-archive.py @@ -645,13 +645,13 @@ if __name__ == "__main__": if param_info(state, attribute): print( "{:10s}: {}".format( - state, param_info(state, attribute)["function"]._model_str + state, param_info(state, attribute)["function"].model_function ) ) print( "{:10s} {}".format( "", - param_info(state, attribute)["function"]._regression_args, + param_info(state, attribute)["function"].model_args, ) ) for trans in model.transitions(): @@ -661,14 +661,14 @@ if __name__ == "__main__": "{:10s}: {:10s}: {}".format( trans, attribute, - param_info(trans, attribute)["function"]._model_str, + param_info(trans, attribute)["function"].model_function, ) ) print( "{:10s} {:10s} {}".format( "", "", - param_info(trans, attribute)["function"]._regression_args, + param_info(trans, attribute)["function"].model_args, ) ) diff --git a/bin/analyze-timing.py b/bin/analyze-timing.py index 4039f45..924388d 100755 --- a/bin/analyze-timing.py +++ b/bin/analyze-timing.py @@ -423,14 +423,14 @@ if __name__ == "__main__": "{:10s}: {:10s}: {}".format( trans, attribute, - param_info(trans, attribute)["function"]._model_str, + param_info(trans, attribute)["function"].model_function, ) ) print( "{:10s} {:10s} {}".format( "", "", - param_info(trans, attribute)["function"]._regression_args, + param_info(trans, attribute)["function"].model_args, ) ) diff --git a/bin/eval-outlier-removal.py b/bin/eval-outlier-removal.py index 14f0e60..b091ea4 100755 --- a/bin/eval-outlier-removal.py +++ b/bin/eval-outlier-removal.py @@ -141,12 +141,12 @@ if __name__ == "__main__": if param_i1(state, attribute): print( "{:10s}: {}".format( - state, param_i1(state, attribute)["function"]._model_str + state, param_i1(state, attribute)["function"].model_function ) ) print( "{:10s} {}".format( - "", param_i1(state, attribute)["function"]._regression_args + "", param_i1(state, 
attribute)["function"].model_args ) ) for trans in m1.transitions(): @@ -162,12 +162,12 @@ if __name__ == "__main__": "{:10s}: {:10s}: {}".format( trans, attribute, - param_i1(trans, attribute)["function"]._model_str, + param_i1(trans, attribute)["function"].model_function, ) ) print( "{:10s} {:10s} {}".format( - "", "", param_i1(trans, attribute)["function"]._regression_args + "", "", param_i1(trans, attribute)["function"].model_args ) ) param_m2, param_i2 = m2.get_fitted() @@ -176,12 +176,12 @@ if __name__ == "__main__": if param_i2(state, attribute): print( "{:10s}: {}".format( - state, param_i2(state, attribute)["function"]._model_str + state, param_i2(state, attribute)["function"].model_function ) ) print( "{:10s} {}".format( - "", param_i2(state, attribute)["function"]._regression_args + "", param_i2(state, attribute)["function"].model_args ) ) for trans in m2.transitions(): @@ -197,12 +197,12 @@ if __name__ == "__main__": "{:10s}: {:10s}: {}".format( trans, attribute, - param_i2(trans, attribute)["function"]._model_str, + param_i2(trans, attribute)["function"].model_function, ) ) print( "{:10s} {:10s} {}".format( - "", "", param_i2(trans, attribute)["function"]._regression_args + "", "", param_i2(trans, attribute)["function"].model_args ) ) diff --git a/lib/automata.py b/lib/automata.py index 69b3969..ebe1871 100755 --- a/lib/automata.py +++ b/lib/automata.py @@ -103,7 +103,7 @@ class PTAAttribute: def __repr__(self): if self.function is not None: return "PTAATtribute<{:.0f}, {}>".format( - self.value, self.function._model_str + self.value, self.function.model_function ) return "PTAATtribute<{:.0f}, None>".format(self.value) @@ -137,8 +137,8 @@ class PTAAttribute: } if self.function: ret["function"] = { - "raw": self.function._model_str, - "regression_args": list(self.function._regression_args), + "raw": self.function.model_function, + "regression_args": list(self.function.model_args), } ret["function_error"] = self.function_error return ret diff --git 
a/lib/functions.py b/lib/functions.py index 0b849bd..99ba17d 100644 --- a/lib/functions.py +++ b/lib/functions.py @@ -141,7 +141,7 @@ class AnalyticFunction: """ self._parameter_names = parameters self._num_args = num_args - self._model_str = function_str + self.model_function = function_str rawfunction = function_str self._dependson = [False] * (len(parameters) + num_args) self.fit_success = False @@ -174,12 +174,12 @@ class AnalyticFunction: self._function = function_str if regression_args: - self._regression_args = regression_args.copy() + self.model_args = regression_args.copy() self._fit_success = True elif type(function_str) == str: - self._regression_args = list(np.ones((num_vars))) + self.model_args = list(np.ones((num_vars))) else: - self._regression_args = [] + self.model_args = [] def get_fit_data(self, by_param, state_or_tran, model_attribute): """ @@ -260,22 +260,22 @@ class AnalyticFunction: error_function = lambda P, X, y: self._function(P, X) - y try: res = optimize.least_squares( - error_function, self._regression_args, args=(X, Y), xtol=2e-15 + error_function, self.model_args, args=(X, Y), xtol=2e-15 ) except ValueError as err: logger.warning( "Fit failed for {}/{}: {} (function: {})".format( - state_or_tran, model_attribute, err, self._model_str + state_or_tran, model_attribute, err, self.model_function ), ) return if res.status > 0: - self._regression_args = res.x + self.model_args = res.x self.fit_success = True else: logger.warning( "Fit failed for {}/{}: {} (function: {})".format( - state_or_tran, model_attribute, res.message, self._model_str + state_or_tran, model_attribute, res.message, self.model_function ), ) else: @@ -308,9 +308,9 @@ class AnalyticFunction: corresponds to lexically first parameter, etc. :param arg_list: argument values (list of float), if arguments are used. 
""" - if len(self._regression_args) == 0: + if len(self.model_args) == 0: return self._function(param_list, arg_list) - return self._function(self._regression_args, param_list) + return self._function(self.model_args, param_list) class analytic: diff --git a/test/test_parameters.py b/test/test_parameters.py index 5d7ec84..baf1c99 100755 --- a/test/test_parameters.py +++ b/test/test_parameters.py @@ -63,7 +63,7 @@ class TestModels(unittest.TestCase): combined_fit = analytic.function_powerset(fit_result, parameter_names, 0) self.assertEqual( - combined_fit._model_str, + combined_fit.model_function, "0 + regression_arg(0) + regression_arg(1) * parameter(p_linear)", ) self.assertEqual( diff --git a/test/test_ptamodel.py b/test/test_ptamodel.py index 3237450..9abe3c0 100755 --- a/test/test_ptamodel.py +++ b/test/test_ptamodel.py @@ -134,26 +134,26 @@ class TestModels(unittest.TestCase): param_model, param_info = model.get_fitted() self.assertEqual(param_info("POWERDOWN", "power"), None) self.assertEqual( - param_info("RX", "power")["function"]._model_str, + param_info("RX", "power")["function"].model_function, "0 + regression_arg(0) + regression_arg(1) * np.sqrt(parameter(datarate))", ) self.assertAlmostEqual( - param_info("RX", "power")["function"]._regression_args[0], 48530.7, places=0 + param_info("RX", "power")["function"].model_args[0], 48530.7, places=0 ) self.assertAlmostEqual( - param_info("RX", "power")["function"]._regression_args[1], 117, places=0 + param_info("RX", "power")["function"].model_args[1], 117, places=0 ) self.assertEqual(param_info("STANDBY1", "power"), None) self.assertEqual( - param_info("TX", "power")["function"]._model_str, + param_info("TX", "power")["function"].model_function, "0 + regression_arg(0) + regression_arg(1) * 1/(parameter(datarate)) + regression_arg(2) * parameter(txpower) + regression_arg(3) * 1/(parameter(datarate)) * parameter(txpower)", ) self.assertEqual( - param_info("epilogue", "timeout")["function"]._model_str, + 
param_info("epilogue", "timeout")["function"].model_function, "0 + regression_arg(0) + regression_arg(1) * 1/(parameter(datarate))", ) self.assertEqual( - param_info("stopListening", "duration")["function"]._model_str, + param_info("stopListening", "duration")["function"].model_function, "0 + regression_arg(0) + regression_arg(1) * 1/(parameter(datarate))", ) @@ -371,7 +371,7 @@ class TestModels(unittest.TestCase): param_model, param_info = model.get_fitted() self.assertEqual(param_info("IDLE", "power"), None) self.assertEqual( - param_info("RX", "power")["function"]._model_str, + param_info("RX", "power")["function"].model_function, "0 + regression_arg(0) + regression_arg(1) * np.log(parameter(symbolrate) + 1)", ) self.assertEqual(param_info("SLEEP", "power"), None) @@ -380,10 +380,10 @@ class TestModels(unittest.TestCase): self.assertEqual(param_info("XOFF", "power"), None) self.assertAlmostEqual( - param_info("RX", "power")["function"]._regression_args[0], 84415, places=0 + param_info("RX", "power")["function"].model_args[0], 84415, places=0 ) self.assertAlmostEqual( - param_info("RX", "power")["function"]._regression_args[1], 206, places=0 + param_info("RX", "power")["function"].model_args[1], 206, places=0 ) diff --git a/test/test_timingharness.py b/test/test_timingharness.py index 29e21f8..13289ea 100755 --- a/test/test_timingharness.py +++ b/test/test_timingharness.py @@ -30,25 +30,25 @@ class TestModels(unittest.TestCase): self.assertEqual(param_info("setRetries", "duration"), None) self.assertEqual(param_info("setup", "duration"), None) self.assertEqual( - param_info("write", "duration")["function"]._model_str, + param_info("write", "duration")["function"].model_function, "0 + regression_arg(0) + regression_arg(1) * parameter(max_retry_count) + regression_arg(2) * parameter(retry_delay) + regression_arg(3) * parameter(max_retry_count) * parameter(retry_delay)", ) self.assertAlmostEqual( - param_info("write", "duration")["function"]._regression_args[0], + 
param_info("write", "duration")["function"].model_args[0], 1163, places=0, ) self.assertAlmostEqual( - param_info("write", "duration")["function"]._regression_args[1], + param_info("write", "duration")["function"].model_args[1], 464, places=0, ) self.assertAlmostEqual( - param_info("write", "duration")["function"]._regression_args[2], 1, places=0 + param_info("write", "duration")["function"].model_args[2], 1, places=0 ) self.assertAlmostEqual( - param_info("write", "duration")["function"]._regression_args[3], 1, places=0 + param_info("write", "duration")["function"].model_args[3], 1, places=0 ) def test_dependent_parameter_pruning(self): @@ -80,25 +80,25 @@ class TestModels(unittest.TestCase): self.assertEqual(param_info("setRetries", "duration"), None) self.assertEqual(param_info("setup", "duration"), None) self.assertEqual( - param_info("write", "duration")["function"]._model_str, + param_info("write", "duration")["function"].model_function, "0 + regression_arg(0) + regression_arg(1) * parameter(max_retry_count) + regression_arg(2) * parameter(retry_delay) + regression_arg(3) * parameter(max_retry_count) * parameter(retry_delay)", ) self.assertAlmostEqual( - param_info("write", "duration")["function"]._regression_args[0], + param_info("write", "duration")["function"].model_args[0], 1163, places=0, ) self.assertAlmostEqual( - param_info("write", "duration")["function"]._regression_args[1], + param_info("write", "duration")["function"].model_args[1], 464, places=0, ) self.assertAlmostEqual( - param_info("write", "duration")["function"]._regression_args[2], 1, places=0 + param_info("write", "duration")["function"].model_args[2], 1, places=0 ) self.assertAlmostEqual( - param_info("write", "duration")["function"]._regression_args[3], 1, places=0 + param_info("write", "duration")["function"].model_args[3], 1, places=0 ) def test_function_override(self): @@ -139,28 +139,28 @@ class TestModels(unittest.TestCase): self.assertEqual(param_info("setRetries", "duration"), 
None) self.assertEqual(param_info("setup", "duration"), None) self.assertEqual( - param_info("write", "duration")["function"]._model_str, + param_info("write", "duration")["function"].model_function, "(parameter(auto_ack!) * (regression_arg(0) + regression_arg(1) * parameter(max_retry_count) + regression_arg(2) * parameter(retry_delay) + regression_arg(3) * parameter(max_retry_count) * parameter(retry_delay))) + ((1 - parameter(auto_ack!)) * regression_arg(4))", ) self.assertAlmostEqual( - param_info("write", "duration")["function"]._regression_args[0], + param_info("write", "duration")["function"].model_args[0], 1162, places=0, ) self.assertAlmostEqual( - param_info("write", "duration")["function"]._regression_args[1], + param_info("write", "duration")["function"].model_args[1], 464, places=0, ) self.assertAlmostEqual( - param_info("write", "duration")["function"]._regression_args[2], 1, places=0 + param_info("write", "duration")["function"].model_args[2], 1, places=0 ) self.assertAlmostEqual( - param_info("write", "duration")["function"]._regression_args[3], 1, places=0 + param_info("write", "duration")["function"].model_args[3], 1, places=0 ) self.assertAlmostEqual( - param_info("write", "duration")["function"]._regression_args[4], + param_info("write", "duration")["function"].model_args[4], 1086, places=0, ) |