| author | Daniel Friesel <daniel.friesel@uos.de> | 2020-07-06 10:26:09 +0200 |
|---|---|---|
| committer | Daniel Friesel <daniel.friesel@uos.de> | 2020-07-06 10:26:09 +0200 |
| commit | f126d8b2d69e048627117f33f817cf22cc2e0e96 (patch) | |
| tree | b23fcf136585b5e307d3b41fccbcf11d23c8ab08 /test | |
| parent | 1ec48f55c80492c5ee7ee7e4d3ed7cd0eccd9a1c (diff) | |
make test_parameters work with NumPy <= 1.16
np.random.default_rng was introduced in NumPy 1.17, which is not yet available in
Debian stable.
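
For context, a minimal sketch (not part of the commit) contrasting the two APIs: the Generator interface that ships with NumPy >= 1.17 and the legacy global-state functions the tests now fall back to.

```python
import numpy as np

# NumPy >= 1.17: seeded Generator object, as the tests used before this commit
rng = np.random.default_rng(seed=1312)
y_new = rng.normal(size=10)

# NumPy <= 1.16 fallback used here: seed the legacy global RNG state
np.random.seed(1312)
y_old = np.random.normal(size=10)
```

Both paths produce reproducible samples, but from different bit generators (PCG64 vs. the legacy Mersenne Twister), so the drawn values differ between them.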
Diffstat (limited to 'test')
-rwxr-xr-x | test/test_parameters.py | 15 |
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/test/test_parameters.py b/test/test_parameters.py
index 63562a4..22efccf 100755
--- a/test/test_parameters.py
+++ b/test/test_parameters.py
@@ -25,9 +25,11 @@ class TestModels(unittest.TestCase):
         )
 
     def test_parameter_detection_linear(self):
-        rng = np.random.default_rng(seed=1312)
+        # rng = np.random.default_rng(seed=1312)  # requires NumPy >= 1.17
+        np.random.seed(1312)
         X = np.arange(200) % 50
-        Y = X + rng.normal(size=X.size)
+        # Y = X + rng.normal(size=X.size)  # requires NumPy >= 1.17
+        Y = X + np.random.normal(size=X.size)
         parameter_names = ["p_mod5", "p_linear"]
 
         # Test input data:
@@ -90,7 +92,8 @@ class TestModels(unittest.TestCase):
             self.assertAlmostEqual(combined_fit.eval([None, i]), i, places=0)
 
     def test_parameter_detection_multi_dimensional(self):
-        rng = np.random.default_rng(seed=1312)
+        # rng = np.random.default_rng(seed=1312)  # requires NumPy >= 1.17
+        np.random.seed(1312)
         # vary each parameter from 1 to 10
         Xi = (np.arange(50) % 10) + 1
         # Three parameters -> Build input array [[1, 1, 1], [1, 1, 2], ..., [10, 10, 10]]
@@ -104,8 +107,10 @@ class TestModels(unittest.TestCase):
             lambda x: 23 + 5 * x[0] - 3 * x[0] / x[1], signature="(n)->()"
         )
 
-        Y_lls = f_lls(X) + rng.normal(size=X.shape[0])
-        Y_ll = f_ll(X) + rng.normal(size=X.shape[0])
+        # Y_lls = f_lls(X) + rng.normal(size=X.shape[0])  # requires NumPy >= 1.17
+        # Y_ll = f_ll(X) + rng.normal(size=X.shape[0])  # requires NumPy >= 1.17
+        Y_lls = f_lls(X) + np.random.normal(size=X.shape[0])
+        Y_ll = f_ll(X) + np.random.normal(size=X.shape[0])
         parameter_names = ["lin_lin", "log_inv", "square_none"]
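
If keeping the Generator API where available were a goal, a version guard would be an alternative to commenting it out. The helper below is a hypothetical sketch of that approach; `seeded_normal` is not part of the repository.

```python
import numpy as np

def seeded_normal(seed, size):
    # Hypothetical helper: prefer the NumPy >= 1.17 Generator API,
    # fall back to the legacy global-state RNG on NumPy <= 1.16.
    if hasattr(np.random, "default_rng"):
        return np.random.default_rng(seed=seed).normal(size=size)
    np.random.seed(seed)
    return np.random.normal(size=size)

# Example: noisy linear test data, as in test_parameter_detection_linear
X = np.arange(200) % 50
Y = X + seeded_normal(1312, X.size)
```

Since the two branches yield different random streams for the same seed, test results would still depend on the installed NumPy version; the commit avoids that by using the legacy API unconditionally.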