author    jfalkenhagen <jfalkenhagen@uos.de>  2020-07-16 16:39:19 +0200
committer jfalkenhagen <jfalkenhagen@uos.de>  2020-07-16 16:39:19 +0200
commit    98d23807e35cc211415c7e0c887f1b1b502f10e5 (patch)
tree      ebb649c585166e546dda704990ed4c5eeb95519f
parent    a00ffc0e32ddc72a8faceec4344432cdbf3b90c7 (diff)
parent    af4cc108b5c5132a991a2b83d258ed55e985936f (diff)
Merge branch 'master' into janis
-rw-r--r--   README.md                                                  6
-rwxr-xr-x   bin/analyze-archive.py                                   135
-rwxr-xr-x   bin/analyze-timing.py                                     52
-rwxr-xr-x   bin/cal-hist                                              92
-rwxr-xr-x   bin/eval-accounting-overhead.py                            2
-rwxr-xr-x   bin/eval-online-model-accuracy.py                          2
-rwxr-xr-x   bin/eval-outlier-removal.py                               19
-rwxr-xr-x   bin/eval-rel-energy.py                                    10
-rwxr-xr-x   bin/generate-dfa-benchmark.py                             33
-rwxr-xr-x   bin/gptest.py                                              5
-rwxr-xr-x   bin/gradient                                              14
-rwxr-xr-x   bin/keysightdlog.py (renamed from lib/keysightdlog.py)     0
-rwxr-xr-x   bin/mim-vs-keysight.py                                     2
-rwxr-xr-x   bin/mimosa-etv                                           163
-rwxr-xr-x   bin/mimplot                                               68
-rwxr-xr-x   bin/test_corrcoef.py                                      13
-rw-r--r--   doc/generate-dfa-benchmark.md                             85
-rwxr-xr-x   lib/automata.py                                           17
-rw-r--r--   lib/data_parameters.py                                    17
-rw-r--r--   lib/functions.py                                          91
-rw-r--r--   lib/harness.py                                             4
-rw-r--r--   lib/lex.py                                                 9
-rw-r--r--   lib/loader.py (renamed from lib/dfatool.py)             1603
-rw-r--r--   lib/model.py                                            1156
-rw-r--r--   lib/parameters.py                                        252
-rwxr-xr-x   lib/protocol_benchmarks.py                                 7
-rw-r--r--   lib/runner.py                                             50
-rw-r--r--   lib/utils.py                                              14
-rw-r--r--   lib/validation.py                                        238
-rwxr-xr-x   test/test_codegen.py                                     170
-rwxr-xr-x   test/test_parameters.py                                  228
-rwxr-xr-x   test/test_pta.py                                         917
-rwxr-xr-x   test/test_ptamodel.py                                   1003
-rwxr-xr-x   test/test_timingharness.py                               200
34 files changed, 3833 insertions, 2844 deletions
diff --git a/README.md b/README.md
index 47c38df..5529261 100644
--- a/README.md
+++ b/README.md
@@ -8,8 +8,8 @@ sudo apt install python3-numpy python3-scipy python3-sklearn
Code Style
---
-Please do not commit code with significant PEP8 violations. It's best to check
-this with a pre-commit hook:
+Please only commit blackened code. It's best to check this with a pre-commit
+hook:
```
#!/bin/sh
@@ -25,5 +25,5 @@ fi
# Redirect output to stderr.
exec 1>&2
-git diff --cached $against | flake8 --diff
+black --check $(git diff --cached --name-only --diff-filter=ACM $against | grep '\.py$')
```
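The updated hook gates commits on `black --check` over the staged Python files. For illustration, a hypothetical Python rendition of the same gate, omitting the hook's `$against` handling; this is a sketch, not part of the commit:

```python
#!/usr/bin/env python3
# Hypothetical Python equivalent of the sh pre-commit hook above.
import subprocess
import sys

# Staged added/copied/modified files, mirroring
# `git diff --cached --name-only --diff-filter=ACM`.
staged = subprocess.run(
    ["git", "diff", "--cached", "--name-only", "--diff-filter=ACM"],
    capture_output=True,
    text=True,
    check=True,
).stdout.splitlines()

py_files = [name for name in staged if name.endswith(".py")]
if py_files:
    # `black --check` exits non-zero if any file would be reformatted,
    # which aborts the commit when run as a pre-commit hook.
    sys.exit(subprocess.run(["black", "--check", *py_files]).returncode)
```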
diff --git a/bin/analyze-archive.py b/bin/analyze-archive.py
index 4531d86..8311f5c 100755
--- a/bin/analyze-archive.py
+++ b/bin/analyze-archive.py
@@ -61,18 +61,25 @@ Options:
Specify traces which should be ignored due to bogus data. 1 is the first
trace, 2 the second, and so on.
---discard-outliers=
- not supported at the moment
-
--cross-validate=<method>:<count>
Perform cross validation when computing model quality.
Only works with --show-quality=table at the moment.
+
If <method> is "montecarlo": Randomly divide data into 2/3 training and 1/3
validation, <count> times. Reported model quality is the average of all
validation runs. Data is partitioned without regard for parameter values,
so a specific parameter combination may be present in both training and
validation sets or just one of them.
+    If <method> is "kfold": Perform k-fold cross validation with k=<count>:
+    divide data into 1-1/k training and 1/k validation, <count> times.
+    In the first set, items 0, k, 2k, ... are used for validation; in the
+    second set, items 1, k+1, 2k+1, ...; and so on. Reported model quality
+    is the average of all validation runs. Data is partitioned without regard
+    for parameter values, so a specific parameter combination may be present
+    in both training and validation sets or just one of them.
+
--function-override=<name attribute function>[;<name> <attribute> <function>;...]
Manually specify the function to fit for <name> <attribute>. A function
specified this way bypasses parameter detection: It is always assigned,
@@ -94,17 +101,22 @@ Options:
--export-energymodel=<model.json>
Export energy model. Works out of the box for v1 and v2 logfiles. Requires --hwmodel for v0 logfiles.
+
+--no-cache
+ Do not load cached measurement results
"""
import getopt
import json
+import logging
import random
import re
import sys
from dfatool import plotter
-from dfatool.dfatool import PTAModel, RawData, pta_trace_to_aggregate
-from dfatool.dfatool import gplearn_to_function
-from dfatool.dfatool import CrossValidator
+from dfatool.loader import RawData, pta_trace_to_aggregate
+from dfatool.functions import gplearn_to_function
+from dfatool.model import PTAModel
+from dfatool.validation import CrossValidator
from dfatool.utils import filter_aggregate_by_param
from dfatool.automata import PTA
@@ -133,6 +145,15 @@ def format_quality_measures(result):
def model_quality_table(result_lists, info_list):
+ print(
+ "{:20s} {:15s} {:19s} {:19s} {:19s}".format(
+ "key",
+ "attribute",
+ "static".center(19),
+ "LUT".center(19),
+ "parameterized".center(19),
+ )
+ )
for state_or_tran in result_lists[0]["by_name"].keys():
for key in result_lists[0]["by_name"][state_or_tran].keys():
buf = "{:20s} {:15s}".format(state_or_tran, key)
@@ -143,7 +164,7 @@ def model_quality_table(result_lists, info_list):
result = results["by_name"][state_or_tran][key]
buf += format_quality_measures(result)
else:
- buf += "{:6}----{:9}".format("", "")
+ buf += "{:7}----{:8}".format("", "")
print(buf)
@@ -279,7 +300,6 @@ def print_html_model_data(model, pm, pq, lm, lq, am, ai, aq):
if __name__ == "__main__":
ignored_trace_indexes = []
- discard_outliers = None
safe_functions_enabled = False
function_override = {}
show_models = []
@@ -292,11 +312,12 @@ if __name__ == "__main__":
try:
optspec = (
- "info "
+ "info no-cache "
"plot-unparam= plot-param= plot-traces= show-models= show-quality= "
- "ignored-trace-indexes= discard-outliers= function-override= "
+ "ignored-trace-indexes= function-override= "
"export-traces= "
"filter-param= "
+ "log-level= "
"cross-validate= "
"with-safe-functions hwmodel= export-energymodel="
)
@@ -313,9 +334,6 @@ if __name__ == "__main__":
if 0 in ignored_trace_indexes:
print("[E] arguments to --ignored-trace-indexes start from 1")
- if "discard-outliers" in opt:
- discard_outliers = float(opt["discard-outliers"])
-
if "function-override" in opt:
for function_desc in opt["function-override"].split(";"):
state_or_tran, attribute, *function_str = function_desc.split(" ")
@@ -344,12 +362,21 @@ if __name__ == "__main__":
if "hwmodel" in opt:
pta = PTA.from_file(opt["hwmodel"])
+ if "log-level" in opt:
+ numeric_level = getattr(logging, opt["log-level"].upper(), None)
+ if not isinstance(numeric_level, int):
+            print(f"Invalid log level: {opt['log-level']}", file=sys.stderr)
+ sys.exit(1)
+ logging.basicConfig(level=numeric_level)
+
except getopt.GetoptError as err:
print(err, file=sys.stderr)
sys.exit(2)
raw_data = RawData(
- args, with_traces=("export-traces" in opt or "plot-traces" in opt)
+ args,
+ with_traces=("export-traces" in opt or "plot-traces" in opt),
+ skip_cache=("no-cache" in opt),
)
if "info" in opt:
@@ -357,9 +384,12 @@ if __name__ == "__main__":
if raw_data.version <= 1:
data_source = "MIMOSA"
elif raw_data.version == 2:
- data_source = "MSP430 EnergyTrace"
- else:
- data_source = "UNKNOWN"
+ if raw_data.ptalog and "sync" in raw_data.ptalog["opt"]["energytrace"]:
+ data_source = "MSP430 EnergyTrace, sync={}".format(
+ raw_data.ptalog["opt"]["energytrace"]["sync"]
+ )
+ else:
+ data_source = "MSP430 EnergyTrace"
print(f" Data source ID: {raw_data.version} ({data_source})")
preprocessed_data = raw_data.get_preprocessed_data()
@@ -434,7 +464,6 @@ if __name__ == "__main__":
parameters,
arg_count,
traces=preprocessed_data,
- discard_outliers=discard_outliers,
function_override=function_override,
pta=pta,
)
@@ -495,21 +524,6 @@ if __name__ == "__main__":
model.stats.param_dependence_ratio(state, "power", param),
)
)
- if model.stats.has_codependent_parameters(state, "power", param):
- print(
- "{:24s} co-dependencies: {:s}".format(
- "",
- ", ".join(
- model.stats.codependent_parameters(
- state, "power", param
- )
- ),
- )
- )
- for param_dict in model.stats.codependent_parameter_value_dicts(
- state, "power", param
- ):
- print("{:24s} parameter-aware for {}".format("", param_dict))
for trans in model.transitions():
# Mean power is not a typical transition attribute, but may be present for debugging or analysis purposes
@@ -551,6 +565,8 @@ if __name__ == "__main__":
if xv_method == "montecarlo":
static_quality = xv.montecarlo(lambda m: m.get_static(), xv_count)
+ elif xv_method == "kfold":
+ static_quality = xv.kfold(lambda m: m.get_static(), xv_count)
else:
static_quality = model.assess(static_model)
@@ -560,6 +576,8 @@ if __name__ == "__main__":
if xv_method == "montecarlo":
lut_quality = xv.montecarlo(lambda m: m.get_param_lut(fallback=True), xv_count)
+ elif xv_method == "kfold":
+ lut_quality = xv.kfold(lambda m: m.get_param_lut(fallback=True), xv_count)
else:
lut_quality = model.assess(lut_model)
@@ -616,21 +634,21 @@ if __name__ == "__main__":
if "param" in show_models or "all" in show_models:
if not model.stats.can_be_fitted():
- print(
- "[!] measurements have insufficient distinct numeric parameters for fitting. A parameter-aware model is not available."
+ logging.warning(
+ "measurements have insufficient distinct numeric parameters for fitting. A parameter-aware model is not available."
)
for state in model.states():
for attribute in model.attributes(state):
if param_info(state, attribute):
print(
"{:10s}: {}".format(
- state, param_info(state, attribute)["function"]._model_str
+ state,
+ param_info(state, attribute)["function"].model_function,
)
)
print(
"{:10s} {}".format(
- "",
- param_info(state, attribute)["function"]._regression_args,
+ "", param_info(state, attribute)["function"].model_args
)
)
for trans in model.transitions():
@@ -640,19 +658,19 @@ if __name__ == "__main__":
"{:10s}: {:10s}: {}".format(
trans,
attribute,
- param_info(trans, attribute)["function"]._model_str,
+ param_info(trans, attribute)["function"].model_function,
)
)
print(
"{:10s} {:10s} {}".format(
- "",
- "",
- param_info(trans, attribute)["function"]._regression_args,
+ "", "", param_info(trans, attribute)["function"].model_args
)
)
if xv_method == "montecarlo":
analytic_quality = xv.montecarlo(lambda m: m.get_fitted()[0], xv_count)
+ elif xv_method == "kfold":
+ analytic_quality = xv.kfold(lambda m: m.get_fitted()[0], xv_count)
else:
analytic_quality = model.assess(param_model)
@@ -686,7 +704,7 @@ if __name__ == "__main__":
)
if "overall" in show_quality or "all" in show_quality:
- print("overall static/param/lut MAE assuming equal state distribution:")
+ print("overall state static/param/lut MAE assuming equal state distribution:")
print(
" {:6.1f} / {:6.1f} / {:6.1f} µW".format(
model.assess_states(static_model),
@@ -694,15 +712,30 @@ if __name__ == "__main__":
model.assess_states(lut_model),
)
)
- print("overall static/param/lut MAE assuming 95% STANDBY1:")
- distrib = {"STANDBY1": 0.95, "POWERDOWN": 0.03, "TX": 0.01, "RX": 0.01}
- print(
- " {:6.1f} / {:6.1f} / {:6.1f} µW".format(
- model.assess_states(static_model, distribution=distrib),
- model.assess_states(param_model, distribution=distrib),
- model.assess_states(lut_model, distribution=distrib),
+ distrib = dict()
+ num_states = len(model.states())
+ p95_state = None
+ for state in model.states():
+ distrib[state] = 1.0 / num_states
+
+ if "STANDBY1" in model.states():
+ p95_state = "STANDBY1"
+ elif "SLEEP" in model.states():
+ p95_state = "SLEEP"
+
+ if p95_state is not None:
+ for state in distrib.keys():
+ distrib[state] = 0.05 / (num_states - 1)
+ distrib[p95_state] = 0.95
+
+ print(f"overall state static/param/lut MAE assuming 95% {p95_state}:")
+ print(
+ " {:6.1f} / {:6.1f} / {:6.1f} µW".format(
+ model.assess_states(static_model, distribution=distrib),
+ model.assess_states(param_model, distribution=distrib),
+ model.assess_states(lut_model, distribution=distrib),
+ )
)
- )
if "summary" in show_quality or "all" in show_quality:
model_summary_table(
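The `--cross-validate=kfold:<count>` partitioning documented above assigns items to validation sets by index modulo k. A minimal sketch of that scheme, independent of dfatool's actual CrossValidator.kfold implementation:

```python
import numpy as np

def kfold_split(n_samples: int, k: int):
    """Yield (training, validation) index arrays, one pair per fold.

    Validation set j holds items j, j+k, j+2k, ...; the rest train.
    """
    indices = np.arange(n_samples)
    for j in range(k):
        yield indices[indices % k != j], indices[indices % k == j]

# With 10 samples and k=5, validation sets are [0 5], [1 6], [2 7], ...
for training, validation in kfold_split(10, 5):
    print(training, validation)
```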
diff --git a/bin/analyze-timing.py b/bin/analyze-timing.py
index 4039f45..ddd49ec 100755
--- a/bin/analyze-timing.py
+++ b/bin/analyze-timing.py
@@ -75,12 +75,14 @@ Options:
import getopt
import json
+import logging
import re
import sys
from dfatool import plotter
-from dfatool.dfatool import AnalyticModel, TimingData, pta_trace_to_aggregate
-from dfatool.dfatool import gplearn_to_function
-from dfatool.dfatool import CrossValidator
+from dfatool.loader import TimingData, pta_trace_to_aggregate
+from dfatool.functions import gplearn_to_function
+from dfatool.model import AnalyticModel
+from dfatool.validation import CrossValidator
from dfatool.utils import filter_aggregate_by_param
from dfatool.parameters import prune_dependent_parameters
@@ -170,7 +172,6 @@ def print_text_model_data(model, pm, pq, lm, lq, am, ai, aq):
if __name__ == "__main__":
ignored_trace_indexes = []
- discard_outliers = None
safe_functions_enabled = False
function_override = {}
show_models = []
@@ -183,8 +184,9 @@ if __name__ == "__main__":
try:
optspec = (
"plot-unparam= plot-param= show-models= show-quality= "
- "ignored-trace-indexes= discard-outliers= function-override= "
+ "ignored-trace-indexes= function-override= "
"filter-param= "
+ "log-level= "
"cross-validate= "
"corrcoef param-info "
"with-safe-functions hwmodel= export-energymodel="
@@ -202,9 +204,6 @@ if __name__ == "__main__":
if 0 in ignored_trace_indexes:
print("[E] arguments to --ignored-trace-indexes start from 1")
- if "discard-outliers" in opt:
- discard_outliers = float(opt["discard-outliers"])
-
if "function-override" in opt:
for function_desc in opt["function-override"].split(";"):
state_or_tran, attribute, *function_str = function_desc.split(" ")
@@ -237,6 +236,13 @@ if __name__ == "__main__":
else:
opt["filter-param"] = list()
+ if "log-level" in opt:
+ numeric_level = getattr(logging, opt["log-level"].upper(), None)
+ if not isinstance(numeric_level, int):
+            print(f"Invalid log level: {opt['log-level']}", file=sys.stderr)
+ sys.exit(1)
+ logging.basicConfig(level=numeric_level)
+
except getopt.GetoptError as err:
print(err)
sys.exit(2)
@@ -297,30 +303,6 @@ if __name__ == "__main__":
model.stats.param_dependence_ratio(trans, "duration", param),
)
)
- if model.stats.has_codependent_parameters(trans, "duration", param):
- print(
- "{:24s} co-dependencies: {:s}".format(
- "",
- ", ".join(
- model.stats.codependent_parameters(
- trans, "duration", param
- )
- ),
- )
- )
- for param_dict in model.stats.codependent_parameter_value_dicts(
- trans, "duration", param
- ):
- print("{:24s} parameter-aware for {}".format("", param_dict))
- # import numpy as np
- # safe_div = np.vectorize(lambda x,y: 0. if x == 0 else 1 - x/y)
- # ratio_by_value = safe_div(model.stats.stats['write']['duration']['lut_by_param_values']['max_retry_count'], model.stats.stats['write']['duration']['std_by_param_values']['max_retry_count'])
- # err_mode = np.seterr('warn')
- # dep_by_value = ratio_by_value > 0.5
- # np.seterr(**err_mode)
- # Actually a few True entries should show up here, but they don't...
- # and why is there a non-power-of-two number of True entries in the matrix? 3 of them is odd...
- # print(dep_by_value)
if xv_method == "montecarlo":
static_quality = xv.montecarlo(lambda m: m.get_static(), xv_count)
@@ -423,14 +405,12 @@ if __name__ == "__main__":
"{:10s}: {:10s}: {}".format(
trans,
attribute,
- param_info(trans, attribute)["function"]._model_str,
+ param_info(trans, attribute)["function"].model_function,
)
)
print(
"{:10s} {:10s} {}".format(
- "",
- "",
- param_info(trans, attribute)["function"]._regression_args,
+ "", "", param_info(trans, attribute)["function"].model_args
)
)
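Both scripts resolve the new `--log-level` option by attribute lookup on the logging module, which works because logging exposes its level constants (DEBUG, INFO, ...) as module attributes. The pattern in isolation:

```python
import logging

def resolve_log_level(name: str) -> int:
    # "debug" -> logging.DEBUG == 10; unknown names yield None, not an int.
    level = getattr(logging, name.upper(), None)
    if not isinstance(level, int):
        raise ValueError(f"Invalid log level: {name}")
    return level

logging.basicConfig(level=resolve_log_level("debug"))
logging.debug("now visible")
```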
diff --git a/bin/cal-hist b/bin/cal-hist
index ba2ff47..a92ae1e 100755
--- a/bin/cal-hist
+++ b/bin/cal-hist
@@ -7,7 +7,7 @@ import struct
import sys
import tarfile
import matplotlib.pyplot as plt
-from dfatool.dfatool import MIMOSA
+from dfatool.loader import MIMOSA
from dfatool.utils import running_mean
voltage = float(sys.argv[1])
@@ -18,50 +18,74 @@ mim = MIMOSA(voltage, shunt)
charges, triggers = mim.load_data(mimfile)
trigidx = mim.trigger_edges(triggers)
-cal_edges = mim.calibration_edges(running_mean(mim.currents_nocal(charges[0:trigidx[0]]), 10))
+cal_edges = mim.calibration_edges(
+ running_mean(mim.currents_nocal(charges[0 : trigidx[0]]), 10)
+)
+
+# charges = charges[charges > 20000]
+# charges = charges[charges < 21000]
-#charges = charges[charges > 20000]
-#charges = charges[charges < 21000]
def show_hist(data):
- bins = np.max(data) - np.min(data)
- if bins == 0:
- bins = 1
- if bins > 1000:
- bins = bins / 10
- #bins = 500
- n, bins, patches = plt.hist(data, bins, normed=0, facecolor='green', alpha=0.75)
- plt.grid(True)
- plt.show()
- print(np.histogram(data, bins=bins))
+ bins = np.max(data) - np.min(data)
+ if bins == 0:
+ bins = 1
+ if bins > 1000:
+ bins = bins / 10
+ # bins = 500
+ n, bins, patches = plt.hist(data, bins, normed=0, facecolor="green", alpha=0.75)
+ plt.grid(True)
+ plt.show()
+ print(np.histogram(data, bins=bins))
+
-#show_hist(charges[cal_edges[0]:cal_edges[1]])
-#show_hist(charges[cal_edges[4]:cal_edges[5]])
-#show_hist(charges[cal_edges[2]:cal_edges[3]])
-#show_hist(charges[trigidx[7]:trigidx[8]])
-#show_hist(np.array(charges))
+# show_hist(charges[cal_edges[0]:cal_edges[1]])
+# show_hist(charges[cal_edges[4]:cal_edges[5]])
+# show_hist(charges[cal_edges[2]:cal_edges[3]])
+# show_hist(charges[trigidx[7]:trigidx[8]])
+# show_hist(np.array(charges))
-#print(charges[cal_edges[0]:cal_edges[1]])
-#print(charges[cal_edges[4]:cal_edges[5]])
-#print(charges[cal_edges[2]:cal_edges[3]])
+# print(charges[cal_edges[0]:cal_edges[1]])
+# print(charges[cal_edges[4]:cal_edges[5]])
+# print(charges[cal_edges[2]:cal_edges[3]])
-plt.hist(mim.charge_to_current_nocal(charges[cal_edges[2]:cal_edges[3]]) * 1e-3, 100, normed=0, facecolor='blue', alpha=0.8)
-plt.xlabel('mA MimosaCMD')
-plt.ylabel('#')
+plt.hist(
+ mim.charge_to_current_nocal(charges[cal_edges[2] : cal_edges[3]]) * 1e-3,
+ 100,
+ normed=0,
+ facecolor="blue",
+ alpha=0.8,
+)
+plt.xlabel("mA MimosaCMD")
+plt.ylabel("#")
plt.grid(True)
plt.show()
-plt.hist(mim.charge_to_current_nocal(charges[cal_edges[4]:cal_edges[5]]) * 1e-3, 100, normed=0, facecolor='blue', alpha=0.8)
-plt.xlabel('mA MimosaCMD')
-plt.ylabel('#')
+plt.hist(
+ mim.charge_to_current_nocal(charges[cal_edges[4] : cal_edges[5]]) * 1e-3,
+ 100,
+ normed=0,
+ facecolor="blue",
+ alpha=0.8,
+)
+plt.xlabel("mA MimosaCMD")
+plt.ylabel("#")
plt.grid(True)
plt.show()
-plt.hist(mim.charge_to_current_nocal(charges[cal_edges[0]:cal_edges[1]]) * 1e-3, 100, normed=0, facecolor='blue', alpha=0.8)
-plt.xlabel('mA MimosaCMD')
-plt.ylabel('#')
+plt.hist(
+ mim.charge_to_current_nocal(charges[cal_edges[0] : cal_edges[1]]) * 1e-3,
+ 100,
+ normed=0,
+ facecolor="blue",
+ alpha=0.8,
+)
+plt.xlabel("mA MimosaCMD")
+plt.ylabel("#")
plt.grid(True)
plt.show()
-plt.hist(charges[cal_edges[0]:cal_edges[1]], 100, normed=0, facecolor='blue', alpha=0.8)
-plt.xlabel('Rohwert MimosaCMD')
-plt.ylabel('#')
+plt.hist(
+ charges[cal_edges[0] : cal_edges[1]], 100, normed=0, facecolor="blue", alpha=0.8
+)
+plt.xlabel("Rohwert MimosaCMD")
+plt.ylabel("#")
plt.grid(True)
plt.show()
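cal-hist smooths the raw MIMOSA current samples with `running_mean` before locating calibration edges. The real implementation lives in dfatool.utils; a typical windowed mean of this kind can be sketched as follows (an assumption, shown only for context):

```python
import numpy as np

def running_mean(x: np.ndarray, n: int) -> np.ndarray:
    """Sliding-window average with window size n, same length as x."""
    return np.convolve(x, np.ones(n) / n, mode="same")

samples = np.array([0.0, 0.0, 1.0, 1.0, 1.0, 0.0])
print(running_mean(samples, 2))  # edges are blurred across the window
```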
diff --git a/bin/eval-accounting-overhead.py b/bin/eval-accounting-overhead.py
index 7ea0807..1c03bf8 100755
--- a/bin/eval-accounting-overhead.py
+++ b/bin/eval-accounting-overhead.py
@@ -13,7 +13,7 @@ providing overhead per transition and getEnergy overhead
"""
-from dfatool.dfatool import AnalyticModel, TimingData, pta_trace_to_aggregate
+from dfatool.loader import AnalyticModel, TimingData, pta_trace_to_aggregate
import json
import sys
diff --git a/bin/eval-online-model-accuracy.py b/bin/eval-online-model-accuracy.py
index 202ac28..97fd8e2 100755
--- a/bin/eval-online-model-accuracy.py
+++ b/bin/eval-online-model-accuracy.py
@@ -28,7 +28,7 @@ import itertools
import yaml
from dfatool.automata import PTA
from dfatool.codegen import get_simulated_accountingmethod
-from dfatool.dfatool import regression_measures
+from dfatool.model import regression_measures
import numpy as np
opt = dict()
diff --git a/bin/eval-outlier-removal.py b/bin/eval-outlier-removal.py
index 14f0e60..c03266d 100755
--- a/bin/eval-outlier-removal.py
+++ b/bin/eval-outlier-removal.py
@@ -3,7 +3,8 @@
import getopt
import re
import sys
-from dfatool.dfatool import PTAModel, RawData, pta_trace_to_aggregate
+from dfatool.loader import RawData, pta_trace_to_aggregate
+from dfatool.model import PTAModel
opt = dict()
@@ -141,12 +142,12 @@ if __name__ == "__main__":
if param_i1(state, attribute):
print(
"{:10s}: {}".format(
- state, param_i1(state, attribute)["function"]._model_str
+ state, param_i1(state, attribute)["function"].model_function
)
)
print(
"{:10s} {}".format(
- "", param_i1(state, attribute)["function"]._regression_args
+ "", param_i1(state, attribute)["function"].model_args
)
)
for trans in m1.transitions():
@@ -162,12 +163,12 @@ if __name__ == "__main__":
"{:10s}: {:10s}: {}".format(
trans,
attribute,
- param_i1(trans, attribute)["function"]._model_str,
+ param_i1(trans, attribute)["function"].model_function,
)
)
print(
"{:10s} {:10s} {}".format(
- "", "", param_i1(trans, attribute)["function"]._regression_args
+ "", "", param_i1(trans, attribute)["function"].model_args
)
)
param_m2, param_i2 = m2.get_fitted()
@@ -176,12 +177,12 @@ if __name__ == "__main__":
if param_i2(state, attribute):
print(
"{:10s}: {}".format(
- state, param_i2(state, attribute)["function"]._model_str
+ state, param_i2(state, attribute)["function"].model_function
)
)
print(
"{:10s} {}".format(
- "", param_i2(state, attribute)["function"]._regression_args
+ "", param_i2(state, attribute)["function"].model_args
)
)
for trans in m2.transitions():
@@ -197,12 +198,12 @@ if __name__ == "__main__":
"{:10s}: {:10s}: {}".format(
trans,
attribute,
- param_i2(trans, attribute)["function"]._model_str,
+ param_i2(trans, attribute)["function"].model_function,
)
)
print(
"{:10s} {:10s} {}".format(
- "", "", param_i2(trans, attribute)["function"]._regression_args
+ "", "", param_i2(trans, attribute)["function"].model_args
)
)
diff --git a/bin/eval-rel-energy.py b/bin/eval-rel-energy.py
index 8a2be13..aeaf88c 100755
--- a/bin/eval-rel-energy.py
+++ b/bin/eval-rel-energy.py
@@ -3,7 +3,8 @@
import getopt
import re
import sys
-from dfatool.dfatool import PTAModel, RawData, pta_trace_to_aggregate
+from dfatool.loader import RawData, pta_trace_to_aggregate
+from dfatool.model import PTAModel
opt = dict()
@@ -22,7 +23,6 @@ def get_file_groups(args):
if __name__ == "__main__":
ignored_trace_indexes = []
- discard_outliers = None
safe_functions_enabled = False
function_override = {}
show_models = []
@@ -31,7 +31,7 @@ if __name__ == "__main__":
try:
optspec = (
"plot-unparam= plot-param= show-models= show-quality= "
- "ignored-trace-indexes= discard-outliers= function-override= "
+ "ignored-trace-indexes= function-override= "
"with-safe-functions"
)
raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(" "))
@@ -47,9 +47,6 @@ if __name__ == "__main__":
if 0 in ignored_trace_indexes:
print("[E] arguments to --ignored-trace-indexes start from 1")
- if "discard-outliers" in opt:
- discard_outliers = float(opt["discard-outliers"])
-
if "function-override" in opt:
for function_desc in opt["function-override"].split(";"):
state_or_tran, attribute, *function_str = function_desc.split(" ")
@@ -88,7 +85,6 @@ if __name__ == "__main__":
arg_count,
traces=preprocessed_data,
ignore_trace_indexes=ignored_trace_indexes,
- discard_outliers=discard_outliers,
function_override=function_override,
verbose=False,
)
diff --git a/bin/generate-dfa-benchmark.py b/bin/generate-dfa-benchmark.py
index 478b221..2c53d9f 100755
--- a/bin/generate-dfa-benchmark.py
+++ b/bin/generate-dfa-benchmark.py
@@ -61,6 +61,10 @@ Options:
--energytrace=[k=v,k=v,...]
Perform energy measurements using MSP430 EnergyTrace hardware. Includes --timing.
+ Additional configuration settings:
+ sync = bar (Barcode mode (default): synchronize measurements via barcodes embedded in the energy trace)
+ sync = la (Logic Analyzer mode (WIP): An external logic analyzer captures transition timing)
+ sync = timing (Timing mode (WIP): The on-board cycle counter captures transition timing)
--trace-filter=<transition,transition,transition,...>[ <transition,transition,transition,...> ...]
Only consider traces whose beginning matches one of the provided transition sequences.
@@ -219,17 +223,11 @@ def benchmark_from_runs(
)
elif opt["sleep"]:
if "energytrace" in opt:
- outbuf.write(
- "arch.sleep_ms({:d}); // {}\n".format(
- opt["sleep"], transition.destination.name
- )
- )
+ outbuf.write(f"// -> {transition.destination.name}\n")
+ outbuf.write(runner.sleep_ms(opt["sleep"], opt["arch"]))
else:
- outbuf.write(
- "arch.delay_ms({:d}); // {}\n".format(
- opt["sleep"], transition.destination.name
- )
- )
+ outbuf.write(f"// -> {transition.destination.name}\n")
+ outbuf.write("arch.delay_ms({:d});\n".format(opt["sleep"]))
outbuf.write(harness.stop_run(num_traces))
if dummy:
@@ -337,6 +335,7 @@ def run_benchmark(
files = list()
i = 0
while i < opt["repeat"]:
+ print(f"""[RUN] flashing benchmark {i+1}/{opt["repeat"]}""")
runner.flash(arch, app, run_args)
if "mimosa" in opt:
monitor = runner.get_monitor(
@@ -353,7 +352,6 @@ def run_benchmark(
while not harness.done:
# possible race condition: if the benchmark completes at this
# exact point, it sets harness.done and unsets harness.synced.
- # vvv
if (
slept > 30
and slept < 40
@@ -372,11 +370,11 @@ def run_benchmark(
time.sleep(5)
slept += 5
print(
- "[RUN] {:d}/{:d} ({:.0f}%), current benchmark at {:.0f}%".format(
+ "[RUN] {:d}/{:d} ({:.0f}%) at trace {:d}".format(
run_offset,
runs_total,
run_offset * 100 / runs_total,
- slept * 100 / run_timeout,
+ harness.trace_id,
)
)
except KeyboardInterrupt:
@@ -593,6 +591,9 @@ if __name__ == "__main__":
if run_flags is None:
run_flags = opt["run"].split()
+ if "msp430fr" in opt["arch"]:
+ run_flags.append("cpu_freq=8000000")
+
runs = list(
pta.dfs(
opt["depth"],
@@ -630,9 +631,13 @@ if __name__ == "__main__":
post_transition_delay_us=20,
)
elif "energytrace" in opt:
+ # Use barcode sync by default
+ gpio_mode = "bar"
+ if "sync" in opt["energytrace"] and opt["energytrace"]["sync"] != "bar":
+ gpio_mode = "around"
harness = OnboardTimerHarness(
gpio_pin=timer_pin,
- gpio_mode="bar",
+ gpio_mode=gpio_mode,
pta=pta,
counter_limits=runner.get_counter_limits_us(opt["arch"]),
log_return_values=need_return_values,
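The gpio_mode selection added here maps the energytrace sync setting onto the harness: barcode sync keeps "bar", any explicit non-barcode sync ("la", "timing") falls back to "around". The decision as a self-contained sketch:

```python
def select_gpio_mode(energytrace_opts: dict) -> str:
    # Barcode sync is the default; "la" and "timing" (both WIP) switch
    # the harness GPIO mode to "around".
    if energytrace_opts.get("sync", "bar") != "bar":
        return "around"
    return "bar"

assert select_gpio_mode({}) == "bar"
assert select_gpio_mode({"sync": "la"}) == "around"
```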
diff --git a/bin/gptest.py b/bin/gptest.py
index 82b4575..b5012e5 100755
--- a/bin/gptest.py
+++ b/bin/gptest.py
@@ -2,12 +2,11 @@
import sys
import numpy as np
-from dfatool.dfatool import (
- PTAModel,
+from dfatool.loader import (
RawData,
- regression_measures,
pta_trace_to_aggregate,
)
+from dfatool.model import PTAModel, regression_measures
from gplearn.genetic import SymbolicRegressor
from multiprocessing import Pool
diff --git a/bin/gradient b/bin/gradient
index 8280794..ca60949 100755
--- a/bin/gradient
+++ b/bin/gradient
@@ -7,7 +7,7 @@ import struct
import sys
import tarfile
import matplotlib.pyplot as plt
-from dfatool.dfatool import MIMOSA
+from dfatool.loader import MIMOSA
from dfatool.utils import running_mean
voltage = float(sys.argv[1])
@@ -17,17 +17,17 @@ mimfile = sys.argv[3]
mim = MIMOSA(voltage, shunt)
charges, triggers = mim.load_file(mimfile)
-#charges = charges[2000000:3000000]
+# charges = charges[2000000:3000000]
currents = running_mean(mim.charge_to_current_nocal(charges), 10) * 1e-6
xr = np.arange(len(currents)) * 1e-5
threshold = 1e-5
grad = np.gradient(currents, 2)
tp = np.abs(grad) > threshold
-plt.plot( xr, currents, "r-")
-plt.plot( xr, grad, "y-")
-plt.plot( xr[tp], grad[tp], "bo")
-plt.xlabel('Zeit [s]')
-plt.ylabel('Strom [A]')
+plt.plot(xr, currents, "r-")
+plt.plot(xr, grad, "y-")
+plt.plot(xr[tp], grad[tp], "bo")
+plt.xlabel("Zeit [s]")
+plt.ylabel("Strom [A]")
plt.grid(True)
plt.show()
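bin/gradient flags sample indices where the smoothed current changes quickly. Stripped of the MIMOSA specifics, the detection reduces to a gradient threshold:

```python
import numpy as np

currents = np.array([0.1, 0.1, 0.1, 0.9, 0.9, 0.1, 0.1])
grad = np.gradient(currents, 2)  # central differences with sample spacing 2
edges = np.abs(grad) > 0.1       # boolean mask of fast changes
print(np.nonzero(edges)[0])      # indices around the rising and falling step
```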
diff --git a/lib/keysightdlog.py b/bin/keysightdlog.py
index 89264b9..89264b9 100755
--- a/lib/keysightdlog.py
+++ b/bin/keysightdlog.py
diff --git a/bin/mim-vs-keysight.py b/bin/mim-vs-keysight.py
index c214f2f..c9a7249 100755
--- a/bin/mim-vs-keysight.py
+++ b/bin/mim-vs-keysight.py
@@ -3,7 +3,7 @@
import numpy as np
import sys
import matplotlib.pyplot as plt
-from dfatool.dfatool import MIMOSA, KeysightCSV
+from dfatool.loader import MIMOSA, KeysightCSV
from dfatool.utils import running_mean
voltage = float(sys.argv[1])
diff --git a/bin/mimosa-etv b/bin/mimosa-etv
index e23b46c..9b6e897 100755
--- a/bin/mimosa-etv
+++ b/bin/mimosa-etv
@@ -8,13 +8,16 @@ import numpy as np
import os
import re
import sys
-from dfatool.dfatool import aggregate_measures, MIMOSA
+from dfatool.loader import MIMOSA
+from dfatool.model import aggregate_measures
from dfatool.utils import running_mean
opt = dict()
+
def show_help():
- print('''mimosa-etv - MIMOSA Analyzer and Visualizer
+ print(
+ """mimosa-etv - MIMOSA Analyzer and Visualizer
USAGE
@@ -41,7 +44,9 @@ OPTIONS
Show power/time plot
--stat
Show mean voltage, current, and power as well as total energy consumption.
- ''')
+ """
+ )
+
def peak_search(data, lower, upper, direction_function):
while upper - lower > 1e-6:
@@ -58,6 +63,7 @@ def peak_search(data, lower, upper, direction_function):
upper = bs_test
return None
+
def peak_search2(data, lower, upper, check_function):
for power in np.arange(lower, upper, 1e-6):
peakcount = itertools.groupby(data, lambda x: x >= power)
@@ -67,38 +73,39 @@ def peak_search2(data, lower, upper, check_function):
return power
return None
-if __name__ == '__main__':
+
+if __name__ == "__main__":
try:
- optspec = ('help skip= threshold= threshold-peakcount= plot stat')
- raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(' '))
+ optspec = "help skip= threshold= threshold-peakcount= plot stat"
+ raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(" "))
for option, parameter in raw_opts:
- optname = re.sub(r'^--', '', option)
+ optname = re.sub(r"^--", "", option)
opt[optname] = parameter
- if 'help' in opt:
+ if "help" in opt:
show_help()
sys.exit(0)
- if 'skip' in opt:
- opt['skip'] = int(opt['skip'])
+ if "skip" in opt:
+ opt["skip"] = int(opt["skip"])
else:
- opt['skip'] = 0
+ opt["skip"] = 0
- if 'threshold' in opt and opt['threshold'] != 'mean':
- opt['threshold'] = float(opt['threshold'])
+ if "threshold" in opt and opt["threshold"] != "mean":
+ opt["threshold"] = float(opt["threshold"])
- if 'threshold-peakcount' in opt:
- opt['threshold-peakcount'] = int(opt['threshold-peakcount'])
+ if "threshold-peakcount" in opt:
+ opt["threshold-peakcount"] = int(opt["threshold-peakcount"])
except getopt.GetoptError as err:
print(err)
sys.exit(2)
except IndexError:
- print('Usage: mimosa-etv <duration>')
+ print("Usage: mimosa-etv <duration>")
sys.exit(2)
except ValueError:
- print('Error: duration or skip is not a number')
+ print("Error: duration or skip is not a number")
sys.exit(2)
voltage, shunt, inputfile = args
@@ -110,7 +117,7 @@ if __name__ == '__main__':
currents = mim.charge_to_current_nocal(charges) * 1e-6
powers = currents * voltage
- if 'threshold-peakcount' in opt:
+ if "threshold-peakcount" in opt:
bs_mean = np.mean(powers)
# Finding the correct threshold is tricky. If #peaks < peakcont, our
@@ -126,42 +133,59 @@ if __name__ == '__main__':
# #peaks != peakcount and threshold >= mean, we go down.
# If that doesn't work, we fall back to a linear search in 1 µW steps
def direction_function(peakcount, power):
- if peakcount == opt['threshold-peakcount']:
+ if peakcount == opt["threshold-peakcount"]:
return 0
if power < bs_mean:
return 1
return -1
+
threshold = peak_search(powers, np.min(powers), np.max(powers), direction_function)
if threshold == None:
- threshold = peak_search2(power, np.min(power), np.max(power), direction_function)
+ threshold = peak_search2(
+            powers, np.min(powers), np.max(powers), direction_function
+ )
if threshold != None:
- print('Threshold set to {:.0f} µW : {:.9f}'.format(threshold * 1e6, threshold))
- opt['threshold'] = threshold
+ print(
+ "Threshold set to {:.0f} µW : {:.9f}".format(
+ threshold * 1e6, threshold
+ )
+ )
+ opt["threshold"] = threshold
else:
- print('Found no working threshold')
+ print("Found no working threshold")
- if 'threshold' in opt:
- if opt['threshold'] == 'mean':
- opt['threshold'] = np.mean(powers)
- print('Threshold set to {:.0f} µW : {:.9f}'.format(opt['threshold'] * 1e6, opt['threshold']))
+ if "threshold" in opt:
+ if opt["threshold"] == "mean":
+ opt["threshold"] = np.mean(powers)
+ print(
+ "Threshold set to {:.0f} µW : {:.9f}".format(
+ opt["threshold"] * 1e6, opt["threshold"]
+ )
+ )
baseline_mean = 0
- if np.any(powers < opt['threshold']):
- baseline_mean = np.mean(powers[powers < opt['threshold']])
- print('Baseline mean: {:.0f} µW : {:.9f}'.format(
- baseline_mean * 1e6, baseline_mean))
- if np.any(powers >= opt['threshold']):
- print('Peak mean: {:.0f} µW : {:.9f}'.format(
- np.mean(powers[powers >= opt['threshold']]) * 1e6,
- np.mean(powers[powers >= opt['threshold']])))
+ if np.any(powers < opt["threshold"]):
+ baseline_mean = np.mean(powers[powers < opt["threshold"]])
+ print(
+ "Baseline mean: {:.0f} µW : {:.9f}".format(
+ baseline_mean * 1e6, baseline_mean
+ )
+ )
+ if np.any(powers >= opt["threshold"]):
+ print(
+ "Peak mean: {:.0f} µW : {:.9f}".format(
+ np.mean(powers[powers >= opt["threshold"]]) * 1e6,
+ np.mean(powers[powers >= opt["threshold"]]),
+ )
+ )
peaks = []
peak_start = -1
for i, dp in enumerate(powers):
- if dp >= opt['threshold'] and peak_start == -1:
+ if dp >= opt["threshold"] and peak_start == -1:
peak_start = i
- elif dp < opt['threshold'] and peak_start != -1:
+ elif dp < opt["threshold"] and peak_start != -1:
peaks.append((peak_start, i))
peak_start = -1
@@ -170,32 +194,55 @@ if __name__ == '__main__':
for peak in peaks:
duration = (peak[1] - peak[0]) * 1e-5
total_energy += np.mean(powers[peak[0] : peak[1]]) * duration
- delta_energy += (np.mean(powers[peak[0] : peak[1]]) - baseline_mean) * duration
+ delta_energy += (
+ np.mean(powers[peak[0] : peak[1]]) - baseline_mean
+ ) * duration
delta_powers = powers[peak[0] : peak[1]] - baseline_mean
- print('{:.2f}ms peak ({:f} -> {:f})'.format(duration * 1000,
- peak[0], peak[1]))
- print(' {:f} µJ / mean {:f} µW'.format(
- np.mean(powers[peak[0] : peak[1]]) * duration * 1e6,
- np.mean(powers[peak[0] : peak[1]]) * 1e6 ))
+ print(
+ "{:.2f}ms peak ({:f} -> {:f})".format(duration * 1000, peak[0], peak[1])
+ )
+ print(
+ " {:f} µJ / mean {:f} µW".format(
+ np.mean(powers[peak[0] : peak[1]]) * duration * 1e6,
+ np.mean(powers[peak[0] : peak[1]]) * 1e6,
+ )
+ )
measures = aggregate_measures(np.mean(delta_powers), delta_powers)
- print(' {:f} µW delta mean = {:0.1f}% / {:f} µW error'.format(np.mean(delta_powers) * 1e6, measures['smape'], measures['rmsd'] * 1e6 ))
- print('Peak energy mean: {:.0f} µJ : {:.9f}'.format(
- total_energy * 1e6 / len(peaks), total_energy / len(peaks)))
- print('Average per-peak energy (delta over baseline): {:.0f} µJ : {:.9f}'.format(
- delta_energy * 1e6 / len(peaks), delta_energy / len(peaks)))
-
-
- if 'stat' in opt:
+ print(
+ " {:f} µW delta mean = {:0.1f}% / {:f} µW error".format(
+ np.mean(delta_powers) * 1e6,
+ measures["smape"],
+ measures["rmsd"] * 1e6,
+ )
+ )
+ print(
+ "Peak energy mean: {:.0f} µJ : {:.9f}".format(
+ total_energy * 1e6 / len(peaks), total_energy / len(peaks)
+ )
+ )
+ print(
+ "Average per-peak energy (delta over baseline): {:.0f} µJ : {:.9f}".format(
+ delta_energy * 1e6 / len(peaks), delta_energy / len(peaks)
+ )
+ )
+
+ if "stat" in opt:
mean_current = np.mean(currents)
mean_power = np.mean(powers)
- print('Mean current: {:.0f} µA : {:.9f}'.format(mean_current * 1e6, mean_current))
- print('Mean power: {:.0f} µW : {:.9f}'.format(mean_power * 1e6, mean_power))
-
- if 'plot' in opt:
+ print(
+ "Mean current: {:.0f} µA : {:.9f}".format(
+ mean_current * 1e6, mean_current
+ )
+ )
+ print(
+ "Mean power: {:.0f} µW : {:.9f}".format(mean_power * 1e6, mean_power)
+ )
+
+ if "plot" in opt:
timestamps = np.arange(len(powers)) * 1e-5
- pwrhandle, = plt.plot(timestamps, powers, 'b-', label='U*I', markersize=1)
+ (pwrhandle,) = plt.plot(timestamps, powers, "b-", label="U*I", markersize=1)
plt.legend(handles=[pwrhandle])
- plt.xlabel('Time [s]')
- plt.ylabel('Power [W]')
+ plt.xlabel("Time [s]")
+ plt.ylabel("Power [W]")
plt.grid(True)
plt.show()
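The peak bookkeeping in mimosa-etv is a plain threshold-crossing scan: a peak opens when power rises to the threshold and closes when it drops below. The same loop in isolation:

```python
def find_peaks(powers, threshold):
    """Return (start, end) index pairs of regions with powers >= threshold."""
    peaks, peak_start = [], -1
    for i, dp in enumerate(powers):
        if dp >= threshold and peak_start == -1:
            peak_start = i
        elif dp < threshold and peak_start != -1:
            peaks.append((peak_start, i))
            peak_start = -1
    return peaks

print(find_peaks([1, 5, 6, 1, 7, 1], 4))  # [(1, 3), (4, 5)]
```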
diff --git a/bin/mimplot b/bin/mimplot
index 2a888ee..a55a875 100755
--- a/bin/mimplot
+++ b/bin/mimplot
@@ -9,54 +9,52 @@ import struct
import sys
import tarfile
import matplotlib.pyplot as plt
-from dfatool.dfatool import MIMOSA
+from dfatool.loader import MIMOSA
from dfatool.utils import running_mean
opt = dict()
-if __name__ == '__main__':
+if __name__ == "__main__":
- try:
- optspec = (
- 'export= '
- )
+ try:
+ optspec = "export= "
- raw_opts, args = getopt.getopt(sys.argv[1:], '', optspec.split())
+ raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split())
- for option, parameter in raw_opts:
- optname = re.sub(r'^--', '', option)
- opt[optname] = parameter
+ for option, parameter in raw_opts:
+ optname = re.sub(r"^--", "", option)
+ opt[optname] = parameter
- if 'export' in opt:
- opt['export'] = list(map(int, opt['export'].split(':')))
+ if "export" in opt:
+ opt["export"] = list(map(int, opt["export"].split(":")))
- except getopt.GetoptError as err:
- print(err)
- sys.exit(2)
+ except getopt.GetoptError as err:
+ print(err)
+ sys.exit(2)
- voltage = float(args[0])
- shunt = float(args[1])
- mimfile = args[2]
+ voltage = float(args[0])
+ shunt = float(args[1])
+ mimfile = args[2]
- mim = MIMOSA(voltage, shunt)
+ mim = MIMOSA(voltage, shunt)
- charges, triggers = mim.load_file(mimfile)
- charges = charges[:3000000]
+ charges, triggers = mim.load_file(mimfile)
+ charges = charges[:3000000]
- currents = running_mean(mim.charge_to_current_nocal(charges), 10) * 1e-6
- powers = currents * voltage
- xr = np.arange(len(currents)) * 1e-5
+ currents = running_mean(mim.charge_to_current_nocal(charges), 10) * 1e-6
+ powers = currents * voltage
+ xr = np.arange(len(currents)) * 1e-5
- if 'export' in opt:
- xr = xr[opt['export'][0] : opt['export'][1]]
- currents = currents[opt['export'][0] : opt['export'][1]]
- powers = powers[opt['export'][0] : opt['export'][1]]
+ if "export" in opt:
+ xr = xr[opt["export"][0] : opt["export"][1]]
+ currents = currents[opt["export"][0] : opt["export"][1]]
+ powers = powers[opt["export"][0] : opt["export"][1]]
- for pair in zip(xr, powers):
- print('{} {}'.format(*pair))
+ for pair in zip(xr, powers):
+ print("{} {}".format(*pair))
- plt.plot( xr, powers, "r-")
- plt.xlabel('Time [s]')
- plt.ylabel('Power [W]')
- plt.grid(True)
- plt.show()
+ plt.plot(xr, powers, "r-")
+ plt.xlabel("Time [s]")
+ plt.ylabel("Power [W]")
+ plt.grid(True)
+ plt.show()
diff --git a/bin/test_corrcoef.py b/bin/test_corrcoef.py
index 0b1ca54..ccb3366 100755
--- a/bin/test_corrcoef.py
+++ b/bin/test_corrcoef.py
@@ -4,8 +4,9 @@ import getopt
import re
import sys
from dfatool import plotter
-from dfatool.dfatool import PTAModel, RawData, pta_trace_to_aggregate
-from dfatool.dfatool import gplearn_to_function
+from dfatool.loader import RawData, pta_trace_to_aggregate
+from dfatool.functions import gplearn_to_function
+from dfatool.model import PTAModel
opt = dict()
@@ -110,7 +111,6 @@ def print_text_model_data(model, pm, pq, lm, lq, am, ai, aq):
if __name__ == "__main__":
ignored_trace_indexes = None
- discard_outliers = None
safe_functions_enabled = False
function_override = {}
show_models = []
@@ -119,7 +119,7 @@ if __name__ == "__main__":
try:
optspec = (
"plot-unparam= plot-param= show-models= show-quality= "
- "ignored-trace-indexes= discard-outliers= function-override= "
+ "ignored-trace-indexes= function-override= "
"with-safe-functions"
)
raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(" "))
@@ -135,9 +135,6 @@ if __name__ == "__main__":
if 0 in ignored_trace_indexes:
print("[E] arguments to --ignored-trace-indexes start from 1")
- if "discard-outliers" in opt:
- discard_outliers = float(opt["discard-outliers"])
-
if "function-override" in opt:
for function_desc in opt["function-override"].split(";"):
state_or_tran, attribute, *function_str = function_desc.split(" ")
@@ -169,7 +166,6 @@ if __name__ == "__main__":
arg_count,
traces=preprocessed_data,
ignore_trace_indexes=ignored_trace_indexes,
- discard_outliers=discard_outliers,
function_override=function_override,
use_corrcoef=False,
)
@@ -179,7 +175,6 @@ if __name__ == "__main__":
arg_count,
traces=preprocessed_data,
ignore_trace_indexes=ignored_trace_indexes,
- discard_outliers=discard_outliers,
function_override=function_override,
use_corrcoef=True,
)
diff --git a/doc/generate-dfa-benchmark.md b/doc/generate-dfa-benchmark.md
new file mode 100644
index 0000000..48a991d
--- /dev/null
+++ b/doc/generate-dfa-benchmark.md
@@ -0,0 +1,85 @@
+This guide describes benchmark generation with AEMR/dfatool. It assumes the
+following directory structure.
+
+* `data`: benchmark measurement data
+* `data/cache`: cache for partially evaluated benchmarks
+* `dfatool`: dfatool repository
+* `multipass`: multipass repository
+
+*multipass* contains device drivers with their associated PTA definitions
+(transitions, states, and hardware parameters) as well as helper functions
+for benchmarks. It deliberately omits tasking and system ticks so that timer
+interrupts do not distort benchmarks. The generation and evaluation scripts
+live in *dfatool*.
+
+## Benchmark generation
+
+Benchmarks are always generated and measured with `generate-dfa-benchmark.py`,
+which must be invoked from the multipass directory. A benchmark proceeds as
+follows.
+
+* Generate runs through the PTA of the device under test. The runs can be
+  influenced with `--depth`, `--shrink`, and `--trace-filter`, among others.
+* Generate a C++ application (`src/app/aemr/main.cc`) which sends the hardware
+  through the runs and logs the executed transitions. It relies on
+  `include/object/ptalog.h`.
+  * The basic application structure (headers, driver function calls, delays
+    between function calls) is provided by generate-dfa-benchmark
+    (`benchmark_from_runs`).
+  * A test harness from `lib/harness.py` (OnboardTimerHarness for
+    energytrace/timing benchmarks, TransitionHarness for MIMOSA) extends the
+    generated application with synchronization calls and/or additional
+    measurements, e.g. with an on-board timer. For each run through the PTA,
+    `start_run` and `start_trace` are called ("a new run begins"), then
+    `append_transition`, `append_state`, and `pass_transition` for each
+    function call and each state, and finally `stop_run`. The harness
+    internally stores the runs belonging to the generated code and the PTA
+    parameters valid during a state / transition as
+    `{"isa": "state", "name": ..., "parameter": dict(...)}` and
+    `{"isa": "transition", "name": ..., "parameter": dict(...), "args": list(...)}`.
+* Compile the application in `run_benchmark` via `runner.build` (see
+  `runner.py`). If the benchmark is too large, it is split into several
+  applications which are executed and measured one after another.
+  Additionally, each measurement is performed several times to minimize the
+  influence of measurement errors.
+* Execute the benchmark. The code is flashed via `runner.flash`; additional
+  software (e.g. MIMOSA, EnergyTrace) is controlled through a monitor from
+  `lib/runner.py`. The measurement starts as soon as the monitor is created
+  via `get_monitor`. During the measurement, output from the serial console
+  is processed by the `parser_cb` of the active test harness; this is also
+  how the end of the benchmark is detected. `monitor.close()` stops the
+  measurement.
+* After all (partial) benchmarks and repetitions have finished, the benchmark
+  plans (`harness.traces`), UART output (`monitor.get_lines()`), and any
+  additional logfiles (`monitor.get_files()`) are archived in a tar file.
+
+## Example
+
+With msp430-etv and energytrace in $PATH, the following invocation generates
+a successful benchmark run on an MSP430FR5994 Launchpad without peripherals:
+
+```
+cd multipass
+../dfatool/bin/generate-dfa-benchmark.py --data=../data \
+--timer-pin=GPIO::p1_0 --sleep=200 --repeat=3 --arch=msp430fr5994lp \
+--energytrace=sync=bar model/driver/sharp96.dfa src/app/aemr/main.cc
+```
+
+After a few minutes, an archive ending in sharp96.tar is placed under `data`.
+It contains the benchmark setup (driver PTA, energytrace config, traces
+through the automaton) and measurement data (energytrace logfiles), and can
+be analyzed as follows:
+
+```
+cd dfatool
+bin/analyze-archive.py --info --show-model=all --show-quality=table ../data/...-sharp96.tar
+```
+
+Provided that the LED power consumption of the Launchpad in use is within the
+usual range, the evaluation works. POWEROFF and POWERON should yield very
+similar values here (since no peripherals were attached), and the writeLine
+transition should take considerably more time than the others.
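The harness records described above are plain dicts. A hand-written trace fragment might look like this (state and transition names from the sharp96 example; the `line` parameter and its values are hypothetical):

```python
# One run through the PTA, alternating states and transitions,
# in the format the test harness stores internally.
trace = [
    {"isa": "state", "name": "POWERON", "parameter": {"line": None}},
    {
        "isa": "transition",
        "name": "writeLine",
        "parameter": {"line": "hello"},
        "args": ["hello"],
    },
    {"isa": "state", "name": "POWERON", "parameter": {"line": "hello"}},
]
for entry in trace:
    print(entry["isa"], entry["name"])
```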
diff --git a/lib/automata.py b/lib/automata.py
index b3318e0..ebe1871 100755
--- a/lib/automata.py
+++ b/lib/automata.py
@@ -3,11 +3,14 @@
from .functions import AnalyticFunction, NormalizationFunction
from .utils import is_numeric
import itertools
+import logging
import numpy as np
import json
import queue
import yaml
+logger = logging.getLogger(__name__)
+
def _dict_to_list(input_dict: dict) -> list:
return [input_dict[x] for x in sorted(input_dict.keys())]
@@ -100,7 +103,7 @@ class PTAAttribute:
def __repr__(self):
if self.function is not None:
return "PTAATtribute<{:.0f}, {}>".format(
- self.value, self.function._model_str
+ self.value, self.function.model_function
)
return "PTAATtribute<{:.0f}, None>".format(self.value)
@@ -134,8 +137,8 @@ class PTAAttribute:
}
if self.function:
ret["function"] = {
- "raw": self.function._model_str,
- "regression_args": list(self.function._regression_args),
+ "raw": self.function.model_function,
+ "regression_args": list(self.function.model_args),
}
ret["function_error"] = self.function_error
return ret
@@ -1305,8 +1308,8 @@ class PTA:
"power"
]
except KeyError:
- print(
- "[W] skipping model update of state {} due to missing data".format(
+ logger.warning(
+ "skipping model update of state {} due to missing data".format(
state.name
)
)
@@ -1353,8 +1356,8 @@ class PTA:
"timeout"
]
except KeyError:
- print(
- "[W] skipping model update of transition {} due to missing data".format(
+ logger.warning(
+ "skipping model update of transition {} due to missing data".format(
transition.name
)
)
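With the attribute rename, a serialized PTAAttribute carries the fitted function string under `raw` and its coefficients under `regression_args`. A hypothetical fragment (all values invented; the `parameter(...)` term in the function string is an assumption about dfatool's function syntax):

```python
import json

# Invented to_json() output for a fitted attribute; only the "function"
# and "function_error" keys follow the code above.
attribute = {
    "function": {
        "raw": "regression_arg(0) + regression_arg(1) * parameter(datarate)",
        "regression_args": [22.4, 0.003],
    },
    "function_error": {"mae": 0.8},
}
print(json.dumps(attribute, indent=2))
```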
diff --git a/lib/data_parameters.py b/lib/data_parameters.py
index 1150b71..84eacfd 100644
--- a/lib/data_parameters.py
+++ b/lib/data_parameters.py
@@ -7,9 +7,12 @@ length of lists, and more.
from .protocol_benchmarks import codegen_for_lib
from . import cycles_to_energy, size_to_radio_energy, utils
+import logging
import numpy as np
import ubjson
+logger = logging.getLogger(__name__)
+
def _string_value_length(json):
if type(json) == str:
@@ -224,7 +227,7 @@ class Protolog:
except KeyError:
pass
except TypeError as e:
- print(
+ logger.error(
"TypeError in {} {} {} {}: {} -> {}".format(
arch_lib,
benchmark,
@@ -395,7 +398,7 @@ class Protolog:
except KeyError:
pass
except ValueError:
- print(
+ logger.warning(
"cycles_enc is NaN for {} -> {} -> {}".format(
arch, lib, key
)
@@ -410,7 +413,7 @@ class Protolog:
except KeyError:
pass
except ValueError:
- print(
+ logger.warning(
"cycles_ser is NaN for {} -> {} -> {}".format(
arch, lib, key
)
@@ -425,7 +428,7 @@ class Protolog:
except KeyError:
pass
except ValueError:
- print(
+ logger.warning(
"cycles_encser is NaN for {} -> {} -> {}".format(
arch, lib, key
)
@@ -440,7 +443,7 @@ class Protolog:
except KeyError:
pass
except ValueError:
- print(
+ logger.warning(
"cycles_des is NaN for {} -> {} -> {}".format(
arch, lib, key
)
@@ -455,7 +458,7 @@ class Protolog:
except KeyError:
pass
except ValueError:
- print(
+ logger.warning(
"cycles_dec is NaN for {} -> {} -> {}".format(
arch, lib, key
)
@@ -470,7 +473,7 @@ class Protolog:
except KeyError:
pass
except ValueError:
- print(
+ logger.warning(
"cycles_desdec is NaN for {} -> {} -> {}".format(
arch, lib, key
)
diff --git a/lib/functions.py b/lib/functions.py
index 6d8daa4..94b1aaf 100644
--- a/lib/functions.py
+++ b/lib/functions.py
@@ -5,12 +5,14 @@ This module provides classes and helper functions useful for least-squares
regression and general handling of model functions.
"""
from itertools import chain, combinations
+import logging
import numpy as np
import re
from scipy import optimize
-from .utils import is_numeric, vprint
+from .utils import is_numeric
arg_support_enabled = True
+logger = logging.getLogger(__name__)
def powerset(iterable):
@@ -23,6 +25,47 @@ def powerset(iterable):
return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
+def gplearn_to_function(function_str: str):
+ """
+ Convert gplearn-style function string to Python function.
+
+ Takes a function string like "mul(add(X0, X1), X2)" and returns
+ a Python function implementing the specified behaviour,
+ e.g. "lambda x, y, z: (x + y) * z".
+
+ Supported functions:
+ add -- x + y
+ sub -- x - y
+ mul -- x * y
+ div -- x / y if |y| > 0.001, otherwise 1
+ sqrt -- sqrt(|x|)
+ log -- log(|x|) if |x| > 0.001, otherwise 0
+ inv -- 1 / x if |x| > 0.001, otherwise 0
+ """
+ eval_globals = {
+ "add": lambda x, y: x + y,
+ "sub": lambda x, y: x - y,
+ "mul": lambda x, y: x * y,
+ "div": lambda x, y: np.divide(x, y) if np.abs(y) > 0.001 else 1.0,
+ "sqrt": lambda x: np.sqrt(np.abs(x)),
+ "log": lambda x: np.log(np.abs(x)) if np.abs(x) > 0.001 else 0.0,
+ "inv": lambda x: 1.0 / x if np.abs(x) > 0.001 else 0.0,
+ }
+
+ last_arg_index = 0
+ for i in range(0, 100):
+ if function_str.find("X{:d}".format(i)) >= 0:
+ last_arg_index = i
+
+ arg_list = []
+ for i in range(0, last_arg_index + 1):
+ arg_list.append("X{:d}".format(i))
+
+ eval_str = "lambda {}, *whatever: {}".format(",".join(arg_list), function_str)
+ logger.debug(eval_str)
+ return eval(eval_str, eval_globals)
+
+
class ParamFunction:
"""
A one-dimensional model function, ready for least squares optimization and similar.
@@ -118,9 +161,7 @@ class AnalyticFunction:
packet length.
"""
- def __init__(
- self, function_str, parameters, num_args, verbose=True, regression_args=None
- ):
+ def __init__(self, function_str, parameters, num_args, regression_args=None):
"""
Create a new AnalyticFunction object from a function string.
@@ -135,18 +176,16 @@ class AnalyticFunction:
:param num_args: number of local function arguments, if any. Set to 0 if
the model attribute does not belong to a function or if function
arguments are not included in the model.
- :param verbose: complain about odd events
:param regression_args: Initial regression variable values,
both for function usage and least squares optimization.
If unset, defaults to [1, 1, 1, ...]
"""
self._parameter_names = parameters
self._num_args = num_args
- self._model_str = function_str
+ self.model_function = function_str
rawfunction = function_str
self._dependson = [False] * (len(parameters) + num_args)
self.fit_success = False
- self.verbose = verbose
if type(function_str) == str:
num_vars_re = re.compile(r"regression_arg\(([0-9]+)\)")
@@ -176,12 +215,12 @@ class AnalyticFunction:
self._function = function_str
if regression_args:
- self._regression_args = regression_args.copy()
+ self.model_args = regression_args.copy()
self._fit_success = True
elif type(function_str) == str:
- self._regression_args = list(np.ones((num_vars)))
+ self.model_args = list(np.ones((num_vars)))
else:
- self._regression_args = []
+ self.model_args = []
def get_fit_data(self, by_param, state_or_tran, model_attribute):
"""
@@ -231,9 +270,8 @@ class AnalyticFunction:
else:
X[i].extend([np.nan] * len(val[model_attribute]))
elif key[0] == state_or_tran and len(key[1]) != dimension:
- vprint(
- self.verbose,
- "[W] Invalid parameter key length while gathering fit data for {}/{}. is {}, want {}.".format(
+ logger.warning(
+ "Invalid parameter key length while gathering fit data for {}/{}. is {}, want {}.".format(
state_or_tran, model_attribute, len(key[1]), dimension
),
)
@@ -263,30 +301,27 @@ class AnalyticFunction:
error_function = lambda P, X, y: self._function(P, X) - y
try:
res = optimize.least_squares(
- error_function, self._regression_args, args=(X, Y), xtol=2e-15
+ error_function, self.model_args, args=(X, Y), xtol=2e-15
)
except ValueError as err:
- vprint(
- self.verbose,
- "[W] Fit failed for {}/{}: {} (function: {})".format(
- state_or_tran, model_attribute, err, self._model_str
+ logger.warning(
+ "Fit failed for {}/{}: {} (function: {})".format(
+ state_or_tran, model_attribute, err, self.model_function
),
)
return
if res.status > 0:
- self._regression_args = res.x
+ self.model_args = res.x
self.fit_success = True
else:
- vprint(
- self.verbose,
- "[W] Fit failed for {}/{}: {} (function: {})".format(
- state_or_tran, model_attribute, res.message, self._model_str
+ logger.warning(
+ "Fit failed for {}/{}: {} (function: {})".format(
+ state_or_tran, model_attribute, res.message, self.model_function
),
)
else:
- vprint(
- self.verbose,
- "[W] Insufficient amount of valid parameter keys, cannot fit {}/{}".format(
+ logger.warning(
+ "Insufficient amount of valid parameter keys, cannot fit {}/{}".format(
state_or_tran, model_attribute
),
)
@@ -314,9 +349,9 @@ class AnalyticFunction:
corresponds to lexically first parameter, etc.
:param arg_list: argument values (list of float), if arguments are used.
"""
- if len(self._regression_args) == 0:
+ if len(self.model_args) == 0:
return self._function(param_list, arg_list)
- return self._function(self._regression_args, param_list)
+ return self._function(self.model_args, param_list)
class analytic:
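gplearn_to_function, moved here from dfatool.py, can be exercised directly. Per its docstring, "mul(add(X0, X1), X2)" becomes the equivalent of `lambda x, y, z: (x + y) * z`:

```python
from dfatool.functions import gplearn_to_function

f = gplearn_to_function("mul(add(X0, X1), X2)")
print(f(2, 3, 4))  # (2 + 3) * 4 == 20
```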
diff --git a/lib/harness.py b/lib/harness.py
index 3b279c0..ae9c28c 100644
--- a/lib/harness.py
+++ b/lib/harness.py
@@ -21,7 +21,7 @@ class TransitionHarness:
* `name`: state or transition name
* `parameter`: currently valid parameter values. If normalization is used, they are already normalized. Each parameter value is either a primitive
int/float/str value (-> constant for each iteration) or a list of
- primitive values (-> set by the return value of the current run, not necessarily constan)
+ primitive values (-> set by the return value of the current run, not necessarily constant)
* `args`: function arguments, if isa == 'transition'
"""
@@ -229,6 +229,7 @@ class TransitionHarness:
log_data_target["parameter"][parameter_name] = list()
log_data_target["parameter"][parameter_name].append(parameter_value)
+ # Here Be Dragons
def parser_cb(self, line):
# print('[HARNESS] got line {}'.format(line))
if re.match(r"\[PTA\] benchmark stop", line):
@@ -440,6 +441,7 @@ class OnboardTimerHarness(TransitionHarness):
log_data_target["parameter"][parameter_name] = list()
log_data_target["parameter"][parameter_name].append(parameter_value)
+ # Here Be Dragons
def parser_cb(self, line):
# print('[HARNESS] got line {}'.format(line))
res = re.match(r"\[PTA\] nop=(\S+)/(\S+)", line)
diff --git a/lib/lex.py b/lib/lex.py
index 7bb3760..f698e8c 100644
--- a/lib/lex.py
+++ b/lib/lex.py
@@ -1,4 +1,7 @@
from .sly import Lexer, Parser
+import logging
+
+logger = logging.getLogger(__name__)
class TimedWordLexer(Lexer):
@@ -38,7 +41,7 @@ class TimedSequenceLexer(Lexer):
FUNCTIONSEP = r";"
def error(self, t):
- print("Illegal character '%s'" % t.value[0])
+ logger.error("Illegal character '%s'" % t.value[0])
if t.value[0] == "{" and t.value.find("}"):
self.index += 1 + t.value.find("}")
else:
@@ -153,11 +156,11 @@ class TimedSequenceParser(Parser):
def error(self, p):
if p:
- print("Syntax error at token", p.type)
+            logger.error("Syntax error at token %s", p.type)
# Just discard the token and tell the parser it's okay.
self.errok()
else:
- print("Syntax error at EOF")
+ logger.error("Syntax error at EOF")
class TimedWord:
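The commit replaces ad-hoc `[W]`/`[E]` prints with per-module loggers created via `logging.getLogger(__name__)`. Together with the `--log-level` handling in the CLI scripts, a single `basicConfig` call controls the verbosity of all dfatool modules; a small demonstration (module name spelled out for clarity):

```python
import logging

logger = logging.getLogger("dfatool.lex")  # what __name__ yields in lib/lex.py

logging.basicConfig(level=logging.ERROR)   # as configured by --log-level=error
logger.warning("suppressed: WARNING is below the configured ERROR level")
logger.error("shown on stderr")
```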
diff --git a/lib/dfatool.py b/lib/loader.py
index 63639d3..4e07c92 100644
--- a/lib/dfatool.py
+++ b/lib/loader.py
@@ -3,26 +3,17 @@
import csv
import io
import json
+import logging
import numpy as np
import os
import re
-from scipy import optimize
-from sklearn.metrics import r2_score
import struct
import tarfile
import hashlib
from multiprocessing import Pool
-from .functions import analytic
-from .functions import AnalyticFunction
-from .parameters import ParamStats
-from .utils import (
- vprint,
- is_numeric,
- soft_cast_int,
- param_slice_eq,
- remove_index_from_tuple,
-)
-from .utils import by_name_to_by_param, match_parameter_values, running_mean
+from .utils import running_mean, soft_cast_int
+
+logger = logging.getLogger(__name__)
try:
from .pubcode import Code128
@@ -36,135 +27,6 @@ except ImportError:
arg_support_enabled = True
-def gplearn_to_function(function_str: str):
- """
- Convert gplearn-style function string to Python function.
-
- Takes a function string like "mul(add(X0, X1), X2)" and returns
- a Python function implementing the specified behaviour,
- e.g. "lambda x, y, z: (x + y) * z".
-
- Supported functions:
- add -- x + y
- sub -- x - y
- mul -- x * y
- div -- x / y if |y| > 0.001, otherwise 1
- sqrt -- sqrt(|x|)
- log -- log(|x|) if |x| > 0.001, otherwise 0
- inv -- 1 / x if |x| > 0.001, otherwise 0
- """
- eval_globals = {
- "add": lambda x, y: x + y,
- "sub": lambda x, y: x - y,
- "mul": lambda x, y: x * y,
- "div": lambda x, y: np.divide(x, y) if np.abs(y) > 0.001 else 1.0,
- "sqrt": lambda x: np.sqrt(np.abs(x)),
- "log": lambda x: np.log(np.abs(x)) if np.abs(x) > 0.001 else 0.0,
- "inv": lambda x: 1.0 / x if np.abs(x) > 0.001 else 0.0,
- }
-
- last_arg_index = 0
- for i in range(0, 100):
- if function_str.find("X{:d}".format(i)) >= 0:
- last_arg_index = i
-
- arg_list = []
- for i in range(0, last_arg_index + 1):
- arg_list.append("X{:d}".format(i))
-
- eval_str = "lambda {}, *whatever: {}".format(",".join(arg_list), function_str)
- print(eval_str)
- return eval(eval_str, eval_globals)
-
-
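A usage example for the gplearn_to_function helper removed above, following its docstring:

```python
f = gplearn_to_function("mul(add(X0, X1), X2)")
print(f(1, 2, 3))  # (1 + 2) * 3 == 9
```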
-def append_if_set(aggregate: dict, data: dict, key: str):
- """Append data[key] to aggregate if key in data."""
- if key in data:
- aggregate.append(data[key])
-
-
-def mean_or_none(arr):
- """
- Compute mean of NumPy array `arr`, return -1 if empty.
-
- :param arr: 1-Dimensional NumPy array
- """
- if len(arr):
- return np.mean(arr)
- return -1
-
-
-def aggregate_measures(aggregate: float, actual: list) -> dict:
- """
- Calculate error measures for model value on data list.
-
- arguments:
- aggregate -- model value (float or int)
- actual -- real-world / reference values (list of float or int)
-
- return value:
- See regression_measures
- """
- aggregate_array = np.array([aggregate] * len(actual))
- return regression_measures(aggregate_array, np.array(actual))
-
-
-def regression_measures(predicted: np.ndarray, actual: np.ndarray):
- """
- Calculate error measures by comparing model values to reference values.
-
- arguments:
- predicted -- model values (np.ndarray)
- actual -- real-world / reference values (np.ndarray)
-
- Returns a dict containing the following measures:
- mae -- Mean Absolute Error
- mape -- Mean Absolute Percentage Error,
- if all items in actual are non-zero (NaN otherwise)
- smape -- Symmetric Mean Absolute Percentage Error,
- if no 0,0-pairs are present in actual and predicted (NaN otherwise)
- msd -- Mean Square Deviation
- rmsd -- Root Mean Square Deviation
- ssr -- Sum of Squared Residuals
- rsq -- R^2 measure, see sklearn.metrics.r2_score
- count -- Number of values
- """
- if type(predicted) != np.ndarray:
- raise ValueError("first arg must be ndarray, is {}".format(type(predicted)))
- if type(actual) != np.ndarray:
- raise ValueError("second arg must be ndarray, is {}".format(type(actual)))
- deviations = predicted - actual
- # mean = np.mean(actual)
- if len(deviations) == 0:
- return {}
- measures = {
- "mae": np.mean(np.abs(deviations), dtype=np.float64),
- "msd": np.mean(deviations ** 2, dtype=np.float64),
- "rmsd": np.sqrt(np.mean(deviations ** 2), dtype=np.float64),
- "ssr": np.sum(deviations ** 2, dtype=np.float64),
- "rsq": r2_score(actual, predicted),
- "count": len(actual),
- }
-
- # rsq_quotient = np.sum((actual - mean)**2, dtype=np.float64) * np.sum((predicted - mean)**2, dtype=np.float64)
-
- if np.all(actual != 0):
- measures["mape"] = np.mean(np.abs(deviations / actual)) * 100 # bad measure
- else:
- measures["mape"] = np.nan
- if np.all(np.abs(predicted) + np.abs(actual) != 0):
- measures["smape"] = (
- np.mean(np.abs(deviations) / ((np.abs(predicted) + np.abs(actual)) / 2))
- * 100
- )
- else:
- measures["smape"] = np.nan
- # if np.all(rsq_quotient != 0):
- # measures['rsq'] = (np.sum((actual - mean) * (predicted - mean), dtype=np.float64)**2) / rsq_quotient
-
- return measures
-
-
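A worked example for `regression_measures`, following the definitions above:

```python
import numpy as np

predicted = np.array([1.0, 2.0, 3.0])
actual = np.array([1.0, 2.5, 2.5])
m = regression_measures(predicted, actual)
# deviations == [0, -0.5, 0.5], so:
# m["mae"]  == 1/3        (mean absolute error)
# m["msd"]  == 1/6        (mean square deviation)
# m["rmsd"] == sqrt(1/6)  (root mean square deviation)
# m["ssr"]  == 0.5        (sum of squared residuals)
```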
class KeysightCSV:
"""Simple loader for Keysight CSV data, as exported by the windows software."""
@@ -194,162 +56,6 @@ class KeysightCSV:
return timestamps, currents
-def _xv_partitions_kfold(length, num_slices):
- pairs = []
- indexes = np.arange(length)
- for i in range(0, num_slices):
- training = np.delete(indexes, slice(i, None, num_slices))
- validation = indexes[i::num_slices]
- pairs.append((training, validation))
- return pairs
-
-
-def _xv_partition_montecarlo(length):
- shuffled = np.random.permutation(np.arange(length))
- border = int(length * float(2) / 3)
- training = shuffled[:border]
- validation = shuffled[border:]
- return (training, validation)
-
-
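The partition helpers removed above are small enough to illustrate directly; with six measurements and three slices, each index lands in exactly one validation set:

```python
for training, validation in _xv_partitions_kfold(6, 3):
    print(training, validation)
# [1 2 4 5] [0 3]
# [0 2 3 5] [1 4]
# [0 1 3 4] [2 5]
```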
-class CrossValidator:
- """
- Cross-Validation helper for model generation.
-
- Given a set of measurements and a model class, it will partition the
- data into training and validation sets, train the model on the training
- set, and assess its quality on the validation set. This is repeated
- several times depending on cross-validation algorithm and configuration.
- Reports the mean model error over all cross-validation runs.
- """
-
- def __init__(self, model_class, by_name, parameters, arg_count):
- """
- Create a new CrossValidator object.
-
- Does not perform cross-validation yet.
-
- arguments:
- model_class -- model class/type used for model synthesis,
- e.g. PTAModel or AnalyticModel. model_class must have a
- constructor accepting (by_name, parameters, arg_count, verbose = False)
- and provide an assess method.
- by_name -- measurements aggregated by state/transition/function/... name.
- Layout: by_name[name][attribute] = list of data. Additionally,
- by_name[name]['attributes'] must be set to the list of attributes,
- e.g. ['power'] or ['duration', 'energy'].
- """
- self.model_class = model_class
- self.by_name = by_name
- self.names = sorted(by_name.keys())
- self.parameters = sorted(parameters)
- self.arg_count = arg_count
-
- def montecarlo(self, model_getter, count=200):
- """
- Perform Monte Carlo cross-validation and return average model quality.
-
- The by_name data is randomly divided into 2/3 training and 1/3
- validation. After creating a model for the training set, the
- model function returned by model_getter is evaluated on the validation set.
- This is repeated count times (defaulting to 200); the average of all
- measures is returned to the user.
-
- arguments:
- model_getter -- function with signature (model_object) -> model,
- e.g. lambda m: m.get_fitted()[0] to evaluate the parameter-aware
- model with automatic parameter detection.
- count -- number of validation runs to perform, defaults to 200
-
- return value:
- dict of model quality measures.
- {
- 'by_name' : {
- for each name: {
- for each attribute: {
- 'mae' : mean of all mean absolute errors
- 'mae_list' : list of the individual MAE values encountered during cross-validation
- 'smape' : mean of all symmetric mean absolute percentage errors
- 'smape_list' : list of the individual SMAPE values encountered during cross-validation
- }
- }
- }
- }
- """
- ret = {"by_name": dict()}
-
- for name in self.names:
- ret["by_name"][name] = dict()
- for attribute in self.by_name[name]["attributes"]:
- ret["by_name"][name][attribute] = {
- "mae_list": list(),
- "smape_list": list(),
- }
-
- for _ in range(count):
- res = self._single_montecarlo(model_getter)
- for name in self.names:
- for attribute in self.by_name[name]["attributes"]:
- ret["by_name"][name][attribute]["mae_list"].append(
- res["by_name"][name][attribute]["mae"]
- )
- ret["by_name"][name][attribute]["smape_list"].append(
- res["by_name"][name][attribute]["smape"]
- )
-
- for name in self.names:
- for attribute in self.by_name[name]["attributes"]:
- ret["by_name"][name][attribute]["mae"] = np.mean(
- ret["by_name"][name][attribute]["mae_list"]
- )
- ret["by_name"][name][attribute]["smape"] = np.mean(
- ret["by_name"][name][attribute]["smape_list"]
- )
-
- return ret
-
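A usage sketch for the montecarlo method, with by_name/parameters/arg_count as documented in the constructor (names are illustrative):

```python
xv = CrossValidator(PTAModel, by_name, parameters, arg_count)
quality = xv.montecarlo(lambda m: m.get_fitted()[0], count=200)
print(quality["by_name"]["TX"]["power"]["smape"])
```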
- def _single_montecarlo(self, model_getter):
- training = dict()
- validation = dict()
- for name in self.names:
- training[name] = {"attributes": self.by_name[name]["attributes"]}
- validation[name] = {"attributes": self.by_name[name]["attributes"]}
-
- if "isa" in self.by_name[name]:
- training[name]["isa"] = self.by_name[name]["isa"]
- validation[name]["isa"] = self.by_name[name]["isa"]
-
- data_count = len(self.by_name[name]["param"])
- training_subset, validation_subset = _xv_partition_montecarlo(data_count)
-
- for attribute in self.by_name[name]["attributes"]:
- self.by_name[name][attribute] = np.array(self.by_name[name][attribute])
- training[name][attribute] = self.by_name[name][attribute][
- training_subset
- ]
- validation[name][attribute] = self.by_name[name][attribute][
- validation_subset
- ]
-
- # We can't use slice syntax for 'param', which may contain strings and other odd values
- training[name]["param"] = list()
- validation[name]["param"] = list()
- for idx in training_subset:
- training[name]["param"].append(self.by_name[name]["param"][idx])
- for idx in validation_subset:
- validation[name]["param"].append(self.by_name[name]["param"][idx])
-
- training_data = self.model_class(
- training, self.parameters, self.arg_count, verbose=False
- )
- training_model = model_getter(training_data)
- validation_data = self.model_class(
- validation, self.parameters, self.arg_count, verbose=False
- )
-
- return validation_data.assess(training_model)
-
-
def _preprocess_mimosa(measurement):
setup = measurement["setup"]
mim = MIMOSA(
@@ -457,9 +163,7 @@ class TimingData:
transitions = list(
filter(lambda x: x["isa"] == "transition", trace["trace"])
)
- self.traces.append(
- {"id": trace["id"], "trace": transitions,}
- )
+ self.traces.append({"id": trace["id"], "trace": transitions})
for i, trace in enumerate(self.traces):
trace["orig_id"] = trace["id"]
trace["id"] = i
@@ -490,14 +194,13 @@ class TimingData:
self.traces_by_fileno.extend(log_data["traces"])
self._concatenate_analyzed_traces()
- def get_preprocessed_data(self, verbose=True):
+ def get_preprocessed_data(self):
"""
Return a list of DFA traces annotated with timing and parameter data.
Suitable for the PTAModel constructor.
See PTAModel(...) docstring for format details.
"""
- self.verbose = verbose
if self.preprocessed:
return self.traces
if self.version == 0:
@@ -539,7 +242,7 @@ class RawData:
file system, making subsequent loads near-instant.
"""
- def __init__(self, filenames, with_traces=False):
+ def __init__(self, filenames, with_traces=False, skip_cache=False):
"""
Create a new RawData object.
@@ -602,6 +305,7 @@ class RawData:
self._parameter_names = None
self.ignore_clipping = False
self.pta = None
+ self.ptalog = None
with tarfile.open(filenames[0]) as tf:
for member in tf.getmembers():
@@ -612,9 +316,12 @@ class RawData:
elif ".etlog" in member.name:
self.version = 2
break
+ if self.version >= 1:
+ self.ptalog = json.load(tf.extractfile(tf.getmember("ptalog.json")))
+ self.pta = self.ptalog["pta"]
self.set_cache_file()
- if not with_traces:
+ if not with_traces and not skip_cache:
self.load_cache()
def set_cache_file(self):
@@ -631,6 +338,8 @@ class RawData:
self.preprocessing_stats = cache_data["preprocessing_stats"]
if "pta" in cache_data:
self.pta = cache_data["pta"]
+ if "ptalog" in cache_data:
+ self.ptalog = cache_data["ptalog"]
self.setup_by_fileno = cache_data["setup_by_fileno"]
self.preprocessed = True
@@ -647,6 +356,7 @@ class RawData:
"traces": self.traces,
"preprocessing_stats": self.preprocessing_stats,
"pta": self.pta,
+ "ptalog": self.ptalog,
"setup_by_fileno": self.setup_by_fileno,
}
json.dump(cache_data, f)
@@ -1050,7 +760,7 @@ class RawData:
trace["id"] = i
return trace_output
- def get_preprocessed_data(self, verbose=True):
+ def get_preprocessed_data(self):
"""
Return a list of DFA traces annotated with energy, timing, and parameter data.
The list is cached on disk, unless the constructor was called with `with_traces` set.
@@ -1103,7 +813,6 @@ class RawData:
* `args`: List of arguments the corresponding function call was called with. args entries are strings which are not necessarily numeric
* `code`: List of function name (first entry) and arguments (remaining entries) of the corresponding function call
"""
- self.verbose = verbose
if self.preprocessed:
return self.traces
if self.version == 0:
@@ -1145,8 +854,7 @@ class RawData:
new_filenames = list()
with tarfile.open(filename) as tf:
- ptalog = json.load(tf.extractfile(tf.getmember("ptalog.json")))
- self.pta = ptalog["pta"]
+ ptalog = self.ptalog
# Benchmark code may be too large to be executed in a single
# run, so benchmarks (a benchmark is basically a list of DFA runs)
@@ -1200,8 +908,7 @@ class RawData:
new_filenames = list()
with tarfile.open(filename) as tf:
- ptalog = json.load(tf.extractfile(tf.getmember("ptalog.json")))
- self.pta = ptalog["pta"]
+ ptalog = self.ptalog
# Benchmark code may be too large to be executed in a single
# run, so benchmarks (a benchmark is basically a list of DFA runs)
@@ -1292,13 +999,12 @@ class RawData:
for measurement in measurements:
if "energy_trace" not in measurement:
- vprint(
- self.verbose,
- "[W] Skipping {ar:s}/{m:s}: {e:s}".format(
+ logger.warning(
+ "Skipping {ar:s}/{m:s}: {e:s}".format(
ar=self.filenames[measurement["fileno"]],
m=measurement["info"].name,
e="; ".join(measurement["datasource_errors"]),
- ),
+ )
)
continue
@@ -1315,32 +1021,29 @@ class RawData:
self._merge_online_and_offline(measurement)
num_valid += 1
else:
- vprint(
- self.verbose,
- "[W] Skipping {ar:s}/{m:s}: {e:s}".format(
+ logger.warning(
+ "Skipping {ar:s}/{m:s}: {e:s}".format(
ar=self.filenames[measurement["fileno"]],
m=measurement["info"].name,
e=measurement["error"],
- ),
+ )
)
elif version == 2:
if self._measurement_is_valid_2(measurement):
self._merge_online_and_etlog(measurement)
num_valid += 1
else:
- vprint(
- self.verbose,
- "[W] Skipping {ar:s}/{m:s}: {e:s}".format(
+ logger.warning(
+ "Skipping {ar:s}/{m:s}: {e:s}".format(
ar=self.filenames[measurement["fileno"]],
m=measurement["info"].name,
e=measurement["error"],
- ),
+ )
)
- vprint(
- self.verbose,
- "[I] {num_valid:d}/{num_total:d} measurements are valid".format(
+ logger.info(
+ "{num_valid:d}/{num_total:d} measurements are valid".format(
num_valid=num_valid, num_total=len(measurements)
- ),
+ )
)
if version == 0:
self.traces = self._concatenate_traces(self.traces_by_fileno)
@@ -1357,597 +1060,6 @@ class RawData:
}
-class ParallelParamFit:
- """
- Fit a set of functions on parameterized measurements.
-
- One parameter is variable, all others are fixed. Reports the best-fitting
- function type for each parameter.
- """
-
- def __init__(self, by_param):
- """Create a new ParallelParamFit object."""
- self.fit_queue = []
- self.by_param = by_param
-
- def enqueue(
- self,
- state_or_tran,
- attribute,
- param_index,
- param_name,
- safe_functions_enabled=False,
- param_filter=None,
- ):
- """
- Add state_or_tran/attribute/param_name to fit queue.
-
- This causes fit() to compute the best-fitting function for this model part.
- """
- self.fit_queue.append(
- {
- "key": [state_or_tran, attribute, param_name, param_filter],
- "args": [
- self.by_param,
- state_or_tran,
- attribute,
- param_index,
- safe_functions_enabled,
- param_filter,
- ],
- }
- )
-
- def fit(self):
- """
- Fit functions on previously enqueued data.
-
- Fitting is done in parallel with one process per core.
-
- Results can be accessed using the public ParallelParamFit.results object.
- """
- with Pool() as pool:
- self.results = pool.map(_try_fits_parallel, self.fit_queue)
-
-
-def _try_fits_parallel(arg):
- """
- Call _try_fits(*arg['args']) and return arg['key'] and the _try_fits result.
-
- Must be a global function as it is called from a multiprocessing Pool.
- """
- return {"key": arg["key"], "result": _try_fits(*arg["args"])}
-
-
-def _try_fits(
- by_param,
- state_or_tran,
- model_attribute,
- param_index,
- safe_functions_enabled=False,
- param_filter: dict = None,
-):
- """
- Determine goodness-of-fit for prediction of `by_param[(state_or_tran, *)][model_attribute]` dependence on `param_index` using various functions.
-
- This is done by varying `param_index` while keeping all other parameters constant and doing one least squares optimization for each function and for each combination of the remaining parameters.
- The value of the parameter corresponding to `param_index` (e.g. txpower or packet length) is the sole input to the model function.
- Only numeric parameter values (as determined by `utils.is_numeric`) are used for fitting, non-numeric values such as None or enum strings are ignored.
- Fitting is only performed if at least three distinct parameter values exist in `by_param[(state_or_tran, *)]`.
-
- :returns: a dictionary with the following elements:
- best -- name of the best-fitting function (see `analytic.functions`). `None` in case of insufficient data.
- best_rmsd -- mean Root Mean Square Deviation of best-fitting function over all combinations of the remaining parameters
- mean_rmsd -- mean Root Mean Square Deviation of a reference model using the mean of its respective input data as model value
- median_rmsd -- mean Root Mean Square Deviation of a reference model using the median of its respective input data as model value
- results -- mean goodness-of-fit measures for the individual functions. See `analytic.functions` for keys and `aggregate_measures` for values
-
- :param by_param: measurements partitioned by state/transition/... name and parameter values.
- Example: `{('foo', (0, 2)): {'bar': [2]}, ('foo', (0, 4)): {'bar': [4]}, ('foo', (0, 6)): {'bar': [6]}}`
-
- :param state_or_tran: state/transition/... name for which goodness-of-fit will be calculated (first element of by_param key tuple).
- Example: `'foo'`
-
- :param model_attribute: attribute for which goodness-of-fit will be calculated.
- Example: `'bar'`
-
- :param param_index: index of the parameter used as model input
- :param safe_functions_enabled: Include "safe" variants of functions with limited argument range.
- :param param_filter: Only use measurements whose parameters match param_filter for fitting.
- """
-
- functions = analytic.functions(safe_functions_enabled=safe_functions_enabled)
-
- for param_key in filter(lambda x: x[0] == state_or_tran, by_param.keys()):
- # We might remove elements from 'functions' while iterating over
- # its keys. A generator will not allow this, so we need to
- # convert to a list.
- function_names = list(functions.keys())
- for function_name in function_names:
- function_object = functions[function_name]
- if is_numeric(param_key[1][param_index]) and not function_object.is_valid(
- param_key[1][param_index]
- ):
- functions.pop(function_name, None)
-
- raw_results = dict()
- raw_results_by_param = dict()
- ref_results = {"mean": list(), "median": list()}
- results = dict()
- results_by_param = dict()
-
- seen_parameter_combinations = set()
-
- # for each parameter combination:
- for param_key in filter(
- lambda x: x[0] == state_or_tran
- and remove_index_from_tuple(x[1], param_index)
- not in seen_parameter_combinations
- and len(by_param[x]["param"])
- and match_parameter_values(by_param[x]["param"][0], param_filter),
- by_param.keys(),
- ):
- X = []
- Y = []
- num_valid = 0
- num_total = 0
-
- # Ensure that each parameter combination is only optimized once. Otherwise, with parameters (1, 2, 5), (1, 3, 5), (1, 4, 5) and param_index == 1,
- # the parameter combination (1, *, 5) would be optimized three times, both wasting time and biasing results towards more frequently occurring combinations of non-param_index parameters
- seen_parameter_combinations.add(
- remove_index_from_tuple(param_key[1], param_index)
- )
-
- # for each value of the parameter denoted by param_index (all other parameters remain the same):
- for k, v in filter(
- lambda kv: param_slice_eq(kv[0], param_key, param_index), by_param.items()
- ):
- num_total += 1
- if is_numeric(k[1][param_index]):
- num_valid += 1
- X.extend([float(k[1][param_index])] * len(v[model_attribute]))
- Y.extend(v[model_attribute])
-
- if num_valid > 2:
- X = np.array(X)
- Y = np.array(Y)
- other_parameters = remove_index_from_tuple(k[1], param_index)
- raw_results_by_param[other_parameters] = dict()
- results_by_param[other_parameters] = dict()
- for function_name, param_function in functions.items():
- if function_name not in raw_results:
- raw_results[function_name] = dict()
- error_function = param_function.error_function
- res = optimize.least_squares(
- error_function, [0, 1], args=(X, Y), xtol=2e-15
- )
- measures = regression_measures(param_function.eval(res.x, X), Y)
- raw_results_by_param[other_parameters][function_name] = measures
- for measure, error_rate in measures.items():
- if measure not in raw_results[function_name]:
- raw_results[function_name][measure] = list()
- raw_results[function_name][measure].append(error_rate)
- # print(function_name, res, measures)
- mean_measures = aggregate_measures(np.mean(Y), Y)
- ref_results["mean"].append(mean_measures["rmsd"])
- raw_results_by_param[other_parameters]["mean"] = mean_measures
- median_measures = aggregate_measures(np.median(Y), Y)
- ref_results["median"].append(median_measures["rmsd"])
- raw_results_by_param[other_parameters]["median"] = median_measures
-
- if not len(ref_results["mean"]):
- # Insufficient data for fitting
- # print('[W] Insufficient data for fitting {}/{}/{}'.format(state_or_tran, model_attribute, param_index))
- return {"best": None, "best_rmsd": np.inf, "results": results}
-
- for (
- other_parameter_combination,
- other_parameter_results,
- ) in raw_results_by_param.items():
- best_fit_val = np.inf
- best_fit_name = None
- results = dict()
- for function_name, result in other_parameter_results.items():
- if len(result) > 0:
- results[function_name] = result
- rmsd = result["rmsd"]
- if rmsd < best_fit_val:
- best_fit_val = rmsd
- best_fit_name = function_name
- results_by_param[other_parameter_combination] = {
- "best": best_fit_name,
- "best_rmsd": best_fit_val,
- "mean_rmsd": results["mean"]["rmsd"],
- "median_rmsd": results["median"]["rmsd"],
- "results": results,
- }
-
- best_fit_val = np.inf
- best_fit_name = None
- results = dict()
- for function_name, result in raw_results.items():
- if len(result) > 0:
- results[function_name] = {}
- for measure in result.keys():
- results[function_name][measure] = np.mean(result[measure])
- rmsd = results[function_name]["rmsd"]
- if rmsd < best_fit_val:
- best_fit_val = rmsd
- best_fit_name = function_name
-
- return {
- "best": best_fit_name,
- "best_rmsd": best_fit_val,
- "mean_rmsd": np.mean(ref_results["mean"]),
- "median_rmsd": np.mean(ref_results["median"]),
- "results": results,
- "results_by_other_param": results_by_param,
- }
-
-
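A minimal, self-contained sketch of `_try_fits` input and output, assuming `analytic.functions()` offers a linear candidate (the data is illustrative):

```python
by_param = {
    ("TX", (10,)): {"param": [[10]], "power": [10.0, 10.2]},
    ("TX", (20,)): {"param": [[20]], "power": [20.1, 19.9]},
    ("TX", (30,)): {"param": [[30]], "power": [29.8, 30.3]},
}
res = _try_fits(by_param, "TX", "power", 0)
# power grows linearly with the parameter, so res["best"] should name a
# linear function and res["best_rmsd"] should be well below res["mean_rmsd"]
```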
-def _num_args_from_by_name(by_name):
- num_args = dict()
- for key, value in by_name.items():
- if "args" in value:
- num_args[key] = len(value["args"][0])
- return num_args
-
-
-def get_fit_result(results, name, attribute, verbose=False, param_filter: dict = None):
- """
- Parse and sanitize fit results for state/transition/... 'name' and model attribute 'attribute'.
-
- Filters out results where the best function is worse than (or not much better than) static mean/median estimates.
-
- :param results: fit results as returned by `paramfit.results`
- :param name: state/transition/... name, e.g. 'TX'
- :param attribute: model attribute, e.g. 'duration'
- :param verbose: print debug message to stdout when deliberately not using a determined fit function
- :param param_filter:
- :returns: dict with fit result (see `_try_fits`) for each successfully fitted parameter. E.g. {'param 1': {'best' : 'function name', ...} }
- """
- fit_result = dict()
- for result in results:
- if (
- result["key"][0] == name
- and result["key"][1] == attribute
- and result["key"][3] == param_filter
- and result["result"]["best"] is not None
- ): # probably due to ['best'] != None -> does fitting fail for filtered data?
- this_result = result["result"]
- if this_result["best_rmsd"] >= min(
- this_result["mean_rmsd"], this_result["median_rmsd"]
- ):
- vprint(
- verbose,
- "[I] Not modeling {} {} as function of {}: best ({:.0f}) is worse than ref ({:.0f}, {:.0f})".format(
- name,
- attribute,
- result["key"][2],
- this_result["best_rmsd"],
- this_result["mean_rmsd"],
- this_result["median_rmsd"],
- ),
- )
- # See notes on depends_on_param
- elif this_result["best_rmsd"] >= 0.8 * min(
- this_result["mean_rmsd"], this_result["median_rmsd"]
- ):
- vprint(
- verbose,
- "[I] Not modeling {} {} as function of {}: best ({:.0f}) is not much better than ref ({:.0f}, {:.0f})".format(
- name,
- attribute,
- result["key"][2],
- this_result["best_rmsd"],
- this_result["mean_rmsd"],
- this_result["median_rmsd"],
- ),
- )
- else:
- fit_result[result["key"][2]] = this_result
- return fit_result
-
-
-class AnalyticModel:
- u"""
- Parameter-aware analytic energy/data size/... model.
-
- Supports both static and parameter-based model attributes, and automatic detection of parameter-dependence.
-
- The model relies on two internal data structures, by_name and by_param.
- These provide measurements aggregated by (function/state/...) name
- and (for by_param) parameter values. Layout:
- dictionary with one key per name ('send', 'TX', ...) or
- one key per name and parameter combination
- (('send', (1, 2)), ('send', (2, 3)), ('TX', (1, 2)), ('TX', (2, 3)), ...).
-
- Parameter values must be ordered corresponding to the lexically sorted parameter names.
-
- Each element is in turn a dict with the following elements:
- - param: list of parameter values in each measurement (-> list of lists)
- - attributes: list of keys that should be analyzed,
- e.g. ['power', 'duration']
- - for each attribute mentioned in 'attributes': A list with measurements.
- All lists except 'attributes' must have the same length.
-
- For example:
- parameters = ['foo_count', 'irrelevant']
- by_name = {
- 'send' : {
- 'foo' : [1, 1, 2],
- 'bar' : [5, 6, 7],
- 'attributes' : ['foo', 'bar'],
- 'param' : [[1, 0], [1, 0], [2, 0]]
- }
- }
-
- methods:
- get_static -- return static (parameter-unaware) model.
- get_param_lut -- return parameter-aware look-up-table model. Cannot model parameter combinations not present in by_param.
- get_fitted -- return parameter-aware model using fitted functions for behaviour prediction.
- assess -- calculate model quality
-
- variables:
- names -- function/state/... names (i.e., the keys of by_name)
- parameters -- parameter names
- stats -- ParamStats object providing parameter-dependency statistics for each name and attribute
- """
-
- def __init__(
- self,
- by_name,
- parameters,
- arg_count=None,
- function_override=dict(),
- verbose=True,
- use_corrcoef=False,
- ):
- """
- Create a new AnalyticModel and compute parameter statistics.
-
- :param by_name: measurements aggregated by (function/state/...) name.
- Layout: dictionary with one key per name ('send', 'TX', ...) or
- one key per name and parameter combination
- (('send', (1, 2)), ('send', (2, 3)), ('TX', (1, 2)), ('TX', (2, 3)), ...).
-
- Parameter values must be ordered corresponding to the lexically sorted parameter names.
-
- Each element is in turn a dict with the following elements:
- - param: list of parameter values in each measurement (-> list of lists)
- - attributes: list of keys that should be analyzed,
- e.g. ['power', 'duration']
- - for each attribute mentioned in 'attributes': A list with measurements.
- All lists except 'attributes' must have the same length.
-
- For example:
- parameters = ['foo_count', 'irrelevant']
- by_name = {
- 'send' : {
- 'foo' : [1, 1, 2],
- 'duration' : [5, 6, 7],
- 'attributes' : ['foo', 'duration'],
- 'param' : [[1, 0], [1, 0], [2, 0]]
- # foo_count-^ ^-irrelevant
- }
- }
- :param parameters: List of parameter names
- :param function_override: dict of overrides for automatic parameter function generation.
- If (state or transition name, model attribute) is present in function_override,
- the corresponding text string is the function used for analytic (parameter-aware/fitted)
- modeling of this attribute. It is passed to AnalyticFunction, see
- there for the required format. Note that this happens regardless of
- parameter dependency detection: The provided analytic function will be assigned
- even if it seems like the model attribute is static / parameter-independent.
- :param verbose: Print debug/info output while generating the model?
- :param use_corrcoef: use correlation coefficient instead of stddev comparison to detect whether a model attribute depends on a parameter
- """
- self.cache = dict()
- self.by_name = by_name
- self.by_param = by_name_to_by_param(by_name)
- self.names = sorted(by_name.keys())
- self.parameters = sorted(parameters)
- self.function_override = function_override.copy()
- self.verbose = verbose
- self._use_corrcoef = use_corrcoef
- self._num_args = arg_count
- if self._num_args is None:
- self._num_args = _num_args_from_by_name(by_name)
-
- self.stats = ParamStats(
- self.by_name,
- self.by_param,
- self.parameters,
- self._num_args,
- verbose=verbose,
- use_corrcoef=use_corrcoef,
- )
-
- def _get_model_from_dict(self, model_dict, model_function):
- model = {}
- for name, elem in model_dict.items():
- model[name] = {}
- for key in elem["attributes"]:
- try:
- model[name][key] = model_function(elem[key])
- except RuntimeWarning:
- vprint(self.verbose, "[W] Got no data for {} {}".format(name, key))
- except FloatingPointError as fpe:
- vprint(
- self.verbose,
- "[W] Got no data for {} {}: {}".format(name, key, fpe),
- )
- return model
-
- def param_index(self, param_name):
- if param_name in self.parameters:
- return self.parameters.index(param_name)
- return len(self.parameters) + int(param_name)
-
- def param_name(self, param_index):
- if param_index < len(self.parameters):
- return self.parameters[param_index]
- return str(param_index)
-
- def get_static(self, use_mean=False):
- """
- Get static model function: name, attribute -> model value.
-
- Uses the median of by_name for modeling.
- """
- getter_function = np.median
-
- if use_mean:
- getter_function = np.mean
-
- static_model = self._get_model_from_dict(self.by_name, getter_function)
-
- def static_model_getter(name, key, **kwargs):
- return static_model[name][key]
-
- return static_model_getter
-
- def get_param_lut(self, fallback=False):
- """
- Get parameter-look-up-table model function: name, attribute, parameter values -> model value.
-
- The function can only give model values for parameter combinations
- present in by_param. By default, it raises KeyError for other values.
-
- arguments:
- fallback -- Fall back to the (non-parameter-aware) static model when encountering unknown parameter values
- """
- static_model = self._get_model_from_dict(self.by_name, np.median)
- lut_model = self._get_model_from_dict(self.by_param, np.median)
-
- def lut_median_getter(name, key, param, arg=[], **kwargs):
- param.extend(map(soft_cast_int, arg))
- try:
- return lut_model[(name, tuple(param))][key]
- except KeyError:
- if fallback:
- return static_model[name][key]
- raise
-
- return lut_median_getter
-
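Usage sketch for the LUT model, assuming a model built from the by_name example above:

```python
lut = model.get_param_lut(fallback=True)
lut("send", "duration", param=[1, 0])  # combination present -> LUT median
lut("send", "duration", param=[9, 9])  # unseen combination -> static median
```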
- def get_fitted(self, safe_functions_enabled=False):
- """
- Get parameter-aware model function and model information function.
-
- Returns two functions:
- model_function(name, attribute, param=parameter values) -> model value.
- model_info(name, attribute) -> {'fit_result' : ..., 'function' : ... } or None
- """
- if "fitted_model_getter" in self.cache and "fitted_info_getter" in self.cache:
- return self.cache["fitted_model_getter"], self.cache["fitted_info_getter"]
-
- static_model = self._get_model_from_dict(self.by_name, np.median)
- param_model = dict([[name, {}] for name in self.by_name.keys()])
- paramfit = ParallelParamFit(self.by_param)
-
- for name in self.by_name.keys():
- for attribute in self.by_name[name]["attributes"]:
- for param_index, param in enumerate(self.parameters):
- if self.stats.depends_on_param(name, attribute, param):
- paramfit.enqueue(name, attribute, param_index, param, False)
- if arg_support_enabled and name in self._num_args:
- for arg_index in range(self._num_args[name]):
- if self.stats.depends_on_arg(name, attribute, arg_index):
- paramfit.enqueue(
- name,
- attribute,
- len(self.parameters) + arg_index,
- arg_index,
- False,
- )
-
- paramfit.fit()
-
- for name in self.by_name.keys():
- num_args = 0
- if name in self._num_args:
- num_args = self._num_args[name]
- for attribute in self.by_name[name]["attributes"]:
- fit_result = get_fit_result(
- paramfit.results, name, attribute, self.verbose
- )
-
- if (name, attribute) in self.function_override:
- function_str = self.function_override[(name, attribute)]
- x = AnalyticFunction(function_str, self.parameters, num_args)
- x.fit(self.by_param, name, attribute)
- if x.fit_success:
- param_model[name][attribute] = {
- "fit_result": fit_result,
- "function": x,
- }
- elif len(fit_result.keys()):
- x = analytic.function_powerset(
- fit_result, self.parameters, num_args
- )
- x.fit(self.by_param, name, attribute)
-
- if x.fit_success:
- param_model[name][attribute] = {
- "fit_result": fit_result,
- "function": x,
- }
-
- def model_getter(name, key, **kwargs):
- if "arg" in kwargs and "param" in kwargs:
- kwargs["param"].extend(map(soft_cast_int, kwargs["arg"]))
- if key in param_model[name]:
- param_list = kwargs["param"]
- param_function = param_model[name][key]["function"]
- if param_function.is_predictable(param_list):
- return param_function.eval(param_list)
- return static_model[name][key]
-
- def info_getter(name, key):
- if key in param_model[name]:
- return param_model[name][key]
- return None
-
- self.cache["fitted_model_getter"] = model_getter
- self.cache["fitted_info_getter"] = info_getter
-
- return model_getter, info_getter
-
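Usage sketch for the fitted model getters (names are illustrative):

```python
model_function, model_info = m.get_fitted()
value = model_function("TX", "power", param=[10, 0])
info = model_info("TX", "power")
if info is not None:
    # attribute name per the functions.py hunk above
    print(info["function"].model_function)
```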
- def assess(self, model_function):
- """
- Calculate MAE, SMAPE, etc. of model_function for each by_name entry.
-
- state/transition/... name and parameter values are fed into model_function.
- The by_name entries of this AnalyticModel are used as ground truth and
- compared with the values predicted by model_function.
-
- For proper model assessments, the data used to generate model_function
- and the data fed into this AnalyticModel instance must be mutually
- exclusive (e.g. by performing cross validation). Otherwise,
- overfitting cannot be detected.
- """
- detailed_results = {}
- for name, elem in sorted(self.by_name.items()):
- detailed_results[name] = {}
- for attribute in elem["attributes"]:
- predicted_data = np.array(
- list(
- map(
- lambda i: model_function(
- name, attribute, param=elem["param"][i]
- ),
- range(len(elem[attribute])),
- )
- )
- )
- measures = regression_measures(predicted_data, elem[attribute])
- detailed_results[name][attribute] = measures
-
- return {
- "by_name": detailed_results,
- }
-
- def to_json(self):
- # TODO
- pass
-
-
def _add_trace_data_to_aggregate(aggregate, key, element):
# Only cares about element['isa'], element['offline_aggregates'], and
# element['plan']['level']
@@ -2049,540 +1161,6 @@ def pta_trace_to_aggregate(traces, ignore_trace_indexes=[]):
return by_name, parameter_names, arg_count
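The typical flow implied by the docstrings in this file (the filename is illustrative):

```python
raw = RawData(["benchmark.tar"])
traces = raw.get_preprocessed_data()
by_name, parameter_names, arg_count = pta_trace_to_aggregate(traces)
model = PTAModel(by_name, parameter_names, arg_count, traces=traces)
static_model = model.get_static()
```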
-class PTAModel:
- u"""
- Parameter-aware PTA-based energy model.
-
- Supports both static and parameter-based model attributes, and automatic detection of parameter-dependence.
-
- The model heavily relies on two internal data structures:
- PTAModel.by_name and PTAModel.by_param.
-
- These provide measurements aggregated by state/transition name
- and (for by_param) parameter values. Layout:
- dictionary with one key per state/transition ('send', 'TX', ...) or
- one key per state/transition and parameter combination
- (('send', (1, 2)), ('send', (2, 3)), ('TX', (1, 2)), ('TX', (2, 3)), ...).
- For by_param, parameter values are ordered corresponding to the lexically sorted parameter names.
-
- Each element is in turn a dict with the following elements:
- - isa: 'state' or 'transition'
- - power: list of mean power measurements in µW
- - duration: list of durations in µs
- - power_std: list of stddev of power per state/transition
- - energy: consumed energy (power*duration) in pJ
- - paramkeys: list of parameter names in each measurement (-> list of lists)
- - param: list of parameter values in each measurement (-> list of lists)
- - attributes: list of keys that should be analyzed,
- e.g. ['power', 'duration']
- additionally, only if isa == 'transition':
- - timeout: list of duration of previous state in µs
- - rel_energy_prev: transition energy relative to previous state mean power in pJ
- - rel_energy_next: transition energy relative to next state mean power in pJ
- """
-
- def __init__(
- self,
- by_name,
- parameters,
- arg_count,
- traces=[],
- ignore_trace_indexes=[],
- discard_outliers=None,
- function_override={},
- verbose=True,
- use_corrcoef=False,
- pta=None,
- ):
- """
- Prepare a new PTA energy model.
-
- Actual model generation is done on-demand by calling the respective functions.
-
- arguments:
- by_name -- state/transition measurements aggregated by name, as returned by pta_trace_to_aggregate.
- parameters -- list of parameter names, as returned by pta_trace_to_aggregate
- arg_count -- function arguments, as returned by pta_trace_to_aggregate
- traces -- list of preprocessed DFA traces, as returned by RawData.get_preprocessed_data()
- ignore_trace_indexes -- list of trace indexes. The corresponding traces will be ignored.
- discard_outliers -- currently not supported: threshold for outlier detection and removal (float).
- Outlier detection is performed individually for each state/transition in each trace,
- so it only works if the benchmark ran several times.
- Given "data" (a set of measurements of the same thing, e.g. TX duration in the third benchmark trace),
- "m" (the median of all attribute measurements with the same parameters, which may include data from other traces),
- a data point X is considered an outlier if
- | 0.6745 * (X - m) / median(|data - m|) | > discard_outliers .
- function_override -- dict of overrides for automatic parameter function generation.
- If (state or transition name, model attribute) is present in function_override,
- the corresponding text string is the function used for analytic (parameter-aware/fitted)
- modeling of this attribute. It is passed to AnalyticFunction, see
- there for the required format. Note that this happens regardless of
- parameter dependency detection: The provided analytic function will be assigned
- even if it seems like the model attribute is static / parameter-independent.
- verbose -- print informative output, e.g. when removing an outlier
- use_corrcoef -- use correlation coefficient instead of stddev comparison
- to detect whether a model attribute depends on a parameter
- pta -- hardware model as `PTA` object
- """
- self.by_name = by_name
- self.by_param = by_name_to_by_param(by_name)
- self._parameter_names = sorted(parameters)
- self._num_args = arg_count
- self._use_corrcoef = use_corrcoef
- self.traces = traces
- self.stats = ParamStats(
- self.by_name,
- self.by_param,
- self._parameter_names,
- self._num_args,
- self._use_corrcoef,
- verbose=verbose,
- )
- self.cache = {}
- np.seterr("raise")
- self._outlier_threshold = discard_outliers
- self.function_override = function_override.copy()
- self.verbose = verbose
- self.pta = pta
- self.ignore_trace_indexes = ignore_trace_indexes
- self._aggregate_to_ndarray(self.by_name)
-
- def _aggregate_to_ndarray(self, aggregate):
- for elem in aggregate.values():
- for key in elem["attributes"]:
- elem[key] = np.array(elem[key])
-
- # This heuristic is very similar to the "function is not much better than
- # median" checks in get_fitted. So far, doing it here as well is mostly
- # a performance and not an algorithm quality decision.
- # --df, 2018-04-18
- def depends_on_param(self, state_or_trans, key, param):
- return self.stats.depends_on_param(state_or_trans, key, param)
-
- # See notes on depends_on_param
- def depends_on_arg(self, state_or_trans, key, param):
- return self.stats.depends_on_arg(state_or_trans, key, param)
-
- def _get_model_from_dict(self, model_dict, model_function):
- model = {}
- for name, elem in model_dict.items():
- model[name] = {}
- for key in elem["attributes"]:
- try:
- model[name][key] = model_function(elem[key])
- except RuntimeWarning:
- vprint(self.verbose, "[W] Got no data for {} {}".format(name, key))
- except FloatingPointError as fpe:
- vprint(
- self.verbose,
- "[W] Got no data for {} {}: {}".format(name, key, fpe),
- )
- return model
-
- def get_static(self, use_mean=False):
- """
- Get static model function: name, attribute -> model value.
-
- Uses the median of by_name for modeling, unless `use_mean` is set.
- """
- getter_function = np.median
-
- if use_mean:
- getter_function = np.mean
-
- static_model = self._get_model_from_dict(self.by_name, getter_function)
-
- def static_model_getter(name, key, **kwargs):
- return static_model[name][key]
-
- return static_model_getter
-
- def get_param_lut(self, fallback=False):
- """
- Get parameter-look-up-table model function: name, attribute, parameter values -> model value.
-
- The function can only give model values for parameter combinations
- present in by_param. By default, it raises KeyError for other values.
-
- arguments:
- fallback -- Fall back to the (non-parameter-aware) static model when encountering unknown parameter values
- """
- static_model = self._get_model_from_dict(self.by_name, np.median)
- lut_model = self._get_model_from_dict(self.by_param, np.median)
-
- def lut_median_getter(name, key, param, arg=[], **kwargs):
- param.extend(map(soft_cast_int, arg))
- try:
- return lut_model[(name, tuple(param))][key]
- except KeyError:
- if fallback:
- return static_model[name][key]
- raise
-
- return lut_median_getter
-
- def param_index(self, param_name):
- if param_name in self._parameter_names:
- return self._parameter_names.index(param_name)
- return len(self._parameter_names) + int(param_name)
-
- def param_name(self, param_index):
- if param_index < len(self._parameter_names):
- return self._parameter_names[param_index]
- return str(param_index)
-
- def get_fitted(self, safe_functions_enabled=False):
- """
- Get parameter-aware model function and model information function.
-
- Returns two functions:
- model_function(name, attribute, param=parameter values) -> model value.
- model_info(name, attribute) -> {'fit_result' : ..., 'function' : ... } or None
- """
- if "fitted_model_getter" in self.cache and "fitted_info_getter" in self.cache:
- return self.cache["fitted_model_getter"], self.cache["fitted_info_getter"]
-
- static_model = self._get_model_from_dict(self.by_name, np.median)
- param_model = dict(
- [[state_or_tran, {}] for state_or_tran in self.by_name.keys()]
- )
- paramfit = ParallelParamFit(self.by_param)
- for state_or_tran in self.by_name.keys():
- for model_attribute in self.by_name[state_or_tran]["attributes"]:
- fit_results = {}
- for parameter_index, parameter_name in enumerate(self._parameter_names):
- if self.depends_on_param(
- state_or_tran, model_attribute, parameter_name
- ):
- paramfit.enqueue(
- state_or_tran,
- model_attribute,
- parameter_index,
- parameter_name,
- safe_functions_enabled,
- )
- for (
- codependent_param_dict
- ) in self.stats.codependent_parameter_value_dicts(
- state_or_tran, model_attribute, parameter_name
- ):
- paramfit.enqueue(
- state_or_tran,
- model_attribute,
- parameter_index,
- parameter_name,
- safe_functions_enabled,
- codependent_param_dict,
- )
- if (
- arg_support_enabled
- and self.by_name[state_or_tran]["isa"] == "transition"
- ):
- for arg_index in range(self._num_args[state_or_tran]):
- if self.depends_on_arg(
- state_or_tran, model_attribute, arg_index
- ):
- paramfit.enqueue(
- state_or_tran,
- model_attribute,
- len(self._parameter_names) + arg_index,
- arg_index,
- safe_functions_enabled,
- )
- paramfit.fit()
-
- for state_or_tran in self.by_name.keys():
- num_args = 0
- if (
- arg_support_enabled
- and self.by_name[state_or_tran]["isa"] == "transition"
- ):
- num_args = self._num_args[state_or_tran]
- for model_attribute in self.by_name[state_or_tran]["attributes"]:
- fit_results = get_fit_result(
- paramfit.results, state_or_tran, model_attribute, self.verbose
- )
-
- for parameter_name in self._parameter_names:
- if self.depends_on_param(
- state_or_tran, model_attribute, parameter_name
- ):
- for (
- codependent_param_dict
- ) in self.stats.codependent_parameter_value_dicts(
- state_or_tran, model_attribute, parameter_name
- ):
- pass
- # FIXME get_fit_result does not take a parameter as an argument at all...
-
- if (state_or_tran, model_attribute) in self.function_override:
- function_str = self.function_override[
- (state_or_tran, model_attribute)
- ]
- x = AnalyticFunction(function_str, self._parameter_names, num_args)
- x.fit(self.by_param, state_or_tran, model_attribute)
- if x.fit_success:
- param_model[state_or_tran][model_attribute] = {
- "fit_result": fit_results,
- "function": x,
- }
- elif len(fit_results.keys()):
- x = analytic.function_powerset(
- fit_results, self._parameter_names, num_args
- )
- x.fit(self.by_param, state_or_tran, model_attribute)
- if x.fit_success:
- param_model[state_or_tran][model_attribute] = {
- "fit_result": fit_results,
- "function": x,
- }
-
- def model_getter(name, key, **kwargs):
- if "arg" in kwargs and "param" in kwargs:
- kwargs["param"].extend(map(soft_cast_int, kwargs["arg"]))
- if key in param_model[name]:
- param_list = kwargs["param"]
- param_function = param_model[name][key]["function"]
- if param_function.is_predictable(param_list):
- return param_function.eval(param_list)
- return static_model[name][key]
-
- def info_getter(name, key):
- if key in param_model[name]:
- return param_model[name][key]
- return None
-
- self.cache["fitted_model_getter"] = model_getter
- self.cache["fitted_info_getter"] = info_getter
-
- return model_getter, info_getter
-
- def to_json(self):
- static_model = self.get_static()
- static_quality = self.assess(static_model)
- param_model, param_info = self.get_fitted()
- analytic_quality = self.assess(param_model)
- self.pta.update(
- static_model,
- param_info,
- static_error=static_quality["by_name"],
- analytic_error=analytic_quality["by_name"],
- )
- return self.pta.to_json()
-
- def states(self):
- """Return sorted list of state names."""
- return sorted(
- list(
- filter(lambda k: self.by_name[k]["isa"] == "state", self.by_name.keys())
- )
- )
-
- def transitions(self):
- """Return sorted list of transition names."""
- return sorted(
- list(
- filter(
- lambda k: self.by_name[k]["isa"] == "transition",
- self.by_name.keys(),
- )
- )
- )
-
- def states_and_transitions(self):
- """Return list of states and transition names."""
- ret = self.states()
- ret.extend(self.transitions())
- return ret
-
- def parameters(self):
- return self._parameter_names
-
- def attributes(self, state_or_trans):
- return self.by_name[state_or_trans]["attributes"]
-
- def assess(self, model_function):
- """
- Calculate MAE, SMAPE, etc. of model_function for each by_name entry.
-
- state/transition/... name and parameter values are fed into model_function.
- The by_name entries of this PTAModel are used as ground truth and
- compared with the values predicted by model_function.
-
- For proper model assessments, the data used to generate model_function
- and the data fed into this PTAModel instance must be mutually
- exclusive (e.g. by performing cross validation). Otherwise,
- overfitting cannot be detected.
- """
- detailed_results = {}
- for name, elem in sorted(self.by_name.items()):
- detailed_results[name] = {}
- for key in elem["attributes"]:
- predicted_data = np.array(
- list(
- map(
- lambda i: model_function(name, key, param=elem["param"][i]),
- range(len(elem[key])),
- )
- )
- )
- measures = regression_measures(predicted_data, elem[key])
- detailed_results[name][key] = measures
-
- return {"by_name": detailed_results}
-
- def assess_states(
- self, model_function, model_attribute="power", distribution: dict = None
- ):
- """
- Calculate overall model error assuming equal distribution of states
- """
- # TODO calculate mean power draw for distribution and use it to
- # calculate relative error from MAE combination
- model_quality = self.assess(model_function)
- num_states = len(self.states())
- if distribution is None:
- distribution = dict(map(lambda x: [x, 1 / num_states], self.states()))
-
- if not np.isclose(sum(distribution.values()), 1):
- raise ValueError(
- "distribution must be a probability distribution with sum 1"
- )
-
- # total_value = None
- # try:
- # total_value = sum(map(lambda x: model_function(x, model_attribute) * distribution[x], self.states()))
- # except KeyError:
- # pass
-
- total_error = np.sqrt(
- sum(
- map(
- lambda x: np.square(
- model_quality["by_name"][x][model_attribute]["mae"]
- * distribution[x]
- ),
- self.states(),
- )
- )
- )
- return total_error
-
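For reference, assess_states combines the per-state MAEs as a probability-weighted quadratic mean, total_error = sqrt(sum((MAE_s * p_s)**2)). Spelled out with illustrative numbers:

```python
import numpy as np

maes = {"IDLE": 10.0, "TX": 30.0}  # per-state MAE from self.assess()
p = {"IDLE": 0.5, "TX": 0.5}       # assumed state distribution
total_error = np.sqrt(sum((maes[s] * p[s]) ** 2 for s in maes))
# sqrt(25 + 225) == sqrt(250), roughly 15.8
```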
- def assess_on_traces(self, model_function):
- """
- Calculate MAE, SMAPE, etc. of model_function for each trace known to this PTAModel instance.
-
- :returns: dict of `duration_by_trace`, `energy_by_trace`, `timeout_by_trace`, `rel_energy_by_trace` and `state_energy_by_trace`.
- Each entry holds regression measures for the corresponding measure. Note that the determined model quality heavily depends on the
- traces: small-ish absolute errors in states which frequently occur may have more effect than large absolute errors in rarely occurring states
- """
- model_energy_list = []
- real_energy_list = []
- model_rel_energy_list = []
- model_state_energy_list = []
- model_duration_list = []
- real_duration_list = []
- model_timeout_list = []
- real_timeout_list = []
-
- for trace in self.traces:
- if trace["id"] not in self.ignore_trace_indexes:
- for rep_id in range(len(trace["trace"][0]["offline"])):
- model_energy = 0.0
- real_energy = 0.0
- model_rel_energy = 0.0
- model_state_energy = 0.0
- model_duration = 0.0
- real_duration = 0.0
- model_timeout = 0.0
- real_timeout = 0.0
- for i, trace_part in enumerate(trace["trace"]):
- name = trace_part["name"]
- prev_name = trace["trace"][i - 1]["name"]
- isa = trace_part["isa"]
- if name != "UNINITIALIZED":
- try:
- param = trace_part["offline_aggregates"]["param"][
- rep_id
- ]
- prev_param = trace["trace"][i - 1][
- "offline_aggregates"
- ]["param"][rep_id]
- power = trace_part["offline"][rep_id]["uW_mean"]
- duration = trace_part["offline"][rep_id]["us"]
- prev_duration = trace["trace"][i - 1]["offline"][
- rep_id
- ]["us"]
- real_energy += power * duration
- if isa == "state":
- model_energy += (
- model_function(name, "power", param=param)
- * duration
- )
- else:
- model_energy += model_function(
- name, "energy", param=param
- )
- # If i == 1, the previous state was UNINITIALIZED, for which we do not have model data
- if i == 1:
- model_rel_energy += model_function(
- name, "energy", param=param
- )
- else:
- model_rel_energy += model_function(
- prev_name, "power", param=prev_param
- ) * (prev_duration + duration)
- model_state_energy += model_function(
- prev_name, "power", param=prev_param
- ) * (prev_duration + duration)
- model_rel_energy += model_function(
- name, "rel_energy_prev", param=param
- )
- real_duration += duration
- model_duration += model_function(
- name, "duration", param=param
- )
- if (
- "plan" in trace_part
- and trace_part["plan"]["level"] == "epilogue"
- ):
- real_timeout += trace_part["offline"][rep_id][
- "timeout"
- ]
- model_timeout += model_function(
- name, "timeout", param=param
- )
- except KeyError:
- # if states/transitions have been removed via --filter-param, this is harmless
- pass
- real_energy_list.append(real_energy)
- model_energy_list.append(model_energy)
- model_rel_energy_list.append(model_rel_energy)
- model_state_energy_list.append(model_state_energy)
- real_duration_list.append(real_duration)
- model_duration_list.append(model_duration)
- real_timeout_list.append(real_timeout)
- model_timeout_list.append(model_timeout)
-
- return {
- "duration_by_trace": regression_measures(
- np.array(model_duration_list), np.array(real_duration_list)
- ),
- "energy_by_trace": regression_measures(
- np.array(model_energy_list), np.array(real_energy_list)
- ),
- "timeout_by_trace": regression_measures(
- np.array(model_timeout_list), np.array(real_timeout_list)
- ),
- "rel_energy_by_trace": regression_measures(
- np.array(model_rel_energy_list), np.array(real_energy_list)
- ),
- "state_energy_by_trace": regression_measures(
- np.array(model_state_energy_list), np.array(real_energy_list)
- ),
- }
-
-
class EnergyTraceLog:
"""
EnergyTrace log loader for DFA traces.
@@ -2617,7 +1195,6 @@ class EnergyTraceLog:
self.state_duration = state_duration * 1e-3
self.transition_names = transition_names
self.with_traces = with_traces
- self.verbose = False
self.errors = list()
# TODO auto-detect
@@ -2643,6 +1220,7 @@ class EnergyTraceLog:
"""
if not zbar_available:
+ logger.error("zbar module is not available")
self.errors.append(
'zbar module is not available. Try "apt install python3-zbar"'
)
@@ -2675,11 +1253,10 @@ class EnergyTraceLog:
self.sample_rate = data_count / (m_duration_us * 1e-6)
- vprint(
- self.verbose,
+ logger.debug(
"got {} samples with {} seconds of log data ({} Hz)".format(
data_count, m_duration_us * 1e-6, self.sample_rate
- ),
+ )
)
return (
@@ -2783,25 +1360,20 @@ class EnergyTraceLog:
for name, duration in expected_transitions:
bc, start, stop, end = self.find_barcode(next_barcode)
if bc is None:
- print('[!!!] did not find transition "{}"'.format(name))
+ logger.error('did not find transition "{}"'.format(name))
break
next_barcode = end + self.state_duration + duration
- vprint(
- self.verbose,
+ logger.debug(
'{} barcode "{}" area: {:0.2f} .. {:0.2f} / {:0.2f} seconds'.format(
offline_index, bc, start, stop, end
- ),
+ )
)
if bc != name:
- vprint(
- self.verbose,
- '[!!!] mismatch: expected "{}", got "{}"'.format(name, bc),
- )
- vprint(
- self.verbose,
+ logger.error('mismatch: expected "{}", got "{}"'.format(name, bc))
+ logger.debug(
"{} estimated transition area: {:0.3f} .. {:0.3f} seconds".format(
offline_index, end, end + duration
- ),
+ )
)
transition_start_index = self.ts_to_index(end)
@@ -2811,13 +1383,12 @@ class EnergyTraceLog:
self.ts_to_index(end + duration + self.state_duration) + 1
)
- vprint(
- self.verbose,
+ logger.debug(
"{} estimated transitionindex: {:0.3f} .. {:0.3f} seconds".format(
offline_index,
transition_start_index / self.sample_rate,
transition_done_index / self.sample_rate,
- ),
+ )
)
transition_power_W = self.interval_power[
@@ -2912,11 +1483,10 @@ class EnergyTraceLog:
+ self.led_power / 3
)
- vprint(
- self.verbose,
+ logger.debug(
"looking for barcode starting at {:0.2f} s, threshold is {:0.1f} mW".format(
start_ts, sync_threshold_power * 1e3
- ),
+ )
)
sync_area_start = None
@@ -2947,11 +1517,10 @@ class EnergyTraceLog:
barcode_data = self.interval_power[sync_area_start:sync_area_end]
- vprint(
- self.verbose,
+ logger.debug(
"barcode search area: {:0.2f} .. {:0.2f} seconds ({} samples)".format(
sync_start_ts, sync_end_ts, len(barcode_data)
- ),
+ )
)
bc, start, stop, padding_bits = self.find_barcode_in_power_data(barcode_data)
@@ -3026,7 +1595,7 @@ class EnergyTraceLog:
return content, sym_start, sym_end, padding_bits
else:
- vprint(self.verbose, "unable to find barcode")
+ logger.warning("unable to find barcode")
return None, None, None, None
@@ -3046,17 +1615,15 @@ class MIMOSA:
Resulting data is a list of state/transition/state/transition/... measurements.
"""
- def __init__(self, voltage: float, shunt: int, verbose=True, with_traces=False):
+ def __init__(self, voltage: float, shunt: int, with_traces=False):
"""
Initialize MIMOSA loader for a specific voltage and shunt setting.
:param voltage: MIMOSA DUT supply voltage (V)
:param shunt: MIMOSA shunt (Ohms)
- :param verbose: print notices about invalid data on STDOUT?
"""
self.voltage = voltage
self.shunt = shunt
- self.verbose = verbose
self.with_traces = with_traces
self.r1 = 984 # "1k"
self.r2 = 99013 # "100k"
@@ -3254,7 +1821,7 @@ class MIMOSA:
if cal_r2_mean > cal_0_mean:
b_lower = (ua_r2 - 0) / (cal_r2_mean - cal_0_mean)
else:
- vprint(self.verbose, "[W] 0 uA == %.f uA during calibration" % (ua_r2))
+ logger.warning("0 uA == %.f uA during calibration" % (ua_r2))
b_lower = 0
b_upper = (ua_r1 - ua_r2) / (cal_r1_mean - cal_r2_mean)
@@ -3302,50 +1869,6 @@ class MIMOSA:
return calfunc, caldata
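The slopes computed above imply a two-segment linear calibration through the measured calibration means. A hypothetical reconstruction, not necessarily the actual calfunc (which this hunk does not show):

```python
def calfunc_sketch(raw):
    # maps raw values through (cal_0_mean, 0), (cal_r2_mean, ua_r2),
    # (cal_r1_mean, ua_r1): two linear segments with slopes b_lower, b_upper
    if raw <= cal_r2_mean:
        return b_lower * (raw - cal_0_mean)
    return ua_r2 + b_upper * (raw - cal_r2_mean)
```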
- """
- def calcgrad(self, currents, threshold):
- grad = np.gradient(running_mean(currents * self.voltage, 10))
- # len(grad) == len(currents) - 9
- subst = []
- lastgrad = 0
- for i in range(len(grad)):
- # minimum substate duration: 10ms
- if np.abs(grad[i]) > threshold and i - lastgrad > 50:
- # account for skew introduced by running_mean and current
- # ramp slope (parasitic capacitors etc.)
- subst.append(i+10)
- lastgrad = i
- if lastgrad != i:
- subst.append(i+10)
- return subst
-
- # TODO konfigurierbare min/max threshold und len(gradidx) > X, binaere
- # Sache nach noetiger threshold. postprocessing mit
- # "zwei benachbarte substates haben sehr aehnliche werte / niedrige stddev" -> mergen
- # ... min/max muessen nicht vorgegeben werden, sind ja bekannt (0 / np.max(grad))
- # TODO bei substates / index foo den offset durch running_mean beachten
- # TODO ggf. clustering der 'abs(grad) > threshold' und bestimmung interessanter
- # uebergaenge dadurch?
- def gradfoo(self, currents):
- gradients = np.abs(np.gradient(running_mean(currents * self.voltage, 10)))
- gradmin = np.min(gradients)
- gradmax = np.max(gradients)
- threshold = np.mean([gradmin, gradmax])
- gradidx = self.calcgrad(currents, threshold)
- num_substates = 2
- while len(gradidx) != num_substates:
- if gradmax - gradmin < 0.1:
- # We did our best
- return threshold, gradidx
- if len(gradidx) > num_substates:
- gradmin = threshold
- else:
- gradmax = threshold
- threshold = np.mean([gradmin, gradmax])
- gradidx = self.calcgrad(currents, threshold)
- return threshold, gradidx
- """
-
def analyze_states(self, charges, trigidx, ua_func):
u"""
Split log data into states and transitions and return duration, energy, and mean power for each element.
@@ -3380,30 +1903,6 @@ class MIMOSA:
for idx in trigger_indices:
range_raw = charges[previdx:idx]
range_ua = ua_func(range_raw)
- substates = {}
-
- if previdx != 0 and idx - previdx > 200:
- thr, subst = 0, [] # self.gradfoo(range_ua)
- if len(subst):
- statelist = []
- prevsubidx = 0
- for subidx in subst:
- statelist.append(
- {
- "duration": (subidx - prevsubidx) * 10,
- "uW_mean": np.mean(
- range_ua[prevsubidx:subidx] * self.voltage
- ),
- "uW_std": np.std(
- range_ua[prevsubidx:subidx] * self.voltage
- ),
- }
- )
- prevsubidx = subidx
- substates = {
- "threshold": thr,
- "states": statelist,
- }
isa = "state"
if not is_state:
@@ -3422,12 +1921,6 @@ class MIMOSA:
if self.with_traces:
data["uW"] = range_ua * self.voltage
- if "states" in substates:
- data["substates"] = substates
- ssum = np.sum(list(map(lambda x: x["duration"], substates["states"])))
- if ssum != data["us"]:
- vprint(self.verbose, "ERR: duration %d vs %d" % (data["us"], ssum))
-
if isa == "transition":
# subtract average power of previous state
# (that is, the state from which this transition originates)
diff --git a/lib/model.py b/lib/model.py
new file mode 100644
index 0000000..bb4a45b
--- /dev/null
+++ b/lib/model.py
@@ -0,0 +1,1156 @@
+#!/usr/bin/env python3
+
+import logging
+import numpy as np
+from scipy import optimize
+from sklearn.metrics import r2_score
+from multiprocessing import Pool
+from .automata import PTA
+from .functions import analytic
+from .functions import AnalyticFunction
+from .parameters import ParamStats
+from .utils import is_numeric, soft_cast_int, param_slice_eq, remove_index_from_tuple
+from .utils import by_name_to_by_param, match_parameter_values
+
+logger = logging.getLogger(__name__)
+arg_support_enabled = True
+
+
+def aggregate_measures(aggregate: float, actual: list) -> dict:
+ """
+ Calculate error measures for model value on data list.
+
+ arguments:
+ aggregate -- model value (float or int)
+ actual -- real-world / reference values (list of float or int)
+
+ return value:
+ See regression_measures
+ """
+ aggregate_array = np.array([aggregate] * len(actual))
+ return regression_measures(aggregate_array, np.array(actual))
+
+
+def regression_measures(predicted: np.ndarray, actual: np.ndarray):
+ """
+ Calculate error measures by comparing model values to reference values.
+
+ arguments:
+ predicted -- model values (np.ndarray)
+ actual -- real-world / reference values (np.ndarray)
+
+ Returns a dict containing the following measures:
+ mae -- Mean Absolute Error
+ mape -- Mean Absolute Percentage Error,
+ if all items in actual are non-zero (NaN otherwise)
+ smape -- Symmetric Mean Absolute Percentage Error,
+ if no 0,0-pairs are present in actual and predicted (NaN otherwise)
+ msd -- Mean Square Deviation
+ rmsd -- Root Mean Square Deviation
+ ssr -- Sum of Squared Residuals
+ rsq -- R^2 measure, see sklearn.metrics.r2_score
+ count -- Number of values
+ """
+ if type(predicted) != np.ndarray:
+ raise ValueError("first arg must be ndarray, is {}".format(type(predicted)))
+ if type(actual) != np.ndarray:
+ raise ValueError("second arg must be ndarray, is {}".format(type(actual)))
+ deviations = predicted - actual
+ # mean = np.mean(actual)
+ if len(deviations) == 0:
+ return {}
+ measures = {
+ "mae": np.mean(np.abs(deviations), dtype=np.float64),
+ "msd": np.mean(deviations ** 2, dtype=np.float64),
+ "rmsd": np.sqrt(np.mean(deviations ** 2), dtype=np.float64),
+ "ssr": np.sum(deviations ** 2, dtype=np.float64),
+ "rsq": r2_score(actual, predicted),
+ "count": len(actual),
+ }
+
+ # rsq_quotient = np.sum((actual - mean)**2, dtype=np.float64) * np.sum((predicted - mean)**2, dtype=np.float64)
+
+ if np.all(actual != 0):
+ measures["mape"] = np.mean(np.abs(deviations / actual)) * 100 # bad measure
+ else:
+ measures["mape"] = np.nan
+ if np.all(np.abs(predicted) + np.abs(actual) != 0):
+ measures["smape"] = (
+ np.mean(np.abs(deviations) / ((np.abs(predicted) + np.abs(actual)) / 2))
+ * 100
+ )
+ else:
+ measures["smape"] = np.nan
+ # if np.all(rsq_quotient != 0):
+ # measures['rsq'] = (np.sum((actual - mean) * (predicted - mean), dtype=np.float64)**2) / rsq_quotient
+
+ return measures
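+
+# Usage sketch (hypothetical values):
+#   regression_measures(np.array([10, 20]), np.array([11, 19]))
+#   # -> mae = 1.0, msd = 1.0, rmsd = 1.0, ...
+#   aggregate_measures(15, [11, 19])
+#   # -> the same measures, for a constant (static) model value of 15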
+
+
+class ParallelParamFit:
+ """
+ Fit a set of functions on parameterized measurements.
+
+    One parameter is variable, all others are fixed. Reports the best-fitting
+ function type for each parameter.
+ """
+
+ def __init__(self, by_param):
+ """Create a new ParallelParamFit object."""
+ self.fit_queue = []
+ self.by_param = by_param
+
+ def enqueue(
+ self,
+ state_or_tran,
+ attribute,
+ param_index,
+ param_name,
+ safe_functions_enabled=False,
+ param_filter=None,
+ ):
+ """
+ Add state_or_tran/attribute/param_name to fit queue.
+
+ This causes fit() to compute the best-fitting function for this model part.
+ """
+ self.fit_queue.append(
+ {
+ "key": [state_or_tran, attribute, param_name, param_filter],
+ "args": [
+ self.by_param,
+ state_or_tran,
+ attribute,
+ param_index,
+ safe_functions_enabled,
+ param_filter,
+ ],
+ }
+ )
+
+ def fit(self):
+ """
+        Fit functions on previously enqueued data.
+
+        Fitting is done in parallel with one process per core.
+
+ Results can be accessed using the public ParallelParamFit.results object.
+ """
+ with Pool() as pool:
+ self.results = pool.map(_try_fits_parallel, self.fit_queue)
+
+ def get_result(self, name, attribute, param_filter: dict = None):
+ """
+ Parse and sanitize fit results for state/transition/... 'name' and model attribute 'attribute'.
+
+        Filters out results where the best function is worse than (or not much better than) static mean/median estimates.
+
+ :param name: state/transition/... name, e.g. 'TX'
+ :param attribute: model attribute, e.g. 'duration'
+        :param param_filter: only consider fits that were enqueued with a matching parameter filter (None for unfiltered fits)
+ :returns: dict with fit result (see `_try_fits`) for each successfully fitted parameter. E.g. {'param 1': {'best' : 'function name', ...} }
+ """
+ fit_result = dict()
+ for result in self.results:
+ if (
+ result["key"][0] == name
+ and result["key"][1] == attribute
+ and result["key"][3] == param_filter
+ and result["result"]["best"] is not None
+            ):  # probably due to the ['best'] != None check -- does fitting fail for filtered data?
+ this_result = result["result"]
+ if this_result["best_rmsd"] >= min(
+ this_result["mean_rmsd"], this_result["median_rmsd"]
+ ):
+ logger.debug(
+ "Not modeling {} {} as function of {}: best ({:.0f}) is worse than ref ({:.0f}, {:.0f})".format(
+ name,
+ attribute,
+ result["key"][2],
+ this_result["best_rmsd"],
+ this_result["mean_rmsd"],
+ this_result["median_rmsd"],
+ )
+ )
+ # See notes on depends_on_param
+ elif this_result["best_rmsd"] >= 0.8 * min(
+ this_result["mean_rmsd"], this_result["median_rmsd"]
+ ):
+ logger.debug(
+ "Not modeling {} {} as function of {}: best ({:.0f}) is not much better than ref ({:.0f}, {:.0f})".format(
+ name,
+ attribute,
+ result["key"][2],
+ this_result["best_rmsd"],
+ this_result["mean_rmsd"],
+ this_result["median_rmsd"],
+ )
+ )
+ else:
+ fit_result[result["key"][2]] = this_result
+ return fit_result
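+
+    # Typical usage (sketch; state and parameter names are hypothetical):
+    #   paramfit = ParallelParamFit(by_param)
+    #   paramfit.enqueue("TX", "duration", 0, "datarate")
+    #   paramfit.fit()
+    #   fit_result = paramfit.get_result("TX", "duration")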
+
+
+def _try_fits_parallel(arg):
+ """
+ Call _try_fits(*arg['args']) and return arg['key'] and the _try_fits result.
+
+ Must be a global function as it is called from a multiprocessing Pool.
+ """
+ return {"key": arg["key"], "result": _try_fits(*arg["args"])}
+
+
+def _try_fits(
+ by_param,
+ state_or_tran,
+ model_attribute,
+ param_index,
+ safe_functions_enabled=False,
+ param_filter: dict = None,
+):
+ """
+ Determine goodness-of-fit for prediction of `by_param[(state_or_tran, *)][model_attribute]` dependence on `param_index` using various functions.
+
+ This is done by varying `param_index` while keeping all other parameters constant and doing one least squares optimization for each function and for each combination of the remaining parameters.
+ The value of the parameter corresponding to `param_index` (e.g. txpower or packet length) is the sole input to the model function.
+ Only numeric parameter values (as determined by `utils.is_numeric`) are used for fitting, non-numeric values such as None or enum strings are ignored.
+ Fitting is only performed if at least three distinct parameter values exist in `by_param[(state_or_tran, *)]`.
+
+ :returns: a dictionary with the following elements:
+ best -- name of the best-fitting function (see `analytic.functions`). `None` in case of insufficient data.
+ best_rmsd -- mean Root Mean Square Deviation of best-fitting function over all combinations of the remaining parameters
+ mean_rmsd -- mean Root Mean Square Deviation of a reference model using the mean of its respective input data as model value
+ median_rmsd -- mean Root Mean Square Deviation of a reference model using the median of its respective input data as model value
+ results -- mean goodness-of-fit measures for the individual functions. See `analytic.functions` for keys and `aggregate_measures` for values
+
+ :param by_param: measurements partitioned by state/transition/... name and parameter values.
+ Example: `{('foo', (0, 2)): {'bar': [2]}, ('foo', (0, 4)): {'bar': [4]}, ('foo', (0, 6)): {'bar': [6]}}`
+
+ :param state_or_tran: state/transition/... name for which goodness-of-fit will be calculated (first element of by_param key tuple).
+ Example: `'foo'`
+
+ :param model_attribute: attribute for which goodness-of-fit will be calculated.
+ Example: `'bar'`
+
+ :param param_index: index of the parameter used as model input
+ :param safe_functions_enabled: Include "safe" variants of functions with limited argument range.
+ :param param_filter: Only use measurements whose parameters match param_filter for fitting.
+ """
+
+ functions = analytic.functions(safe_functions_enabled=safe_functions_enabled)
+
+ for param_key in filter(lambda x: x[0] == state_or_tran, by_param.keys()):
+ # We might remove elements from 'functions' while iterating over
+ # its keys. A generator will not allow this, so we need to
+ # convert to a list.
+ function_names = list(functions.keys())
+ for function_name in function_names:
+ function_object = functions[function_name]
+ if is_numeric(param_key[1][param_index]) and not function_object.is_valid(
+ param_key[1][param_index]
+ ):
+ functions.pop(function_name, None)
+
+ raw_results = dict()
+ raw_results_by_param = dict()
+ ref_results = {"mean": list(), "median": list()}
+ results = dict()
+ results_by_param = dict()
+
+ seen_parameter_combinations = set()
+
+ # for each parameter combination:
+ for param_key in filter(
+ lambda x: x[0] == state_or_tran
+ and remove_index_from_tuple(x[1], param_index)
+ not in seen_parameter_combinations
+ and len(by_param[x]["param"])
+ and match_parameter_values(by_param[x]["param"][0], param_filter),
+ by_param.keys(),
+ ):
+ X = []
+ Y = []
+ num_valid = 0
+ num_total = 0
+
+ # Ensure that each parameter combination is only optimized once. Otherwise, with parameters (1, 2, 5), (1, 3, 5), (1, 4, 5) and param_index == 1,
+        # the parameter combination (1, *, 5) would be optimized three times, both wasting time and biasing results towards more frequently occurring combinations of non-param_index parameters
+ seen_parameter_combinations.add(
+ remove_index_from_tuple(param_key[1], param_index)
+ )
+
+ # for each value of the parameter denoted by param_index (all other parameters remain the same):
+ for k, v in filter(
+ lambda kv: param_slice_eq(kv[0], param_key, param_index), by_param.items()
+ ):
+ num_total += 1
+ if is_numeric(k[1][param_index]):
+ num_valid += 1
+ X.extend([float(k[1][param_index])] * len(v[model_attribute]))
+ Y.extend(v[model_attribute])
+
+ if num_valid > 2:
+ X = np.array(X)
+ Y = np.array(Y)
+ other_parameters = remove_index_from_tuple(k[1], param_index)
+ raw_results_by_param[other_parameters] = dict()
+ results_by_param[other_parameters] = dict()
+ for function_name, param_function in functions.items():
+ if function_name not in raw_results:
+ raw_results[function_name] = dict()
+ error_function = param_function.error_function
+ res = optimize.least_squares(
+ error_function, [0, 1], args=(X, Y), xtol=2e-15
+ )
+ measures = regression_measures(param_function.eval(res.x, X), Y)
+ raw_results_by_param[other_parameters][function_name] = measures
+ for measure, error_rate in measures.items():
+ if measure not in raw_results[function_name]:
+ raw_results[function_name][measure] = list()
+ raw_results[function_name][measure].append(error_rate)
+ # print(function_name, res, measures)
+ mean_measures = aggregate_measures(np.mean(Y), Y)
+ ref_results["mean"].append(mean_measures["rmsd"])
+ raw_results_by_param[other_parameters]["mean"] = mean_measures
+ median_measures = aggregate_measures(np.median(Y), Y)
+ ref_results["median"].append(median_measures["rmsd"])
+ raw_results_by_param[other_parameters]["median"] = median_measures
+
+ if not len(ref_results["mean"]):
+ # Insufficient data for fitting
+ # print('[W] Insufficient data for fitting {}/{}/{}'.format(state_or_tran, model_attribute, param_index))
+ return {"best": None, "best_rmsd": np.inf, "results": results}
+
+ for (
+ other_parameter_combination,
+ other_parameter_results,
+ ) in raw_results_by_param.items():
+ best_fit_val = np.inf
+ best_fit_name = None
+ results = dict()
+ for function_name, result in other_parameter_results.items():
+ if len(result) > 0:
+ results[function_name] = result
+ rmsd = result["rmsd"]
+ if rmsd < best_fit_val:
+ best_fit_val = rmsd
+ best_fit_name = function_name
+ results_by_param[other_parameter_combination] = {
+ "best": best_fit_name,
+ "best_rmsd": best_fit_val,
+ "mean_rmsd": results["mean"]["rmsd"],
+ "median_rmsd": results["median"]["rmsd"],
+ "results": results,
+ }
+
+ best_fit_val = np.inf
+ best_fit_name = None
+ results = dict()
+ for function_name, result in raw_results.items():
+ if len(result) > 0:
+ results[function_name] = {}
+ for measure in result.keys():
+ results[function_name][measure] = np.mean(result[measure])
+ rmsd = results[function_name]["rmsd"]
+ if rmsd < best_fit_val:
+ best_fit_val = rmsd
+ best_fit_name = function_name
+
+ return {
+ "best": best_fit_name,
+ "best_rmsd": best_fit_val,
+ "mean_rmsd": np.mean(ref_results["mean"]),
+ "median_rmsd": np.mean(ref_results["median"]),
+ "results": results,
+ "results_by_other_param": results_by_param,
+ }
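+
+# Sketch: for the docstring's by_param example above (once 'param'
+# bookkeeping entries are added), _try_fits(by_param, 'foo', 'bar', 1)
+# varies the second parameter (2, 4, 6) and should report a linear
+# function as the best fit, since 'bar' equals that parameter.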
+
+
+def _num_args_from_by_name(by_name):
+ num_args = dict()
+ for key, value in by_name.items():
+ if "args" in value:
+ num_args[key] = len(value["args"][0])
+ return num_args
+
+
+class AnalyticModel:
+ u"""
+ Parameter-aware analytic energy/data size/... model.
+
+ Supports both static and parameter-based model attributes, and automatic detection of parameter-dependence.
+
+    The model relies on two internal data structures: by_name and by_param.
+    These provide measurements aggregated by (function/state/...) name
+ and (for by_param) parameter values. Layout:
+ dictionary with one key per name ('send', 'TX', ...) or
+ one key per name and parameter combination
+ (('send', (1, 2)), ('send', (2, 3)), ('TX', (1, 2)), ('TX', (2, 3)), ...).
+
+ Parameter values must be ordered corresponding to the lexically sorted parameter names.
+
+ Each element is in turn a dict with the following elements:
+ - param: list of parameter values in each measurement (-> list of lists)
+ - attributes: list of keys that should be analyzed,
+ e.g. ['power', 'duration']
+ - for each attribute mentioned in 'attributes': A list with measurements.
+    All lists except for 'attributes' must have the same length.
+
+    For example:
+    parameters = ['foo_count', 'irrelevant']
+    by_name = {
+        'send' : {
+            'foo' : [1, 1, 2],
+            'bar' : [5, 6, 7],
+            'attributes' : ['foo', 'bar'],
+            'param' : [[1, 0], [1, 0], [2, 0]]
+        }
+    }
+
+    methods:
+    get_static -- return static (parameter-unaware) model.
+    get_param_lut -- return parameter-aware look-up-table model. Cannot model parameter combinations not present in by_param.
+    get_fitted -- return parameter-aware model using fitted functions for behaviour prediction.
+    assess -- calculate model quality
+
+    variables:
+    names -- function/state/... names (i.e., the keys of by_name)
+    parameters -- parameter names
+    stats -- ParamStats object providing parameter-dependency statistics for each name and attribute
+ """
+
+ def __init__(
+ self,
+ by_name,
+ parameters,
+ arg_count=None,
+ function_override=dict(),
+ use_corrcoef=False,
+ ):
+ """
+ Create a new AnalyticModel and compute parameter statistics.
+
+ :param by_name: measurements aggregated by (function/state/...) name.
+ Layout: dictionary with one key per name ('send', 'TX', ...) or
+ one key per name and parameter combination
+ (('send', (1, 2)), ('send', (2, 3)), ('TX', (1, 2)), ('TX', (2, 3)), ...).
+
+ Parameter values must be ordered corresponding to the lexically sorted parameter names.
+
+ Each element is in turn a dict with the following elements:
+ - param: list of parameter values in each measurement (-> list of lists)
+ - attributes: list of keys that should be analyzed,
+ e.g. ['power', 'duration']
+ - for each attribute mentioned in 'attributes': A list with measurements.
+            All lists except for 'attributes' must have the same length.
+
+            For example:
+            parameters = ['foo_count', 'irrelevant']
+            by_name = {
+                'send' : {
+                    'foo' : [1, 1, 2],
+                    'duration' : [5, 6, 7],
+                    'attributes' : ['foo', 'duration'],
+                    'param' : [[1, 0], [1, 0], [2, 0]]
+                    # foo_count-^  ^-irrelevant
+                }
+            }
+ :param parameters: List of parameter names
+ :param function_override: dict of overrides for automatic parameter function generation.
+ If (state or transition name, model attribute) is present in function_override,
+ the corresponding text string is the function used for analytic (parameter-aware/fitted)
+ modeling of this attribute. It is passed to AnalyticFunction, see
+ there for the required format. Note that this happens regardless of
+ parameter dependency detection: The provided analytic function will be assigned
+ even if it seems like the model attribute is static / parameter-independent.
+ :param use_corrcoef: use correlation coefficient instead of stddev comparison to detect whether a model attribute depends on a parameter
+ """
+ self.cache = dict()
+ self.by_name = by_name
+ self.by_param = by_name_to_by_param(by_name)
+ self.names = sorted(by_name.keys())
+ self.parameters = sorted(parameters)
+ self.function_override = function_override.copy()
+ self._use_corrcoef = use_corrcoef
+ self._num_args = arg_count
+ if self._num_args is None:
+ self._num_args = _num_args_from_by_name(by_name)
+
+ self.stats = ParamStats(
+ self.by_name,
+ self.by_param,
+ self.parameters,
+ self._num_args,
+ use_corrcoef=use_corrcoef,
+ )
+
+ def _get_model_from_dict(self, model_dict, model_function):
+ model = {}
+ for name, elem in model_dict.items():
+ model[name] = {}
+ for key in elem["attributes"]:
+ try:
+ model[name][key] = model_function(elem[key])
+ except RuntimeWarning:
+ logger.warning("Got no data for {} {}".format(name, key))
+ except FloatingPointError as fpe:
+ logger.warning("Got no data for {} {}: {}".format(name, key, fpe))
+ return model
+
+ def param_index(self, param_name):
+ if param_name in self.parameters:
+ return self.parameters.index(param_name)
+ return len(self.parameters) + int(param_name)
+
+ def param_name(self, param_index):
+ if param_index < len(self.parameters):
+ return self.parameters[param_index]
+ return str(param_index)
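+
+    # Example: with parameters == ['bar', 'foo'], param_index('foo') == 1;
+    # function arguments are addressed by their position after the
+    # parameter names, e.g. param_index('0') == 2 for the first argument.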
+
+ def get_static(self, use_mean=False):
+ """
+ Get static model function: name, attribute -> model value.
+
+        Uses the median of by_name for modeling, unless use_mean is set.
+ """
+ getter_function = np.median
+
+ if use_mean:
+ getter_function = np.mean
+
+ static_model = self._get_model_from_dict(self.by_name, getter_function)
+
+ def static_model_getter(name, key, **kwargs):
+ return static_model[name][key]
+
+ return static_model_getter
+
+ def get_param_lut(self, fallback=False):
+ """
+ Get parameter-look-up-table model function: name, attribute, parameter values -> model value.
+
+ The function can only give model values for parameter combinations
+ present in by_param. By default, it raises KeyError for other values.
+
+ arguments:
+ fallback -- Fall back to the (non-parameter-aware) static model when encountering unknown parameter values
+ """
+ static_model = self._get_model_from_dict(self.by_name, np.median)
+ lut_model = self._get_model_from_dict(self.by_param, np.median)
+
+ def lut_median_getter(name, key, param, arg=[], **kwargs):
+ param.extend(map(soft_cast_int, arg))
+ try:
+ return lut_model[(name, tuple(param))][key]
+ except KeyError:
+ if fallback:
+ return static_model[name][key]
+ raise
+
+ return lut_median_getter
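+
+    # Example (sketch): if by_param contains the key ('send', (1, 0)),
+    #   lut = model.get_param_lut()
+    #   lut('send', 'duration', param=[1, 0])
+    # returns the median 'duration' of those measurements; unknown
+    # parameter combinations raise KeyError unless fallback=True.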
+
+ def get_fitted(self, safe_functions_enabled=False):
+ """
+        Get parameter-aware model function and model information function.
+
+ Returns two functions:
+ model_function(name, attribute, param=parameter values) -> model value.
+ model_info(name, attribute) -> {'fit_result' : ..., 'function' : ... } or None
+ """
+ if "fitted_model_getter" in self.cache and "fitted_info_getter" in self.cache:
+ return self.cache["fitted_model_getter"], self.cache["fitted_info_getter"]
+
+ static_model = self._get_model_from_dict(self.by_name, np.median)
+ param_model = dict([[name, {}] for name in self.by_name.keys()])
+ paramfit = ParallelParamFit(self.by_param)
+
+ for name in self.by_name.keys():
+ for attribute in self.by_name[name]["attributes"]:
+ for param_index, param in enumerate(self.parameters):
+ if self.stats.depends_on_param(name, attribute, param):
+ paramfit.enqueue(name, attribute, param_index, param, False)
+ if arg_support_enabled and name in self._num_args:
+ for arg_index in range(self._num_args[name]):
+ if self.stats.depends_on_arg(name, attribute, arg_index):
+ paramfit.enqueue(
+ name,
+ attribute,
+ len(self.parameters) + arg_index,
+ arg_index,
+ False,
+ )
+
+ paramfit.fit()
+
+ for name in self.by_name.keys():
+ num_args = 0
+ if name in self._num_args:
+ num_args = self._num_args[name]
+ for attribute in self.by_name[name]["attributes"]:
+ fit_result = paramfit.get_result(name, attribute)
+
+ if (name, attribute) in self.function_override:
+ function_str = self.function_override[(name, attribute)]
+ x = AnalyticFunction(function_str, self.parameters, num_args)
+ x.fit(self.by_param, name, attribute)
+ if x.fit_success:
+ param_model[name][attribute] = {
+ "fit_result": fit_result,
+ "function": x,
+ }
+ elif len(fit_result.keys()):
+ x = analytic.function_powerset(
+ fit_result, self.parameters, num_args
+ )
+ x.fit(self.by_param, name, attribute)
+
+ if x.fit_success:
+ param_model[name][attribute] = {
+ "fit_result": fit_result,
+ "function": x,
+ }
+
+ def model_getter(name, key, **kwargs):
+ if "arg" in kwargs and "param" in kwargs:
+ kwargs["param"].extend(map(soft_cast_int, kwargs["arg"]))
+ if key in param_model[name]:
+ param_list = kwargs["param"]
+ param_function = param_model[name][key]["function"]
+ if param_function.is_predictable(param_list):
+ return param_function.eval(param_list)
+ return static_model[name][key]
+
+ def info_getter(name, key):
+ if key in param_model[name]:
+ return param_model[name][key]
+ return None
+
+ self.cache["fitted_model_getter"] = model_getter
+ self.cache["fitted_info_getter"] = info_getter
+
+ return model_getter, info_getter
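+
+    # Example (sketch):
+    #   model_function, model_info = model.get_fitted()
+    #   model_function('send', 'duration', param=[1, 0])
+    #   model_info('send', 'duration')  # fit details, or None if static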
+
+ def assess(self, model_function):
+ """
+ Calculate MAE, SMAPE, etc. of model_function for each by_name entry.
+
+ state/transition/... name and parameter values are fed into model_function.
+ The by_name entries of this AnalyticModel are used as ground truth and
+ compared with the values predicted by model_function.
+
+ For proper model assessments, the data used to generate model_function
+ and the data fed into this AnalyticModel instance must be mutually
+ exclusive (e.g. by performing cross validation). Otherwise,
+ overfitting cannot be detected.
+ """
+ detailed_results = {}
+ for name, elem in sorted(self.by_name.items()):
+ detailed_results[name] = {}
+ for attribute in elem["attributes"]:
+ predicted_data = np.array(
+ list(
+ map(
+ lambda i: model_function(
+ name, attribute, param=elem["param"][i]
+ ),
+ range(len(elem[attribute])),
+ )
+ )
+ )
+ measures = regression_measures(predicted_data, elem[attribute])
+ detailed_results[name][attribute] = measures
+
+ return {"by_name": detailed_results}
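+
+    # Example (sketch): evaluate the static model on the training data.
+    #   quality = model.assess(model.get_static())
+    #   quality['by_name']['send']['duration']['smape']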
+
+ def to_json(self):
+ # TODO
+ pass
+
+
+class PTAModel:
+ u"""
+ Parameter-aware PTA-based energy model.
+
+ Supports both static and parameter-based model attributes, and automatic detection of parameter-dependence.
+
+ The model heavily relies on two internal data structures:
+ PTAModel.by_name and PTAModel.by_param.
+
+ These provide measurements aggregated by state/transition name
+ and (for by_param) parameter values. Layout:
+ dictionary with one key per state/transition ('send', 'TX', ...) or
+ one key per state/transition and parameter combination
+ (('send', (1, 2)), ('send', (2, 3)), ('TX', (1, 2)), ('TX', (2, 3)), ...).
+ For by_param, parameter values are ordered corresponding to the lexically sorted parameter names.
+
+ Each element is in turn a dict with the following elements:
+ - isa: 'state' or 'transition'
+ - power: list of mean power measurements in µW
+ - duration: list of durations in µs
+ - power_std: list of stddev of power per state/transition
+ - energy: consumed energy (power*duration) in pJ
+ - paramkeys: list of parameter names in each measurement (-> list of lists)
+ - param: list of parameter values in each measurement (-> list of lists)
+ - attributes: list of keys that should be analyzed,
+ e.g. ['power', 'duration']
+ additionally, only if isa == 'transition':
+ - timeout: list of duration of previous state in µs
+ - rel_energy_prev: transition energy relative to previous state mean power in pJ
+ - rel_energy_next: transition energy relative to next state mean power in pJ
+ """
+
+ def __init__(
+ self,
+ by_name,
+ parameters,
+ arg_count,
+ traces=[],
+ ignore_trace_indexes=[],
+ function_override={},
+ use_corrcoef=False,
+ pta=None,
+ ):
+ """
+ Prepare a new PTA energy model.
+
+ Actual model generation is done on-demand by calling the respective functions.
+
+ arguments:
+ by_name -- state/transition measurements aggregated by name, as returned by pta_trace_to_aggregate.
+ parameters -- list of parameter names, as returned by pta_trace_to_aggregate
+ arg_count -- function arguments, as returned by pta_trace_to_aggregate
+ traces -- list of preprocessed DFA traces, as returned by RawData.get_preprocessed_data()
+ ignore_trace_indexes -- list of trace indexes. The corresponding traces will be ignored.
+ function_override -- dict of overrides for automatic parameter function generation.
+ If (state or transition name, model attribute) is present in function_override,
+ the corresponding text string is the function used for analytic (parameter-aware/fitted)
+ modeling of this attribute. It is passed to AnalyticFunction, see
+ there for the required format. Note that this happens regardless of
+ parameter dependency detection: The provided analytic function will be assigned
+ even if it seems like the model attribute is static / parameter-independent.
+ use_corrcoef -- use correlation coefficient instead of stddev comparison
+ to detect whether a model attribute depends on a parameter
+ pta -- hardware model as `PTA` object
+ """
+ self.by_name = by_name
+ self.by_param = by_name_to_by_param(by_name)
+ self._parameter_names = sorted(parameters)
+ self._num_args = arg_count
+ self._use_corrcoef = use_corrcoef
+ self.traces = traces
+ self.stats = ParamStats(
+ self.by_name,
+ self.by_param,
+ self._parameter_names,
+ self._num_args,
+ self._use_corrcoef,
+ )
+ self.cache = {}
+ np.seterr("raise")
+ self.function_override = function_override.copy()
+ self.pta = pta
+ self.ignore_trace_indexes = ignore_trace_indexes
+ self._aggregate_to_ndarray(self.by_name)
+
+ def _aggregate_to_ndarray(self, aggregate):
+ for elem in aggregate.values():
+ for key in elem["attributes"]:
+ elem[key] = np.array(elem[key])
+
+ # This heuristic is very similar to the "function is not much better than
+ # median" checks in get_fitted. So far, doing it here as well is mostly
+ # a performance and not an algorithm quality decision.
+ # --df, 2018-04-18
+ def depends_on_param(self, state_or_trans, key, param):
+ return self.stats.depends_on_param(state_or_trans, key, param)
+
+ # See notes on depends_on_param
+ def depends_on_arg(self, state_or_trans, key, param):
+ return self.stats.depends_on_arg(state_or_trans, key, param)
+
+ def _get_model_from_dict(self, model_dict, model_function):
+ model = {}
+ for name, elem in model_dict.items():
+ model[name] = {}
+ for key in elem["attributes"]:
+ try:
+ model[name][key] = model_function(elem[key])
+ except RuntimeWarning:
+ logger.warning("Got no data for {} {}".format(name, key))
+ except FloatingPointError as fpe:
+ logger.warning("Got no data for {} {}: {}".format(name, key, fpe))
+ return model
+
+ def get_static(self, use_mean=False):
+ """
+ Get static model function: name, attribute -> model value.
+
+ Uses the median of by_name for modeling, unless `use_mean` is set.
+ """
+ getter_function = np.median
+
+ if use_mean:
+ getter_function = np.mean
+
+ static_model = self._get_model_from_dict(self.by_name, getter_function)
+
+ def static_model_getter(name, key, **kwargs):
+ return static_model[name][key]
+
+ return static_model_getter
+
+ def get_param_lut(self, fallback=False):
+ """
+ Get parameter-look-up-table model function: name, attribute, parameter values -> model value.
+
+ The function can only give model values for parameter combinations
+ present in by_param. By default, it raises KeyError for other values.
+
+ arguments:
+ fallback -- Fall back to the (non-parameter-aware) static model when encountering unknown parameter values
+ """
+ static_model = self._get_model_from_dict(self.by_name, np.median)
+ lut_model = self._get_model_from_dict(self.by_param, np.median)
+
+ def lut_median_getter(name, key, param, arg=[], **kwargs):
+ param.extend(map(soft_cast_int, arg))
+ try:
+ return lut_model[(name, tuple(param))][key]
+ except KeyError:
+ if fallback:
+ return static_model[name][key]
+ raise
+
+ return lut_median_getter
+
+ def param_index(self, param_name):
+ if param_name in self._parameter_names:
+ return self._parameter_names.index(param_name)
+ return len(self._parameter_names) + int(param_name)
+
+ def param_name(self, param_index):
+ if param_index < len(self._parameter_names):
+ return self._parameter_names[param_index]
+ return str(param_index)
+
+ def get_fitted(self, safe_functions_enabled=False):
+ """
+ Get parameter-aware model function and model information function.
+
+ Returns two functions:
+ model_function(name, attribute, param=parameter values) -> model value.
+ model_info(name, attribute) -> {'fit_result' : ..., 'function' : ... } or None
+ """
+ if "fitted_model_getter" in self.cache and "fitted_info_getter" in self.cache:
+ return self.cache["fitted_model_getter"], self.cache["fitted_info_getter"]
+
+ static_model = self._get_model_from_dict(self.by_name, np.median)
+ param_model = dict(
+ [[state_or_tran, {}] for state_or_tran in self.by_name.keys()]
+ )
+ paramfit = ParallelParamFit(self.by_param)
+ for state_or_tran in self.by_name.keys():
+ for model_attribute in self.by_name[state_or_tran]["attributes"]:
+ fit_results = {}
+ for parameter_index, parameter_name in enumerate(self._parameter_names):
+ if self.depends_on_param(
+ state_or_tran, model_attribute, parameter_name
+ ):
+ paramfit.enqueue(
+ state_or_tran,
+ model_attribute,
+ parameter_index,
+ parameter_name,
+ safe_functions_enabled,
+ )
+ if (
+ arg_support_enabled
+ and self.by_name[state_or_tran]["isa"] == "transition"
+ ):
+ for arg_index in range(self._num_args[state_or_tran]):
+ if self.depends_on_arg(
+ state_or_tran, model_attribute, arg_index
+ ):
+ paramfit.enqueue(
+ state_or_tran,
+ model_attribute,
+ len(self._parameter_names) + arg_index,
+ arg_index,
+ safe_functions_enabled,
+ )
+ paramfit.fit()
+
+ for state_or_tran in self.by_name.keys():
+ num_args = 0
+ if (
+ arg_support_enabled
+ and self.by_name[state_or_tran]["isa"] == "transition"
+ ):
+ num_args = self._num_args[state_or_tran]
+ for model_attribute in self.by_name[state_or_tran]["attributes"]:
+ fit_results = paramfit.get_result(state_or_tran, model_attribute)
+
+ if (state_or_tran, model_attribute) in self.function_override:
+ function_str = self.function_override[
+ (state_or_tran, model_attribute)
+ ]
+ x = AnalyticFunction(function_str, self._parameter_names, num_args)
+ x.fit(self.by_param, state_or_tran, model_attribute)
+ if x.fit_success:
+ param_model[state_or_tran][model_attribute] = {
+ "fit_result": fit_results,
+ "function": x,
+ }
+ elif len(fit_results.keys()):
+ x = analytic.function_powerset(
+ fit_results, self._parameter_names, num_args
+ )
+ x.fit(self.by_param, state_or_tran, model_attribute)
+ if x.fit_success:
+ param_model[state_or_tran][model_attribute] = {
+ "fit_result": fit_results,
+ "function": x,
+ }
+
+ def model_getter(name, key, **kwargs):
+ if "arg" in kwargs and "param" in kwargs:
+ kwargs["param"].extend(map(soft_cast_int, kwargs["arg"]))
+ if key in param_model[name]:
+ param_list = kwargs["param"]
+ param_function = param_model[name][key]["function"]
+ if param_function.is_predictable(param_list):
+ return param_function.eval(param_list)
+ return static_model[name][key]
+
+ def info_getter(name, key):
+ if key in param_model[name]:
+ return param_model[name][key]
+ return None
+
+ self.cache["fitted_model_getter"] = model_getter
+ self.cache["fitted_info_getter"] = info_getter
+
+ return model_getter, info_getter
+
+ def to_json(self):
+ static_model = self.get_static()
+ static_quality = self.assess(static_model)
+ param_model, param_info = self.get_fitted()
+ analytic_quality = self.assess(param_model)
+ pta = self.pta
+ if pta is None:
+ pta = PTA(self.states(), parameters=self._parameter_names)
+ pta.update(
+ static_model,
+ param_info,
+ static_error=static_quality["by_name"],
+ analytic_error=analytic_quality["by_name"],
+ )
+ return pta.to_json()
+
+ def states(self):
+ """Return sorted list of state names."""
+ return sorted(
+ list(
+ filter(lambda k: self.by_name[k]["isa"] == "state", self.by_name.keys())
+ )
+ )
+
+ def transitions(self):
+ """Return sorted list of transition names."""
+ return sorted(
+ list(
+ filter(
+ lambda k: self.by_name[k]["isa"] == "transition",
+ self.by_name.keys(),
+ )
+ )
+ )
+
+ def states_and_transitions(self):
+ """Return list of states and transition names."""
+ ret = self.states()
+ ret.extend(self.transitions())
+ return ret
+
+ def parameters(self):
+ return self._parameter_names
+
+ def attributes(self, state_or_trans):
+ return self.by_name[state_or_trans]["attributes"]
+
+ def assess(self, model_function):
+ """
+ Calculate MAE, SMAPE, etc. of model_function for each by_name entry.
+
+ state/transition/... name and parameter values are fed into model_function.
+ The by_name entries of this PTAModel are used as ground truth and
+ compared with the values predicted by model_function.
+
+ For proper model assessments, the data used to generate model_function
+        and the data fed into this PTAModel instance must be mutually
+ exclusive (e.g. by performing cross validation). Otherwise,
+ overfitting cannot be detected.
+ """
+ detailed_results = {}
+ for name, elem in sorted(self.by_name.items()):
+ detailed_results[name] = {}
+ for key in elem["attributes"]:
+ predicted_data = np.array(
+ list(
+ map(
+ lambda i: model_function(name, key, param=elem["param"][i]),
+ range(len(elem[key])),
+ )
+ )
+ )
+ measures = regression_measures(predicted_data, elem[key])
+ detailed_results[name][key] = measures
+
+ return {"by_name": detailed_results}
+
+ def assess_states(
+ self, model_function, model_attribute="power", distribution: dict = None
+ ):
+ """
+        Calculate overall model error, assuming a uniform state distribution unless `distribution` is given.
+ """
+ # TODO calculate mean power draw for distribution and use it to
+ # calculate relative error from MAE combination
+ model_quality = self.assess(model_function)
+ num_states = len(self.states())
+ if distribution is None:
+ distribution = dict(map(lambda x: [x, 1 / num_states], self.states()))
+
+ if not np.isclose(sum(distribution.values()), 1):
+ raise ValueError(
+ "distribution must be a probability distribution with sum 1"
+ )
+
+ # total_value = None
+ # try:
+ # total_value = sum(map(lambda x: model_function(x, model_attribute) * distribution[x], self.states()))
+ # except KeyError:
+ # pass
+
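+        # Combine per-state errors into one value: weight each state's MAE
+        # by its probability and take the Euclidean norm,
+        # total_error = sqrt( sum_s (mae_s * p_s)^2 )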
+ total_error = np.sqrt(
+ sum(
+ map(
+ lambda x: np.square(
+ model_quality["by_name"][x][model_attribute]["mae"]
+ * distribution[x]
+ ),
+ self.states(),
+ )
+ )
+ )
+ return total_error
+
+ def assess_on_traces(self, model_function):
+ """
+ Calculate MAE, SMAPE, etc. of model_function for each trace known to this PTAModel instance.
+
+ :returns: dict of `duration_by_trace`, `energy_by_trace`, `timeout_by_trace`, `rel_energy_by_trace` and `state_energy_by_trace`.
+ Each entry holds regression measures for the corresponding measure. Note that the determined model quality heavily depends on the
+ traces: small-ish absolute errors in states which frequently occur may have more effect than large absolute errors in rarely occuring states
+ """
+ model_energy_list = []
+ real_energy_list = []
+ model_rel_energy_list = []
+ model_state_energy_list = []
+ model_duration_list = []
+ real_duration_list = []
+ model_timeout_list = []
+ real_timeout_list = []
+
+ for trace in self.traces:
+ if trace["id"] not in self.ignore_trace_indexes:
+ for rep_id in range(len(trace["trace"][0]["offline"])):
+ model_energy = 0.0
+ real_energy = 0.0
+ model_rel_energy = 0.0
+ model_state_energy = 0.0
+ model_duration = 0.0
+ real_duration = 0.0
+ model_timeout = 0.0
+ real_timeout = 0.0
+ for i, trace_part in enumerate(trace["trace"]):
+ name = trace_part["name"]
+ prev_name = trace["trace"][i - 1]["name"]
+ isa = trace_part["isa"]
+ if name != "UNINITIALIZED":
+ try:
+ param = trace_part["offline_aggregates"]["param"][
+ rep_id
+ ]
+ prev_param = trace["trace"][i - 1][
+ "offline_aggregates"
+ ]["param"][rep_id]
+ power = trace_part["offline"][rep_id]["uW_mean"]
+ duration = trace_part["offline"][rep_id]["us"]
+ prev_duration = trace["trace"][i - 1]["offline"][
+ rep_id
+ ]["us"]
+ real_energy += power * duration
+ if isa == "state":
+ model_energy += (
+ model_function(name, "power", param=param)
+ * duration
+ )
+ else:
+ model_energy += model_function(
+ name, "energy", param=param
+ )
+ # If i == 1, the previous state was UNINITIALIZED, for which we do not have model data
+ if i == 1:
+ model_rel_energy += model_function(
+ name, "energy", param=param
+ )
+ else:
+ model_rel_energy += model_function(
+ prev_name, "power", param=prev_param
+ ) * (prev_duration + duration)
+ model_state_energy += model_function(
+ prev_name, "power", param=prev_param
+ ) * (prev_duration + duration)
+ model_rel_energy += model_function(
+ name, "rel_energy_prev", param=param
+ )
+ real_duration += duration
+ model_duration += model_function(
+ name, "duration", param=param
+ )
+ if (
+ "plan" in trace_part
+ and trace_part["plan"]["level"] == "epilogue"
+ ):
+ real_timeout += trace_part["offline"][rep_id][
+ "timeout"
+ ]
+ model_timeout += model_function(
+ name, "timeout", param=param
+ )
+ except KeyError:
+ # if states/transitions have been removed via --filter-param, this is harmless
+ pass
+ real_energy_list.append(real_energy)
+ model_energy_list.append(model_energy)
+ model_rel_energy_list.append(model_rel_energy)
+ model_state_energy_list.append(model_state_energy)
+ real_duration_list.append(real_duration)
+ model_duration_list.append(model_duration)
+ real_timeout_list.append(real_timeout)
+ model_timeout_list.append(model_timeout)
+
+ return {
+ "duration_by_trace": regression_measures(
+ np.array(model_duration_list), np.array(real_duration_list)
+ ),
+ "energy_by_trace": regression_measures(
+ np.array(model_energy_list), np.array(real_energy_list)
+ ),
+ "timeout_by_trace": regression_measures(
+ np.array(model_timeout_list), np.array(real_timeout_list)
+ ),
+ "rel_energy_by_trace": regression_measures(
+ np.array(model_rel_energy_list), np.array(real_energy_list)
+ ),
+ "state_energy_by_trace": regression_measures(
+ np.array(model_state_energy_list), np.array(real_energy_list)
+ ),
+ }
diff --git a/lib/parameters.py b/lib/parameters.py
index 8b562b6..5c6b978 100644
--- a/lib/parameters.py
+++ b/lib/parameters.py
@@ -1,11 +1,15 @@
import itertools
+import logging
import numpy as np
+import warnings
from collections import OrderedDict
from copy import deepcopy
from multiprocessing import Pool
from .utils import remove_index_from_tuple, is_numeric
from .utils import filter_aggregate_by_param, by_name_to_by_param
+logger = logging.getLogger(__name__)
+
def distinct_param_values(by_name, state_or_tran):
"""
@@ -78,25 +82,7 @@ def _reduce_param_matrix(matrix: np.ndarray, parameter_names: list) -> list:
return list()
-def _codependent_parameters(param, lut_by_param_values, std_by_param_values):
- """
- Return list of parameters which affect whether a parameter affects a model attribute or not.
- """
- return list()
- safe_div = np.vectorize(lambda x, y: 0.0 if x == 0 else 1 - x / y)
- ratio_by_value = safe_div(lut_by_param_values, std_by_param_values)
- err_mode = np.seterr("ignore")
- dep_by_value = ratio_by_value > 0.5
- np.seterr(**err_mode)
-
- other_param_list = list(filter(lambda x: x != param, self._parameter_names))
- influencer_parameters = _reduce_param_matrix(dep_by_value, other_param_list)
- return influencer_parameters
-
-
-def _std_by_param(
- by_param, all_param_values, state_or_tran, attribute, param_index, verbose=False
-):
+def _std_by_param(by_param, all_param_values, state_or_tran, attribute, param_index):
u"""
Calculate standard deviations for a static model where all parameters but `param_index` are constant.
@@ -162,12 +148,11 @@ def _std_by_param(
# vprint(verbose, '[W] parameter value partition for {} is empty'.format(param_value))
if np.all(np.isnan(stddev_matrix)):
- print(
- "[W] {}/{} parameter #{} has no data partitions -- how did this even happen?".format(
- state_or_tran, attribute, param_index
+ warnings.warn(
+ "{}/{} parameter #{} has no data partitions. stddev_matrix = {}".format(
+ state_or_tran, attribute, param_index, stddev_matrix
)
)
- print("stddev_matrix = {}".format(stddev_matrix))
return stddev_matrix, 0.0
return (
@@ -202,13 +187,13 @@ def _corr_by_param(by_name, state_or_trans, attribute, param_index):
# -> assume no correlation
return 0.0
except ValueError:
- print(
- "[!] Exception in _corr_by_param(by_name, state_or_trans={}, attribute={}, param_index={})".format(
+ logger.error(
+ "ValueError in _corr_by_param(by_name, state_or_trans={}, attribute={}, param_index={})".format(
state_or_trans, attribute, param_index
)
)
- print(
- "[!] while executing np.corrcoef(by_name[{}][{}]={}, {}))".format(
+ logger.error(
+ "while executing np.corrcoef(by_name[{}][{}]={}, {}))".format(
state_or_trans,
attribute,
by_name[state_or_trans][attribute],
@@ -229,7 +214,6 @@ def _compute_param_statistics(
attribute,
distinct_values,
distinct_values_by_param_index,
- verbose=False,
):
"""
Compute standard deviation and correlation coefficient for various data partitions.
@@ -252,7 +236,6 @@ def _compute_param_statistics(
:param arg_count: dict providing the number of functions args ("local parameters") for each function.
:param state_or_trans: state or transition name, e.g. 'send' or 'TX'
:param attribute: model attribute, e.g. 'power' or 'duration'
- :param verbose: print warning if some parameter partitions are too small for fitting
:returns: a dict with the following content:
std_static -- static parameter-unaware model error: stddev of by_name[state_or_trans][attribute]
@@ -267,6 +250,8 @@ def _compute_param_statistics(
corr_by_param -- correlation coefficient
corr_by_arg -- same, but ignoring a single function argument
Only set if state_or_trans appears in arg_count, empty dict otherwise.
+ depends_on_param -- dict(parameter_name -> Bool). True if /attribute/ behaviour probably depends on /parameter_name/
+ depends_on_arg -- list(bool). Same, but for function arguments, if any.
"""
ret = {
"std_static": np.std(by_name[state_or_trans][attribute]),
@@ -287,7 +272,6 @@ def _compute_param_statistics(
"corr_by_arg": [],
"depends_on_param": {},
"depends_on_arg": [],
- "param_data": {},
}
np.seterr("raise")
@@ -299,7 +283,6 @@ def _compute_param_statistics(
state_or_trans,
attribute,
param_idx,
- verbose,
)
ret["std_by_param"][param] = mean_std
ret["std_by_param_values"][param] = std_matrix
@@ -314,49 +297,6 @@ def _compute_param_statistics(
ret["std_param_lut"],
)
- if ret["depends_on_param"][param]:
- ret["param_data"][param] = {
- "codependent_parameters": _codependent_parameters(
- param, lut_matrix, std_matrix
- ),
- "depends_for_codependent_value": dict(),
- }
-
- # calculate parameter dependence for individual values of codependent parameters
- codependent_param_values = list()
- for codependent_param in ret["param_data"][param]["codependent_parameters"]:
- codependent_param_values.append(distinct_values[codependent_param])
- for combi in itertools.product(*codependent_param_values):
- by_name_part = deepcopy(by_name)
- filter_list = list(
- zip(ret["param_data"][param]["codependent_parameters"], combi)
- )
- filter_aggregate_by_param(by_name_part, parameter_names, filter_list)
- by_param_part = by_name_to_by_param(by_name_part)
- # there may be no data for this specific parameter value combination
- if state_or_trans in by_name_part:
- part_corr = _corr_by_param(
- by_name_part, state_or_trans, attribute, param_idx
- )
- part_std_lut = np.mean(
- [
- np.std(by_param_part[x][attribute])
- for x in by_param_part.keys()
- if x[0] == state_or_trans
- ]
- )
- _, part_std_param, _ = _std_by_param(
- by_param_part,
- distinct_values_by_param_index,
- state_or_trans,
- attribute,
- param_idx,
- verbose,
- )
- ret["param_data"][param]["depends_for_codependent_value"][
- combi
- ] = _depends_on_param(part_corr, part_std_param, part_std_lut)
-
if state_or_trans in arg_count:
for arg_index in range(arg_count[state_or_trans]):
std_matrix, mean_std, lut_matrix = _std_by_param(
@@ -365,7 +305,6 @@ def _compute_param_statistics(
state_or_trans,
attribute,
len(parameter_names) + arg_index,
- verbose,
)
ret["std_by_arg"].append(mean_std)
ret["std_by_arg_values"].append(std_matrix)
@@ -447,8 +386,8 @@ def prune_dependent_parameters(by_name, parameter_names, correlation_threshold=0
correlation != np.nan
and np.abs(correlation) > correlation_threshold
):
- print(
- "[!] Parameters {} <-> {} are correlated with coefficcient {}".format(
+ logger.debug(
+                        "Parameters {} <-> {} are correlated with coefficient {}".format(
parameter_names[index_1],
parameter_names[index_2],
correlation,
@@ -458,7 +397,7 @@ def prune_dependent_parameters(by_name, parameter_names, correlation_threshold=0
index_to_remove = index_1
else:
index_to_remove = index_2
- print(
+ logger.debug(
" Removing parameter {}".format(
parameter_names[index_to_remove]
)
@@ -495,13 +434,7 @@ class ParamStats:
"""
def __init__(
- self,
- by_name,
- by_param,
- parameter_names,
- arg_count,
- use_corrcoef=False,
- verbose=False,
+ self, by_name, by_param, parameter_names, arg_count, use_corrcoef=False,
):
"""
Compute standard deviation and correlation coefficient on parameterized data partitions.
@@ -556,7 +489,6 @@ class ParamStats:
attribute,
self.distinct_values[state_or_tran],
self.distinct_values_by_param_index[state_or_tran],
- verbose,
],
}
)
@@ -592,147 +524,21 @@ class ParamStats:
)
> 2
):
- print(
- key,
- param,
- list(
- filter(
- lambda n: is_numeric(n),
- self.distinct_values[key][param],
- )
- ),
+ logger.debug(
+ "{} can be fitted for param {} on {}".format(
+ key,
+ param,
+ list(
+ filter(
+ lambda n: is_numeric(n),
+ self.distinct_values[key][param],
+ )
+ ),
+ )
)
return True
return False
- def static_submodel_params(self, state_or_tran, attribute):
- """
- Return the union of all parameter values which decide whether another parameter influences the model or not.
-
- I.e., the returned list of dicts contains one entry for each parameter value combination which (probably) does not have any parameter influencing the model.
- If the current parameters matches one of these, a static sub-model built based on this subset of parameters can likely be used.
- """
- # TODO
- pass
-
- def has_codependent_parameters(
- self, state_or_tran: str, attribute: str, param: str
- ) -> bool:
- """
- Return whether there are parameters which determine whether `param` influences `state_or_tran` `attribute` or not.
-
- :param state_or_tran: model state or transition
- :param attribute: model attribute
- :param param: parameter name
- """
- if len(self.codependent_parameters(state_or_tran, attribute, param)):
- return True
- return False
-
- def codependent_parameters(
- self, state_or_tran: str, attribute: str, param: str
- ) -> list:
- """
- Return list of parameters which determine whether `param` influences `state_or_tran` `attribute` or not.
-
- :param state_or_tran: model state or transition
- :param attribute: model attribute
- :param param: parameter name
- """
- if self.stats[state_or_tran][attribute]["depends_on_param"][param]:
- return self.stats[state_or_tran][attribute]["param_data"][param][
- "codependent_parameters"
- ]
- return list()
-
- def has_codependent_parameters_union(
- self, state_or_tran: str, attribute: str
- ) -> bool:
- """
- Return whether there is a subset of parameters which decides whether `state_or_tran` `attribute` is static or parameter-dependent
-
- :param state_or_tran: model state or transition
- :param attribute: model attribute
- """
- depends_on_a_parameter = False
- for param in self._parameter_names:
- if self.stats[state_or_tran][attribute]["depends_on_param"][param]:
- print("{}/{} depends on {}".format(state_or_tran, attribute, param))
- depends_on_a_parameter = True
- if (
- len(self.codependent_parameters(state_or_tran, attribute, param))
- == 0
- ):
- print("has no codependent parameters")
- # Always depends on this parameter, regardless of other parameters' values
- return False
- return depends_on_a_parameter
-
- def codependent_parameters_union(self, state_or_tran: str, attribute: str) -> list:
- """
- Return list of parameters which determine whether any parameter influences `state_or_tran` `attribute`.
-
- :param state_or_tran: model state or transition
- :param attribute: model attribute
- """
- codependent_parameters = set()
- for param in self._parameter_names:
- if self.stats[state_or_tran][attribute]["depends_on_param"][param]:
- if (
- len(self.codependent_parameters(state_or_tran, attribute, param))
- == 0
- ):
- return list(self._parameter_names)
- for codependent_param in self.codependent_parameters(
- state_or_tran, attribute, param
- ):
- codependent_parameters.add(codependent_param)
- return sorted(codependent_parameters)
-
- def codependence_by_codependent_param_values(
- self, state_or_tran: str, attribute: str, param: str
- ) -> dict:
- """
- Return dict mapping codependent parameter values to a boolean indicating whether `param` influences `state_or_tran` `attribute`.
-
- If a dict value is true, `attribute` depends on `param` for the corresponding codependent parameter values, otherwise it does not.
-
- :param state_or_tran: model state or transition
- :param attribute: model attribute
- :param param: parameter name
- """
- if self.stats[state_or_tran][attribute]["depends_on_param"][param]:
- return self.stats[state_or_tran][attribute]["param_data"][param][
- "depends_for_codependent_value"
- ]
- return dict()
-
- def codependent_parameter_value_dicts(
- self, state_or_tran: str, attribute: str, param: str, kind="dynamic"
- ):
- """
- Return dicts of codependent parameter key-value mappings for which `param` influences (or does not influence) `state_or_tran` `attribute`.
-
- :param state_or_tran: model state or transition
- :param attribute: model attribute
- :param param: parameter name:
- :param kind: 'static' or 'dynamic'. If 'dynamic' (the default), returns codependent parameter values for which `param` influences `attribute`. If 'static', returns codependent parameter values for which `param` does not influence `attribute`
- """
- codependent_parameters = self.stats[state_or_tran][attribute]["param_data"][
- param
- ]["codependent_parameters"]
- codependence_info = self.stats[state_or_tran][attribute]["param_data"][param][
- "depends_for_codependent_value"
- ]
- if len(codependent_parameters) == 0:
- return
- else:
- for param_values, is_dynamic in codependence_info.items():
- if (is_dynamic and kind == "dynamic") or (
- not is_dynamic and kind == "static"
- ):
- yield dict(zip(codependent_parameters, param_values))
-
def _generic_param_independence_ratio(self, state_or_trans, attribute):
"""
Return the heuristic ratio of parameter independence for state_or_trans and attribute.
diff --git a/lib/protocol_benchmarks.py b/lib/protocol_benchmarks.py
index b42e821..d41979f 100755
--- a/lib/protocol_benchmarks.py
+++ b/lib/protocol_benchmarks.py
@@ -16,8 +16,11 @@ import io
import os
import re
import time
+import logging
from filelock import FileLock
+logger = logging.getLogger(__name__)
+
class DummyProtocol:
def __init__(self):
@@ -1838,14 +1841,14 @@ class Benchmark:
this_result["data"] = data
if value != None:
this_result[key] = {"v": value, "ts": int(time.time())}
- print(
+ logger.debug(
"{} {} {} ({}) :: {} -> {}".format(
libkey, bench_name, bench_index, data, key, value
)
)
else:
this_result[key] = {"e": error, "ts": int(time.time())}
- print(
+ logger.debug(
"{} {} {} ({}) :: {} -> [E] {}".format(
libkey, bench_name, bench_index, data, key, error[:500]
)
diff --git a/lib/runner.py b/lib/runner.py
index 16f0a29..77b7c68 100644
--- a/lib/runner.py
+++ b/lib/runner.py
@@ -31,7 +31,8 @@ class SerialReader(serial.threaded.Protocol):
"""Create a new SerialReader object."""
self.callback = callback
self.recv_buf = ""
- self.lines = []
+ self.lines = list()
+ self.all_lines = list()
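+        # lines is cleared on read (SerialReader.get_lines); all_lines keeps
+        # the complete receive history for SerialMonitor.get_lines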
def __call__(self):
return self
@@ -47,7 +48,9 @@ class SerialReader(serial.threaded.Protocol):
# Note: Do not call str.strip on lines[-1]! Otherwise, lines may be mangled
lines = self.recv_buf.split("\n")
if len(lines) > 1:
- self.lines.extend(map(str.strip, lines[:-1]))
+ new_lines = list(map(str.strip, lines[:-1]))
+ self.lines.extend(new_lines)
+ self.all_lines.extend(new_lines)
self.recv_buf = lines[-1]
if self.callback:
for line in lines[:-1]:
@@ -120,7 +123,7 @@ class SerialMonitor:
return self.reader.get_lines()
def get_lines(self) -> list:
- return self.reader.get_lines()
+ return self.reader.all_lines
def get_files(self) -> list:
return list()
@@ -143,6 +146,9 @@ class SerialMonitor:
class EnergyTraceMonitor(SerialMonitor):
"""EnergyTraceMonitor captures serial timing output and EnergyTrace energy data."""
+    # Additional key-value arguments passed to generate-dfa-benchmark.py via --energytrace=... end up here
+    # (e.g. --energytrace=var1=bar,somecount=2 => EnergyTraceMonitor(..., var1="bar", somecount="2")).
+    # Measurement starts as soon as the EnergyTraceMonitor object is created (i.e., msp430-etv is launched here).
def __init__(self, port: str, baud: int, callback=None, voltage=3.3):
super().__init__(port=port, baud=baud, callback=callback)
self._voltage = voltage
@@ -155,20 +161,31 @@ class EnergyTraceMonitor(SerialMonitor):
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
)
+    # Benchmark finished -> terminate external helper programs
def close(self):
super().close()
self._logger.send_signal(subprocess.signal.SIGINT)
stdout, stderr = self._logger.communicate(timeout=15)
+    # Additional files to be stored alongside the benchmark log and plan
+    # (here: the log files generated by msp430-etv)
def get_files(self) -> list:
return [self._output]
+    # Monitor configuration (here: the measurement voltage) to be stored alongside the benchmark results
def get_config(self) -> dict:
return {
"voltage": self._voltage,
}
+class EnergyTraceLogicAnalyzerMonitor(EnergyTraceMonitor):
+ """EnergyTraceLogicAnalyzerMonitor captures EnergyTrace energy data and LogicAnalyzer timing output."""
+
+ def __init__(self, port: str, baud: int, callback=None, voltage=3.3):
+ super().__init__(port=port, baud=baud, callback=callback, voltage=voltage)
+
+
class MIMOSAMonitor(SerialMonitor):
"""MIMOSAMonitor captures serial output and MIMOSA energy data for a specific amount of time."""
@@ -362,8 +379,14 @@ def get_monitor(arch: str, **kwargs) -> object:
mimosa_kwargs = kwargs.pop("mimosa")
return MIMOSAMonitor(port, arg, **mimosa_kwargs, **kwargs)
elif "energytrace" in kwargs and kwargs["energytrace"] is not None:
- energytrace_kwargs = kwargs.pop("energytrace")
- return EnergyTraceMonitor(port, arg, **energytrace_kwargs, **kwargs)
+ energytrace_kwargs = kwargs.pop("energytrace").copy()
+ sync_mode = energytrace_kwargs.pop("sync")
+ if sync_mode == "la":
+ return EnergyTraceLogicAnalyzerMonitor(
+ port, arg, **energytrace_kwargs, **kwargs
+ )
+ else:
+ return EnergyTraceMonitor(port, arg, **energytrace_kwargs, **kwargs)
else:
kwargs.pop("energytrace", None)
kwargs.pop("mimosa", None)
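
[Editor's sketch: how the --energytrace argument flows into this dispatch. The parse_energytrace_arg helper is illustrative; only the sync handling is taken from the diff, and the monitor classes are those defined in lib/runner.py above.]

```
def parse_energytrace_arg(arg: str) -> dict:
    # illustrative: "sync=la,somecount=2" -> {"sync": "la", "somecount": "2"}
    return dict(kv.split("=", 1) for kv in arg.split(","))

energytrace_kwargs = parse_energytrace_arg("sync=la,somecount=2")
sync_mode = energytrace_kwargs.pop("sync")
# "la" selects the logic-analyzer variant; anything else the plain monitor
monitor_class = (
    EnergyTraceLogicAnalyzerMonitor if sync_mode == "la" else EnergyTraceMonitor
)
```
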
@@ -382,6 +405,23 @@ def get_counter_limits(arch: str) -> tuple:
raise RuntimeError("Did not find Counter Overflow limits")
+def sleep_ms(duration: int, arch: str, cpu_freq: int = None) -> str:
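+    """Return C code for a sleep of `duration` ms, split into multiple arch.sleep_ms() calls if the target cannot sleep that long at once."""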
+ max_sleep = None
+ if "msp430fr" in arch:
+ if cpu_freq is not None and cpu_freq > 8000000:
+ max_sleep = 250
+ else:
+ max_sleep = 500
+ if max_sleep is not None and duration > max_sleep:
+ sub_sleep_count = duration // max_sleep
+ tail_sleep = duration % max_sleep
+ ret = f"for (unsigned char i = 0; i < {sub_sleep_count}; i++) {{ arch.sleep_ms({max_sleep}); }}\n"
+ if tail_sleep > 0:
+ ret += f"arch.sleep_ms({tail_sleep});\n"
+ return ret
+ return "arch.sleep_ms({duration});\n"
+
+
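
[Editor's note: a worked example of sleep_ms. The arch string is illustrative; any value containing "msp430fr" triggers the chunking. At the default CPU frequency, a 1200 ms sleep is split into two 500 ms chunks plus a 200 ms tail; above 8 MHz, 250 ms chunks would be used instead.]

```
print(sleep_ms(1200, "msp430fr5994lp"))
# for (unsigned char i = 0; i < 2; i++) { arch.sleep_ms(500); }
# arch.sleep_ms(200);
```
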
def get_counter_limits_us(arch: str) -> tuple:
"""Return duration of one counter step and one counter overflow in us."""
cpu_freq = 0
diff --git a/lib/utils.py b/lib/utils.py
index 91dded0..d28ecda 100644
--- a/lib/utils.py
+++ b/lib/utils.py
@@ -1,17 +1,9 @@
import numpy as np
import re
+import logging
arg_support_enabled = True
-
-
-def vprint(verbose, string):
- """
- Print `string` if `verbose`.
-
- Prints string if verbose is a True value
- """
- if verbose:
- print(string)
+logger = logging.getLogger(__name__)
def running_mean(x: np.ndarray, N: int) -> np.ndarray:
@@ -222,7 +214,7 @@ def filter_aggregate_by_param(aggregate, parameters, parameter_filter):
)
)
if len(indices_to_keep) == 0:
- print("??? {}->{}".format(parameter_filter, name))
+ logger.debug("??? {}->{}".format(parameter_filter, name))
names_to_remove.add(name)
else:
for attribute in aggregate[name]["attributes"]:
diff --git a/lib/validation.py b/lib/validation.py
new file mode 100644
index 0000000..ee147fe
--- /dev/null
+++ b/lib/validation.py
@@ -0,0 +1,238 @@
+#!/usr/bin/env python3
+
+import logging
+import numpy as np
+
+logger = logging.getLogger(__name__)
+
+
+def _xv_partitions_kfold(length, k=10):
+ """
+ Return k pairs of training and validation sets for k-fold cross-validation on `length` items.
+
+    In k-fold cross-validation, every k-th item is used for validation and the remainder is used for training.
+    As there are k ways to do this (items 0, k, 2k, ... vs. items 1, k+1, 2k+1, ..., etc.), this function returns k pairs of training and validation sets.
+
+ Note that this function operates on indices, not data.
+ """
+ pairs = []
+ num_slices = k
+ indexes = np.arange(length)
+ for i in range(num_slices):
+ training = np.delete(indexes, slice(i, None, num_slices))
+ validation = indexes[i::num_slices]
+ pairs.append((training, validation))
+ return pairs
+
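[Editor's note: for illustration, the three folds produced for six items. Each index lands in exactly one validation set, and the partition is deterministic.]

```
for training, validation in _xv_partitions_kfold(6, k=3):
    print(training, validation)
# [1 2 4 5] [0 3]
# [0 2 3 5] [1 4]
# [0 1 3 4] [2 5]
```
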
+
+def _xv_partition_montecarlo(length):
+ """
+ Return training and validation set for Monte Carlo cross-validation on `length` items.
+
+ This function operates on indices, not data. It randomly partitions range(length) into a list of training indices and a list of validation indices.
+
+    The training set contains 2/3 of all indices; the validation set consists of the remaining 1/3.
+
+    Example: 9 items -> training = [7, 3, 8, 0, 4, 2], validation = [1, 6, 5]
+ """
+ shuffled = np.random.permutation(np.arange(length))
+ border = int(length * float(2) / 3)
+ training = shuffled[:border]
+ validation = shuffled[border:]
+ return (training, validation)
+
+
+class CrossValidator:
+ """
+ Cross-Validation helper for model generation.
+
+ Given a set of measurements and a model class, it will partition the
+ data into training and validation sets, train the model on the training
+ set, and assess its quality on the validation set. This is repeated
+ several times depending on cross-validation algorithm and configuration.
+ Reports the mean model error over all cross-validation runs.
+ """
+
+ def __init__(self, model_class, by_name, parameters, arg_count):
+ """
+ Create a new CrossValidator object.
+
+ Does not perform cross-validation yet.
+
+ arguments:
+ model_class -- model class/type used for model synthesis,
+ e.g. PTAModel or AnalyticModel. model_class must have a
+ constructor accepting (by_name, parameters, arg_count)
+ and provide an `assess` method.
+ by_name -- measurements aggregated by state/transition/function/... name.
+ Layout: by_name[name][attribute] = list of data. Additionally,
+ by_name[name]['attributes'] must be set to the list of attributes,
+ e.g. ['power'] or ['duration', 'energy'].
+ """
+ self.model_class = model_class
+ self.by_name = by_name
+ self.names = sorted(by_name.keys())
+ self.parameters = sorted(parameters)
+ self.arg_count = arg_count
+
+ def kfold(self, model_getter, k=10):
+ """
+ Perform k-fold cross-validation and return average model quality.
+
+        The by_name data is deterministically divided into (k-1)/k training and 1/k validation data.
+ After creating a model for the training set, the
+ model type returned by model_getter is evaluated on the validation set.
+ This is repeated k times; the average of all measures is returned to the user.
+
+ arguments:
+ model_getter -- function with signature (model_object) -> model,
+ e.g. lambda m: m.get_fitted()[0] to evaluate the parameter-aware
+ model with automatic parameter detection.
+        k -- number of folds. The validation set contains 100/k % of data.
+
+ return value:
+ dict of model quality measures.
+ {
+ 'by_name' : {
+ for each name: {
+ for each attribute: {
+                        'mae' : mean of all mean absolute errors
+                        'mae_list' : list of the individual MAE values encountered during cross-validation
+                        'rmsd' : mean of all root mean square deviations
+                        'rmsd_list' : list of the individual RMSD values encountered during cross-validation
+                        'smape' : mean of all symmetric mean absolute percentage errors
+                        'smape_list' : list of the individual SMAPE values encountered during cross-validation
+ }
+ }
+ }
+ }
+ """
+
+ # training / validation subsets for each state and transition
+ subsets_by_name = dict()
+ training_and_validation_sets = list()
+
+ for name in self.names:
+ sample_count = len(self.by_name[name]["param"])
+ subsets_by_name[name] = list()
+ subsets_by_name[name] = _xv_partitions_kfold(sample_count, k)
+
+ for i in range(k):
+ training_and_validation_sets.append(dict())
+ for name in self.names:
+ training_and_validation_sets[i][name] = subsets_by_name[name][i]
+
+ return self._generic_xv(model_getter, training_and_validation_sets)
+
+ def montecarlo(self, model_getter, count=200):
+ """
+ Perform Monte Carlo cross-validation and return average model quality.
+
+ The by_name data is randomly divided into 2/3 training and 1/3
+ validation. After creating a model for the training set, the
+ model type returned by model_getter is evaluated on the validation set.
+ This is repeated count times (defaulting to 200); the average of all
+ measures is returned to the user.
+
+ arguments:
+ model_getter -- function with signature (model_object) -> model,
+ e.g. lambda m: m.get_fitted()[0] to evaluate the parameter-aware
+ model with automatic parameter detection.
+ count -- number of validation runs to perform, defaults to 200
+
+ return value:
+ dict of model quality measures.
+ {
+ 'by_name' : {
+ for each name: {
+ for each attribute: {
+                        'mae' : mean of all mean absolute errors
+                        'mae_list' : list of the individual MAE values encountered during cross-validation
+                        'rmsd' : mean of all root mean square deviations
+                        'rmsd_list' : list of the individual RMSD values encountered during cross-validation
+                        'smape' : mean of all symmetric mean absolute percentage errors
+                        'smape_list' : list of the individual SMAPE values encountered during cross-validation
+ }
+ }
+ }
+ }
+ """
+
+ # training / validation subsets for each state and transition
+ subsets_by_name = dict()
+ training_and_validation_sets = list()
+
+ for name in self.names:
+ sample_count = len(self.by_name[name]["param"])
+ subsets_by_name[name] = list()
+ for _ in range(count):
+ subsets_by_name[name].append(_xv_partition_montecarlo(sample_count))
+
+ for i in range(count):
+ training_and_validation_sets.append(dict())
+ for name in self.names:
+ training_and_validation_sets[i][name] = subsets_by_name[name][i]
+
+ return self._generic_xv(model_getter, training_and_validation_sets)
+
+ def _generic_xv(self, model_getter, training_and_validation_sets):
+ ret = {"by_name": dict()}
+
+ for name in self.names:
+ ret["by_name"][name] = dict()
+ for attribute in self.by_name[name]["attributes"]:
+ ret["by_name"][name][attribute] = {
+ "mae_list": list(),
+ "rmsd_list": list(),
+ "smape_list": list(),
+ }
+
+ for training_and_validation_by_name in training_and_validation_sets:
+ res = self._single_xv(model_getter, training_and_validation_by_name)
+ for name in self.names:
+ for attribute in self.by_name[name]["attributes"]:
+ for measure in ("mae", "rmsd", "smape"):
+ ret["by_name"][name][attribute][f"{measure}_list"].append(
+ res["by_name"][name][attribute][measure]
+ )
+
+ for name in self.names:
+ for attribute in self.by_name[name]["attributes"]:
+ for measure in ("mae", "rmsd", "smape"):
+ ret["by_name"][name][attribute][measure] = np.mean(
+ ret["by_name"][name][attribute][f"{measure}_list"]
+ )
+
+ return ret
+
+ def _single_xv(self, model_getter, tv_set_dict):
+ training = dict()
+ validation = dict()
+ for name in self.names:
+ training[name] = {"attributes": self.by_name[name]["attributes"]}
+ validation[name] = {"attributes": self.by_name[name]["attributes"]}
+
+ if "isa" in self.by_name[name]:
+ training[name]["isa"] = self.by_name[name]["isa"]
+ validation[name]["isa"] = self.by_name[name]["isa"]
+
+ training_subset, validation_subset = tv_set_dict[name]
+
+ for attribute in self.by_name[name]["attributes"]:
+ self.by_name[name][attribute] = np.array(self.by_name[name][attribute])
+ training[name][attribute] = self.by_name[name][attribute][
+ training_subset
+ ]
+ validation[name][attribute] = self.by_name[name][attribute][
+ validation_subset
+ ]
+
+ # We can't use slice syntax for 'param', which may contain strings and other odd values
+ training[name]["param"] = list()
+ validation[name]["param"] = list()
+ for idx in training_subset:
+ training[name]["param"].append(self.by_name[name]["param"][idx])
+ for idx in validation_subset:
+ validation[name]["param"].append(self.by_name[name]["param"][idx])
+
+ training_data = self.model_class(training, self.parameters, self.arg_count)
+ training_model = model_getter(training_data)
+ validation_data = self.model_class(validation, self.parameters, self.arg_count)
+
+ return validation_data.assess(training_model)
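
[Editor's sketch: a usage example for the new module, following the docstrings above. AnalyticModel and its get_fitted() method follow the model_getter example in the docstring and are assumed to match the documented (by_name, parameters, arg_count) constructor; the synthetic data mirrors test_parameters.py.]

```
import numpy as np
from dfatool.model import AnalyticModel  # assumed constructor: (by_name, parameters, arg_count)
from dfatool.validation import CrossValidator

X = np.arange(200) % 50
by_name = {
    "TX": {
        "param": [(x,) for x in X],
        "power": X + np.random.normal(size=X.size),
        "attributes": ["power"],
    }
}

xv = CrossValidator(AnalyticModel, by_name, ["p_linear"], dict())

# deterministic 10-fold cross-validation of the fitted parameter-aware model
quality = xv.kfold(lambda m: m.get_fitted()[0], k=10)
print(quality["by_name"]["TX"]["power"]["smape"])

# 200 random 2/3 training / 1/3 validation splits
quality = xv.montecarlo(lambda m: m.get_fitted()[0], count=200)
```
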
diff --git a/test/test_codegen.py b/test/test_codegen.py
index 981117b..ce565d6 100755
--- a/test/test_codegen.py
+++ b/test/test_codegen.py
@@ -5,84 +5,74 @@ from dfatool.codegen import get_simulated_accountingmethod
import unittest
example_json_1 = {
- 'parameters': ['datarate', 'txbytes', 'txpower'],
- 'initial_param_values': [None, None, None],
- 'state': {
- 'IDLE': {
- 'power': {
- 'static': 5,
- }
- },
- 'TX': {
- 'power': {
- 'static': 100,
- 'function': {
- 'raw': 'regression_arg(0) + regression_arg(1)'
- ' * parameter(txpower)',
- 'regression_args': [100, 2]
+ "parameters": ["datarate", "txbytes", "txpower"],
+ "initial_param_values": [None, None, None],
+ "state": {
+ "IDLE": {"power": {"static": 5,}},
+ "TX": {
+ "power": {
+ "static": 100,
+ "function": {
+ "raw": "regression_arg(0) + regression_arg(1)"
+ " * parameter(txpower)",
+ "regression_args": [100, 2],
},
}
},
},
- 'transitions': [
+ "transitions": [
{
- 'name': 'init',
- 'origin': ['UNINITIALIZED', 'IDLE'],
- 'destination': 'IDLE',
- 'duration': {
- 'static': 50000,
- },
- 'set_param': {
- 'txpower': 10
- },
+ "name": "init",
+ "origin": ["UNINITIALIZED", "IDLE"],
+ "destination": "IDLE",
+ "duration": {"static": 50000,},
+ "set_param": {"txpower": 10},
},
{
- 'name': 'setTxPower',
- 'origin': 'IDLE',
- 'destination': 'IDLE',
- 'duration': {'static': 120},
- 'energy ': {'static': 10000},
- 'arg_to_param_map': {0: 'txpower'},
- 'argument_values': [[10, 20, 30]],
+ "name": "setTxPower",
+ "origin": "IDLE",
+ "destination": "IDLE",
+ "duration": {"static": 120},
+ "energy ": {"static": 10000},
+ "arg_to_param_map": {0: "txpower"},
+ "argument_values": [[10, 20, 30]],
},
{
- 'name': 'send',
- 'origin': 'IDLE',
- 'destination': 'TX',
- 'duration': {
- 'static': 10,
- 'function': {
- 'raw': 'regression_arg(0) + regression_arg(1)'
- ' * function_arg(1)',
- 'regression_args': [48, 8],
+ "name": "send",
+ "origin": "IDLE",
+ "destination": "TX",
+ "duration": {
+ "static": 10,
+ "function": {
+ "raw": "regression_arg(0) + regression_arg(1)" " * function_arg(1)",
+ "regression_args": [48, 8],
},
},
- 'energy': {
- 'static': 3,
- 'function': {
- 'raw': 'regression_arg(0) + regression_arg(1)'
- ' * function_arg(1)',
- 'regression_args': [3, 5],
+ "energy": {
+ "static": 3,
+ "function": {
+ "raw": "regression_arg(0) + regression_arg(1)" " * function_arg(1)",
+ "regression_args": [3, 5],
},
},
- 'arg_to_param_map': {1: 'txbytes'},
- 'argument_values': [['"foo"', '"hodor"'], [3, 5]],
- 'argument_combination': 'zip',
+ "arg_to_param_map": {1: "txbytes"},
+ "argument_values": [['"foo"', '"hodor"'], [3, 5]],
+ "argument_combination": "zip",
},
{
- 'name': 'txComplete',
- 'origin': 'TX',
- 'destination': 'IDLE',
- 'is_interrupt': 1,
- 'timeout': {
- 'static': 2000,
- 'function': {
- 'raw': 'regression_arg(0) + regression_arg(1)'
- ' * parameter(txbytes)',
- 'regression_args': [500, 16],
+ "name": "txComplete",
+ "origin": "TX",
+ "destination": "IDLE",
+ "is_interrupt": 1,
+ "timeout": {
+ "static": 2000,
+ "function": {
+ "raw": "regression_arg(0) + regression_arg(1)"
+ " * parameter(txbytes)",
+ "regression_args": [500, 16],
},
},
- }
+ },
],
}
@@ -91,9 +81,11 @@ class TestCG(unittest.TestCase):
def test_statetransition_immediate(self):
pta = PTA.from_json(example_json_1)
pta.set_random_energy_model()
- pta.state['IDLE'].power.value = 9
- cg = get_simulated_accountingmethod('static_statetransition_immediate')(pta, 1000000, 'uint8_t', 'uint8_t', 'uint8_t', 'uint8_t')
- cg.current_state = pta.state['IDLE']
+ pta.state["IDLE"].power.value = 9
+ cg = get_simulated_accountingmethod("static_statetransition_immediate")(
+ pta, 1000000, "uint8_t", "uint8_t", "uint8_t", "uint8_t"
+ )
+ cg.current_state = pta.state["IDLE"]
cg.sleep(7)
self.assertEqual(cg.get_energy(), 9 * 7)
pta.transitions[1].energy.value = 123
@@ -102,8 +94,10 @@ class TestCG(unittest.TestCase):
cg.pass_transition(pta.transitions[1])
self.assertEqual(cg.get_energy(), (9 * 7 + 123 + 123) % 256)
- cg = get_simulated_accountingmethod('static_statetransition_immediate')(pta, 100000, 'uint8_t', 'uint8_t', 'uint8_t', 'uint8_t')
- cg.current_state = pta.state['IDLE']
+ cg = get_simulated_accountingmethod("static_statetransition_immediate")(
+ pta, 100000, "uint8_t", "uint8_t", "uint8_t", "uint8_t"
+ )
+ cg.current_state = pta.state["IDLE"]
cg.sleep(7)
self.assertEqual(cg.get_energy(), 0)
cg.sleep(15)
@@ -111,8 +105,10 @@ class TestCG(unittest.TestCase):
cg.sleep(90)
self.assertEqual(cg.get_energy(), 900 % 256)
- cg = get_simulated_accountingmethod('static_statetransition_immediate')(pta, 100000, 'uint8_t', 'uint8_t', 'uint8_t', 'uint16_t')
- cg.current_state = pta.state['IDLE']
+ cg = get_simulated_accountingmethod("static_statetransition_immediate")(
+ pta, 100000, "uint8_t", "uint8_t", "uint8_t", "uint16_t"
+ )
+ cg.current_state = pta.state["IDLE"]
cg.sleep(7)
self.assertEqual(cg.get_energy(), 0)
cg.sleep(15)
@@ -120,10 +116,12 @@ class TestCG(unittest.TestCase):
cg.sleep(90)
self.assertEqual(cg.get_energy(), 900)
- pta.state['IDLE'].power.value = 9 # -> 90 uW
+ pta.state["IDLE"].power.value = 9 # -> 90 uW
pta.transitions[1].energy.value = 1 # -> 100 pJ
- cg = get_simulated_accountingmethod('static_statetransition_immediate')(pta, 1000000, 'uint8_t', 'uint8_t', 'uint8_t', 'uint8_t', 1e-5, 1e-5, 1e-10)
- cg.current_state = pta.state['IDLE']
+ cg = get_simulated_accountingmethod("static_statetransition_immediate")(
+ pta, 1000000, "uint8_t", "uint8_t", "uint8_t", "uint8_t", 1e-5, 1e-5, 1e-10
+ )
+ cg.current_state = pta.state["IDLE"]
cg.sleep(10) # 10 us
self.assertEqual(cg.get_energy(), 90 * 10)
cg.pass_transition(pta.transitions[1])
@@ -134,9 +132,11 @@ class TestCG(unittest.TestCase):
def test_statetransition(self):
pta = PTA.from_json(example_json_1)
pta.set_random_energy_model()
- pta.state['IDLE'].power.value = 9
- cg = get_simulated_accountingmethod('static_statetransition')(pta, 1000000, 'uint8_t', 'uint8_t', 'uint8_t', 'uint8_t')
- cg.current_state = pta.state['IDLE']
+ pta.state["IDLE"].power.value = 9
+ cg = get_simulated_accountingmethod("static_statetransition")(
+ pta, 1000000, "uint8_t", "uint8_t", "uint8_t", "uint8_t"
+ )
+ cg.current_state = pta.state["IDLE"]
cg.sleep(7)
self.assertEqual(cg.get_energy(), 9 * 7)
pta.transitions[1].energy.value = 123
@@ -148,9 +148,11 @@ class TestCG(unittest.TestCase):
def test_state_immediate(self):
pta = PTA.from_json(example_json_1)
pta.set_random_energy_model()
- pta.state['IDLE'].power.value = 9
- cg = get_simulated_accountingmethod('static_state_immediate')(pta, 1000000, 'uint8_t', 'uint8_t', 'uint8_t', 'uint8_t')
- cg.current_state = pta.state['IDLE']
+ pta.state["IDLE"].power.value = 9
+ cg = get_simulated_accountingmethod("static_state_immediate")(
+ pta, 1000000, "uint8_t", "uint8_t", "uint8_t", "uint8_t"
+ )
+ cg.current_state = pta.state["IDLE"]
cg.sleep(7)
self.assertEqual(cg.get_energy(), 9 * 7)
pta.transitions[1].energy.value = 123
@@ -162,9 +164,11 @@ class TestCG(unittest.TestCase):
def test_state(self):
pta = PTA.from_json(example_json_1)
pta.set_random_energy_model()
- pta.state['IDLE'].power.value = 9
- cg = get_simulated_accountingmethod('static_state')(pta, 1000000, 'uint8_t', 'uint8_t', 'uint8_t', 'uint8_t')
- cg.current_state = pta.state['IDLE']
+ pta.state["IDLE"].power.value = 9
+ cg = get_simulated_accountingmethod("static_state")(
+ pta, 1000000, "uint8_t", "uint8_t", "uint8_t", "uint8_t"
+ )
+ cg.current_state = pta.state["IDLE"]
cg.sleep(7)
self.assertEqual(cg.get_energy(), 9 * 7)
pta.transitions[1].energy.value = 123
@@ -173,8 +177,10 @@ class TestCG(unittest.TestCase):
cg.pass_transition(pta.transitions[1])
self.assertEqual(cg.get_energy(), 9 * 7)
- cg = get_simulated_accountingmethod('static_state')(pta, 1000000, 'uint8_t', 'uint16_t', 'uint16_t', 'uint16_t')
+ cg = get_simulated_accountingmethod("static_state")(
+ pta, 1000000, "uint8_t", "uint16_t", "uint16_t", "uint16_t"
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_parameters.py b/test/test_parameters.py
new file mode 100755
index 0000000..e36b1a1
--- /dev/null
+++ b/test/test_parameters.py
@@ -0,0 +1,228 @@
+#!/usr/bin/env python3
+
+from dfatool import parameters
+from dfatool.utils import by_name_to_by_param
+from dfatool.functions import analytic
+from dfatool.model import ParallelParamFit
+import unittest
+
+import numpy as np
+
+
+class TestModels(unittest.TestCase):
+ def test_distinct_param_values(self):
+ X = np.arange(35)
+ by_name = {
+ "TX": {
+ "param": [(x % 5, x % 7) for x in X],
+ "power": X,
+ "attributes": ["power"],
+ }
+ }
+ self.assertEqual(
+ parameters.distinct_param_values(by_name, "TX"),
+ [list(range(5)), list(range(7))],
+ )
+
+ def test_parameter_detection_linear(self):
+        # rng = np.random.default_rng(seed=1312)  # requires NumPy >= 1.17
+        np.random.seed(1312)
+        X = np.arange(200) % 50
+        # Y = X + rng.normal(size=X.size)  # requires NumPy >= 1.17
+        Y = X + np.random.normal(size=X.size)
+ parameter_names = ["p_mod5", "p_linear"]
+
+ # Test input data:
+ # * param[0] ("p_mod5") == X % 5 (bogus data to test detection of non-influence)
+ # * param[1] ("p_linear") == X
+ # * TX power == X ± gaussian noise
+ # -> TX power depends linearly on "p_linear"
+ by_name = {
+ "TX": {
+ "param": [(x % 5, x) for x in X],
+ "power": Y,
+ "attributes": ["power"],
+ }
+ }
+ by_param = by_name_to_by_param(by_name)
+ stats = parameters.ParamStats(by_name, by_param, parameter_names, dict())
+
+ self.assertEqual(stats.depends_on_param("TX", "power", "p_mod5"), False)
+ self.assertEqual(stats.depends_on_param("TX", "power", "p_linear"), True)
+
+ # Fit individual functions for each parameter (only "p_linear" in this case)
+
+ paramfit = ParallelParamFit(by_param)
+ paramfit.enqueue("TX", "power", 1, "p_linear")
+ paramfit.fit()
+
+ fit_result = paramfit.get_result("TX", "power")
+ self.assertEqual(fit_result["p_linear"]["best"], "linear")
+ self.assertEqual("p_mod5" not in fit_result, True)
+
+ # Fit a single function for all parameters (still only "p_linear" in this case)
+
+ combined_fit = analytic.function_powerset(fit_result, parameter_names, 0)
+
+ self.assertEqual(
+ combined_fit.model_function,
+ "0 + regression_arg(0) + regression_arg(1) * parameter(p_linear)",
+ )
+ self.assertEqual(
+ combined_fit._function_str,
+ "0 + reg_param[0] + reg_param[1] * model_param[1]",
+ )
+
+ combined_fit.fit(by_param, "TX", "power")
+
+ self.assertEqual(combined_fit.fit_success, True)
+
+ self.assertEqual(combined_fit.is_predictable([None, None]), False)
+ self.assertEqual(combined_fit.is_predictable([None, 0]), True)
+ self.assertEqual(combined_fit.is_predictable([None, 50]), True)
+ self.assertEqual(combined_fit.is_predictable([0, None]), False)
+ self.assertEqual(combined_fit.is_predictable([50, None]), False)
+ self.assertEqual(combined_fit.is_predictable([0, 0]), True)
+ self.assertEqual(combined_fit.is_predictable([0, 50]), True)
+ self.assertEqual(combined_fit.is_predictable([50, 0]), True)
+ self.assertEqual(combined_fit.is_predictable([50, 50]), True)
+
+ # The function should be linear without offset or skew
+ for i in range(100):
+ self.assertAlmostEqual(combined_fit.eval([None, i]), i, places=0)
+
+ def test_parameter_detection_multi_dimensional(self):
+ # rng = np.random.default_rng(seed=1312) # requires NumPy >= 1.17
+ np.random.seed(1312)
+ # vary each parameter from 1 to 10
+ Xi = (np.arange(50) % 10) + 1
+ # Three parameters -> Build input array [[1, 1, 1], [1, 1, 2], ..., [10, 10, 10]]
+ X = np.array(np.meshgrid(Xi, Xi, Xi)).T.reshape(-1, 3)
+
+ f_lls = np.vectorize(
+ lambda x: 42 + 7 * x[0] + 10 * np.log(x[1]) - 0.5 * x[2] * x[2],
+ signature="(n)->()",
+ )
+ f_ll = np.vectorize(
+ lambda x: 23 + 5 * x[0] - 3 * x[0] / x[1], signature="(n)->()"
+ )
+
+ # Y_lls = f_lls(X) + rng.normal(size=X.shape[0]) # requires NumPy >= 1.17
+ # Y_ll = f_ll(X) + rng.normal(size=X.shape[0]) # requires NumPy >= 1.17
+ Y_lls = f_lls(X) + np.random.normal(size=X.shape[0])
+ Y_ll = f_ll(X) + np.random.normal(size=X.shape[0])
+
+ parameter_names = ["lin_lin", "log_inv", "square_none"]
+
+ by_name = {
+ "someKey": {
+ "param": X,
+ "lls": Y_lls,
+ "ll": Y_ll,
+ "attributes": ["lls", "ll"],
+ }
+ }
+ by_param = by_name_to_by_param(by_name)
+ stats = parameters.ParamStats(by_name, by_param, parameter_names, dict())
+
+ self.assertEqual(stats.depends_on_param("someKey", "lls", "lin_lin"), True)
+ self.assertEqual(stats.depends_on_param("someKey", "lls", "log_inv"), True)
+ self.assertEqual(stats.depends_on_param("someKey", "lls", "square_none"), True)
+
+ self.assertEqual(stats.depends_on_param("someKey", "ll", "lin_lin"), True)
+ self.assertEqual(stats.depends_on_param("someKey", "ll", "log_inv"), True)
+ self.assertEqual(stats.depends_on_param("someKey", "ll", "square_none"), False)
+
+ paramfit = ParallelParamFit(by_param)
+ paramfit.enqueue("someKey", "lls", 0, "lin_lin")
+ paramfit.enqueue("someKey", "lls", 1, "log_inv")
+ paramfit.enqueue("someKey", "lls", 2, "square_none")
+ paramfit.enqueue("someKey", "ll", 0, "lin_lin")
+ paramfit.enqueue("someKey", "ll", 1, "log_inv")
+ paramfit.fit()
+
+ fit_lls = paramfit.get_result("someKey", "lls")
+ self.assertEqual(fit_lls["lin_lin"]["best"], "linear")
+ self.assertEqual(fit_lls["log_inv"]["best"], "logarithmic")
+ self.assertEqual(fit_lls["square_none"]["best"], "square")
+
+ combined_fit_lls = analytic.function_powerset(fit_lls, parameter_names, 0)
+
+ self.assertEqual(
+ combined_fit_lls.model_function,
+ "0 + regression_arg(0) + regression_arg(1) * parameter(lin_lin)"
+ " + regression_arg(2) * np.log(parameter(log_inv))"
+ " + regression_arg(3) * (parameter(square_none))**2"
+ " + regression_arg(4) * parameter(lin_lin) * np.log(parameter(log_inv))"
+ " + regression_arg(5) * parameter(lin_lin) * (parameter(square_none))**2"
+ " + regression_arg(6) * np.log(parameter(log_inv)) * (parameter(square_none))**2"
+ " + regression_arg(7) * parameter(lin_lin) * np.log(parameter(log_inv)) * (parameter(square_none))**2",
+ )
+
+ combined_fit_lls.fit(by_param, "someKey", "lls")
+
+ self.assertEqual(combined_fit_lls.fit_success, True)
+
+ # Verify that f_lls parameters have been found
+ self.assertAlmostEqual(combined_fit_lls.model_args[0], 42, places=0)
+ self.assertAlmostEqual(combined_fit_lls.model_args[1], 7, places=0)
+ self.assertAlmostEqual(combined_fit_lls.model_args[2], 10, places=0)
+ self.assertAlmostEqual(combined_fit_lls.model_args[3], -0.5, places=1)
+ self.assertAlmostEqual(combined_fit_lls.model_args[4], 0, places=2)
+ self.assertAlmostEqual(combined_fit_lls.model_args[5], 0, places=2)
+ self.assertAlmostEqual(combined_fit_lls.model_args[6], 0, places=2)
+ self.assertAlmostEqual(combined_fit_lls.model_args[7], 0, places=2)
+
+ self.assertEqual(combined_fit_lls.is_predictable([None, None, None]), False)
+ self.assertEqual(combined_fit_lls.is_predictable([None, None, 11]), False)
+ self.assertEqual(combined_fit_lls.is_predictable([None, 11, None]), False)
+ self.assertEqual(combined_fit_lls.is_predictable([None, 11, 11]), False)
+ self.assertEqual(combined_fit_lls.is_predictable([11, None, None]), False)
+ self.assertEqual(combined_fit_lls.is_predictable([11, None, 11]), False)
+ self.assertEqual(combined_fit_lls.is_predictable([11, 11, None]), False)
+ self.assertEqual(combined_fit_lls.is_predictable([11, 11, 11]), True)
+
+ # Verify that fitted function behaves like input function
+ for i, x in enumerate(X):
+ self.assertAlmostEqual(combined_fit_lls.eval(x), f_lls(x), places=0)
+
+ fit_ll = paramfit.get_result("someKey", "ll")
+ self.assertEqual(fit_ll["lin_lin"]["best"], "linear")
+ self.assertEqual(fit_ll["log_inv"]["best"], "inverse")
+ self.assertEqual("quare_none" not in fit_ll, True)
+
+ combined_fit_ll = analytic.function_powerset(fit_ll, parameter_names, 0)
+
+ self.assertEqual(
+ combined_fit_ll.model_function,
+ "0 + regression_arg(0) + regression_arg(1) * parameter(lin_lin)"
+ " + regression_arg(2) * 1/(parameter(log_inv))"
+ " + regression_arg(3) * parameter(lin_lin) * 1/(parameter(log_inv))",
+ )
+
+ combined_fit_ll.fit(by_param, "someKey", "ll")
+
+ self.assertEqual(combined_fit_ll.fit_success, True)
+
+ # Verify that f_ll parameters have been found
+ self.assertAlmostEqual(combined_fit_ll.model_args[0], 23, places=0)
+ self.assertAlmostEqual(combined_fit_ll.model_args[1], 5, places=0)
+ self.assertAlmostEqual(combined_fit_ll.model_args[2], 0, places=1)
+ self.assertAlmostEqual(combined_fit_ll.model_args[3], -3, places=0)
+
+ self.assertEqual(combined_fit_ll.is_predictable([None, None, None]), False)
+ self.assertEqual(combined_fit_ll.is_predictable([None, None, 11]), False)
+ self.assertEqual(combined_fit_ll.is_predictable([None, 11, None]), False)
+ self.assertEqual(combined_fit_ll.is_predictable([None, 11, 11]), False)
+ self.assertEqual(combined_fit_ll.is_predictable([11, None, None]), False)
+ self.assertEqual(combined_fit_ll.is_predictable([11, None, 11]), False)
+ self.assertEqual(combined_fit_ll.is_predictable([11, 11, None]), True)
+ self.assertEqual(combined_fit_ll.is_predictable([11, 11, 11]), True)
+
+ # Verify that fitted function behaves like input function
+ for i, x in enumerate(X):
+ self.assertAlmostEqual(combined_fit_ll.eval(x), f_ll(x), places=0)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/test/test_pta.py b/test/test_pta.py
index 9f0778d..d43e702 100755
--- a/test/test_pta.py
+++ b/test/test_pta.py
@@ -5,88 +5,79 @@ import unittest
import yaml
example_json_1 = {
- 'parameters': ['datarate', 'txbytes', 'txpower'],
- 'initial_param_values': [None, None, None],
- 'state': {
- 'IDLE': {
- 'power': {
- 'static': 5,
- }
- },
- 'TX': {
- 'power': {
- 'static': 10000,
- 'function': {
- 'raw': 'regression_arg(0) + regression_arg(1)'
- ' * parameter(txpower)',
- 'regression_args': [10000, 2]
+ "parameters": ["datarate", "txbytes", "txpower"],
+ "initial_param_values": [None, None, None],
+ "state": {
+ "IDLE": {"power": {"static": 5,}},
+ "TX": {
+ "power": {
+ "static": 10000,
+ "function": {
+ "raw": "regression_arg(0) + regression_arg(1)"
+ " * parameter(txpower)",
+ "regression_args": [10000, 2],
},
}
},
},
- 'transitions': [
+ "transitions": [
{
- 'name': 'init',
- 'origin': ['UNINITIALIZED', 'IDLE'],
- 'destination': 'IDLE',
- 'duration': {
- 'static': 50000,
- },
- 'set_param': {
- 'txpower': 10
- },
+ "name": "init",
+ "origin": ["UNINITIALIZED", "IDLE"],
+ "destination": "IDLE",
+ "duration": {"static": 50000,},
+ "set_param": {"txpower": 10},
},
{
- 'name': 'setTxPower',
- 'origin': 'IDLE',
- 'destination': 'IDLE',
- 'duration': {'static': 120},
- 'energy ': {'static': 10000},
- 'arg_to_param_map': {0: 'txpower'},
- 'argument_values': [[10, 20, 30]],
+ "name": "setTxPower",
+ "origin": "IDLE",
+ "destination": "IDLE",
+ "duration": {"static": 120},
+ "energy ": {"static": 10000},
+ "arg_to_param_map": {0: "txpower"},
+ "argument_values": [[10, 20, 30]],
},
{
- 'name': 'send',
- 'origin': 'IDLE',
- 'destination': 'TX',
- 'duration': {
- 'static': 10,
- 'function': {
- 'raw': 'regression_arg(0) + regression_arg(1)'
- ' * function_arg(1)',
- 'regression_args': [48, 8],
+ "name": "send",
+ "origin": "IDLE",
+ "destination": "TX",
+ "duration": {
+ "static": 10,
+ "function": {
+ "raw": "regression_arg(0) + regression_arg(1)" " * function_arg(1)",
+ "regression_args": [48, 8],
},
},
- 'energy': {
- 'static': 3,
- 'function': {
- 'raw': 'regression_arg(0) + regression_arg(1)'
- ' * function_arg(1)',
- 'regression_args': [3, 5],
+ "energy": {
+ "static": 3,
+ "function": {
+ "raw": "regression_arg(0) + regression_arg(1)" " * function_arg(1)",
+ "regression_args": [3, 5],
},
},
- 'arg_to_param_map': {1: 'txbytes'},
- 'argument_values': [['"foo"', '"hodor"'], [3, 5]],
- 'argument_combination': 'zip',
+ "arg_to_param_map": {1: "txbytes"},
+ "argument_values": [['"foo"', '"hodor"'], [3, 5]],
+ "argument_combination": "zip",
},
{
- 'name': 'txComplete',
- 'origin': 'TX',
- 'destination': 'IDLE',
- 'is_interrupt': 1,
- 'timeout': {
- 'static': 2000,
- 'function': {
- 'raw': 'regression_arg(0) + regression_arg(1)'
- ' * parameter(txbytes)',
- 'regression_args': [500, 16],
+ "name": "txComplete",
+ "origin": "TX",
+ "destination": "IDLE",
+ "is_interrupt": 1,
+ "timeout": {
+ "static": 2000,
+ "function": {
+ "raw": "regression_arg(0) + regression_arg(1)"
+ " * parameter(txbytes)",
+ "regression_args": [500, 16],
},
},
- }
+ },
],
}
-example_yaml_1 = yaml.safe_load("""
+example_yaml_1 = yaml.safe_load(
+ """
codegen:
instance: cc1200
@@ -124,9 +115,11 @@ transition:
src: [TX]
dst: IDLE
is_interrupt: true
-""")
+"""
+)
-example_yaml_2 = yaml.safe_load("""
+example_yaml_2 = yaml.safe_load(
+ """
codegen:
instance: cc1200
@@ -169,9 +162,11 @@ transition:
src: [TX]
dst: IDLE
is_interrupt: true
-""")
+"""
+)
-example_yaml_3 = yaml.safe_load("""
+example_yaml_3 = yaml.safe_load(
+ """
codegen:
instance: nrf24l01
includes: ['driver/nrf24l01.h']
@@ -260,12 +255,17 @@ transition:
- name: blocking
values: [1, 1, 1, 1, 1, 1]
argument_combination: zip
-""")
+"""
+)
-def dfs_tran_to_name(runs: list, with_args: bool = False, with_param: bool = False) -> list:
+def dfs_tran_to_name(
+ runs: list, with_args: bool = False, with_param: bool = False
+) -> list:
if with_param:
- return list(map(lambda run: list(map(lambda x: (x[0].name, x[1], x[2]), run)), runs))
+ return list(
+ map(lambda run: list(map(lambda x: (x[0].name, x[1], x[2]), run)), runs)
+ )
if with_args:
return list(map(lambda run: list(map(lambda x: (x[0].name, x[1]), run)), runs))
return list(map(lambda run: list(map(lambda x: (x[0].name), run)), runs))
@@ -273,117 +273,175 @@ def dfs_tran_to_name(runs: list, with_args: bool = False, with_param: bool = Fal
class TestPTA(unittest.TestCase):
def test_dfs(self):
- pta = PTA(['IDLE', 'TX'])
- pta.add_transition('UNINITIALIZED', 'IDLE', 'init')
- pta.add_transition('IDLE', 'TX', 'send')
- pta.add_transition('TX', 'IDLE', 'txComplete')
- self.assertEqual(dfs_tran_to_name(pta.dfs(0), False), [['init']])
- self.assertEqual(dfs_tran_to_name(pta.dfs(1), False), [['init', 'send']])
- self.assertEqual(dfs_tran_to_name(pta.dfs(2), False), [['init', 'send', 'txComplete']])
- self.assertEqual(dfs_tran_to_name(pta.dfs(3), False), [['init', 'send', 'txComplete', 'send']])
-
- pta = PTA(['IDLE'])
- pta.add_transition('UNINITIALIZED', 'IDLE', 'init')
- pta.add_transition('IDLE', 'IDLE', 'set1')
- pta.add_transition('IDLE', 'IDLE', 'set2')
- self.assertEqual(dfs_tran_to_name(pta.dfs(0), False), [['init']])
- self.assertEqual(sorted(dfs_tran_to_name(pta.dfs(1), False)), [['init', 'set1'], ['init', 'set2']])
- self.assertEqual(sorted(dfs_tran_to_name(pta.dfs(2), False)), [['init', 'set1', 'set1'],
- ['init', 'set1', 'set2'],
- ['init', 'set2', 'set1'],
- ['init', 'set2', 'set2']])
+ pta = PTA(["IDLE", "TX"])
+ pta.add_transition("UNINITIALIZED", "IDLE", "init")
+ pta.add_transition("IDLE", "TX", "send")
+ pta.add_transition("TX", "IDLE", "txComplete")
+ self.assertEqual(dfs_tran_to_name(pta.dfs(0), False), [["init"]])
+ self.assertEqual(dfs_tran_to_name(pta.dfs(1), False), [["init", "send"]])
+ self.assertEqual(
+ dfs_tran_to_name(pta.dfs(2), False), [["init", "send", "txComplete"]]
+ )
+ self.assertEqual(
+ dfs_tran_to_name(pta.dfs(3), False),
+ [["init", "send", "txComplete", "send"]],
+ )
+
+ pta = PTA(["IDLE"])
+ pta.add_transition("UNINITIALIZED", "IDLE", "init")
+ pta.add_transition("IDLE", "IDLE", "set1")
+ pta.add_transition("IDLE", "IDLE", "set2")
+ self.assertEqual(dfs_tran_to_name(pta.dfs(0), False), [["init"]])
+ self.assertEqual(
+ sorted(dfs_tran_to_name(pta.dfs(1), False)),
+ [["init", "set1"], ["init", "set2"]],
+ )
+ self.assertEqual(
+ sorted(dfs_tran_to_name(pta.dfs(2), False)),
+ [
+ ["init", "set1", "set1"],
+ ["init", "set1", "set2"],
+ ["init", "set2", "set1"],
+ ["init", "set2", "set2"],
+ ],
+ )
def test_dfs_trace_filter(self):
- pta = PTA(['IDLE'])
- pta.add_transition('UNINITIALIZED', 'IDLE', 'init')
- pta.add_transition('IDLE', 'IDLE', 'set1')
- pta.add_transition('IDLE', 'IDLE', 'set2')
- self.assertEqual(sorted(dfs_tran_to_name(pta.dfs(2, trace_filter=[['init', 'set1', 'set2'], ['init', 'set2', 'set1']]), False)),
- [['init', 'set1', 'set2'], ['init', 'set2', 'set1']])
- self.assertEqual(sorted(dfs_tran_to_name(pta.dfs(2, trace_filter=[['init', 'set1', '$'], ['init', 'set2', '$']]), False)),
- [['init', 'set1'], ['init', 'set2']])
+ pta = PTA(["IDLE"])
+ pta.add_transition("UNINITIALIZED", "IDLE", "init")
+ pta.add_transition("IDLE", "IDLE", "set1")
+ pta.add_transition("IDLE", "IDLE", "set2")
+ self.assertEqual(
+ sorted(
+ dfs_tran_to_name(
+ pta.dfs(
+ 2,
+ trace_filter=[
+ ["init", "set1", "set2"],
+ ["init", "set2", "set1"],
+ ],
+ ),
+ False,
+ )
+ ),
+ [["init", "set1", "set2"], ["init", "set2", "set1"]],
+ )
+ self.assertEqual(
+ sorted(
+ dfs_tran_to_name(
+ pta.dfs(
+ 2, trace_filter=[["init", "set1", "$"], ["init", "set2", "$"]]
+ ),
+ False,
+ )
+ ),
+ [["init", "set1"], ["init", "set2"]],
+ )
def test_dfs_accepting(self):
- pta = PTA(['IDLE', 'TX'], accepting_states=['IDLE'])
- pta.add_transition('UNINITIALIZED', 'IDLE', 'init')
- pta.add_transition('IDLE', 'TX', 'send')
- pta.add_transition('TX', 'IDLE', 'txComplete')
- self.assertEqual(dfs_tran_to_name(pta.dfs(0), False), [['init']])
+ pta = PTA(["IDLE", "TX"], accepting_states=["IDLE"])
+ pta.add_transition("UNINITIALIZED", "IDLE", "init")
+ pta.add_transition("IDLE", "TX", "send")
+ pta.add_transition("TX", "IDLE", "txComplete")
+ self.assertEqual(dfs_tran_to_name(pta.dfs(0), False), [["init"]])
self.assertEqual(dfs_tran_to_name(pta.dfs(1), False), [])
- self.assertEqual(dfs_tran_to_name(pta.dfs(2), False), [['init', 'send', 'txComplete']])
+ self.assertEqual(
+ dfs_tran_to_name(pta.dfs(2), False), [["init", "send", "txComplete"]]
+ )
self.assertEqual(dfs_tran_to_name(pta.dfs(3), False), [])
def test_dfs_objects(self):
- pta = PTA(['IDLE', 'TX'])
- pta.add_transition('UNINITIALIZED', 'IDLE', 'init')
- pta.add_transition('IDLE', 'TX', 'send')
- pta.add_transition('TX', 'IDLE', 'txComplete')
+ pta = PTA(["IDLE", "TX"])
+ pta.add_transition("UNINITIALIZED", "IDLE", "init")
+ pta.add_transition("IDLE", "TX", "send")
+ pta.add_transition("TX", "IDLE", "txComplete")
traces = list(pta.dfs(2))
self.assertEqual(len(traces), 1)
trace = traces[0]
self.assertEqual(len(trace), 3)
- self.assertEqual(trace[0][0].name, 'init')
- self.assertEqual(trace[1][0].name, 'send')
- self.assertEqual(trace[2][0].name, 'txComplete')
+ self.assertEqual(trace[0][0].name, "init")
+ self.assertEqual(trace[1][0].name, "send")
+ self.assertEqual(trace[2][0].name, "txComplete")
self.assertEqual(pta.get_transition_id(trace[0][0]), 0)
self.assertEqual(pta.get_transition_id(trace[1][0]), 1)
self.assertEqual(pta.get_transition_id(trace[2][0]), 2)
def test_dfs_with_sleep(self):
- pta = PTA(['IDLE', 'TX'])
- pta.add_transition('UNINITIALIZED', 'IDLE', 'init')
- pta.add_transition('IDLE', 'TX', 'send')
- pta.add_transition('TX', 'IDLE', 'txComplete')
+ pta = PTA(["IDLE", "TX"])
+ pta.add_transition("UNINITIALIZED", "IDLE", "init")
+ pta.add_transition("IDLE", "TX", "send")
+ pta.add_transition("TX", "IDLE", "txComplete")
traces = list(pta.dfs(2, sleep=10))
self.assertEqual(len(traces), 1)
trace = traces[0]
self.assertEqual(len(trace), 6)
self.assertIsNone(trace[0][0])
- self.assertEqual(trace[1][0].name, 'init')
+ self.assertEqual(trace[1][0].name, "init")
self.assertIsNone(trace[2][0])
- self.assertEqual(trace[3][0].name, 'send')
+ self.assertEqual(trace[3][0].name, "send")
self.assertIsNone(trace[4][0])
- self.assertEqual(trace[5][0].name, 'txComplete')
+ self.assertEqual(trace[5][0].name, "txComplete")
self.assertEqual(pta.get_transition_id(trace[1][0]), 0)
self.assertEqual(pta.get_transition_id(trace[3][0]), 1)
self.assertEqual(pta.get_transition_id(trace[5][0]), 2)
def test_bfs(self):
- pta = PTA(['IDLE', 'TX'])
- pta.add_transition('UNINITIALIZED', 'IDLE', 'init')
- pta.add_transition('IDLE', 'TX', 'send')
- pta.add_transition('TX', 'IDLE', 'txComplete')
- self.assertEqual(dfs_tran_to_name(pta.bfs(0), False), [['init']])
- self.assertEqual(dfs_tran_to_name(pta.bfs(1), False), [['init'], ['init', 'send']])
- self.assertEqual(dfs_tran_to_name(pta.bfs(2), False), [['init'], ['init', 'send'], ['init', 'send', 'txComplete']])
- self.assertEqual(dfs_tran_to_name(pta.bfs(3), False), [['init'], ['init', 'send'], ['init', 'send', 'txComplete'], ['init', 'send', 'txComplete', 'send']])
-
- pta = PTA(['IDLE'])
- pta.add_transition('UNINITIALIZED', 'IDLE', 'init')
- pta.add_transition('IDLE', 'IDLE', 'set1')
- pta.add_transition('IDLE', 'IDLE', 'set2')
- self.assertEqual(dfs_tran_to_name(pta.bfs(0), False), [['init']])
- self.assertEqual(sorted(dfs_tran_to_name(pta.bfs(1), False)), [['init'], ['init', 'set1'], ['init', 'set2']])
- self.assertEqual(sorted(dfs_tran_to_name(pta.bfs(2), False)), [['init'],
- ['init', 'set1'],
- ['init', 'set1', 'set1'],
- ['init', 'set1', 'set2'],
- ['init', 'set2'],
- ['init', 'set2', 'set1'],
- ['init', 'set2', 'set2']])
+ pta = PTA(["IDLE", "TX"])
+ pta.add_transition("UNINITIALIZED", "IDLE", "init")
+ pta.add_transition("IDLE", "TX", "send")
+ pta.add_transition("TX", "IDLE", "txComplete")
+ self.assertEqual(dfs_tran_to_name(pta.bfs(0), False), [["init"]])
+ self.assertEqual(
+ dfs_tran_to_name(pta.bfs(1), False), [["init"], ["init", "send"]]
+ )
+ self.assertEqual(
+ dfs_tran_to_name(pta.bfs(2), False),
+ [["init"], ["init", "send"], ["init", "send", "txComplete"]],
+ )
+ self.assertEqual(
+ dfs_tran_to_name(pta.bfs(3), False),
+ [
+ ["init"],
+ ["init", "send"],
+ ["init", "send", "txComplete"],
+ ["init", "send", "txComplete", "send"],
+ ],
+ )
+
+ pta = PTA(["IDLE"])
+ pta.add_transition("UNINITIALIZED", "IDLE", "init")
+ pta.add_transition("IDLE", "IDLE", "set1")
+ pta.add_transition("IDLE", "IDLE", "set2")
+ self.assertEqual(dfs_tran_to_name(pta.bfs(0), False), [["init"]])
+ self.assertEqual(
+ sorted(dfs_tran_to_name(pta.bfs(1), False)),
+ [["init"], ["init", "set1"], ["init", "set2"]],
+ )
+ self.assertEqual(
+ sorted(dfs_tran_to_name(pta.bfs(2), False)),
+ [
+ ["init"],
+ ["init", "set1"],
+ ["init", "set1", "set1"],
+ ["init", "set1", "set2"],
+ ["init", "set2"],
+ ["init", "set2", "set1"],
+ ["init", "set2", "set2"],
+ ],
+ )
def test_from_json(self):
pta = PTA.from_json(example_json_1)
- self.assertEqual(pta.parameters, ['datarate', 'txbytes', 'txpower'])
- self.assertEqual(pta.state['UNINITIALIZED'].name, 'UNINITIALIZED')
- self.assertEqual(pta.state['IDLE'].name, 'IDLE')
- self.assertEqual(pta.state['TX'].name, 'TX')
+ self.assertEqual(pta.parameters, ["datarate", "txbytes", "txpower"])
+ self.assertEqual(pta.state["UNINITIALIZED"].name, "UNINITIALIZED")
+ self.assertEqual(pta.state["IDLE"].name, "IDLE")
+ self.assertEqual(pta.state["TX"].name, "TX")
self.assertEqual(len(pta.transitions), 5)
- self.assertEqual(pta.transitions[0].name, 'init')
- self.assertEqual(pta.transitions[1].name, 'init')
- self.assertEqual(pta.transitions[2].name, 'setTxPower')
- self.assertEqual(pta.transitions[3].name, 'send')
- self.assertEqual(pta.transitions[4].name, 'txComplete')
+ self.assertEqual(pta.transitions[0].name, "init")
+ self.assertEqual(pta.transitions[1].name, "init")
+ self.assertEqual(pta.transitions[2].name, "setTxPower")
+ self.assertEqual(pta.transitions[3].name, "send")
+ self.assertEqual(pta.transitions[4].name, "txComplete")
# def test_to_json(self):
# pta = PTA.from_json(example_json_1)
@@ -394,368 +452,471 @@ class TestPTA(unittest.TestCase):
def test_from_json_dfs_arg(self):
pta = PTA.from_json(example_json_1)
- self.assertEqual(sorted(dfs_tran_to_name(pta.dfs(1), False)), [['init', 'init'], ['init', 'send'], ['init', 'setTxPower']])
- self.assertEqual(sorted(dfs_tran_to_name(pta.dfs(1, with_arguments=True), True)),
- [
- [('init', ()), ('init', ())],
- [('init', ()), ('send', ('"foo"', 3))],
- [('init', ()), ('send', ('"hodor"', 5))],
- [('init', ()), ('setTxPower', (10,))],
- [('init', ()), ('setTxPower', (20,))],
- [('init', ()), ('setTxPower', (30,))],
- ]
+ self.assertEqual(
+ sorted(dfs_tran_to_name(pta.dfs(1), False)),
+ [["init", "init"], ["init", "send"], ["init", "setTxPower"]],
+ )
+ self.assertEqual(
+ sorted(dfs_tran_to_name(pta.dfs(1, with_arguments=True), True)),
+ [
+ [("init", ()), ("init", ())],
+ [("init", ()), ("send", ('"foo"', 3))],
+ [("init", ()), ("send", ('"hodor"', 5))],
+ [("init", ()), ("setTxPower", (10,))],
+ [("init", ()), ("setTxPower", (20,))],
+ [("init", ()), ("setTxPower", (30,))],
+ ],
)
def test_from_json_dfs_param(self):
pta = PTA.from_json(example_json_1)
no_param = {
- 'datarate': None,
- 'txbytes': None,
- 'txpower': 10,
+ "datarate": None,
+ "txbytes": None,
+ "txpower": 10,
}
param_tx3 = {
- 'datarate': None,
- 'txbytes': 3,
- 'txpower': 10,
+ "datarate": None,
+ "txbytes": 3,
+ "txpower": 10,
}
param_tx5 = {
- 'datarate': None,
- 'txbytes': 5,
- 'txpower': 10,
+ "datarate": None,
+ "txbytes": 5,
+ "txpower": 10,
}
param_txp10 = {
- 'datarate': None,
- 'txbytes': None,
- 'txpower': 10,
+ "datarate": None,
+ "txbytes": None,
+ "txpower": 10,
}
param_txp20 = {
- 'datarate': None,
- 'txbytes': None,
- 'txpower': 20,
+ "datarate": None,
+ "txbytes": None,
+ "txpower": 20,
}
param_txp30 = {
- 'datarate': None,
- 'txbytes': None,
- 'txpower': 30,
+ "datarate": None,
+ "txbytes": None,
+ "txpower": 30,
}
- self.assertEqual(sorted(dfs_tran_to_name(pta.dfs(1, with_arguments=True, with_parameters=True), True, True)),
- [
- [('init', (), no_param), ('init', (), no_param)],
- [('init', (), no_param), ('send', ('"foo"', 3), param_tx3)],
- [('init', (), no_param), ('send', ('"hodor"', 5), param_tx5)],
- [('init', (), no_param), ('setTxPower', (10,), param_txp10)],
- [('init', (), no_param), ('setTxPower', (20,), param_txp20)],
- [('init', (), no_param), ('setTxPower', (30,), param_txp30)],
- ]
+ self.assertEqual(
+ sorted(
+ dfs_tran_to_name(
+ pta.dfs(1, with_arguments=True, with_parameters=True), True, True
+ )
+ ),
+ [
+ [("init", (), no_param), ("init", (), no_param)],
+ [("init", (), no_param), ("send", ('"foo"', 3), param_tx3)],
+ [("init", (), no_param), ("send", ('"hodor"', 5), param_tx5)],
+ [("init", (), no_param), ("setTxPower", (10,), param_txp10)],
+ [("init", (), no_param), ("setTxPower", (20,), param_txp20)],
+ [("init", (), no_param), ("setTxPower", (30,), param_txp30)],
+ ],
)
def test_from_json_function(self):
pta = PTA.from_json(example_json_1)
- self.assertEqual(pta.state['TX'].get_energy(1000, {'datarate': 10, 'txbytes': 6, 'txpower': 10}), 1000 * (10000 + 2 * 10))
- self.assertEqual(pta.transitions[4].get_timeout({'datarate': 10, 'txbytes': 6, 'txpower': 10}), 500 + 16 * 6)
+ self.assertEqual(
+ pta.state["TX"].get_energy(
+ 1000, {"datarate": 10, "txbytes": 6, "txpower": 10}
+ ),
+ 1000 * (10000 + 2 * 10),
+ )
+ self.assertEqual(
+ pta.transitions[4].get_timeout(
+ {"datarate": 10, "txbytes": 6, "txpower": 10}
+ ),
+ 500 + 16 * 6,
+ )
def test_from_yaml_dfs_param(self):
pta = PTA.from_yaml(example_yaml_1)
no_param = {
- 'datarate': None,
- 'txbytes': None,
- 'txpower': None,
+ "datarate": None,
+ "txbytes": None,
+ "txpower": None,
}
param_tx3 = {
- 'datarate': None,
- 'txbytes': 3,
- 'txpower': None,
+ "datarate": None,
+ "txbytes": 3,
+ "txpower": None,
}
param_tx5 = {
- 'datarate': None,
- 'txbytes': 5,
- 'txpower': None,
+ "datarate": None,
+ "txbytes": 5,
+ "txpower": None,
}
param_txp10 = {
- 'datarate': None,
- 'txbytes': None,
- 'txpower': 10,
+ "datarate": None,
+ "txbytes": None,
+ "txpower": 10,
}
param_txp20 = {
- 'datarate': None,
- 'txbytes': None,
- 'txpower': 20,
+ "datarate": None,
+ "txbytes": None,
+ "txpower": 20,
}
param_txp30 = {
- 'datarate': None,
- 'txbytes': None,
- 'txpower': 30,
+ "datarate": None,
+ "txbytes": None,
+ "txpower": 30,
}
- self.assertEqual(sorted(dfs_tran_to_name(pta.dfs(1, with_arguments=True, with_parameters=True), True, True)),
- [
- [('init', (), no_param), ('init', (), no_param)],
- [('init', (), no_param), ('send', ('"foo"', 3), param_tx3)],
- [('init', (), no_param), ('send', ('"hodor"', 5), param_tx5)],
- [('init', (), no_param), ('setTxPower', (10,), param_txp10)],
- [('init', (), no_param), ('setTxPower', (20,), param_txp20)],
- [('init', (), no_param), ('setTxPower', (30,), param_txp30)],
- ]
+ self.assertEqual(
+ sorted(
+ dfs_tran_to_name(
+ pta.dfs(1, with_arguments=True, with_parameters=True), True, True
+ )
+ ),
+ [
+ [("init", (), no_param), ("init", (), no_param)],
+ [("init", (), no_param), ("send", ('"foo"', 3), param_tx3)],
+ [("init", (), no_param), ("send", ('"hodor"', 5), param_tx5)],
+ [("init", (), no_param), ("setTxPower", (10,), param_txp10)],
+ [("init", (), no_param), ("setTxPower", (20,), param_txp20)],
+ [("init", (), no_param), ("setTxPower", (30,), param_txp30)],
+ ],
)
def test_normalization(self):
pta = PTA.from_yaml(example_yaml_2)
no_param = {
- 'datarate': None,
- 'txbytes': None,
- 'txpower': None,
+ "datarate": None,
+ "txbytes": None,
+ "txpower": None,
}
param_tx3 = {
- 'datarate': None,
- 'txbytes': 3,
- 'txpower': None,
+ "datarate": None,
+ "txbytes": 3,
+ "txpower": None,
}
param_tx6 = {
- 'datarate': None,
- 'txbytes': 6,
- 'txpower': None,
+ "datarate": None,
+ "txbytes": 6,
+ "txpower": None,
}
param_txp10 = {
- 'datarate': None,
- 'txbytes': None,
- 'txpower': -6,
+ "datarate": None,
+ "txbytes": None,
+ "txpower": -6,
}
param_txp20 = {
- 'datarate': None,
- 'txbytes': None,
- 'txpower': 4,
+ "datarate": None,
+ "txbytes": None,
+ "txpower": 4,
}
param_txp30 = {
- 'datarate': None,
- 'txbytes': None,
- 'txpower': 14,
+ "datarate": None,
+ "txbytes": None,
+ "txpower": 14,
}
- self.assertEqual(sorted(dfs_tran_to_name(pta.dfs(1, with_arguments=True, with_parameters=True), True, True)),
- [
- [('init', (), no_param), ('init', (), no_param)],
- [('init', (), no_param), ('send', ('FOO',), param_tx3)],
- [('init', (), no_param), ('send', ('LONGER',), param_tx6)],
- [('init', (), no_param), ('setTxPower', (10,), param_txp10)],
- [('init', (), no_param), ('setTxPower', (20,), param_txp20)],
- [('init', (), no_param), ('setTxPower', (30,), param_txp30)],
- ]
+ self.assertEqual(
+ sorted(
+ dfs_tran_to_name(
+ pta.dfs(1, with_arguments=True, with_parameters=True), True, True
+ )
+ ),
+ [
+ [("init", (), no_param), ("init", (), no_param)],
+ [("init", (), no_param), ("send", ("FOO",), param_tx3)],
+ [("init", (), no_param), ("send", ("LONGER",), param_tx6)],
+ [("init", (), no_param), ("setTxPower", (10,), param_txp10)],
+ [("init", (), no_param), ("setTxPower", (20,), param_txp20)],
+ [("init", (), no_param), ("setTxPower", (30,), param_txp30)],
+ ],
)
def test_shrink(self):
pta = PTA.from_yaml(example_yaml_3)
pta.shrink_argument_values()
- self.assertEqual(pta.transitions[0].name, 'setAutoAck')
- self.assertEqual(pta.transitions[1].name, 'setPALevel')
- self.assertEqual(pta.transitions[2].name, 'setRetries')
- self.assertEqual(pta.transitions[3].name, 'setup')
- self.assertEqual(pta.transitions[4].name, 'setup')
- self.assertEqual(pta.transitions[5].name, 'write')
+ self.assertEqual(pta.transitions[0].name, "setAutoAck")
+ self.assertEqual(pta.transitions[1].name, "setPALevel")
+ self.assertEqual(pta.transitions[2].name, "setRetries")
+ self.assertEqual(pta.transitions[3].name, "setup")
+ self.assertEqual(pta.transitions[4].name, "setup")
+ self.assertEqual(pta.transitions[5].name, "write")
self.assertEqual(pta.transitions[0].argument_values, [[0, 1]])
- self.assertEqual(pta.transitions[1].argument_values, [['Nrf24l01::RF24_PA_MIN', 'Nrf24l01::RF24_PA_MAX']])
+ self.assertEqual(
+ pta.transitions[1].argument_values,
+ [["Nrf24l01::RF24_PA_MIN", "Nrf24l01::RF24_PA_MAX"]],
+ )
self.assertEqual(pta.transitions[2].argument_values, [[0, 15], [0, 15]])
- self.assertEqual(pta.transitions[5].argument_values, [['"foo"', '"foo"', '"foofoofoo"', '"foofoofoo"', '"123456789012345678901234567890"',
- '"123456789012345678901234567890"'], [3, 3, 9, 9, 30, 30], [0, 1, 0, 1, 0, 1], [1, 1, 1, 1, 1, 1]])
+ self.assertEqual(
+ pta.transitions[5].argument_values,
+ [
+ [
+ '"foo"',
+ '"foo"',
+ '"foofoofoo"',
+ '"foofoofoo"',
+ '"123456789012345678901234567890"',
+ '"123456789012345678901234567890"',
+ ],
+ [3, 3, 9, 9, 30, 30],
+ [0, 1, 0, 1, 0, 1],
+ [1, 1, 1, 1, 1, 1],
+ ],
+ )
def test_simulation(self):
pta = PTA()
- pta.add_state('IDLE', power=5)
- pta.add_state('TX', power=100)
- pta.add_transition('UNINITIALIZED', 'IDLE', 'init', duration=50000)
- pta.add_transition('IDLE', 'TX', 'send', energy=3, duration=10)
- pta.add_transition('TX', 'IDLE', 'txComplete', timeout=2000, is_interrupt=True)
+ pta.add_state("IDLE", power=5)
+ pta.add_state("TX", power=100)
+ pta.add_transition("UNINITIALIZED", "IDLE", "init", duration=50000)
+ pta.add_transition("IDLE", "TX", "send", energy=3, duration=10)
+ pta.add_transition("TX", "IDLE", "txComplete", timeout=2000, is_interrupt=True)
trace = [
- ['init'],
+ ["init"],
[None, 10000000],
- ['send', 'foo', 3],
+ ["send", "foo", 3],
[None, 5000000],
- ['send', 'foo', 3]
+ ["send", "foo", 3],
]
- expected_energy = 5. * 10000000 + 3 + 100 * 2000 + 5 * 5000000 + 3 + 100 * 2000
+ expected_energy = 5.0 * 10000000 + 3 + 100 * 2000 + 5 * 5000000 + 3 + 100 * 2000
expected_duration = 50000 + 10000000 + 10 + 2000 + 5000000 + 10 + 2000
result = pta.simulate(trace)
self.assertAlmostEqual(result.energy, expected_energy * 1e-12, places=12)
self.assertAlmostEqual(result.duration, expected_duration * 1e-6, places=6)
- self.assertEqual(result.end_state.name, 'IDLE')
+ self.assertEqual(result.end_state.name, "IDLE")
self.assertEqual(result.parameters, {})
def test_simulation_param_none(self):
- pta = PTA(parameters=['txpower', 'length'])
- pta.add_state('IDLE', power=5)
- pta.add_state('TX', power=100)
- pta.add_transition('UNINITIALIZED', 'IDLE', 'init', energy=500000, duration=50000)
- pta.add_transition('IDLE', 'TX', 'send', energy=3, duration=10)
- pta.add_transition('TX', 'IDLE', 'txComplete', timeout=2000, is_interrupt=True)
+ pta = PTA(parameters=["txpower", "length"])
+ pta.add_state("IDLE", power=5)
+ pta.add_state("TX", power=100)
+ pta.add_transition(
+ "UNINITIALIZED", "IDLE", "init", energy=500000, duration=50000
+ )
+ pta.add_transition("IDLE", "TX", "send", energy=3, duration=10)
+ pta.add_transition("TX", "IDLE", "txComplete", timeout=2000, is_interrupt=True)
trace = [
- ['init'],
+ ["init"],
]
expected_energy = 500000
expected_duration = 50000
result = pta.simulate(trace)
self.assertAlmostEqual(result.energy, expected_energy * 1e-12, places=12)
self.assertAlmostEqual(result.duration, expected_duration * 1e-6, places=6)
- self.assertEqual(result.end_state.name, 'IDLE')
- self.assertEqual(result.parameters, {
- 'txpower': None,
- 'length': None
- })
+ self.assertEqual(result.end_state.name, "IDLE")
+ self.assertEqual(result.parameters, {"txpower": None, "length": None})
def test_simulation_param_update_function(self):
- pta = PTA(parameters=['txpower', 'length'])
- pta.add_state('IDLE', power=5)
- pta.add_state('TX', power=100)
- pta.add_transition('UNINITIALIZED', 'IDLE', 'init', energy=500000, duration=50000)
- pta.add_transition('IDLE', 'IDLE', 'setTxPower', energy=10000, duration=120,
- param_update_function=lambda param, arg: {**param, 'txpower': arg[0]})
- pta.add_transition('IDLE', 'TX', 'send', energy=3, duration=10)
- pta.add_transition('TX', 'IDLE', 'txComplete', timeout=2000, is_interrupt=True)
- trace = [
- ['init'],
- ['setTxPower', 10]
- ]
+ pta = PTA(parameters=["txpower", "length"])
+ pta.add_state("IDLE", power=5)
+ pta.add_state("TX", power=100)
+ pta.add_transition(
+ "UNINITIALIZED", "IDLE", "init", energy=500000, duration=50000
+ )
+ pta.add_transition(
+ "IDLE",
+ "IDLE",
+ "setTxPower",
+ energy=10000,
+ duration=120,
+ param_update_function=lambda param, arg: {**param, "txpower": arg[0]},
+ )
+ pta.add_transition("IDLE", "TX", "send", energy=3, duration=10)
+ pta.add_transition("TX", "IDLE", "txComplete", timeout=2000, is_interrupt=True)
+ trace = [["init"], ["setTxPower", 10]]
expected_energy = 510000
expected_duration = 50120
result = pta.simulate(trace)
self.assertAlmostEqual(result.energy, expected_energy * 1e-12, places=12)
self.assertAlmostEqual(result.duration, expected_duration * 1e-6, places=6)
- self.assertEqual(result.end_state.name, 'IDLE')
- self.assertEqual(result.parameters, {
- 'txpower': 10,
- 'length': None
- })
+ self.assertEqual(result.end_state.name, "IDLE")
+ self.assertEqual(result.parameters, {"txpower": 10, "length": None})
def test_simulation_arg_to_param_map(self):
- pta = PTA(parameters=['txpower', 'length'])
- pta.add_state('IDLE', power=5)
- pta.add_state('TX', power=100)
- pta.add_transition('UNINITIALIZED', 'IDLE', 'init', energy=500000, duration=50000)
- pta.add_transition('IDLE', 'IDLE', 'setTxPower', energy=10000, duration=120,
- arg_to_param_map={0: 'txpower'})
- pta.add_transition('IDLE', 'TX', 'send', energy=3, duration=10)
- pta.add_transition('TX', 'IDLE', 'txComplete', timeout=2000, is_interrupt=True)
- trace = [
- ['init'],
- ['setTxPower', 10]
- ]
+ pta = PTA(parameters=["txpower", "length"])
+ pta.add_state("IDLE", power=5)
+ pta.add_state("TX", power=100)
+ pta.add_transition(
+ "UNINITIALIZED", "IDLE", "init", energy=500000, duration=50000
+ )
+ pta.add_transition(
+ "IDLE",
+ "IDLE",
+ "setTxPower",
+ energy=10000,
+ duration=120,
+ arg_to_param_map={0: "txpower"},
+ )
+ pta.add_transition("IDLE", "TX", "send", energy=3, duration=10)
+ pta.add_transition("TX", "IDLE", "txComplete", timeout=2000, is_interrupt=True)
+ trace = [["init"], ["setTxPower", 10]]
expected_energy = 510000
expected_duration = 50120
result = pta.simulate(trace)
self.assertAlmostEqual(result.energy, expected_energy * 1e-12, places=12)
self.assertAlmostEqual(result.duration, expected_duration * 1e-6, places=6)
- self.assertEqual(result.end_state.name, 'IDLE')
- self.assertEqual(result.parameters, {
- 'txpower': 10,
- 'length': None
- })
+ self.assertEqual(result.end_state.name, "IDLE")
+ self.assertEqual(result.parameters, {"txpower": 10, "length": None})
def test_simulation_set_param(self):
- pta = PTA(parameters=['txpower', 'length'])
- pta.add_state('IDLE', power=5)
- pta.add_state('TX', power=100)
- pta.add_transition('UNINITIALIZED', 'IDLE', 'init', energy=500000, duration=50000, set_param={'txpower': 10})
+ pta = PTA(parameters=["txpower", "length"])
+ pta.add_state("IDLE", power=5)
+ pta.add_state("TX", power=100)
+ pta.add_transition(
+ "UNINITIALIZED",
+ "IDLE",
+ "init",
+ energy=500000,
+ duration=50000,
+ set_param={"txpower": 10},
+ )
trace = [
- ['init'],
+ ["init"],
]
expected_energy = 500000
expected_duration = 50000
result = pta.simulate(trace)
self.assertAlmostEqual(result.energy, expected_energy * 1e-12, places=12)
self.assertAlmostEqual(result.duration, expected_duration * 1e-6, places=6)
- self.assertEqual(result.end_state.name, 'IDLE')
- self.assertEqual(result.parameters, {
- 'txpower': 10,
- 'length': None
- })
+ self.assertEqual(result.end_state.name, "IDLE")
+ self.assertEqual(result.parameters, {"txpower": 10, "length": None})
def test_simulation_arg_function(self):
- pta = PTA(parameters=['txpower', 'length'])
- pta.add_state('IDLE', power=5)
- pta.add_state('TX', power=100)
- pta.add_transition('UNINITIALIZED', 'IDLE', 'init', energy=500000, duration=50000)
- pta.add_transition('IDLE', 'IDLE', 'setTxPower', energy=10000, duration=120,
- param_update_function=lambda param, arg: {**param, 'txpower': arg[0]})
- pta.add_transition('IDLE', 'TX', 'send', energy=3, duration=10,
- energy_function=lambda param, arg: 3 + 5 * arg[1],
- duration_function=lambda param, arg: 48 + 8 * arg[1])
- pta.add_transition('TX', 'IDLE', 'txComplete', timeout=2000, is_interrupt=True)
+ pta = PTA(parameters=["txpower", "length"])
+ pta.add_state("IDLE", power=5)
+ pta.add_state("TX", power=100)
+ pta.add_transition(
+ "UNINITIALIZED", "IDLE", "init", energy=500000, duration=50000
+ )
+ pta.add_transition(
+ "IDLE",
+ "IDLE",
+ "setTxPower",
+ energy=10000,
+ duration=120,
+ param_update_function=lambda param, arg: {**param, "txpower": arg[0]},
+ )
+ pta.add_transition(
+ "IDLE",
+ "TX",
+ "send",
+ energy=3,
+ duration=10,
+ energy_function=lambda param, arg: 3 + 5 * arg[1],
+ duration_function=lambda param, arg: 48 + 8 * arg[1],
+ )
+ pta.add_transition("TX", "IDLE", "txComplete", timeout=2000, is_interrupt=True)
trace = [
- ['init'],
- ['setTxPower', 10],
- ['send', 'foo', 3],
+ ["init"],
+ ["setTxPower", 10],
+ ["send", "foo", 3],
]
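+        # expected totals: init + setTxPower + send (energy/duration functions
+        # evaluated with arg[1] = 3) + the txComplete interrupt, which contributes
+        # timeout * TX power to the energy and timeout to the duration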
expected_energy = 500000 + 10000 + (3 + 5 * 3) + (2000 * 100)
expected_duration = 50000 + 120 + (48 + 8 * 3) + 2000
result = pta.simulate(trace)
self.assertAlmostEqual(result.energy, expected_energy * 1e-12, places=12)
self.assertAlmostEqual(result.duration, expected_duration * 1e-6, places=6)
- self.assertEqual(result.end_state.name, 'IDLE')
- self.assertEqual(result.parameters, {
- 'txpower': 10,
- 'length': None
- })
-
- pta = PTA(parameters=['txpower', 'length'])
- pta.add_state('IDLE', power=5)
- pta.add_state('TX', power=100)
- pta.add_transition('UNINITIALIZED', 'IDLE', 'init', energy=500000, duration=50000)
- pta.add_transition('IDLE', 'IDLE', 'setTxPower', energy=10000, duration=120,
- param_update_function=lambda param, arg: {**param, 'txpower': arg[0]})
- pta.add_transition('IDLE', 'TX', 'send', energy=3, duration=10,
- energy_function=lambda param, arg: 3 + 5 * arg[1],
- duration_function=lambda param, arg: 48 + 8 * arg[1])
- pta.add_transition('TX', 'IDLE', 'txComplete', timeout=2000, is_interrupt=True)
+ self.assertEqual(result.end_state.name, "IDLE")
+ self.assertEqual(result.parameters, {"txpower": 10, "length": None})
+
+ pta = PTA(parameters=["txpower", "length"])
+ pta.add_state("IDLE", power=5)
+ pta.add_state("TX", power=100)
+ pta.add_transition(
+ "UNINITIALIZED", "IDLE", "init", energy=500000, duration=50000
+ )
+ pta.add_transition(
+ "IDLE",
+ "IDLE",
+ "setTxPower",
+ energy=10000,
+ duration=120,
+ param_update_function=lambda param, arg: {**param, "txpower": arg[0]},
+ )
+ pta.add_transition(
+ "IDLE",
+ "TX",
+ "send",
+ energy=3,
+ duration=10,
+ energy_function=lambda param, arg: 3 + 5 * arg[1],
+ duration_function=lambda param, arg: 48 + 8 * arg[1],
+ )
+ pta.add_transition("TX", "IDLE", "txComplete", timeout=2000, is_interrupt=True)
trace = [
- ['init'],
- ['setTxPower', 10],
- ['send', 'foobar', 6],
+ ["init"],
+ ["setTxPower", 10],
+ ["send", "foobar", 6],
]
expected_energy = 500000 + 10000 + (3 + 5 * 6) + (2000 * 100)
expected_duration = 50000 + 120 + (48 + 8 * 6) + 2000
result = pta.simulate(trace)
self.assertAlmostEqual(result.energy, expected_energy * 1e-12, places=12)
self.assertAlmostEqual(result.duration, expected_duration * 1e-6, places=6)
- self.assertEqual(result.end_state.name, 'IDLE')
- self.assertEqual(result.parameters, {
- 'txpower': 10,
- 'length': None
- })
+ self.assertEqual(result.end_state.name, "IDLE")
+ self.assertEqual(result.parameters, {"txpower": 10, "length": None})
def test_simulation_param_function(self):
- pta = PTA(parameters=['length', 'txpower'])
- pta.add_state('IDLE', power=5)
- pta.add_state('TX', power=100,
- power_function=lambda param, arg: 1000 + 2 * param[1])
- pta.add_transition('UNINITIALIZED', 'IDLE', 'init', energy=500000, duration=50000)
- pta.add_transition('IDLE', 'IDLE', 'setTxPower', energy=10000, duration=120,
- param_update_function=lambda param, arg: {**param, 'txpower': arg[0]})
- pta.add_transition('IDLE', 'TX', 'send', energy=3, duration=10,
- energy_function=lambda param, arg: 3 + 5 * arg[1],
- param_update_function=lambda param, arg: {**param, 'length': arg[1]})
- pta.add_transition('TX', 'IDLE', 'txComplete', timeout=2000, is_interrupt=True,
- timeout_function=lambda param, arg: 500 + 16 * param[0])
+ pta = PTA(parameters=["length", "txpower"])
+ pta.add_state("IDLE", power=5)
+ pta.add_state(
+ "TX", power=100, power_function=lambda param, arg: 1000 + 2 * param[1]
+ )
+ pta.add_transition(
+ "UNINITIALIZED", "IDLE", "init", energy=500000, duration=50000
+ )
+ pta.add_transition(
+ "IDLE",
+ "IDLE",
+ "setTxPower",
+ energy=10000,
+ duration=120,
+ param_update_function=lambda param, arg: {**param, "txpower": arg[0]},
+ )
+ pta.add_transition(
+ "IDLE",
+ "TX",
+ "send",
+ energy=3,
+ duration=10,
+ energy_function=lambda param, arg: 3 + 5 * arg[1],
+ param_update_function=lambda param, arg: {**param, "length": arg[1]},
+ )
+ pta.add_transition(
+ "TX",
+ "IDLE",
+ "txComplete",
+ timeout=2000,
+ is_interrupt=True,
+ timeout_function=lambda param, arg: 500 + 16 * param[0],
+ )
trace = [
- ['init'],
- ['setTxPower', 10],
- ['send', 'foo', 3],
+ ["init"],
+ ["setTxPower", 10],
+ ["send", "foo", 3],
]
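+        # here the TX power comes from power_function (param[1] = txpower = 10) and
+        # the timeout from timeout_function (param[0] = length = 3, set by send's
+        # param_update_function)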
- expected_energy = 500000 + 10000 + (3 + 5 * 3) + (1000 + 2 * 10) * (500 + 16 * 3)
+ expected_energy = (
+ 500000 + 10000 + (3 + 5 * 3) + (1000 + 2 * 10) * (500 + 16 * 3)
+ )
expected_duration = 50000 + 120 + 10 + (500 + 16 * 3)
result = pta.simulate(trace)
self.assertAlmostEqual(result.energy, expected_energy * 1e-12, places=12)
self.assertAlmostEqual(result.duration, expected_duration * 1e-6, places=6)
- self.assertEqual(result.end_state.name, 'IDLE')
- self.assertEqual(result.parameters, {
- 'txpower': 10,
- 'length': 3
- })
+ self.assertEqual(result.end_state.name, "IDLE")
+ self.assertEqual(result.parameters, {"txpower": 10, "length": 3})
def test_get_X_expensive_state(self):
pta = PTA.from_json(example_json_1)
- self.assertEqual(pta.get_least_expensive_state(), pta.state['IDLE'])
- self.assertEqual(pta.get_most_expensive_state(), pta.state['TX'])
+ self.assertEqual(pta.get_least_expensive_state(), pta.state["IDLE"])
+ self.assertEqual(pta.get_most_expensive_state(), pta.state["TX"])
# self.assertAlmostEqual(pta.min_duration_until_energy_overflow(), (2**32 - 1) * 1e-12 / 10e-3, places=9)
# self.assertAlmostEqual(pta.min_duration_until_energy_overflow(energy_granularity=1e-9), (2**32 - 1) * 1e-9 / 10e-3, places=9)
- self.assertAlmostEqual(pta.max_duration_until_energy_overflow(), (2**32 - 1) * 1e-12 / 5e-6, places=9)
- self.assertAlmostEqual(pta.max_duration_until_energy_overflow(energy_granularity=1e-9), (2**32 - 1) * 1e-9 / 5e-6, places=9)
+ self.assertAlmostEqual(
+ pta.max_duration_until_energy_overflow(),
+ (2 ** 32 - 1) * 1e-12 / 5e-6,
+ places=9,
+ )
+ self.assertAlmostEqual(
+ pta.max_duration_until_energy_overflow(energy_granularity=1e-9),
+ (2 ** 32 - 1) * 1e-9 / 5e-6,
+ places=9,
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_ptamodel.py b/test/test_ptamodel.py
index 7d501e6..e8905b1 100755
--- a/test/test_ptamodel.py
+++ b/test/test_ptamodel.py
@@ -1,248 +1,843 @@
#!/usr/bin/env python3
-from dfatool.dfatool import PTAModel, RawData, pta_trace_to_aggregate
+from dfatool.loader import RawData, pta_trace_to_aggregate
+from dfatool.model import PTAModel
+from dfatool.utils import by_name_to_by_param
+from dfatool.validation import CrossValidator
import os
import unittest
import pytest
+import numpy as np
-class TestModels(unittest.TestCase):
- def test_model_singlefile_rf24(self):
- raw_data = RawData(['test-data/20170220_164723_RF24_int_A.tar'])
- preprocessed_data = raw_data.get_preprocessed_data(verbose=False)
+
+class TestSynthetic(unittest.TestCase):
+ def test_model_validation(self):
+        # rng = np.random.default_rng(seed=1312) # requires NumPy >= 1.17
+ np.random.seed(1312)
+ X = np.arange(500) % 50
+ parameter_names = ["p_mod5", "p_linear"]
+
+ s1_duration_base = 70
+ s1_duration_scale = 2
+ s1_power_base = 50
+ s1_power_scale = 7
+ s2_duration_base = 700
+ s2_duration_scale = 1
+ s2_power_base = 1500
+ s2_power_scale = 10
+
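+        # synthetic ground truth: raw_state_1 has a parameter-independent duration
+        # and a power that grows linearly with p_linear; raw_state_2's duration
+        # falls with 2 * p_linear and its power grows with p_linear. Gaussian noise
+        # with the scales above is added to every attribute.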
+ by_name = {
+ "raw_state_1": {
+ "isa": "state",
+ "param": [(x % 5, x) for x in X],
+ "duration": s1_duration_base
+ + np.random.normal(size=X.size, scale=s1_duration_scale),
+ "power": s1_power_base
+ + X
+ + np.random.normal(size=X.size, scale=s1_power_scale),
+ "attributes": ["duration", "power"],
+ },
+ "raw_state_2": {
+ "isa": "state",
+ "param": [(x % 5, x) for x in X],
+ "duration": s2_duration_base
+ - 2 * X
+ + np.random.normal(size=X.size, scale=s2_duration_scale),
+ "power": s2_power_base
+ + X
+ + np.random.normal(size=X.size, scale=s2_power_scale),
+ "attributes": ["duration", "power"],
+ },
+ }
+ by_param = by_name_to_by_param(by_name)
+ model = PTAModel(by_name, parameter_names, dict())
+ static_model = model.get_static()
+
+        # x ∈ [0, 50) -> mean(X) = 24.5 ≈ 25
+ self.assertAlmostEqual(
+ static_model("raw_state_1", "duration"), s1_duration_base, places=0
+ )
+ self.assertAlmostEqual(
+ static_model("raw_state_1", "power"), s1_power_base + 25, delta=7
+ )
+ self.assertAlmostEqual(
+ static_model("raw_state_2", "duration"), s2_duration_base - 2 * 25, delta=2
+ )
+ self.assertAlmostEqual(
+ static_model("raw_state_2", "power"), s2_power_base + 25, delta=7
+ )
+
+ param_model, param_info = model.get_fitted()
+
+ self.assertAlmostEqual(
+ param_model("raw_state_1", "duration", param=[0, 10]),
+ s1_duration_base,
+ places=0,
+ )
+ self.assertAlmostEqual(
+ param_model("raw_state_1", "duration", param=[0, 50]),
+ s1_duration_base,
+ places=0,
+ )
+ self.assertAlmostEqual(
+ param_model("raw_state_1", "duration", param=[0, 70]),
+ s1_duration_base,
+ places=0,
+ )
+
+ self.assertAlmostEqual(
+ param_model("raw_state_1", "power", param=[0, 10]),
+ s1_power_base + 10,
+ places=0,
+ )
+ self.assertAlmostEqual(
+ param_model("raw_state_1", "power", param=[0, 50]),
+ s1_power_base + 50,
+ places=0,
+ )
+ self.assertAlmostEqual(
+ param_model("raw_state_1", "power", param=[0, 70]),
+ s1_power_base + 70,
+ places=0,
+ )
+
+ self.assertAlmostEqual(
+ param_model("raw_state_2", "duration", param=[0, 10]),
+ s2_duration_base - 2 * 10,
+ places=0,
+ )
+ self.assertAlmostEqual(
+ param_model("raw_state_2", "duration", param=[0, 50]),
+ s2_duration_base - 2 * 50,
+ places=0,
+ )
+ self.assertAlmostEqual(
+ param_model("raw_state_2", "duration", param=[0, 70]),
+ s2_duration_base - 2 * 70,
+ places=0,
+ )
+
+ self.assertAlmostEqual(
+ param_model("raw_state_2", "power", param=[0, 10]),
+ s2_power_base + 10,
+ delta=50,
+ )
+ self.assertAlmostEqual(
+ param_model("raw_state_2", "power", param=[0, 50]),
+ s2_power_base + 50,
+ delta=50,
+ )
+ self.assertAlmostEqual(
+ param_model("raw_state_2", "power", param=[0, 70]),
+ s2_power_base + 70,
+ delta=50,
+ )
+
+ static_quality = model.assess(static_model)
+ param_quality = model.assess(param_model)
+
+ # static quality reflects normal distribution scale for non-parameterized data
+
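+        # error metrics assessed below (standard definitions assumed here):
+        #   MAE   = mean(|measured - predicted|)
+        #   RMSD  = sqrt(mean((measured - predicted)^2))
+        #   MAPE  = 100 * mean(|measured - predicted| / |measured|)
+        #   SMAPE = 100 * mean(|measured - predicted| / ((|measured| + |predicted|) / 2))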
+        # the Root Mean Square Deviation must not be greater than the scale (i.e., standard deviation) of the normal distribution
+ # Low Mean Absolute Error (< 2)
+ self.assertTrue(static_quality["by_name"]["raw_state_1"]["duration"]["mae"] < 2)
+ # Low Root Mean Square Deviation (< scale == 2)
+ self.assertTrue(
+ static_quality["by_name"]["raw_state_1"]["duration"]["rmsd"] < 2
+ )
+ # Relatively low error percentage (~~ MAE * 100% / s1_duration_base)
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_1"]["duration"]["mape"],
+ static_quality["by_name"]["raw_state_1"]["duration"]["mae"]
+ * 100
+ / s1_duration_base,
+ places=1,
+ )
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_1"]["duration"]["smape"],
+ static_quality["by_name"]["raw_state_1"]["duration"]["mae"]
+ * 100
+ / s1_duration_base,
+ places=1,
+ )
+
+ # static error is high for parameterized data
+
+ # MAE == mean(abs(actual value - model value))
+ # parameter range is [0, 50) -> mean 25, deviation range is [0, 25) -> mean deviation is 12.5 ± gauss scale
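+        # (exact value: for X uniform over 0 .. 49, mean(|X - 24.5|) = 12.5)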
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_1"]["power"]["mae"], 12.5, delta=1
+ )
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_1"]["power"]["rmsd"], 16, delta=2
+ )
+ # high percentage error due to low s1_power_base
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_1"]["power"]["mape"], 19, delta=2
+ )
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_1"]["power"]["smape"], 19, delta=2
+ )
+
+        # the duration slope is -2, so the effect spans 2 * [0, 50) = [0, 100) -> mean deviation is 25 ± gauss scale
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["duration"]["mae"], 25, delta=2
+ )
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["duration"]["rmsd"], 30, delta=2
+ )
+
+ # low percentage error due to high s2_duration_base (~~ 3.5 %)
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["duration"]["mape"],
+ 25 * 100 / s2_duration_base,
+ delta=1,
+ )
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["duration"]["smape"],
+ 25 * 100 / s2_duration_base,
+ delta=1,
+ )
+
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["power"]["mae"], 12.5, delta=2
+ )
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["power"]["rmsd"], 17, delta=2
+ )
+
+ # low percentage error due to high s2_power_base (~~ 1.7 %)
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["power"]["mape"],
+ 25 * 100 / s2_power_base,
+ delta=1,
+ )
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["power"]["smape"],
+ 25 * 100 / s2_power_base,
+ delta=1,
+ )
+
+ # raw_state_1/duration does not depend on parameters and delegates to the static model
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_1"]["duration"]["mae"],
+ static_quality["by_name"]["raw_state_1"]["duration"]["mae"],
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_1"]["duration"]["rmsd"],
+ static_quality["by_name"]["raw_state_1"]["duration"]["rmsd"],
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_1"]["duration"]["mape"],
+ static_quality["by_name"]["raw_state_1"]["duration"]["mape"],
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_1"]["duration"]["smape"],
+ static_quality["by_name"]["raw_state_1"]["duration"]["smape"],
+ )
+
+ # fitted param-model quality reflects normal distribution scale for all data
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["power"]["mape"], 0.9, places=1
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["power"]["smape"], 0.9, places=1
+ )
+
+ self.assertTrue(
+ param_quality["by_name"]["raw_state_1"]["power"]["mae"] < s1_power_scale
+ )
+ self.assertTrue(
+ param_quality["by_name"]["raw_state_1"]["power"]["rmsd"] < s1_power_scale
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_1"]["power"]["mape"], 7.5, delta=1
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_1"]["power"]["smape"], 7.5, delta=1
+ )
+
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["duration"]["mae"],
+ s2_duration_scale,
+ delta=0.2,
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["duration"]["rmsd"],
+ s2_duration_scale,
+ delta=0.2,
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["duration"]["mape"],
+ 0.12,
+ delta=0.01,
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["duration"]["smape"],
+ 0.12,
+ delta=0.01,
+ )
+
+        # ... unless the signal-to-noise ratio (parameter range = [0, 50) vs. scale = 10) is bad, leading to
+ # increased regression errors
+ self.assertTrue(param_quality["by_name"]["raw_state_2"]["power"]["mae"] < 15)
+ self.assertTrue(param_quality["by_name"]["raw_state_2"]["power"]["rmsd"] < 18)
+
+ # still: low percentage error due to high s2_power_base
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["power"]["mape"], 0.9, places=1
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["power"]["smape"], 0.9, places=1
+ )
+
+ def test_model_crossvalidation_10fold(self):
+        # rng = np.random.default_rng(seed=1312) # requires NumPy >= 1.17
+ np.random.seed(1312)
+ X = np.arange(500) % 50
+ parameter_names = ["p_mod5", "p_linear"]
+
+ s1_duration_base = 70
+ s1_duration_scale = 2
+ s1_power_base = 50
+ s1_power_scale = 7
+ s2_duration_base = 700
+ s2_duration_scale = 1
+ s2_power_base = 1500
+ s2_power_scale = 10
+
+ by_name = {
+ "raw_state_1": {
+ "isa": "state",
+ "param": [(x % 5, x) for x in X],
+ "duration": s1_duration_base
+ + np.random.normal(size=X.size, scale=s1_duration_scale),
+ "power": s1_power_base
+ + X
+ + np.random.normal(size=X.size, scale=s1_power_scale),
+ "attributes": ["duration", "power"],
+ },
+ "raw_state_2": {
+ "isa": "state",
+ "param": [(x % 5, x) for x in X],
+ "duration": s2_duration_base
+ - 2 * X
+ + np.random.normal(size=X.size, scale=s2_duration_scale),
+ "power": s2_power_base
+ + X
+ + np.random.normal(size=X.size, scale=s2_power_scale),
+ "attributes": ["duration", "power"],
+ },
+ }
+ by_param = by_name_to_by_param(by_name)
+ arg_count = dict()
+ model = PTAModel(by_name, parameter_names, arg_count)
+ validator = CrossValidator(PTAModel, by_name, parameter_names, arg_count)
+
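+        # k-fold cross validation: the data is split into 10 folds; each fold serves
+        # once as validation set for a model fitted on the remaining nine, and the
+        # reported quality aggregates all folds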
+ static_quality = validator.kfold(lambda m: m.get_static(), 10)
+ param_quality = validator.kfold(lambda m: m.get_fitted()[0], 10)
+
+ print(static_quality)
+
+ # static quality reflects normal distribution scale for non-parameterized data
+
+        # the Root Mean Square Deviation must not be greater than the scale (i.e., standard deviation) of the normal distribution
+ # Low Mean Absolute Error (< 2)
+ self.assertTrue(static_quality["by_name"]["raw_state_1"]["duration"]["mae"] < 2)
+ # Low Root Mean Square Deviation (< scale == 2)
+ self.assertTrue(
+ static_quality["by_name"]["raw_state_1"]["duration"]["rmsd"] < 2
+ )
+ # Relatively low error percentage (~~ MAE * 100% / s1_duration_base)
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_1"]["duration"]["smape"],
+ static_quality["by_name"]["raw_state_1"]["duration"]["mae"]
+ * 100
+ / s1_duration_base,
+ places=1,
+ )
+
+ # static error is high for parameterized data
+
+ # MAE == mean(abs(actual value - model value))
+ # parameter range is [0, 50) -> mean 25, deviation range is [0, 25) -> mean deviation is 12.5 ± gauss scale
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_1"]["power"]["mae"], 12.5, delta=1
+ )
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_1"]["power"]["rmsd"], 16, delta=2
+ )
+ # high percentage error due to low s1_power_base
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_1"]["power"]["smape"], 19, delta=2
+ )
+
+        # the duration slope is -2, so the effect spans 2 * [0, 50) = [0, 100) -> mean deviation is 25 ± gauss scale
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["duration"]["mae"], 25, delta=2
+ )
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["duration"]["rmsd"], 30, delta=2
+ )
+
+ # low percentage error due to high s2_duration_base (~~ 3.5 %)
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["duration"]["smape"],
+ 25 * 100 / s2_duration_base,
+ delta=1,
+ )
+
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["power"]["mae"], 12.5, delta=2
+ )
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["power"]["rmsd"], 17, delta=2
+ )
+
+ # low percentage error due to high s2_power_base (~~ 1.7 %)
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["power"]["smape"],
+ 25 * 100 / s2_power_base,
+ delta=1,
+ )
+
+ # raw_state_1/duration does not depend on parameters and delegates to the static model
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_1"]["duration"]["mae"],
+ static_quality["by_name"]["raw_state_1"]["duration"]["mae"],
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_1"]["duration"]["rmsd"],
+ static_quality["by_name"]["raw_state_1"]["duration"]["rmsd"],
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_1"]["duration"]["smape"],
+ static_quality["by_name"]["raw_state_1"]["duration"]["smape"],
+ )
+
+ # fitted param-model quality reflects normal distribution scale for all data
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["power"]["smape"], 0.9, places=1
+ )
+
+ self.assertTrue(
+ param_quality["by_name"]["raw_state_1"]["power"]["mae"] < s1_power_scale
+ )
+ self.assertTrue(
+ param_quality["by_name"]["raw_state_1"]["power"]["rmsd"] < s1_power_scale
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_1"]["power"]["smape"], 7.5, delta=1
+ )
+
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["duration"]["mae"],
+ s2_duration_scale,
+ delta=0.2,
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["duration"]["rmsd"],
+ s2_duration_scale,
+ delta=0.2,
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["duration"]["smape"],
+ 0.12,
+ delta=0.01,
+ )
+
+        # ... unless the signal-to-noise ratio (parameter range = [0, 50) vs. scale = 10) is bad, leading to
+ # increased regression errors
+ self.assertTrue(param_quality["by_name"]["raw_state_2"]["power"]["mae"] < 15)
+ self.assertTrue(param_quality["by_name"]["raw_state_2"]["power"]["rmsd"] < 18)
+
+ # still: low percentage error due to high s2_power_base
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["power"]["smape"], 0.9, places=1
+ )
+
+
+class TestFromFile(unittest.TestCase):
+ def test_singlefile_rf24(self):
+ raw_data = RawData(["test-data/20170220_164723_RF24_int_A.tar"])
+ preprocessed_data = raw_data.get_preprocessed_data()
by_name, parameters, arg_count = pta_trace_to_aggregate(preprocessed_data)
- model = PTAModel(by_name, parameters, arg_count, verbose=False)
- self.assertEqual(model.states(), 'POWERDOWN RX STANDBY1 TX'.split(' '))
- self.assertEqual(model.transitions(), 'begin epilogue powerDown powerUp setDataRate_num setPALevel_num startListening stopListening write_nb'.split(' '))
+ model = PTAModel(by_name, parameters, arg_count)
+ self.assertEqual(model.states(), "POWERDOWN RX STANDBY1 TX".split(" "))
+ self.assertEqual(
+ model.transitions(),
+ "begin epilogue powerDown powerUp setDataRate_num setPALevel_num startListening stopListening write_nb".split(
+ " "
+ ),
+ )
static_model = model.get_static()
- self.assertAlmostEqual(static_model('POWERDOWN', 'power'), 0, places=0)
- self.assertAlmostEqual(static_model('RX', 'power'), 52254, places=0)
- self.assertAlmostEqual(static_model('STANDBY1', 'power'), 7, places=0)
- self.assertAlmostEqual(static_model('TX', 'power'), 18414, places=0)
- self.assertAlmostEqual(static_model('begin', 'energy'), 1652249, places=0)
- self.assertAlmostEqual(static_model('epilogue', 'energy'), 15449, places=0)
- self.assertAlmostEqual(static_model('powerDown', 'energy'), 4547, places=0)
- self.assertAlmostEqual(static_model('powerUp', 'energy'), 1641765, places=0)
- self.assertAlmostEqual(static_model('setDataRate_num', 'energy'), 7749, places=0)
- self.assertAlmostEqual(static_model('setPALevel_num', 'energy'), 4700, places=0)
- self.assertAlmostEqual(static_model('startListening', 'energy'), 4309602, places=0)
- self.assertAlmostEqual(static_model('stopListening', 'energy'), 193775, places=0)
- self.assertAlmostEqual(static_model('write_nb', 'energy'), 218339, places=0)
- self.assertAlmostEqual(static_model('begin', 'rel_energy_prev'), 1649571, places=0)
- self.assertAlmostEqual(static_model('epilogue', 'rel_energy_prev'), -744114, places=0)
- self.assertAlmostEqual(static_model('powerDown', 'rel_energy_prev'), 3854, places=0)
- self.assertAlmostEqual(static_model('powerUp', 'rel_energy_prev'), 1641381, places=0)
- self.assertAlmostEqual(static_model('setDataRate_num', 'rel_energy_prev'), 6777, places=0)
- self.assertAlmostEqual(static_model('setPALevel_num', 'rel_energy_prev'), 3728, places=0)
- self.assertAlmostEqual(static_model('startListening', 'rel_energy_prev'), 4307769, places=0)
- self.assertAlmostEqual(static_model('stopListening', 'rel_energy_prev'), -13533693, places=0)
- self.assertAlmostEqual(static_model('write_nb', 'rel_energy_prev'), 214618, places=0)
- self.assertAlmostEqual(static_model('begin', 'duration'), 19830, places=0)
- self.assertAlmostEqual(static_model('epilogue', 'duration'), 40, places=0)
- self.assertAlmostEqual(static_model('powerDown', 'duration'), 90, places=0)
- self.assertAlmostEqual(static_model('powerUp', 'duration'), 10030, places=0)
- self.assertAlmostEqual(static_model('setDataRate_num', 'duration'), 140, places=0)
- self.assertAlmostEqual(static_model('setPALevel_num', 'duration'), 90, places=0)
- self.assertAlmostEqual(static_model('startListening', 'duration'), 260, places=0)
- self.assertAlmostEqual(static_model('stopListening', 'duration'), 260, places=0)
- self.assertAlmostEqual(static_model('write_nb', 'duration'), 510, places=0)
-
- self.assertAlmostEqual(model.stats.param_dependence_ratio('POWERDOWN', 'power', 'datarate'), 0, places=2)
- self.assertAlmostEqual(model.stats.param_dependence_ratio('POWERDOWN', 'power', 'txbytes'), 0, places=2)
- self.assertAlmostEqual(model.stats.param_dependence_ratio('POWERDOWN', 'power', 'txpower'), 0, places=2)
- self.assertAlmostEqual(model.stats.param_dependence_ratio('RX', 'power', 'datarate'), 0.99, places=2)
- self.assertAlmostEqual(model.stats.param_dependence_ratio('RX', 'power', 'txbytes'), 0, places=2)
- self.assertAlmostEqual(model.stats.param_dependence_ratio('RX', 'power', 'txpower'), 0.01, places=2)
- self.assertAlmostEqual(model.stats.param_dependence_ratio('STANDBY1', 'power', 'datarate'), 0.04, places=2)
- self.assertAlmostEqual(model.stats.param_dependence_ratio('STANDBY1', 'power', 'txbytes'), 0.35, places=2)
- self.assertAlmostEqual(model.stats.param_dependence_ratio('STANDBY1', 'power', 'txpower'), 0.32, places=2)
- self.assertAlmostEqual(model.stats.param_dependence_ratio('TX', 'power', 'datarate'), 1, places=2)
- self.assertAlmostEqual(model.stats.param_dependence_ratio('TX', 'power', 'txbytes'), 0.09, places=2)
- self.assertAlmostEqual(model.stats.param_dependence_ratio('TX', 'power', 'txpower'), 1, places=2)
+ self.assertAlmostEqual(static_model("POWERDOWN", "power"), 0, places=0)
+ self.assertAlmostEqual(static_model("RX", "power"), 52254, places=0)
+ self.assertAlmostEqual(static_model("STANDBY1", "power"), 7, places=0)
+ self.assertAlmostEqual(static_model("TX", "power"), 18414, places=0)
+ self.assertAlmostEqual(static_model("begin", "energy"), 1652249, places=0)
+ self.assertAlmostEqual(static_model("epilogue", "energy"), 15449, places=0)
+ self.assertAlmostEqual(static_model("powerDown", "energy"), 4547, places=0)
+ self.assertAlmostEqual(static_model("powerUp", "energy"), 1641765, places=0)
+ self.assertAlmostEqual(
+ static_model("setDataRate_num", "energy"), 7749, places=0
+ )
+ self.assertAlmostEqual(static_model("setPALevel_num", "energy"), 4700, places=0)
+ self.assertAlmostEqual(
+ static_model("startListening", "energy"), 4309602, places=0
+ )
+ self.assertAlmostEqual(
+ static_model("stopListening", "energy"), 193775, places=0
+ )
+ self.assertAlmostEqual(static_model("write_nb", "energy"), 218339, places=0)
+ self.assertAlmostEqual(
+ static_model("begin", "rel_energy_prev"), 1649571, places=0
+ )
+ self.assertAlmostEqual(
+ static_model("epilogue", "rel_energy_prev"), -744114, places=0
+ )
+ self.assertAlmostEqual(
+ static_model("powerDown", "rel_energy_prev"), 3854, places=0
+ )
+ self.assertAlmostEqual(
+ static_model("powerUp", "rel_energy_prev"), 1641381, places=0
+ )
+ self.assertAlmostEqual(
+ static_model("setDataRate_num", "rel_energy_prev"), 6777, places=0
+ )
+ self.assertAlmostEqual(
+ static_model("setPALevel_num", "rel_energy_prev"), 3728, places=0
+ )
+ self.assertAlmostEqual(
+ static_model("startListening", "rel_energy_prev"), 4307769, places=0
+ )
+ self.assertAlmostEqual(
+ static_model("stopListening", "rel_energy_prev"), -13533693, places=0
+ )
+ self.assertAlmostEqual(
+ static_model("write_nb", "rel_energy_prev"), 214618, places=0
+ )
+ self.assertAlmostEqual(static_model("begin", "duration"), 19830, places=0)
+ self.assertAlmostEqual(static_model("epilogue", "duration"), 40, places=0)
+ self.assertAlmostEqual(static_model("powerDown", "duration"), 90, places=0)
+ self.assertAlmostEqual(static_model("powerUp", "duration"), 10030, places=0)
+ self.assertAlmostEqual(
+ static_model("setDataRate_num", "duration"), 140, places=0
+ )
+ self.assertAlmostEqual(static_model("setPALevel_num", "duration"), 90, places=0)
+ self.assertAlmostEqual(
+ static_model("startListening", "duration"), 260, places=0
+ )
+ self.assertAlmostEqual(static_model("stopListening", "duration"), 260, places=0)
+ self.assertAlmostEqual(static_model("write_nb", "duration"), 510, places=0)
+
+ self.assertAlmostEqual(
+ model.stats.param_dependence_ratio("POWERDOWN", "power", "datarate"),
+ 0,
+ places=2,
+ )
+ self.assertAlmostEqual(
+ model.stats.param_dependence_ratio("POWERDOWN", "power", "txbytes"),
+ 0,
+ places=2,
+ )
+ self.assertAlmostEqual(
+ model.stats.param_dependence_ratio("POWERDOWN", "power", "txpower"),
+ 0,
+ places=2,
+ )
+ self.assertAlmostEqual(
+ model.stats.param_dependence_ratio("RX", "power", "datarate"),
+ 0.99,
+ places=2,
+ )
+ self.assertAlmostEqual(
+ model.stats.param_dependence_ratio("RX", "power", "txbytes"), 0, places=2
+ )
+ self.assertAlmostEqual(
+ model.stats.param_dependence_ratio("RX", "power", "txpower"), 0.01, places=2
+ )
+ self.assertAlmostEqual(
+ model.stats.param_dependence_ratio("STANDBY1", "power", "datarate"),
+ 0.04,
+ places=2,
+ )
+ self.assertAlmostEqual(
+ model.stats.param_dependence_ratio("STANDBY1", "power", "txbytes"),
+ 0.35,
+ places=2,
+ )
+ self.assertAlmostEqual(
+ model.stats.param_dependence_ratio("STANDBY1", "power", "txpower"),
+ 0.32,
+ places=2,
+ )
+ self.assertAlmostEqual(
+ model.stats.param_dependence_ratio("TX", "power", "datarate"), 1, places=2
+ )
+ self.assertAlmostEqual(
+ model.stats.param_dependence_ratio("TX", "power", "txbytes"), 0.09, places=2
+ )
+ self.assertAlmostEqual(
+ model.stats.param_dependence_ratio("TX", "power", "txpower"), 1, places=2
+ )
param_model, param_info = model.get_fitted()
- self.assertEqual(param_info('POWERDOWN', 'power'), None)
- self.assertEqual(param_info('RX', 'power')['function']._model_str,
- '0 + regression_arg(0) + regression_arg(1) * np.sqrt(parameter(datarate))')
- self.assertAlmostEqual(param_info('RX', 'power')['function']._regression_args[0], 48530.7, places=0)
- self.assertAlmostEqual(param_info('RX', 'power')['function']._regression_args[1], 117, places=0)
- self.assertEqual(param_info('STANDBY1', 'power'), None)
- self.assertEqual(param_info('TX', 'power')['function']._model_str,
- '0 + regression_arg(0) + regression_arg(1) * 1/(parameter(datarate)) + regression_arg(2) * parameter(txpower) + regression_arg(3) * 1/(parameter(datarate)) * parameter(txpower)')
- self.assertEqual(param_info('epilogue', 'timeout')['function']._model_str,
- '0 + regression_arg(0) + regression_arg(1) * 1/(parameter(datarate))')
- self.assertEqual(param_info('stopListening', 'duration')['function']._model_str,
- '0 + regression_arg(0) + regression_arg(1) * 1/(parameter(datarate))')
-
- self.assertAlmostEqual(param_model('RX', 'power', param=[1, None, None]), 48647, places=-1)
-
- def test_model_singlefile_mmparam(self):
- raw_data = RawData(['test-data/20161221_123347_mmparam.tar'])
- preprocessed_data = raw_data.get_preprocessed_data(verbose=False)
+ self.assertEqual(param_info("POWERDOWN", "power"), None)
+ self.assertEqual(
+ param_info("RX", "power")["function"].model_function,
+ "0 + regression_arg(0) + regression_arg(1) * np.sqrt(parameter(datarate))",
+ )
+ self.assertAlmostEqual(
+ param_info("RX", "power")["function"].model_args[0], 48530.7, places=0
+ )
+ self.assertAlmostEqual(
+ param_info("RX", "power")["function"].model_args[1], 117, places=0
+ )
+ self.assertEqual(param_info("STANDBY1", "power"), None)
+ self.assertEqual(
+ param_info("TX", "power")["function"].model_function,
+ "0 + regression_arg(0) + regression_arg(1) * 1/(parameter(datarate)) + regression_arg(2) * parameter(txpower) + regression_arg(3) * 1/(parameter(datarate)) * parameter(txpower)",
+ )
+ self.assertEqual(
+ param_info("epilogue", "timeout")["function"].model_function,
+ "0 + regression_arg(0) + regression_arg(1) * 1/(parameter(datarate))",
+ )
+ self.assertEqual(
+ param_info("stopListening", "duration")["function"].model_function,
+ "0 + regression_arg(0) + regression_arg(1) * 1/(parameter(datarate))",
+ )
+
+ self.assertAlmostEqual(
+ param_model("RX", "power", param=[1, None, None]), 48647, places=-1
+ )
+
+ def test_singlefile_mmparam(self):
+ raw_data = RawData(["test-data/20161221_123347_mmparam.tar"])
+ preprocessed_data = raw_data.get_preprocessed_data()
by_name, parameters, arg_count = pta_trace_to_aggregate(preprocessed_data)
- model = PTAModel(by_name, parameters, arg_count, verbose=False)
- self.assertEqual(model.states(), 'OFF ON'.split(' '))
- self.assertEqual(model.transitions(), 'off setBrightness'.split(' '))
+ model = PTAModel(by_name, parameters, arg_count)
+ self.assertEqual(model.states(), "OFF ON".split(" "))
+ self.assertEqual(model.transitions(), "off setBrightness".split(" "))
static_model = model.get_static()
- self.assertAlmostEqual(static_model('OFF', 'power'), 7124, places=0)
- self.assertAlmostEqual(static_model('ON', 'power'), 17866, places=0)
- self.assertAlmostEqual(static_model('off', 'energy'), 268079197, places=0)
- self.assertAlmostEqual(static_model('setBrightness', 'energy'), 168912773, places=0)
- self.assertAlmostEqual(static_model('off', 'rel_energy_prev'), 105040198, places=0)
- self.assertAlmostEqual(static_model('setBrightness', 'rel_energy_prev'), 103745586, places=0)
- self.assertAlmostEqual(static_model('off', 'duration'), 9130, places=0)
- self.assertAlmostEqual(static_model('setBrightness', 'duration'), 9130, places=0)
+ self.assertAlmostEqual(static_model("OFF", "power"), 7124, places=0)
+ self.assertAlmostEqual(static_model("ON", "power"), 17866, places=0)
+ self.assertAlmostEqual(static_model("off", "energy"), 268079197, places=0)
+ self.assertAlmostEqual(
+ static_model("setBrightness", "energy"), 168912773, places=0
+ )
+ self.assertAlmostEqual(
+ static_model("off", "rel_energy_prev"), 105040198, places=0
+ )
+ self.assertAlmostEqual(
+ static_model("setBrightness", "rel_energy_prev"), 103745586, places=0
+ )
+ self.assertAlmostEqual(static_model("off", "duration"), 9130, places=0)
+ self.assertAlmostEqual(
+ static_model("setBrightness", "duration"), 9130, places=0
+ )
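+        # the LUT model only knows parameter combinations that occur in the data:
+        # ON was never measured with (None, None), so the lookup raises KeyError
+        # unless fallback=True delegates unseen combinations to the static model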
param_lut_model = model.get_param_lut()
- self.assertAlmostEqual(param_lut_model('OFF', 'power', param=[None, None]), 7124, places=0)
+ self.assertAlmostEqual(
+ param_lut_model("OFF", "power", param=[None, None]), 7124, places=0
+ )
with self.assertRaises(KeyError):
- param_lut_model('ON', 'power', param=[None, None])
- param_lut_model('ON', 'power', param=['a'])
- param_lut_model('ON', 'power', param=[0])
- self.assertTrue(param_lut_model('ON', 'power', param=[0, 0]))
+ param_lut_model("ON", "power", param=[None, None])
+ param_lut_model("ON", "power", param=["a"])
+ param_lut_model("ON", "power", param=[0])
+ self.assertTrue(param_lut_model("ON", "power", param=[0, 0]))
param_lut_model = model.get_param_lut(fallback=True)
- self.assertAlmostEqual(param_lut_model('ON', 'power', param=[None, None]), 17866, places=0)
+ self.assertAlmostEqual(
+ param_lut_model("ON", "power", param=[None, None]), 17866, places=0
+ )
- def test_model_multifile_lm75x(self):
+ def test_multifile_lm75x(self):
testfiles = [
- 'test-data/20170116_124500_LM75x.tar',
- 'test-data/20170116_131306_LM75x.tar',
+ "test-data/20170116_124500_LM75x.tar",
+ "test-data/20170116_131306_LM75x.tar",
]
raw_data = RawData(testfiles)
- preprocessed_data = raw_data.get_preprocessed_data(verbose=False)
+ preprocessed_data = raw_data.get_preprocessed_data()
by_name, parameters, arg_count = pta_trace_to_aggregate(preprocessed_data)
- model = PTAModel(by_name, parameters, arg_count, verbose=False)
- self.assertEqual(model.states(), 'ACTIVE POWEROFF'.split(' '))
- self.assertEqual(model.transitions(), 'getTemp setHyst setOS shutdown start'.split(' '))
+ model = PTAModel(by_name, parameters, arg_count)
+ self.assertEqual(model.states(), "ACTIVE POWEROFF".split(" "))
+ self.assertEqual(
+ model.transitions(), "getTemp setHyst setOS shutdown start".split(" ")
+ )
static_model = model.get_static()
- self.assertAlmostEqual(static_model('ACTIVE', 'power'), 332, places=0)
- self.assertAlmostEqual(static_model('POWEROFF', 'power'), 7, places=0)
- self.assertAlmostEqual(static_model('getTemp', 'energy'), 26016748, places=0)
- self.assertAlmostEqual(static_model('setHyst', 'energy'), 22082226, places=0)
- self.assertAlmostEqual(static_model('setOS', 'energy'), 21774238, places=0)
- self.assertAlmostEqual(static_model('shutdown', 'energy'), 11808160, places=0)
- self.assertAlmostEqual(static_model('start', 'energy'), 12445302, places=0)
- self.assertAlmostEqual(static_model('getTemp', 'rel_energy_prev'), 21722720, places=0)
- self.assertAlmostEqual(static_model('setHyst', 'rel_energy_prev'), 19001499, places=0)
- self.assertAlmostEqual(static_model('setOS', 'rel_energy_prev'), 18693283, places=0)
- self.assertAlmostEqual(static_model('shutdown', 'rel_energy_prev'), 11746224, places=0)
- self.assertAlmostEqual(static_model('start', 'rel_energy_prev'), 12391462, places=0)
- self.assertAlmostEqual(static_model('getTemp', 'duration'), 12740, places=0)
- self.assertAlmostEqual(static_model('setHyst', 'duration'), 9140, places=0)
- self.assertAlmostEqual(static_model('setOS', 'duration'), 9140, places=0)
- self.assertAlmostEqual(static_model('shutdown', 'duration'), 6980, places=0)
- self.assertAlmostEqual(static_model('start', 'duration'), 6980, places=0)
-
- def test_model_multifile_sharp(self):
+ self.assertAlmostEqual(static_model("ACTIVE", "power"), 332, places=0)
+ self.assertAlmostEqual(static_model("POWEROFF", "power"), 7, places=0)
+ self.assertAlmostEqual(static_model("getTemp", "energy"), 26016748, places=0)
+ self.assertAlmostEqual(static_model("setHyst", "energy"), 22082226, places=0)
+ self.assertAlmostEqual(static_model("setOS", "energy"), 21774238, places=0)
+ self.assertAlmostEqual(static_model("shutdown", "energy"), 11808160, places=0)
+ self.assertAlmostEqual(static_model("start", "energy"), 12445302, places=0)
+ self.assertAlmostEqual(
+ static_model("getTemp", "rel_energy_prev"), 21722720, places=0
+ )
+ self.assertAlmostEqual(
+ static_model("setHyst", "rel_energy_prev"), 19001499, places=0
+ )
+ self.assertAlmostEqual(
+ static_model("setOS", "rel_energy_prev"), 18693283, places=0
+ )
+ self.assertAlmostEqual(
+ static_model("shutdown", "rel_energy_prev"), 11746224, places=0
+ )
+ self.assertAlmostEqual(
+ static_model("start", "rel_energy_prev"), 12391462, places=0
+ )
+ self.assertAlmostEqual(static_model("getTemp", "duration"), 12740, places=0)
+ self.assertAlmostEqual(static_model("setHyst", "duration"), 9140, places=0)
+ self.assertAlmostEqual(static_model("setOS", "duration"), 9140, places=0)
+ self.assertAlmostEqual(static_model("shutdown", "duration"), 6980, places=0)
+ self.assertAlmostEqual(static_model("start", "duration"), 6980, places=0)
+
+ def test_multifile_sharp(self):
testfiles = [
- 'test-data/20170116_145420_sharpLS013B4DN.tar',
- 'test-data/20170116_151348_sharpLS013B4DN.tar',
+ "test-data/20170116_145420_sharpLS013B4DN.tar",
+ "test-data/20170116_151348_sharpLS013B4DN.tar",
]
raw_data = RawData(testfiles)
- preprocessed_data = raw_data.get_preprocessed_data(verbose=False)
+ preprocessed_data = raw_data.get_preprocessed_data()
by_name, parameters, arg_count = pta_trace_to_aggregate(preprocessed_data)
- model = PTAModel(by_name, parameters, arg_count, verbose=False)
- self.assertEqual(model.states(), 'DISABLED ENABLED'.split(' '))
- self.assertEqual(model.transitions(), 'clear disable enable ioInit sendLine toggleVCOM'.split(' '))
+ model = PTAModel(by_name, parameters, arg_count)
+ self.assertEqual(model.states(), "DISABLED ENABLED".split(" "))
+ self.assertEqual(
+ model.transitions(),
+ "clear disable enable ioInit sendLine toggleVCOM".split(" "),
+ )
static_model = model.get_static()
- self.assertAlmostEqual(static_model('DISABLED', 'power'), 22, places=0)
- self.assertAlmostEqual(static_model('ENABLED', 'power'), 24, places=0)
- self.assertAlmostEqual(static_model('clear', 'energy'), 14059, places=0)
- self.assertAlmostEqual(static_model('disable', 'energy'), 0, places=0)
- self.assertAlmostEqual(static_model('enable', 'energy'), 0, places=0)
- self.assertAlmostEqual(static_model('ioInit', 'energy'), 0, places=0)
- self.assertAlmostEqual(static_model('sendLine', 'energy'), 37874, places=0)
- self.assertAlmostEqual(static_model('toggleVCOM', 'energy'), 30991, places=0)
- self.assertAlmostEqual(static_model('clear', 'rel_energy_prev'), 13329, places=0)
- self.assertAlmostEqual(static_model('disable', 'rel_energy_prev'), 0, places=0)
- self.assertAlmostEqual(static_model('enable', 'rel_energy_prev'), 0, places=0)
- self.assertAlmostEqual(static_model('ioInit', 'rel_energy_prev'), 0, places=0)
- self.assertAlmostEqual(static_model('sendLine', 'rel_energy_prev'), 33447, places=0)
- self.assertAlmostEqual(static_model('toggleVCOM', 'rel_energy_prev'), 30242, places=0)
- self.assertAlmostEqual(static_model('clear', 'duration'), 30, places=0)
- self.assertAlmostEqual(static_model('disable', 'duration'), 0, places=0)
- self.assertAlmostEqual(static_model('enable', 'duration'), 0, places=0)
- self.assertAlmostEqual(static_model('ioInit', 'duration'), 0, places=0)
- self.assertAlmostEqual(static_model('sendLine', 'duration'), 180, places=0)
- self.assertAlmostEqual(static_model('toggleVCOM', 'duration'), 30, places=0)
-
- def test_model_multifile_mmstatic(self):
+ self.assertAlmostEqual(static_model("DISABLED", "power"), 22, places=0)
+ self.assertAlmostEqual(static_model("ENABLED", "power"), 24, places=0)
+ self.assertAlmostEqual(static_model("clear", "energy"), 14059, places=0)
+ self.assertAlmostEqual(static_model("disable", "energy"), 0, places=0)
+ self.assertAlmostEqual(static_model("enable", "energy"), 0, places=0)
+ self.assertAlmostEqual(static_model("ioInit", "energy"), 0, places=0)
+ self.assertAlmostEqual(static_model("sendLine", "energy"), 37874, places=0)
+ self.assertAlmostEqual(static_model("toggleVCOM", "energy"), 30991, places=0)
+ self.assertAlmostEqual(
+ static_model("clear", "rel_energy_prev"), 13329, places=0
+ )
+ self.assertAlmostEqual(static_model("disable", "rel_energy_prev"), 0, places=0)
+ self.assertAlmostEqual(static_model("enable", "rel_energy_prev"), 0, places=0)
+ self.assertAlmostEqual(static_model("ioInit", "rel_energy_prev"), 0, places=0)
+ self.assertAlmostEqual(
+ static_model("sendLine", "rel_energy_prev"), 33447, places=0
+ )
+ self.assertAlmostEqual(
+ static_model("toggleVCOM", "rel_energy_prev"), 30242, places=0
+ )
+ self.assertAlmostEqual(static_model("clear", "duration"), 30, places=0)
+ self.assertAlmostEqual(static_model("disable", "duration"), 0, places=0)
+ self.assertAlmostEqual(static_model("enable", "duration"), 0, places=0)
+ self.assertAlmostEqual(static_model("ioInit", "duration"), 0, places=0)
+ self.assertAlmostEqual(static_model("sendLine", "duration"), 180, places=0)
+ self.assertAlmostEqual(static_model("toggleVCOM", "duration"), 30, places=0)
+
+ def test_multifile_mmstatic(self):
testfiles = [
- 'test-data/20170116_143516_mmstatic.tar',
- 'test-data/20170116_142654_mmstatic.tar',
+ "test-data/20170116_143516_mmstatic.tar",
+ "test-data/20170116_142654_mmstatic.tar",
]
raw_data = RawData(testfiles)
- preprocessed_data = raw_data.get_preprocessed_data(verbose=False)
+ preprocessed_data = raw_data.get_preprocessed_data()
by_name, parameters, arg_count = pta_trace_to_aggregate(preprocessed_data)
- model = PTAModel(by_name, parameters, arg_count, verbose=False)
- self.assertEqual(model.states(), 'B G OFF R'.split(' '))
- self.assertEqual(model.transitions(), 'blue green off red'.split(' '))
+ model = PTAModel(by_name, parameters, arg_count)
+ self.assertEqual(model.states(), "B G OFF R".split(" "))
+ self.assertEqual(model.transitions(), "blue green off red".split(" "))
static_model = model.get_static()
- self.assertAlmostEqual(static_model('B', 'power'), 29443, places=0)
- self.assertAlmostEqual(static_model('G', 'power'), 29432, places=0)
- self.assertAlmostEqual(static_model('OFF', 'power'), 7057, places=0)
- self.assertAlmostEqual(static_model('R', 'power'), 49068, places=0)
- self.assertAlmostEqual(static_model('blue', 'energy'), 374440955, places=0)
- self.assertAlmostEqual(static_model('green', 'energy'), 372026027, places=0)
- self.assertAlmostEqual(static_model('off', 'energy'), 372999554, places=0)
- self.assertAlmostEqual(static_model('red', 'energy'), 378936634, places=0)
- self.assertAlmostEqual(static_model('blue', 'rel_energy_prev'), 105535587, places=0)
- self.assertAlmostEqual(static_model('green', 'rel_energy_prev'), 102999371, places=0)
- self.assertAlmostEqual(static_model('off', 'rel_energy_prev'), 103613698, places=0)
- self.assertAlmostEqual(static_model('red', 'rel_energy_prev'), 110474331, places=0)
- self.assertAlmostEqual(static_model('blue', 'duration'), 9140, places=0)
- self.assertAlmostEqual(static_model('green', 'duration'), 9140, places=0)
- self.assertAlmostEqual(static_model('off', 'duration'), 9140, places=0)
- self.assertAlmostEqual(static_model('red', 'duration'), 9140, places=0)
-
- @pytest.mark.skipif('TEST_SLOW' not in os.environ, reason="slow test, set TEST_SLOW=1 to run")
- def test_model_multifile_cc1200(self):
+ self.assertAlmostEqual(static_model("B", "power"), 29443, places=0)
+ self.assertAlmostEqual(static_model("G", "power"), 29432, places=0)
+ self.assertAlmostEqual(static_model("OFF", "power"), 7057, places=0)
+ self.assertAlmostEqual(static_model("R", "power"), 49068, places=0)
+ self.assertAlmostEqual(static_model("blue", "energy"), 374440955, places=0)
+ self.assertAlmostEqual(static_model("green", "energy"), 372026027, places=0)
+ self.assertAlmostEqual(static_model("off", "energy"), 372999554, places=0)
+ self.assertAlmostEqual(static_model("red", "energy"), 378936634, places=0)
+ self.assertAlmostEqual(
+ static_model("blue", "rel_energy_prev"), 105535587, places=0
+ )
+ self.assertAlmostEqual(
+ static_model("green", "rel_energy_prev"), 102999371, places=0
+ )
+ self.assertAlmostEqual(
+ static_model("off", "rel_energy_prev"), 103613698, places=0
+ )
+ self.assertAlmostEqual(
+ static_model("red", "rel_energy_prev"), 110474331, places=0
+ )
+ self.assertAlmostEqual(static_model("blue", "duration"), 9140, places=0)
+ self.assertAlmostEqual(static_model("green", "duration"), 9140, places=0)
+ self.assertAlmostEqual(static_model("off", "duration"), 9140, places=0)
+ self.assertAlmostEqual(static_model("red", "duration"), 9140, places=0)
+
+ @pytest.mark.skipif(
+ "TEST_SLOW" not in os.environ, reason="slow test, set TEST_SLOW=1 to run"
+ )
+ def test_multifile_cc1200(self):
testfiles = [
- 'test-data/20170125_125433_cc1200.tar',
- 'test-data/20170125_142420_cc1200.tar',
- 'test-data/20170125_144957_cc1200.tar',
- 'test-data/20170125_151149_cc1200.tar',
- 'test-data/20170125_151824_cc1200.tar',
- 'test-data/20170125_154019_cc1200.tar',
+ "test-data/20170125_125433_cc1200.tar",
+ "test-data/20170125_142420_cc1200.tar",
+ "test-data/20170125_144957_cc1200.tar",
+ "test-data/20170125_151149_cc1200.tar",
+ "test-data/20170125_151824_cc1200.tar",
+ "test-data/20170125_154019_cc1200.tar",
]
raw_data = RawData(testfiles)
- preprocessed_data = raw_data.get_preprocessed_data(verbose=False)
+ preprocessed_data = raw_data.get_preprocessed_data()
by_name, parameters, arg_count = pta_trace_to_aggregate(preprocessed_data)
- model = PTAModel(by_name, parameters, arg_count, verbose=False)
- self.assertEqual(model.states(), 'IDLE RX SLEEP SLEEP_EWOR SYNTH_ON TX XOFF'.split(' '))
- self.assertEqual(model.transitions(), 'crystal_off eWOR idle init prepare_xmit receive send setSymbolRate setTxPower sleep txDone'.split(' '))
+ model = PTAModel(by_name, parameters, arg_count)
+ self.assertEqual(
+ model.states(), "IDLE RX SLEEP SLEEP_EWOR SYNTH_ON TX XOFF".split(" ")
+ )
+ self.assertEqual(
+ model.transitions(),
+ "crystal_off eWOR idle init prepare_xmit receive send setSymbolRate setTxPower sleep txDone".split(
+ " "
+ ),
+ )
static_model = model.get_static()
- self.assertAlmostEqual(static_model('IDLE', 'power'), 9500, places=0)
- self.assertAlmostEqual(static_model('RX', 'power'), 85177, places=0)
- self.assertAlmostEqual(static_model('SLEEP', 'power'), 143, places=0)
- self.assertAlmostEqual(static_model('SLEEP_EWOR', 'power'), 81801, places=0)
- self.assertAlmostEqual(static_model('SYNTH_ON', 'power'), 60036, places=0)
- self.assertAlmostEqual(static_model('TX', 'power'), 92461, places=0)
- self.assertAlmostEqual(static_model('XOFF', 'power'), 780, places=0)
- self.assertAlmostEqual(static_model('crystal_off', 'energy'), 114658, places=0)
- self.assertAlmostEqual(static_model('eWOR', 'energy'), 317556, places=0)
- self.assertAlmostEqual(static_model('idle', 'energy'), 717713, places=0)
- self.assertAlmostEqual(static_model('init', 'energy'), 23028941, places=0)
- self.assertAlmostEqual(static_model('prepare_xmit', 'energy'), 378552, places=0)
- self.assertAlmostEqual(static_model('receive', 'energy'), 380335, places=0)
- self.assertAlmostEqual(static_model('send', 'energy'), 4282597, places=0)
- self.assertAlmostEqual(static_model('setSymbolRate', 'energy'), 962060, places=0)
- self.assertAlmostEqual(static_model('setTxPower', 'energy'), 288701, places=0)
- self.assertAlmostEqual(static_model('sleep', 'energy'), 104445, places=0)
- self.assertEqual(static_model('txDone', 'energy'), 0)
+ self.assertAlmostEqual(static_model("IDLE", "power"), 9500, places=0)
+ self.assertAlmostEqual(static_model("RX", "power"), 85177, places=0)
+ self.assertAlmostEqual(static_model("SLEEP", "power"), 143, places=0)
+ self.assertAlmostEqual(static_model("SLEEP_EWOR", "power"), 81801, places=0)
+ self.assertAlmostEqual(static_model("SYNTH_ON", "power"), 60036, places=0)
+ self.assertAlmostEqual(static_model("TX", "power"), 92461, places=0)
+ self.assertAlmostEqual(static_model("XOFF", "power"), 780, places=0)
+ self.assertAlmostEqual(static_model("crystal_off", "energy"), 114658, places=0)
+ self.assertAlmostEqual(static_model("eWOR", "energy"), 317556, places=0)
+ self.assertAlmostEqual(static_model("idle", "energy"), 717713, places=0)
+ self.assertAlmostEqual(static_model("init", "energy"), 23028941, places=0)
+ self.assertAlmostEqual(static_model("prepare_xmit", "energy"), 378552, places=0)
+ self.assertAlmostEqual(static_model("receive", "energy"), 380335, places=0)
+ self.assertAlmostEqual(static_model("send", "energy"), 4282597, places=0)
+ self.assertAlmostEqual(
+ static_model("setSymbolRate", "energy"), 962060, places=0
+ )
+ self.assertAlmostEqual(static_model("setTxPower", "energy"), 288701, places=0)
+ self.assertAlmostEqual(static_model("sleep", "energy"), 104445, places=0)
+ self.assertEqual(static_model("txDone", "energy"), 0)
param_model, param_info = model.get_fitted()
- self.assertEqual(param_info('IDLE', 'power'), None)
- self.assertEqual(param_info('RX', 'power')['function']._model_str,
- '0 + regression_arg(0) + regression_arg(1) * np.log(parameter(symbolrate) + 1)')
- self.assertEqual(param_info('SLEEP', 'power'), None)
- self.assertEqual(param_info('SLEEP_EWOR', 'power'), None)
- self.assertEqual(param_info('SYNTH_ON', 'power'), None)
- self.assertEqual(param_info('XOFF', 'power'), None)
+ self.assertEqual(param_info("IDLE", "power"), None)
+ self.assertEqual(
+ param_info("RX", "power")["function"].model_function,
+ "0 + regression_arg(0) + regression_arg(1) * np.log(parameter(symbolrate) + 1)",
+ )
+ self.assertEqual(param_info("SLEEP", "power"), None)
+ self.assertEqual(param_info("SLEEP_EWOR", "power"), None)
+ self.assertEqual(param_info("SYNTH_ON", "power"), None)
+ self.assertEqual(param_info("XOFF", "power"), None)
- self.assertAlmostEqual(param_info('RX', 'power')['function']._regression_args[0], 84415, places=0)
- self.assertAlmostEqual(param_info('RX', 'power')['function']._regression_args[1], 206, places=0)
+ self.assertAlmostEqual(
+ param_info("RX", "power")["function"].model_args[0], 84415, places=0
+ )
+ self.assertAlmostEqual(
+ param_info("RX", "power")["function"].model_args[1], 206, places=0
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_timingharness.py b/test/test_timingharness.py
index c8a422c..917e4e2 100755
--- a/test/test_timingharness.py
+++ b/test/test_timingharness.py
@@ -1,95 +1,157 @@
#!/usr/bin/env python3
-from dfatool.dfatool import AnalyticModel, TimingData, pta_trace_to_aggregate
+from dfatool.loader import TimingData, pta_trace_to_aggregate
+from dfatool.model import AnalyticModel
from dfatool.parameters import prune_dependent_parameters
import unittest
class TestModels(unittest.TestCase):
def test_model_singlefile_rf24(self):
- raw_data = TimingData(['test-data/20190815_111745_nRF24_no-rx.json'])
- preprocessed_data = raw_data.get_preprocessed_data(verbose=False)
+ raw_data = TimingData(["test-data/20190815_111745_nRF24_no-rx.json"])
+ preprocessed_data = raw_data.get_preprocessed_data()
by_name, parameters, arg_count = pta_trace_to_aggregate(preprocessed_data)
- model = AnalyticModel(by_name, parameters, arg_count, verbose=False)
- self.assertEqual(model.names, 'setPALevel setRetries setup write'.split(' '))
+ model = AnalyticModel(by_name, parameters, arg_count)
+ self.assertEqual(model.names, "setPALevel setRetries setup write".split(" "))
static_model = model.get_static()
- self.assertAlmostEqual(static_model('setPALevel', 'duration'), 146, places=0)
- self.assertAlmostEqual(static_model('setRetries', 'duration'), 73, places=0)
- self.assertAlmostEqual(static_model('setup', 'duration'), 6533, places=0)
- self.assertAlmostEqual(static_model('write', 'duration'), 12634, places=0)
-
- for transition in 'setPALevel setRetries setup write'.split(' '):
- self.assertAlmostEqual(model.stats.param_dependence_ratio(transition, 'duration', 'channel'), 0, places=2)
+ self.assertAlmostEqual(static_model("setPALevel", "duration"), 146, places=0)
+ self.assertAlmostEqual(static_model("setRetries", "duration"), 73, places=0)
+ self.assertAlmostEqual(static_model("setup", "duration"), 6533, places=0)
+ self.assertAlmostEqual(static_model("write", "duration"), 12634, places=0)
+
+ for transition in "setPALevel setRetries setup write".split(" "):
+ self.assertAlmostEqual(
+ model.stats.param_dependence_ratio(transition, "duration", "channel"),
+ 0,
+ places=2,
+ )

        param_model, param_info = model.get_fitted()
- self.assertEqual(param_info('setPALevel', 'duration'), None)
- self.assertEqual(param_info('setRetries', 'duration'), None)
- self.assertEqual(param_info('setup', 'duration'), None)
- self.assertEqual(param_info('write', 'duration')['function']._model_str, '0 + regression_arg(0) + regression_arg(1) * parameter(max_retry_count) + regression_arg(2) * parameter(retry_delay) + regression_arg(3) * parameter(max_retry_count) * parameter(retry_delay)')
-
- self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[0], 1163, places=0)
- self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[1], 464, places=0)
- self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[2], 1, places=0)
- self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[3], 1, places=0)
+ self.assertEqual(param_info("setPALevel", "duration"), None)
+ self.assertEqual(param_info("setRetries", "duration"), None)
+ self.assertEqual(param_info("setup", "duration"), None)
+ self.assertEqual(
+ param_info("write", "duration")["function"].model_function,
+ "0 + regression_arg(0) + regression_arg(1) * parameter(max_retry_count) + regression_arg(2) * parameter(retry_delay) + regression_arg(3) * parameter(max_retry_count) * parameter(retry_delay)",
+ )
+
+ self.assertAlmostEqual(
+ param_info("write", "duration")["function"].model_args[0], 1163, places=0,
+ )
+ self.assertAlmostEqual(
+ param_info("write", "duration")["function"].model_args[1], 464, places=0,
+ )
+ self.assertAlmostEqual(
+ param_info("write", "duration")["function"].model_args[2], 1, places=0
+ )
+ self.assertAlmostEqual(
+ param_info("write", "duration")["function"].model_args[3], 1, places=0
+ )

    def test_dependent_parameter_pruning(self):
- raw_data = TimingData(['test-data/20190815_103347_nRF24_no-rx.json'])
- preprocessed_data = raw_data.get_preprocessed_data(verbose=False)
+ raw_data = TimingData(["test-data/20190815_103347_nRF24_no-rx.json"])
+ preprocessed_data = raw_data.get_preprocessed_data()
        by_name, parameters, arg_count = pta_trace_to_aggregate(preprocessed_data)
        prune_dependent_parameters(by_name, parameters)
- model = AnalyticModel(by_name, parameters, arg_count, verbose=False)
- self.assertEqual(model.names, 'getObserveTx setPALevel setRetries setup write'.split(' '))
+ model = AnalyticModel(by_name, parameters, arg_count)
+ self.assertEqual(
+ model.names, "getObserveTx setPALevel setRetries setup write".split(" ")
+ )
        static_model = model.get_static()
- self.assertAlmostEqual(static_model('getObserveTx', 'duration'), 75, places=0)
- self.assertAlmostEqual(static_model('setPALevel', 'duration'), 146, places=0)
- self.assertAlmostEqual(static_model('setRetries', 'duration'), 73, places=0)
- self.assertAlmostEqual(static_model('setup', 'duration'), 6533, places=0)
- self.assertAlmostEqual(static_model('write', 'duration'), 12634, places=0)
-
- for transition in 'getObserveTx setPALevel setRetries setup write'.split(' '):
- self.assertAlmostEqual(model.stats.param_dependence_ratio(transition, 'duration', 'channel'), 0, places=2)
+ self.assertAlmostEqual(static_model("getObserveTx", "duration"), 75, places=0)
+ self.assertAlmostEqual(static_model("setPALevel", "duration"), 146, places=0)
+ self.assertAlmostEqual(static_model("setRetries", "duration"), 73, places=0)
+ self.assertAlmostEqual(static_model("setup", "duration"), 6533, places=0)
+ self.assertAlmostEqual(static_model("write", "duration"), 12634, places=0)
+
+ for transition in "getObserveTx setPALevel setRetries setup write".split(" "):
+ self.assertAlmostEqual(
+ model.stats.param_dependence_ratio(transition, "duration", "channel"),
+ 0,
+ places=2,
+ )

        param_model, param_info = model.get_fitted()
- self.assertEqual(param_info('getObserveTx', 'duration'), None)
- self.assertEqual(param_info('setPALevel', 'duration'), None)
- self.assertEqual(param_info('setRetries', 'duration'), None)
- self.assertEqual(param_info('setup', 'duration'), None)
- self.assertEqual(param_info('write', 'duration')['function']._model_str, '0 + regression_arg(0) + regression_arg(1) * parameter(max_retry_count) + regression_arg(2) * parameter(retry_delay) + regression_arg(3) * parameter(max_retry_count) * parameter(retry_delay)')
-
- self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[0], 1163, places=0)
- self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[1], 464, places=0)
- self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[2], 1, places=0)
- self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[3], 1, places=0)
+ self.assertEqual(param_info("getObserveTx", "duration"), None)
+ self.assertEqual(param_info("setPALevel", "duration"), None)
+ self.assertEqual(param_info("setRetries", "duration"), None)
+ self.assertEqual(param_info("setup", "duration"), None)
+ self.assertEqual(
+ param_info("write", "duration")["function"].model_function,
+ "0 + regression_arg(0) + regression_arg(1) * parameter(max_retry_count) + regression_arg(2) * parameter(retry_delay) + regression_arg(3) * parameter(max_retry_count) * parameter(retry_delay)",
+ )
+
+ self.assertAlmostEqual(
+ param_info("write", "duration")["function"].model_args[0], 1163, places=0,
+ )
+ self.assertAlmostEqual(
+ param_info("write", "duration")["function"].model_args[1], 464, places=0,
+ )
+ self.assertAlmostEqual(
+ param_info("write", "duration")["function"].model_args[2], 1, places=0
+ )
+ self.assertAlmostEqual(
+ param_info("write", "duration")["function"].model_args[3], 1, places=0
+ )

    def test_function_override(self):
- raw_data = TimingData(['test-data/20190815_122531_nRF24_no-rx.json'])
- preprocessed_data = raw_data.get_preprocessed_data(verbose=False)
+ raw_data = TimingData(["test-data/20190815_122531_nRF24_no-rx.json"])
+ preprocessed_data = raw_data.get_preprocessed_data()
        by_name, parameters, arg_count = pta_trace_to_aggregate(preprocessed_data)
- model = AnalyticModel(by_name, parameters, arg_count, verbose=False, function_override={('write', 'duration'): '(parameter(auto_ack!) * (regression_arg(0) + regression_arg(1) * parameter(max_retry_count) + regression_arg(2) * parameter(retry_delay) + regression_arg(3) * parameter(max_retry_count) * parameter(retry_delay))) + ((1 - parameter(auto_ack!)) * regression_arg(4))'})
- self.assertEqual(model.names, 'setAutoAck setPALevel setRetries setup write'.split(' '))
+ model = AnalyticModel(
+ by_name,
+ parameters,
+ arg_count,
+ function_override={
+ (
+ "write",
+ "duration",
+ ): "(parameter(auto_ack!) * (regression_arg(0) + regression_arg(1) * parameter(max_retry_count) + regression_arg(2) * parameter(retry_delay) + regression_arg(3) * parameter(max_retry_count) * parameter(retry_delay))) + ((1 - parameter(auto_ack!)) * regression_arg(4))"
+ },
+ )
+ self.assertEqual(
+ model.names, "setAutoAck setPALevel setRetries setup write".split(" ")
+ )
        static_model = model.get_static()
- self.assertAlmostEqual(static_model('setAutoAck', 'duration'), 72, places=0)
- self.assertAlmostEqual(static_model('setPALevel', 'duration'), 146, places=0)
- self.assertAlmostEqual(static_model('setRetries', 'duration'), 73, places=0)
- self.assertAlmostEqual(static_model('setup', 'duration'), 6533, places=0)
- self.assertAlmostEqual(static_model('write', 'duration'), 1181, places=0)
-
- for transition in 'setAutoAck setPALevel setRetries setup write'.split(' '):
- self.assertAlmostEqual(model.stats.param_dependence_ratio(transition, 'duration', 'channel'), 0, places=2)
+ self.assertAlmostEqual(static_model("setAutoAck", "duration"), 72, places=0)
+ self.assertAlmostEqual(static_model("setPALevel", "duration"), 146, places=0)
+ self.assertAlmostEqual(static_model("setRetries", "duration"), 73, places=0)
+ self.assertAlmostEqual(static_model("setup", "duration"), 6533, places=0)
+ self.assertAlmostEqual(static_model("write", "duration"), 1181, places=0)
+
+ for transition in "setAutoAck setPALevel setRetries setup write".split(" "):
+ self.assertAlmostEqual(
+ model.stats.param_dependence_ratio(transition, "duration", "channel"),
+ 0,
+ places=2,
+ )

        param_model, param_info = model.get_fitted()
- self.assertEqual(param_info('setAutoAck', 'duration'), None)
- self.assertEqual(param_info('setPALevel', 'duration'), None)
- self.assertEqual(param_info('setRetries', 'duration'), None)
- self.assertEqual(param_info('setup', 'duration'), None)
- self.assertEqual(param_info('write', 'duration')['function']._model_str, '(parameter(auto_ack!) * (regression_arg(0) + regression_arg(1) * parameter(max_retry_count) + regression_arg(2) * parameter(retry_delay) + regression_arg(3) * parameter(max_retry_count) * parameter(retry_delay))) + ((1 - parameter(auto_ack!)) * regression_arg(4))')
-
- self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[0], 1162, places=0)
- self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[1], 464, places=0)
- self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[2], 1, places=0)
- self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[3], 1, places=0)
- self.assertAlmostEqual(param_info('write', 'duration')['function']._regression_args[4], 1086, places=0)
-
-
-if __name__ == '__main__':
+ self.assertEqual(param_info("setAutoAck", "duration"), None)
+ self.assertEqual(param_info("setPALevel", "duration"), None)
+ self.assertEqual(param_info("setRetries", "duration"), None)
+ self.assertEqual(param_info("setup", "duration"), None)
+ self.assertEqual(
+ param_info("write", "duration")["function"].model_function,
+ "(parameter(auto_ack!) * (regression_arg(0) + regression_arg(1) * parameter(max_retry_count) + regression_arg(2) * parameter(retry_delay) + regression_arg(3) * parameter(max_retry_count) * parameter(retry_delay))) + ((1 - parameter(auto_ack!)) * regression_arg(4))",
+ )
+
+ self.assertAlmostEqual(
+ param_info("write", "duration")["function"].model_args[0], 1162, places=0,
+ )
+ self.assertAlmostEqual(
+ param_info("write", "duration")["function"].model_args[1], 464, places=0,
+ )
+ self.assertAlmostEqual(
+ param_info("write", "duration")["function"].model_args[2], 1, places=0
+ )
+ self.assertAlmostEqual(
+ param_info("write", "duration")["function"].model_args[3], 1, places=0
+ )
+ self.assertAlmostEqual(
+ param_info("write", "duration")["function"].model_args[4], 1086, places=0,
+ )
+
+
+if __name__ == "__main__":
    unittest.main()
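The tests above double as usage documentation for the refactored modules: loading and aggregation now live in dfatool.loader, modeling in dfatool.model, and fitted functions expose model_function and model_args in place of the former private _model_str and _regression_args attributes. A minimal end-to-end sketch mirroring test_model_singlefile_rf24, assuming the referenced trace file from test-data/ is available:

```python
#!/usr/bin/env python3
# Minimal sketch mirroring test_model_singlefile_rf24 above.
from dfatool.loader import TimingData, pta_trace_to_aggregate
from dfatool.model import AnalyticModel

raw_data = TimingData(["test-data/20190815_111745_nRF24_no-rx.json"])
by_name, parameters, arg_count = pta_trace_to_aggregate(
    raw_data.get_preprocessed_data()
)
model = AnalyticModel(by_name, parameters, arg_count)

# Static model: one value per (name, attribute) pair.
static_model = model.get_static()
print(static_model("write", "duration"))  # ~12634, per the assertion above

# Fitted model: param_info(name, attribute) is None when no parameter
# dependence was detected for that attribute.
param_model, param_info = model.get_fitted()
info = param_info("write", "duration")
if info is not None:
    print(info["function"].model_function)  # printable model string
    print(info["function"].model_args)  # fitted regression arguments
```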