-rw-r--r--  .gitlab-ci.yml                 |   4
-rw-r--r--  .gitmodules                    |   6
-rwxr-xr-x  bin/analyze-archive.py         | 446
-rwxr-xr-x  bin/analyze-timing.py          |   6
-rwxr-xr-x  bin/eval-rel-energy.py         |   7
-rwxr-xr-x  bin/explore-kconfig.py         |  98
-rwxr-xr-x  bin/generate-dfa-benchmark.py  |  43
-rwxr-xr-x  bin/test_corrcoef.py           |   8
l---------  bin/versuchung                 |   1
m---------  ext/kconfiglib                 |   0
m---------  ext/versuchung                 |   0
-rw-r--r--  lib/kconfig.py                 | 222
l---------  lib/kconfiglib.py              |   1
-rw-r--r--  lib/loader.py                  |   2
-rw-r--r--  lib/model.py                   |  17
-rw-r--r--  lib/parameters.py              |   3
-rw-r--r--  lib/runner.py                  | 242
-rw-r--r--  lib/validation.py              |  21
-rwxr-xr-x  test/test_ptamodel.py          | 465
19 files changed, 1219 insertions, 373 deletions
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 52d6e1c..c0b6c96 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -9,6 +9,8 @@ lint_python:
- apt-get update -qy
- apt-get install -y black
- black --check --diff bin
+ rules:
+ - if: '$CI_COMMIT_BRANCH == "master"'
run_tests:
stage: test
@@ -28,7 +30,7 @@ run_tests:
- wget -qO test-data/20190815_111745_nRF24_no-rx.json https://lib.finalrewind.org/energy-models/20190815_111745_nRF24_no-rx.json
- wget -qO test-data/20190815_122531_nRF24_no-rx.json https://lib.finalrewind.org/energy-models/20190815_122531_nRF24_no-rx.json
- pytest-3 --cov=lib
- - python3-coverage html
+ - python3-coverage html -i
artifacts:
paths:
- htmlcov/
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..2baed33
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,6 @@
+[submodule "kconfiglib"]
+ path = ext/kconfiglib
+ url = https://github.com/ulfalizer/Kconfiglib.git
+[submodule "versuchung"]
+ path = ext/versuchung
+ url = https://github.com/stettberger/versuchung.git
diff --git a/bin/analyze-archive.py b/bin/analyze-archive.py
index 10fe304..ca36745 100755
--- a/bin/analyze-archive.py
+++ b/bin/analyze-archive.py
@@ -1,73 +1,11 @@
#!/usr/bin/env python3
"""
-analyze-archive -- generate PTA energy model from annotated legacy MIMOSA traces.
-
-Usage:
-PYTHONPATH=lib bin/analyze-archive.py [options] <tracefiles ...>
+analyze-archive - generate PTA energy model from dfatool benchmark traces
analyze-archive generates a PTA energy model from one or more annotated
-traces generated by MIMOSA/dfatool-legacy. By default, it does nothing else --
-use one of the --plot-* or --show-* options to examine the generated model.
-
-Options:
---plot-unparam=<name>:<attribute>:<Y axis label>[;<name>:<attribute>:<label>;...]
- Plot all mesurements for <name> <attribute> without regard for parameter values.
- X axis is measurement number/id.
-
---plot-param=<name> <attribute> <parameter> [gplearn function][;<name> <attribute> <parameter> [function];...]
- Plot measurements for <name> <attribute> by <parameter>.
- X axis is parameter value.
- Plots the model function as one solid line for each combination of non-<parameter>
- parameters. Also plots the corresponding measurements.
- If gplearn function is set, it is plotted using dashed lines.
-
---plot-traces=<name>
- Plot power trace for state or transition <name>.
-
---export-traces=<directory>
- Export power traces of all states and transitions to <directory>.
- Creates a JSON file for each state and transition. Each JSON file
- lists all occurences of the corresponding state/transition in the
- benchmark's PTA trace. Each occurence contains the corresponding PTA
- parameters (if any) in 'parameter' and measurement results in 'offline'.
- As measurements are typically run repeatedly, 'offline' is in turn a list
- of measurements: offline[0]['uW'] is the power trace of the first
- measurement of this state/transition, offline[1]['uW'] corresponds t the
- second measurement, etc. Values are provided in microwatts.
- For example, TX.json[0].offline[0].uW corresponds to the first measurement
- of the first TX state in the benchmark, and TX.json[5].offline[2].uW
- corresponds to the third measurement of the sixth TX state in the benchmark.
- WARNING: Several GB of RAM and disk space are required for complex measurements.
- (JSON files may grow very large -- we trade efficiency for easy handling)
-
---info
- Show state duration and (for each state and transition) number of measurements and parameter values
-
---show-models=<static|paramdetection|param|all|tex|html>
- static: show static model values as well as parameter detection heuristic
- paramdetection: show stddev of static/lut/fitted model
- param: show parameterized model functions and regression variable values
- all: all of the above
- tex: print tex/pgfplots-compatible model data on stdout
- html: print model and quality data as HTML table on stdout
-
---show-quality=<table|summary|all|tex|html>
- table: show static/fitted/lut SMAPE and MAE for each name and attribute
- summary: show static/fitted/lut SMAPE and MAE for each attribute, averaged over all states/transitions
- all: all of the above
- tex: print tex/pgfplots-compatible model quality data on stdout
-
---ignored-trace-indexes=<i1,i2,...>
- Specify traces which should be ignored due to bogus data. 1 is the first
- trace, 2 the second, and so on.
-
---discard-outliers=
- not supported at the moment
-
---cross-validate=<method>:<count>
- Perform cross validation when computing model quality.
- Only works with --show-quality=table at the moment.
+traces generated by dfatool. By default, it does nothing else -- use one of the --plot-* or --show-* options to examine the generated model.
+Cross-Validation help:
If <method> is "montecarlo": Randomly divide data into 2/3 training and 1/3
validation, <count> times. Reported model quality is the average of all
validation runs. Data is partitioned without regard for parameter values,
@@ -83,37 +21,25 @@ Options:
so a specific parameter combination may be present in both training and
validation sets or just one of them.
---function-override=<name attribute function>[;<name> <attribute> <function>;...]
- Manually specify the function to fit for <name> <attribute>. A function
- specified this way bypasses parameter detection: It is always assigned,
- even if the model seems to be independent of the parameters it references.
-
---with-safe-functions
- If set, include "safe" functions (safe_log, safe_inv, safe_sqrt) which are
- also defined for cases such as safe_inv(0) or safe_sqrt(-1). This allows
- a greater range of functions to be tried during fitting.
-
---filter-param=<parameter name>=<parameter value>[,<parameter name>=<parameter value>...]
- Only consider measurements where <parameter name> is <parameter value>
- All other measurements (including those where it is None, that is, has
- not been set yet) are discarded. Note that this may remove entire
- function calls from the model.
-
---hwmodel=<hwmodel.json|hwmodel.dfa>
- Load DFA hardware model from JSON or YAML
-
---export-energymodel=<model.json>
- Export energy model. Works out of the box for v1 and v2 logfiles. Requires --hwmodel for v0 logfiles.
-
---no-cache
- Do not load cached measurement results
+Trace Export:
+ Each JSON file lists all occurrences of the corresponding state/transition in the
+ benchmark's PTA trace. Each occurrence contains the corresponding PTA
+ parameters (if any) in 'parameter' and measurement results in 'offline'.
+ As measurements are typically run repeatedly, 'offline' is in turn a list
+ of measurements: offline[0]['uW'] is the power trace of the first
+ measurement of this state/transition, offline[1]['uW'] corresponds to the
+ second measurement, etc. Values are provided in microwatts.
+ For example, TX.json[0].offline[0].uW corresponds to the first measurement
+ of the first TX state in the benchmark, and TX.json[5].offline[2].uW
+ corresponds to the third measurement of the sixth TX state in the benchmark.
+ WARNING: Several GB of RAM and disk space are required for complex measurements.
+ (JSON files may grow very large -- we trade efficiency for easy handling)
"""
-import getopt
+import argparse
import json
import logging
import random
-import re
import sys
from dfatool import plotter
from dfatool.loader import RawData, pta_trace_to_aggregate
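
A minimal sketch (not part of this commit) of consuming a file written by --export-traces, assuming the TX.json layout described in the Trace Export section above; the file name is hypothetical:

    import json
    import numpy as np

    # "TX.json" is a hypothetical file name; --export-traces writes one JSON file
    # per state/transition into the chosen directory.
    with open("TX.json") as f:
        occurrences = json.load(f)

    first = occurrences[0]                       # first occurrence of the TX state
    print("parameters:", first.get("parameter"))
    trace = np.array(first["offline"][0]["uW"])  # power trace of its first measurement, in µW
    print(f"mean power: {trace.mean():.1f} µW over {len(trace)} samples")
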
@@ -123,8 +49,6 @@ from dfatool.validation import CrossValidator
from dfatool.utils import filter_aggregate_by_param
from dfatool.automata import PTA
-opt = dict()
-
def print_model_quality(results):
for state_or_tran in results.keys():
@@ -148,6 +72,15 @@ def format_quality_measures(result):
def model_quality_table(result_lists, info_list):
+ print(
+ "{:20s} {:15s} {:19s} {:19s} {:19s}".format(
+ "key",
+ "attribute",
+ "static".center(19),
+ "parameterized".center(19),
+ "LUT".center(19),
+ )
+ )
for state_or_tran in result_lists[0]["by_name"].keys():
for key in result_lists[0]["by_name"][state_or_tran].keys():
buf = "{:20s} {:15s}".format(state_or_tran, key)
@@ -158,7 +91,7 @@ def model_quality_table(result_lists, info_list):
result = results["by_name"][state_or_tran][key]
buf += format_quality_measures(result)
else:
- buf += "{:6}----{:9}".format("", "")
+ buf += "{:7}----{:8}".format("", "")
print(buf)
@@ -290,11 +223,36 @@ def print_html_model_data(model, pm, pq, lm, lq, am, ai, aq):
print("</tr>")
print("</table>")
+def plot_traces(preprocessed_data, sot_name):
+ traces = list()
+ for trace in preprocessed_data:
+ for state_or_transition in trace["trace"]:
+ if state_or_transition["name"] == sot_name:
+ traces.extend(
+ map(lambda x: x["uW"], state_or_transition["offline"])
+ )
+ if len(traces) == 0:
+ print(
+ f"""Did not find traces for state or transition {sot_name}. Abort.""",
+ file=sys.stderr,
+ )
+ sys.exit(2)
+
+ if len(traces) > 40:
+ print(f"""Truncating plot to 40 of {len(traces)} traces (random sample)""")
+ traces = random.sample(traces, 40)
+
+ plotter.plot_y(
+ traces,
+ xlabel="t [1e-5 s]",
+ ylabel="P [uW]",
+ title=sot_name,
+ family=True,
+ )
if __name__ == "__main__":
ignored_trace_indexes = []
- discard_outliers = None
safe_functions_enabled = False
function_override = {}
show_models = []
@@ -305,80 +263,176 @@ if __name__ == "__main__":
xv_method = None
xv_count = 10
- try:
- optspec = (
- "info no-cache "
- "plot-unparam= plot-param= plot-traces= show-models= show-quality= "
- "ignored-trace-indexes= discard-outliers= function-override= "
- "export-traces= "
- "filter-param= "
- "log-level= "
- "cross-validate= "
- "with-safe-functions hwmodel= export-energymodel="
- )
- raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(" "))
-
- for option, parameter in raw_opts:
- optname = re.sub(r"^--", "", option)
- opt[optname] = parameter
-
- if "ignored-trace-indexes" in opt:
- ignored_trace_indexes = list(
- map(int, opt["ignored-trace-indexes"].split(","))
- )
- if 0 in ignored_trace_indexes:
- print("[E] arguments to --ignored-trace-indexes start from 1")
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__
+ )
+ parser.add_argument(
+ "--info",
+ action="store_true",
+ help="Show state duration and (for each state and transition) number of measurements and parameter values",
+ )
+ parser.add_argument(
+ "--no-cache", action="store_true", help="Do not load cached measurement results"
+ )
+ parser.add_argument(
+ "--plot-unparam",
+ metavar="<name>:<attribute>:<Y axis label>[;<name>:<attribute>:<label>;...]",
+ type=str,
+ help="Plot all measurements for <name> <attribute> without regard for parameter values. "
+ "X axis is measurement number/id.",
+ )
+ parser.add_argument(
+ "--plot-param",
+ metavar="<name> <attribute> <parameter> [gplearn function][;<name> <attribute> <parameter> [function];...]",
+ type=str,
+ help="Plot measurements for <name> <attribute> by <parameter>. "
+ "X axis is parameter value. "
+ "Plots the model function as one solid line for each combination of non-<parameter> parameters. "
+ "Also plots the corresponding measurements. "
+ "If gplearn function is set, it is plotted using dashed lines.",
+ )
+ parser.add_argument(
+ "--plot-traces",
+ metavar="NAME",
+ type=str,
+ help="Plot power trace for state or transition NAME",
+ )
+ parser.add_argument(
+ "--show-models",
+ choices=["static", "paramdetection", "param", "all", "tex", "html"],
+ help="static: show static model values as well as parameter detection heuristic.\n"
+ "paramdetection: show stddev of static/lut/fitted model\n"
+ "param: show parameterized model functions and regression variable values\n"
+ "all: all of the above\n"
+ "tex: print tex/pgfplots-compatible model data on stdout\n"
+ "html: print model and quality data as HTML table on stdout",
+ )
+ parser.add_argument(
+ "--show-quality",
+ choices=["table", "summary", "all", "tex", "html"],
+ help="table: show static/fitted/lut SMAPE and MAE for each name and attribute.\n"
+ "summary: show static/fitted/lut SMAPE and MAE for each attribute, averaged over all states/transitions.\n"
+ "all: all of the above.\n"
+ "tex: print tex/pgfplots-compatible model quality data on stdout.",
+ )
+ parser.add_argument(
+ "--ignored-trace-indexes",
+ metavar="<i1,i2,...>",
+ type=str,
+ help="Specify traces which should be ignored due to bogus data. "
+ "1 is the first trace, 2 the second, and so on.",
+ )
+ parser.add_argument(
+ "--function-override",
+ metavar="<name> <attribute> <function>[;<name> <attribute> <function>;...]",
+ type=str,
+ help="Manually specify the function to fit for <name> <attribute>. "
+ "A function specified this way bypasses parameter detection: "
+ "It is always assigned, even if the model seems to be independent of the parameters it references.",
+ )
+ parser.add_argument(
+ "--export-traces",
+ metavar="DIRECTORY",
+ type=str,
+ help="Export power traces of all states and transitions to DIRECTORY. "
+ "Creates a JSON file for each state and transition.",
+ )
+ parser.add_argument(
+ "--filter-param",
+ metavar="<parameter name>=<parameter value>[,<parameter name>=<parameter value>...]",
+ type=str,
+ help="Only consider measurements where <parameter name> is <parameter value>. "
+ "All other measurements (including those where it is None, that is, has not been set yet) are discarded. "
+ "Note that this may remove entire function calls from the model.",
+ )
+ parser.add_argument(
+ "--log-level",
+ metavar="LEVEL",
+ choices=["debug", "info", "warning", "error"],
+ help="Set log level",
+ )
+ parser.add_argument(
+ "--cross-validate",
+ metavar="<method>:<count>",
+ type=str,
+ help="Perform cross validation when computing model quality. "
+ "Only works with --show-quality=table at the moment.",
+ )
+ parser.add_argument(
+ "--with-safe-functions",
+ action="store_true",
+ help="Include 'safe' functions (safe_log, safe_inv, safe_sqrt) which are also defined for 0 and -1. "
+ "This allows a greater range of functions to be tried during fitting.",
+ )
+ parser.add_argument(
+ "--hwmodel",
+ metavar="FILE",
+ type=str,
+ help="Load DFA hardware model from JSON or YAML FILE",
+ )
+ parser.add_argument(
+ "--export-energymodel",
+ metavar="FILE",
+ type=str,
+ help="Export JSON energy model to FILE. Works out of the box for v1 and v2, requires --hwmodel for v0",
+ )
+ parser.add_argument("measurement", nargs="+")
- if "discard-outliers" in opt:
- discard_outliers = float(opt["discard-outliers"])
+ args = parser.parse_args()
- if "function-override" in opt:
- for function_desc in opt["function-override"].split(";"):
- state_or_tran, attribute, *function_str = function_desc.split(" ")
- function_override[(state_or_tran, attribute)] = " ".join(function_str)
+ if args.log_level:
+ numeric_level = getattr(logging, args.log_level.upper(), None)
+ if not isinstance(numeric_level, int):
+ print(f"Invalid log level: {args.log_level}", file=sys.stderr)
+ sys.exit(1)
+ logging.basicConfig(level=numeric_level)
- if "show-models" in opt:
- show_models = opt["show-models"].split(",")
+ if args.ignored_trace_indexes:
+ ignored_trace_indexes = list(map(int, args.ignored_trace_indexes.split(",")))
+ if 0 in ignored_trace_indexes:
+ logging.error("arguments to --ignored-trace-indexes start from 1")
- if "show-quality" in opt:
- show_quality = opt["show-quality"].split(",")
+ if args.function_override:
+ for function_desc in args.function_override.split(";"):
+ state_or_tran, attribute, *function_str = function_desc.split(" ")
+ function_override[(state_or_tran, attribute)] = " ".join(function_str)
- if "cross-validate" in opt:
- xv_method, xv_count = opt["cross-validate"].split(":")
- xv_count = int(xv_count)
+ if args.show_models:
+ show_models = args.show_models.split(",")
- if "filter-param" in opt:
- opt["filter-param"] = list(
- map(lambda x: x.split("="), opt["filter-param"].split(","))
- )
- else:
- opt["filter-param"] = list()
+ if args.show_quality:
+ show_quality = args.show_quality.split(",")
- if "with-safe-functions" in opt:
- safe_functions_enabled = True
+ if args.cross_validate:
+ xv_method, xv_count = args.cross_validate.split(":")
+ xv_count = int(xv_count)
- if "hwmodel" in opt:
- pta = PTA.from_file(opt["hwmodel"])
+ if args.filter_param:
+ args.filter_param = list(
+ map(lambda x: x.split("="), args.filter_param.split(","))
+ )
+ else:
+ args.filter_param = list()
- if "log-level" in opt:
- numeric_level = getattr(logging, opt["log-level"].upper(), None)
- if not isinstance(numeric_level, int):
- print(f"Invalid log level: {loglevel}", file=sys.stderr)
- sys.exit(1)
- logging.basicConfig(level=numeric_level)
+ if args.with_safe_functions:
+ safe_functions_enabled = True
- except getopt.GetoptError as err:
- print(err, file=sys.stderr)
- sys.exit(2)
+ if args.hwmodel:
+ pta = PTA.from_file(args.hwmodel)
raw_data = RawData(
- args,
- with_traces=("export-traces" in opt or "plot-traces" in opt),
- skip_cache=("no-cache" in opt),
+ args.measurement,
+ with_traces=(args.export_traces is not None or args.plot_traces is not None),
+ skip_cache=args.no_cache,
)
- if "info" in opt:
+ if args.info:
print(" ".join(raw_data.filenames) + ":")
+ if raw_data.ptalog:
+ options = " --".join(
+ map(lambda kv: f"{kv[0]}={str(kv[1])}", raw_data.ptalog["opt"].items())
+ )
+ print(f" Options: --{options}")
if raw_data.version <= 1:
data_source = "MIMOSA"
elif raw_data.version == 2:
@@ -392,7 +446,7 @@ if __name__ == "__main__":
preprocessed_data = raw_data.get_preprocessed_data()
- if "info" in opt:
+ if args.info:
print(
f""" Valid Runs: {raw_data.preprocessing_stats["num_valid"]}/{raw_data.preprocessing_stats["num_runs"]}"""
)
@@ -401,7 +455,7 @@ if __name__ == "__main__":
)
print(f""" State Duration: {" / ".join(state_durations)} ms""")
- if "export-traces" in opt:
+ if args.export_traces:
uw_per_sot = dict()
for trace in preprocessed_data:
for state_or_transition in trace["trace"]:
@@ -412,37 +466,13 @@ if __name__ == "__main__":
elem["uW"] = list(elem["uW"])
uw_per_sot[name].append(state_or_transition)
for name, data in uw_per_sot.items():
- target = f"{opt['export-traces']}/{name}.json"
+ target = f"{args.export_traces}/{name}.json"
print(f"exporting {target} ...")
with open(target, "w") as f:
json.dump(data, f)
- if "plot-traces" in opt:
- traces = list()
- for trace in preprocessed_data:
- for state_or_transition in trace["trace"]:
- if state_or_transition["name"] == opt["plot-traces"]:
- traces.extend(
- map(lambda x: x["uW"], state_or_transition["offline"])
- )
- if len(traces) == 0:
- print(
- f"""Did not find traces for state or transition {opt["plot-traces"]}. Abort.""",
- file=sys.stderr,
- )
- sys.exit(2)
-
- if len(traces) > 20:
- print(f"""Truncating plot to 40 of {len(traces)} traces (random sample)""")
- traces = random.sample(traces, 40)
-
- plotter.plot_y(
- traces,
- xlabel="t [1e-5 s]",
- ylabel="P [uW]",
- title=opt["plot-traces"],
- family=True,
- )
+ if args.plot_traces:
+ plot_traces(preprocessed_data, args.plot_traces)
if raw_data.preprocessing_stats["num_valid"] == 0:
print("No valid data available. Abort.", file=sys.stderr)
@@ -455,14 +485,13 @@ if __name__ == "__main__":
preprocessed_data, ignored_trace_indexes
)
- filter_aggregate_by_param(by_name, parameters, opt["filter-param"])
+ filter_aggregate_by_param(by_name, parameters, args.filter_param)
model = PTAModel(
by_name,
parameters,
arg_count,
traces=preprocessed_data,
- discard_outliers=discard_outliers,
function_override=function_override,
pta=pta,
)
@@ -470,7 +499,7 @@ if __name__ == "__main__":
if xv_method:
xv = CrossValidator(PTAModel, by_name, parameters, arg_count)
- if "info" in opt:
+ if args.info:
for state in model.states():
print("{}:".format(state))
print(f""" Number of Measurements: {len(by_name[state]["power"])}""")
@@ -492,8 +521,8 @@ if __name__ == "__main__":
)
)
- if "plot-unparam" in opt:
- for kv in opt["plot-unparam"].split(";"):
+ if args.plot_unparam:
+ for kv in args.plot_unparam.split(";"):
state_or_trans, attribute, ylabel = kv.split(":")
fname = "param_y_{}_{}.pdf".format(state_or_trans, attribute)
plotter.plot_y(
@@ -703,7 +732,7 @@ if __name__ == "__main__":
)
if "overall" in show_quality or "all" in show_quality:
- print("overall static/param/lut MAE assuming equal state distribution:")
+ print("overall state static/param/lut MAE assuming equal state distribution:")
print(
" {:6.1f} / {:6.1f} / {:6.1f} µW".format(
model.assess_states(static_model),
@@ -711,15 +740,30 @@ if __name__ == "__main__":
model.assess_states(lut_model),
)
)
- print("overall static/param/lut MAE assuming 95% STANDBY1:")
- distrib = {"STANDBY1": 0.95, "POWERDOWN": 0.03, "TX": 0.01, "RX": 0.01}
- print(
- " {:6.1f} / {:6.1f} / {:6.1f} µW".format(
- model.assess_states(static_model, distribution=distrib),
- model.assess_states(param_model, distribution=distrib),
- model.assess_states(lut_model, distribution=distrib),
+ distrib = dict()
+ num_states = len(model.states())
+ p95_state = None
+ for state in model.states():
+ distrib[state] = 1.0 / num_states
+
+ if "STANDBY1" in model.states():
+ p95_state = "STANDBY1"
+ elif "SLEEP" in model.states():
+ p95_state = "SLEEP"
+
+ if p95_state is not None:
+ for state in distrib.keys():
+ distrib[state] = 0.05 / (num_states - 1)
+ distrib[p95_state] = 0.95
+
+ print(f"overall state static/param/lut MAE assuming 95% {p95_state}:")
+ print(
+ " {:6.1f} / {:6.1f} / {:6.1f} µW".format(
+ model.assess_states(static_model, distribution=distrib),
+ model.assess_states(param_model, distribution=distrib),
+ model.assess_states(lut_model, distribution=distrib),
+ )
)
- )
if "summary" in show_quality or "all" in show_quality:
model_summary_table(
@@ -730,8 +774,8 @@ if __name__ == "__main__":
]
)
- if "plot-param" in opt:
- for kv in opt["plot-param"].split(";"):
+ if args.plot_param:
+ for kv in args.plot_param.split(";"):
try:
state_or_trans, attribute, param_name, *function = kv.split(" ")
except ValueError:
@@ -752,14 +796,14 @@ if __name__ == "__main__":
extra_function=function,
)
- if "export-energymodel" in opt:
+ if args.export_energymodel:
if not pta:
print(
"[E] --export-energymodel requires --hwmodel to be set", file=sys.stderr
)
sys.exit(1)
json_model = model.to_json()
- with open(opt["export-energymodel"], "w") as f:
+ with open(args.export_energymodel, "w") as f:
json.dump(json_model, f, indent=2, sort_keys=True)
sys.exit(0)
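
The montecarlo cross-validation described in the docstring (randomly split into 2/3 training and 1/3 validation, <count> times, then average the validation results) is implemented by dfatool.validation.CrossValidator; the following is only a conceptual sketch of the partitioning step, assuming nothing beyond NumPy, with assess() as a placeholder:

    import numpy as np

    def montecarlo_split(num_measurements, count=10, seed=None):
        # Yield (training, validation) index arrays, 2/3 vs. 1/3, `count` times,
        # partitioned without regard for parameter values.
        rng = np.random.default_rng(seed)
        for _ in range(count):
            indexes = rng.permutation(num_measurements)
            cut = (2 * num_measurements) // 3
            yield indexes[:cut], indexes[cut:]

    # Reported model quality is then the average over all validation runs, e.g.
    # np.mean([assess(train, val) for train, val in montecarlo_split(n)]),
    # where assess() fits a model on train and scores it on val.
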
diff --git a/bin/analyze-timing.py b/bin/analyze-timing.py
index ed9c571..ddd49ec 100755
--- a/bin/analyze-timing.py
+++ b/bin/analyze-timing.py
@@ -172,7 +172,6 @@ def print_text_model_data(model, pm, pq, lm, lq, am, ai, aq):
if __name__ == "__main__":
ignored_trace_indexes = []
- discard_outliers = None
safe_functions_enabled = False
function_override = {}
show_models = []
@@ -185,7 +184,7 @@ if __name__ == "__main__":
try:
optspec = (
"plot-unparam= plot-param= show-models= show-quality= "
- "ignored-trace-indexes= discard-outliers= function-override= "
+ "ignored-trace-indexes= function-override= "
"filter-param= "
"log-level= "
"cross-validate= "
@@ -205,9 +204,6 @@ if __name__ == "__main__":
if 0 in ignored_trace_indexes:
print("[E] arguments to --ignored-trace-indexes start from 1")
- if "discard-outliers" in opt:
- discard_outliers = float(opt["discard-outliers"])
-
if "function-override" in opt:
for function_desc in opt["function-override"].split(";"):
state_or_tran, attribute, *function_str = function_desc.split(" ")
diff --git a/bin/eval-rel-energy.py b/bin/eval-rel-energy.py
index 66c3ae2..aeaf88c 100755
--- a/bin/eval-rel-energy.py
+++ b/bin/eval-rel-energy.py
@@ -23,7 +23,6 @@ def get_file_groups(args):
if __name__ == "__main__":
ignored_trace_indexes = []
- discard_outliers = None
safe_functions_enabled = False
function_override = {}
show_models = []
@@ -32,7 +31,7 @@ if __name__ == "__main__":
try:
optspec = (
"plot-unparam= plot-param= show-models= show-quality= "
- "ignored-trace-indexes= discard-outliers= function-override= "
+ "ignored-trace-indexes= function-override= "
"with-safe-functions"
)
raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(" "))
@@ -48,9 +47,6 @@ if __name__ == "__main__":
if 0 in ignored_trace_indexes:
print("[E] arguments to --ignored-trace-indexes start from 1")
- if "discard-outliers" in opt:
- discard_outliers = float(opt["discard-outliers"])
-
if "function-override" in opt:
for function_desc in opt["function-override"].split(";"):
state_or_tran, attribute, *function_str = function_desc.split(" ")
@@ -89,7 +85,6 @@ if __name__ == "__main__":
arg_count,
traces=preprocessed_data,
ignore_trace_indexes=ignored_trace_indexes,
- discard_outliers=discard_outliers,
function_override=function_override,
verbose=False,
)
diff --git a/bin/explore-kconfig.py b/bin/explore-kconfig.py
new file mode 100755
index 0000000..4c08826
--- /dev/null
+++ b/bin/explore-kconfig.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python3
+
+"""explore-kconfig - Obtain build attributes of configuration variants
+
+explore-kconfig obtains build attributes such as ROM or RAM usage of
+configuration variants for a given software project. It works on random
+configurations (--random) or in the neighbourhood
+of existing configurations (--neighbourhood).
+
+Supported projects must be configurable via kconfig and provide a command which
+outputs a JSON dict of build attributes on stdout. Use
+--{clean,build,attribute}-command to configure explore-kconfig for a project.
+"""
+
+import argparse
+import logging
+import os
+import sys
+
+from dfatool import kconfig
+
+from versuchung.experiment import Experiment
+from versuchung.types import String, Bool, Integer
+from versuchung.files import File, Directory
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__
+ )
+ parser.add_argument(
+ "--neighbourhood",
+ type=str,
+ help="Explore neighbourhood of provided .config file(s)",
+ )
+ parser.add_argument(
+ "--log-level",
+ default=logging.INFO,
+ type=lambda level: getattr(logging, level.upper()),
+ help="Set log level",
+ )
+ parser.add_argument(
+ "--random",
+ type=int,
+ help="Explore a number of random configurations (make randconfig)",
+ )
+ parser.add_argument(
+ "--clean-command", type=str, help="Clean command", default="make clean"
+ )
+ parser.add_argument(
+ "--build-command", type=str, help="Build command", default="make"
+ )
+ parser.add_argument(
+ "--attribute-command",
+ type=str,
+ help="Attribute extraction command",
+ default="make attributes",
+ )
+ parser.add_argument("project_root", type=str, help="Project root directory")
+
+ args = parser.parse_args()
+
+ if isinstance(args.log_level, int):
+ logging.basicConfig(level=args.log_level)
+ else:
+ print(f"Invalid log level. Setting log level to INFO.", file=sys.stderr)
+
+ kconf = kconfig.KConfig(args.project_root)
+
+ if args.clean_command:
+ kconf.clean_command = args.clean_command
+ if args.build_command:
+ kconf.build_command = args.build_command
+ if args.attribute_command:
+ kconf.attribute_command = args.attribute_command
+
+ if args.random:
+ for i in range(args.random):
+ logging.info(f"Running randconfig {i+1} of {args.random}")
+ kconf.run_randconfig()
+
+ if args.neighbourhood:
+ if os.path.isfile(args.neighbourhood):
+ kconf.run_exploration_from_file(args.neighbourhood)
+ elif os.path.isdir(args.neighbourhood):
+ for filename in os.listdir(args.neighbourhood):
+ config_filename = f"{args.neighbourhood}/{filename}"
+ logging.info(f"Exploring neighbourhood of {config_filename}")
+ kconf.run_exploration_from_file(config_filename)
+ else:
+ print(
+ f"--neighbourhood: Error: {args.neighbourhood} must be a file or directory, but is neither",
+ file=sys.stderr,
+ )
+
+
+if __name__ == "__main__":
+ main()
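
explore-kconfig expects the attribute command (default: "make attributes") to print a JSON dict of build attributes on stdout. A hypothetical helper that such a make target could invoke might look like this; the artifact path and attribute name are assumptions, not part of dfatool:

    #!/usr/bin/env python3
    # Hypothetical attribute extractor: print build attributes as a JSON dict on stdout.
    import json
    import os
    import sys

    image = sys.argv[1] if len(sys.argv) > 1 else "build/system.elf"  # assumed artifact path
    attributes = {
        # crude stand-in; a real project would report ROM/RAM section sizes here
        "image_size": os.path.getsize(image),
    }
    print(json.dumps(attributes))
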
diff --git a/bin/generate-dfa-benchmark.py b/bin/generate-dfa-benchmark.py
index 6540702..c8681c5 100755
--- a/bin/generate-dfa-benchmark.py
+++ b/bin/generate-dfa-benchmark.py
@@ -223,17 +223,11 @@ def benchmark_from_runs(
)
elif opt["sleep"]:
if "energytrace" in opt:
- outbuf.write(
- "arch.sleep_ms({:d}); // {}\n".format(
- opt["sleep"], transition.destination.name
- )
- )
+ outbuf.write(f"// -> {transition.destination.name}\n")
+ outbuf.write(target.sleep_ms(opt["sleep"]))
else:
- outbuf.write(
- "arch.delay_ms({:d}); // {}\n".format(
- opt["sleep"], transition.destination.name
- )
- )
+ outbuf.write(f"// -> {transition.destination.name}\n")
+ outbuf.write("arch.delay_ms({:d});\n".format(opt["sleep"]))
outbuf.write(harness.stop_run(num_traces))
if dummy:
@@ -289,7 +283,7 @@ def run_benchmark(
needs_split = True
else:
try:
- runner.build(arch, app, run_args)
+ target.build(app, run_args)
except RuntimeError:
if len(runs) > 50:
# Application is too large -> split up runs
@@ -342,14 +336,14 @@ def run_benchmark(
i = 0
while i < opt["repeat"]:
print(f"""[RUN] flashing benchmark {i+1}/{opt["repeat"]}""")
- runner.flash(arch, app, run_args)
+ target.flash(app, run_args)
if "mimosa" in opt:
- monitor = runner.get_monitor(
- arch, callback=harness.parser_cb, mimosa=opt["mimosa"]
+ monitor = target.get_monitor(
+ callback=harness.parser_cb, mimosa=opt["mimosa"]
)
elif "energytrace" in opt:
- monitor = runner.get_monitor(
- arch, callback=harness.parser_cb, energytrace=opt["energytrace"]
+ monitor = target.get_monitor(
+ callback=harness.parser_cb, energytrace=opt["energytrace"]
)
sync_error = False
@@ -400,8 +394,8 @@ def run_benchmark(
return [(runs, harness, monitor, files)]
else:
- runner.flash(arch, app, run_args)
- monitor = runner.get_monitor(arch, callback=harness.parser_cb)
+ target.flash(app, run_args)
+ monitor = target.get_monitor(callback=harness.parser_cb)
if arch == "posix":
print("[RUN] Will run benchmark for {:.0f} seconds".format(run_timeout))
@@ -518,6 +512,11 @@ if __name__ == "__main__":
print(err)
sys.exit(2)
+ if "msp430fr" in opt["arch"]:
+ target = runner.Arch(opt["arch"], ["cpu_freq=8000000"])
+ else:
+ target = runner.Arch(opt["arch"])
+
modelfile = args[0]
pta = PTA.from_file(modelfile)
@@ -594,8 +593,8 @@ if __name__ == "__main__":
if "codegen" in driver_definition and "flags" in driver_definition["codegen"]:
if run_flags is None:
run_flags = driver_definition["codegen"]["flags"]
- if run_flags is None:
- run_flags = opt["run"].split()
+ if "run" in opt:
+ run_flags.extend(opt["run"].split())
runs = list(
pta.dfs(
@@ -644,7 +643,7 @@ if __name__ == "__main__":
gpio_pin=timer_pin,
gpio_mode=gpio_mode,
pta=pta,
- counter_limits=runner.get_counter_limits_us(opt["arch"]),
+ counter_limits=target.get_counter_limits_us(run_flags),
log_return_values=need_return_values,
repeat=1,
energytrace_sync=energytrace_sync,
@@ -653,7 +652,7 @@ if __name__ == "__main__":
harness = OnboardTimerHarness(
gpio_pin=timer_pin,
pta=pta,
- counter_limits=runner.get_counter_limits_us(opt["arch"]),
+ counter_limits=target.get_counter_limits_us(run_flags),
log_return_values=need_return_values,
repeat=opt["repeat"],
)
diff --git a/bin/test_corrcoef.py b/bin/test_corrcoef.py
index b8c8eae..ccb3366 100755
--- a/bin/test_corrcoef.py
+++ b/bin/test_corrcoef.py
@@ -111,7 +111,6 @@ def print_text_model_data(model, pm, pq, lm, lq, am, ai, aq):
if __name__ == "__main__":
ignored_trace_indexes = None
- discard_outliers = None
safe_functions_enabled = False
function_override = {}
show_models = []
@@ -120,7 +119,7 @@ if __name__ == "__main__":
try:
optspec = (
"plot-unparam= plot-param= show-models= show-quality= "
- "ignored-trace-indexes= discard-outliers= function-override= "
+ "ignored-trace-indexes= function-override= "
"with-safe-functions"
)
raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(" "))
@@ -136,9 +135,6 @@ if __name__ == "__main__":
if 0 in ignored_trace_indexes:
print("[E] arguments to --ignored-trace-indexes start from 1")
- if "discard-outliers" in opt:
- discard_outliers = float(opt["discard-outliers"])
-
if "function-override" in opt:
for function_desc in opt["function-override"].split(";"):
state_or_tran, attribute, *function_str = function_desc.split(" ")
@@ -170,7 +166,6 @@ if __name__ == "__main__":
arg_count,
traces=preprocessed_data,
ignore_trace_indexes=ignored_trace_indexes,
- discard_outliers=discard_outliers,
function_override=function_override,
use_corrcoef=False,
)
@@ -180,7 +175,6 @@ if __name__ == "__main__":
arg_count,
traces=preprocessed_data,
ignore_trace_indexes=ignored_trace_indexes,
- discard_outliers=discard_outliers,
function_override=function_override,
use_corrcoef=True,
)
diff --git a/bin/versuchung b/bin/versuchung
new file mode 120000
index 0000000..57b45a8
--- /dev/null
+++ b/bin/versuchung
@@ -0,0 +1 @@
+../ext/versuchung/src/versuchung
\ No newline at end of file
diff --git a/ext/kconfiglib b/ext/kconfiglib
new file mode 160000
+Subproject 061e71f7d78cb057762d88de088055361863def
diff --git a/ext/versuchung b/ext/versuchung
new file mode 160000
+Subproject 381d7bbb6545eb41e3784048270ab0a7182634e
diff --git a/lib/kconfig.py b/lib/kconfig.py
new file mode 100644
index 0000000..6ae947a
--- /dev/null
+++ b/lib/kconfig.py
@@ -0,0 +1,222 @@
+#!/usr/bin/env python3
+
+import kconfiglib
+import logging
+import re
+import shutil
+import subprocess
+
+from versuchung.experiment import Experiment
+from versuchung.types import String, Bool, Integer
+from versuchung.files import File, Directory
+
+logger = logging.getLogger(__name__)
+
+
+class AttributeExperiment(Experiment):
+ outputs = {
+ "config": File(".config"),
+ "attributes": File("attributes.json"),
+ "build_out": File("build.out"),
+ "build_err": File("build.err"),
+ }
+
+ def run(self):
+ build_command = self.build_command.value.split()
+ attr_command = self.attr_command.value.split()
+ shutil.copyfile(f"{self.project_root.path}/.config", self.config.path)
+ subprocess.check_call(
+ ["make", "clean"],
+ cwd=self.project_root.path,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ try:
+ with open(self.build_out.path, "w") as out_fd, open(
+ self.build_err.path, "w"
+ ) as err_fd:
+ subprocess.check_call(
+ build_command,
+ cwd=self.project_root.path,
+ stdout=out_fd,
+ stderr=err_fd,
+ )
+ except subprocess.CalledProcessError:
+ logger.info("build error")
+ return
+ with open(self.attributes.path, "w") as attr_fd:
+ subprocess.check_call(
+ attr_command, cwd=self.project_root.path, stdout=attr_fd
+ )
+
+
+class RandomConfig(AttributeExperiment):
+ inputs = {
+ "randconfig_seed": String("FIXME"),
+ "kconfig_hash": String("FIXME"),
+ "project_root": Directory("/tmp"),
+ "project_version": String("FIXME"),
+ "clean_command": String("make clean"),
+ "build_command": String("make"),
+ "attr_command": String("make attributes"),
+ }
+
+
+class ExploreConfig(AttributeExperiment):
+ inputs = {
+ "config_hash": String("FIXME"),
+ "kconfig_hash": String("FIXME"),
+ "project_root": Directory("/tmp"),
+ "project_version": String("FIXME"),
+ "clean_command": String("make clean"),
+ "build_command": String("make"),
+ "attr_command": String("make attributes"),
+ }
+
+
+class KConfig:
+ def __init__(self, working_directory):
+ self.cwd = working_directory
+ self.clean_command = "make clean"
+ self.build_command = "make"
+ self.attribute_command = "make attributes"
+
+ def randconfig(self):
+ status = subprocess.run(
+ ["make", "randconfig"],
+ cwd=self.cwd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True,
+ )
+
+ # make randconfig occasionally generates illegal configurations, so a project may run randconfig more than once.
+ # Make sure to return the seed of the latest run (don't short-circuit).
+ seed = None
+ for line in status.stderr.split("\n"):
+ match = re.match("KCONFIG_SEED=(.*)", line)
+ if match:
+ seed = match.group(1)
+ if seed:
+ return seed
+ raise RuntimeError("KCONFIG_SEED not found")
+
+ def git_commit_id(self):
+ status = subprocess.run(
+ ["git", "rev-parse", "HEAD"],
+ cwd=self.cwd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True,
+ )
+ revision = status.stdout.strip()
+ return revision
+
+ def file_hash(self, config_file):
+ status = subprocess.run(
+ ["sha256sum", config_file],
+ cwd=self.cwd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True,
+ )
+ sha256sum = status.stdout.split()[0]
+ return sha256sum
+
+ def run_randconfig(self):
+ """Run a randomconfig experiment in the selected project. Results are written to the current working directory."""
+ experiment = RandomConfig()
+ experiment(
+ [
+ "--randconfig_seed",
+ self.randconfig(),
+ "--kconfig_hash",
+ self.file_hash(f"{self.cwd}/Kconfig"),
+ "--project_version",
+ self.git_commit_id(),
+ "--project_root",
+ self.cwd,
+ "--clean_command",
+ self.clean_command,
+ "--build_command",
+ self.build_command,
+ "--attr_command",
+ self.attribute_command,
+ ]
+ )
+
+ def config_is_functional(self, kconf):
+ for choice in kconf.choices:
+ if (
+ not choice.is_optional
+ and 2 in choice.assignable
+ and choice.selection is None
+ ):
+ return False
+ return True
+
+ def run_exploration_from_file(self, config_file):
+ kconfig_file = f"{self.cwd}/Kconfig"
+ kconf = kconfiglib.Kconfig(kconfig_file)
+ kconf.load_config(config_file)
+ symbols = list(kconf.syms.keys())
+
+ experiment = ExploreConfig()
+ shutil.copyfile(config_file, f"{self.cwd}/.config")
+ experiment(
+ [
+ "--config_hash",
+ self.file_hash(config_file),
+ "--kconfig_hash",
+ self.file_hash(kconfig_file),
+ "--project_version",
+ self.git_commit_id(),
+ "--project_root",
+ self.cwd,
+ "--clean_command",
+ self.clean_command,
+ "--build_command",
+ self.build_command,
+ "--attr_command",
+ self.attribute_command,
+ ]
+ )
+
+ for symbol in kconf.syms.values():
+ if kconfiglib.TYPE_TO_STR[symbol.type] != "bool":
+ continue
+ if symbol.tri_value == 0 and 2 in symbol.assignable:
+ logger.debug(f"Set {symbol.name} to y")
+ symbol.set_value(2)
+ elif symbol.tri_value == 2 and 0 in symbol.assignable:
+ logger.debug(f"Set {symbol.name} to n")
+ symbol.set_value(0)
+ else:
+ continue
+
+ if not self.config_is_functional(kconf):
+ logger.debug("Configuration is non-functional")
+ kconf.load_config(config_file)
+ continue
+
+ kconf.write_config(f"{self.cwd}/.config")
+ experiment = ExploreConfig()
+ experiment(
+ [
+ "--config_hash",
+ self.file_hash(f"{self.cwd}/.config"),
+ "--kconfig_hash",
+ self.file_hash(kconfig_file),
+ "--project_version",
+ self.git_commit_id(),
+ "--project_root",
+ self.cwd,
+ "--clean_command",
+ self.clean_command,
+ "--build_command",
+ self.build_command,
+ "--attr_command",
+ self.attribute_command,
+ ]
+ )
+ kconf.load_config(config_file)
diff --git a/lib/kconfiglib.py b/lib/kconfiglib.py
new file mode 120000
index 0000000..5b2f9ac
--- /dev/null
+++ b/lib/kconfiglib.py
@@ -0,0 +1 @@
+../ext/kconfiglib/kconfiglib.py
\ No newline at end of file
diff --git a/lib/loader.py b/lib/loader.py
index ea2b183..fcd5490 100644
--- a/lib/loader.py
+++ b/lib/loader.py
@@ -489,7 +489,7 @@ class RawData:
if sorted(online_trace_part["parameter"].keys()) != self._parameter_names:
processed_data[
"error"
- ] = "Offline #{off_idx:d} (online {on_name:s} @ {on_idx:d}/{on_sub:d}) has inconsistent parameter set: should be {param_want:s}, is {param_is:s}".format(
+ ] = "Offline #{off_idx:d} (online {on_name:s} @ {on_idx:d}/{on_sub:d}) has inconsistent parameter set: should be {param_want}, is {param_is}".format(
off_idx=offline_idx,
on_idx=online_run_idx,
on_sub=online_trace_part_idx,
diff --git a/lib/model.py b/lib/model.py
index e908af4..bb4a45b 100644
--- a/lib/model.py
+++ b/lib/model.py
@@ -5,6 +5,7 @@ import numpy as np
from scipy import optimize
from sklearn.metrics import r2_score
from multiprocessing import Pool
+from .automata import PTA
from .functions import analytic
from .functions import AnalyticFunction
from .parameters import ParamStats
@@ -700,7 +701,6 @@ class PTAModel:
arg_count,
traces=[],
ignore_trace_indexes=[],
- discard_outliers=None,
function_override={},
use_corrcoef=False,
pta=None,
@@ -716,13 +716,6 @@ class PTAModel:
arg_count -- function arguments, as returned by pta_trace_to_aggregate
traces -- list of preprocessed DFA traces, as returned by RawData.get_preprocessed_data()
ignore_trace_indexes -- list of trace indexes. The corresponding traces will be ignored.
- discard_outliers -- currently not supported: threshold for outlier detection and removel (float).
- Outlier detection is performed individually for each state/transition in each trace,
- so it only works if the benchmark ran several times.
- Given "data" (a set of measurements of the same thing, e.g. TX duration in the third benchmark trace),
- "m" (the median of all attribute measurements with the same parameters, which may include data from other traces),
- a data point X is considered an outlier if
- | 0.6745 * (X - m) / median(|data - m|) | > discard_outliers .
function_override -- dict of overrides for automatic parameter function generation.
If (state or transition name, model attribute) is present in function_override,
the corresponding text string is the function used for analytic (parameter-aware/fitted)
@@ -749,7 +742,6 @@ class PTAModel:
)
self.cache = {}
np.seterr("raise")
- self._outlier_threshold = discard_outliers
self.function_override = function_override.copy()
self.pta = pta
self.ignore_trace_indexes = ignore_trace_indexes
@@ -940,13 +932,16 @@ class PTAModel:
static_quality = self.assess(static_model)
param_model, param_info = self.get_fitted()
analytic_quality = self.assess(param_model)
- self.pta.update(
+ pta = self.pta
+ if pta is None:
+ pta = PTA(self.states(), parameters=self._parameter_names)
+ pta.update(
static_model,
param_info,
static_error=static_quality["by_name"],
analytic_error=analytic_quality["by_name"],
)
- return self.pta.to_json()
+ return pta.to_json()
def states(self):
"""Return sorted list of state names."""
diff --git a/lib/parameters.py b/lib/parameters.py
index 81649f2..5c6b978 100644
--- a/lib/parameters.py
+++ b/lib/parameters.py
@@ -250,6 +250,8 @@ def _compute_param_statistics(
corr_by_param -- correlation coefficient
corr_by_arg -- same, but ignoring a single function argument
Only set if state_or_trans appears in arg_count, empty dict otherwise.
+ depends_on_param -- dict(parameter_name -> Bool). True if /attribute/ behaviour probably depends on /parameter_name/
+ depends_on_arg -- list(bool). Same, but for function arguments, if any.
"""
ret = {
"std_static": np.std(by_name[state_or_trans][attribute]),
@@ -270,7 +272,6 @@ def _compute_param_statistics(
"corr_by_arg": [],
"depends_on_param": {},
"depends_on_arg": [],
- "param_data": {},
}
np.seterr("raise")
diff --git a/lib/runner.py b/lib/runner.py
index 4cab9ed..96627cf 100644
--- a/lib/runner.py
+++ b/lib/runner.py
@@ -340,113 +340,157 @@ class ShellMonitor:
pass
-def build(arch, app, opts=[]):
- command = ["make", "arch={}".format(arch), "app={}".format(app), "clean"]
- command.extend(opts)
- res = subprocess.run(
- command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
- )
- if res.returncode != 0:
- raise RuntimeError(
- "Build failure, executing {}:\n".format(command) + res.stderr
+class Arch:
+ def __init__(self, name, opts=list()):
+ self.name = name
+ self.opts = opts
+ self.info = self.get_info()
+
+ def build(self, app, opts=list()):
+ command = ["make", "arch={}".format(self.name), "app={}".format(app), "clean"]
+ command.extend(self.opts)
+ command.extend(opts)
+ res = subprocess.run(
+ command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True,
)
- command = ["make", "-B", "arch={}".format(arch), "app={}".format(app)]
- command.extend(opts)
- res = subprocess.run(
- command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
- )
- if res.returncode != 0:
- raise RuntimeError(
- "Build failure, executing {}:\n ".format(command) + res.stderr
+ if res.returncode != 0:
+ raise RuntimeError(
+ "Build failure, executing {}:\n".format(command) + res.stderr
+ )
+ command = ["make", "-B", "arch={}".format(self.name), "app={}".format(app)]
+ command.extend(self.opts)
+ command.extend(opts)
+ res = subprocess.run(
+ command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True,
)
- return command
-
-
-def flash(arch, app, opts=[]):
- command = ["make", "arch={}".format(arch), "app={}".format(app), "program"]
- command.extend(opts)
- res = subprocess.run(
- command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
- )
- if res.returncode != 0:
- raise RuntimeError("Flash failure")
- return command
+ if res.returncode != 0:
+ raise RuntimeError(
+ "Build failure, executing {}:\n ".format(command) + res.stderr
+ )
+ return command
+ def flash(self, app, opts=list()):
+ command = ["make", "arch={}".format(self.name), "app={}".format(app), "program"]
+ command.extend(self.opts)
+ command.extend(opts)
+ res = subprocess.run(
+ command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True,
+ )
+ if res.returncode != 0:
+ raise RuntimeError("Flash failure")
+ return command
-def get_info(arch, opts: list = []) -> list:
- """
- Return multipass "make info" output.
+ def get_info(self, opts=list()) -> list:
+ """
+ Return multipass "make info" output.
- Returns a list.
- """
- command = ["make", "arch={}".format(arch), "info"]
- command.extend(opts)
- res = subprocess.run(
- command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
- )
- if res.returncode != 0:
- raise RuntimeError("make info Failure")
- return res.stdout.split("\n")
+ Returns a list.
+ """
+ command = ["make", "arch={}".format(self.name), "info"]
+ command.extend(self.opts)
+ command.extend(opts)
+ res = subprocess.run(
+ command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True,
+ )
+ if res.returncode != 0:
+ raise RuntimeError("make info Failure")
+ return res.stdout.split("\n")
+ def _cached_info(self, opts=list()) -> list:
+ if len(opts):
+ return self.get_info(opts)
+ return self.info
-def get_monitor(arch: str, **kwargs) -> object:
- """
- Return an appropriate monitor for arch, depending on "make info" output.
+ def get_monitor(self, **kwargs) -> object:
+ """
+ Return an appropriate monitor for arch, depending on "make info" output.
- Port and Baud rate are taken from "make info".
+ Port and Baud rate are taken from "make info".
- :param arch: architecture name, e.g. 'msp430fr5994lp' or 'posix'
- :param energytrace: `EnergyTraceMonitor` options. Returns an EnergyTrace monitor if not None.
- :param mimosa: `MIMOSAMonitor` options. Returns a MIMOSA monitor if not None.
- """
- for line in get_info(arch):
- if "Monitor:" in line:
- _, port, arg = line.split(" ")
- if port == "run":
- return ShellMonitor(arg, **kwargs)
- elif "mimosa" in kwargs and kwargs["mimosa"] is not None:
- mimosa_kwargs = kwargs.pop("mimosa")
- return MIMOSAMonitor(port, arg, **mimosa_kwargs, **kwargs)
- elif "energytrace" in kwargs and kwargs["energytrace"] is not None:
- energytrace_kwargs = kwargs.pop("energytrace").copy()
- sync_mode = energytrace_kwargs.pop("sync")
- if sync_mode == "la":
- return EnergyTraceLogicAnalyzerMonitor(
- port, arg, **energytrace_kwargs, **kwargs
- )
+ :param energytrace: `EnergyTraceMonitor` options. Returns an EnergyTrace monitor if not None.
+ :param mimosa: `MIMOSAMonitor` options. Returns a MIMOSA monitor if not None.
+ """
+ for line in self.info:
+ if "Monitor:" in line:
+ _, port, arg = line.split(" ")
+ if port == "run":
+ return ShellMonitor(arg, **kwargs)
+ elif "mimosa" in kwargs and kwargs["mimosa"] is not None:
+ mimosa_kwargs = kwargs.pop("mimosa")
+ return MIMOSAMonitor(port, arg, **mimosa_kwargs, **kwargs)
+ elif "energytrace" in kwargs and kwargs["energytrace"] is not None:
+ energytrace_kwargs = kwargs.pop("energytrace").copy()
+ sync_mode = energytrace_kwargs.pop("sync")
+ if sync_mode == "la":
+ return EnergyTraceLogicAnalyzerMonitor(
+ port, arg, **energytrace_kwargs, **kwargs
+ )
+ else:
+ return EnergyTraceMonitor(
+ port, arg, **energytrace_kwargs, **kwargs
+ )
else:
- return EnergyTraceMonitor(port, arg, **energytrace_kwargs, **kwargs)
+ kwargs.pop("energytrace", None)
+ kwargs.pop("mimosa", None)
+ return SerialMonitor(port, arg, **kwargs)
+ raise RuntimeError("Monitor failure")
+
+ def get_counter_limits(self, opts=list()) -> tuple:
+ """Return multipass max counter and max overflow value for arch."""
+ for line in self._cached_info(opts):
+ match = re.match("Counter Overflow: ([^/]*)/(.*)", line)
+ if match:
+ overflow_value = int(match.group(1))
+ max_overflow = int(match.group(2))
+ return overflow_value, max_overflow
+ raise RuntimeError("Did not find Counter Overflow limits")
+
+ def sleep_ms(self, duration: int, opts=list()) -> str:
+ max_sleep = None
+ if "msp430fr" in self.name:
+ cpu_freq = None
+ for line in self._cached_info(opts):
+ match = re.match(r"CPU\s+Freq:\s+(.*)\s+Hz", line)
+ if match:
+ cpu_freq = int(match.group(1))
+ if cpu_freq is not None and cpu_freq > 8000000:
+ max_sleep = 250
else:
- kwargs.pop("energytrace", None)
- kwargs.pop("mimosa", None)
- return SerialMonitor(port, arg, **kwargs)
- raise RuntimeError("Monitor failure")
-
-
-def get_counter_limits(arch: str) -> tuple:
- """Return multipass max counter and max overflow value for arch."""
- for line in get_info(arch):
- match = re.match("Counter Overflow: ([^/]*)/(.*)", line)
- if match:
- overflow_value = int(match.group(1))
- max_overflow = int(match.group(2))
- return overflow_value, max_overflow
- raise RuntimeError("Did not find Counter Overflow limits")
-
-
-def get_counter_limits_us(arch: str) -> tuple:
- """Return duration of one counter step and one counter overflow in us."""
- cpu_freq = 0
- overflow_value = 0
- max_overflow = 0
- for line in get_info(arch):
- match = re.match(r"CPU\s+Freq:\s+(.*)\s+Hz", line)
- if match:
- cpu_freq = int(match.group(1))
- match = re.match(r"Counter Overflow:\s+([^/]*)/(.*)", line)
- if match:
- overflow_value = int(match.group(1))
- max_overflow = int(match.group(2))
- if cpu_freq and overflow_value:
- return 1000000 / cpu_freq, overflow_value * 1000000 / cpu_freq, max_overflow
- raise RuntimeError("Did not find Counter Overflow limits")
+ max_sleep = 500
+ if max_sleep is not None and duration > max_sleep:
+ sub_sleep_count = duration // max_sleep
+ tail_sleep = duration % max_sleep
+ ret = f"for (unsigned char i = 0; i < {sub_sleep_count}; i++) {{ arch.sleep_ms({max_sleep}); }}\n"
+ if tail_sleep > 0:
+ ret += f"arch.sleep_ms({tail_sleep});\n"
+ return ret
+ return f"arch.sleep_ms({duration});\n"
+
+ def get_counter_limits_us(self, opts=list()) -> tuple:
+ """Return duration of one counter step and one counter overflow in us."""
+ cpu_freq = 0
+ overflow_value = 0
+ max_overflow = 0
+ for line in self._cached_info(opts):
+ match = re.match(r"CPU\s+Freq:\s+(.*)\s+Hz", line)
+ if match:
+ cpu_freq = int(match.group(1))
+ match = re.match(r"Counter Overflow:\s+([^/]*)/(.*)", line)
+ if match:
+ overflow_value = int(match.group(1))
+ max_overflow = int(match.group(2))
+ if cpu_freq and overflow_value:
+ return 1000000 / cpu_freq, overflow_value * 1000000 / cpu_freq, max_overflow
+ raise RuntimeError("Did not find Counter Overflow limits")
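
A usage sketch for the new Arch class (assuming a multipass checkout so that "make info" succeeds; the import path is an assumption): with an msp430fr target clocked at 8 MHz, sleep_ms() caps individual sleeps at 500 ms and splits longer delays.

    from dfatool import runner  # import path assumed; the bin/ scripts use `runner` directly

    target = runner.Arch("msp430fr5994lp", ["cpu_freq=8000000"])
    print(target.sleep_ms(1200))
    # With cpu_freq <= 8 MHz the per-call limit is 500 ms, so this prints:
    #   for (unsigned char i = 0; i < 2; i++) { arch.sleep_ms(500); }
    #   arch.sleep_ms(200);
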
diff --git a/lib/validation.py b/lib/validation.py
index 98d49c1..ee147fe 100644
--- a/lib/validation.py
+++ b/lib/validation.py
@@ -179,6 +179,7 @@ class CrossValidator:
for attribute in self.by_name[name]["attributes"]:
ret["by_name"][name][attribute] = {
"mae_list": list(),
+ "rmsd_list": list(),
"smape_list": list(),
}
@@ -186,21 +187,17 @@ class CrossValidator:
res = self._single_xv(model_getter, training_and_validation_by_name)
for name in self.names:
for attribute in self.by_name[name]["attributes"]:
- ret["by_name"][name][attribute]["mae_list"].append(
- res["by_name"][name][attribute]["mae"]
- )
- ret["by_name"][name][attribute]["smape_list"].append(
- res["by_name"][name][attribute]["smape"]
- )
+ for measure in ("mae", "rmsd", "smape"):
+ ret["by_name"][name][attribute][f"{measure}_list"].append(
+ res["by_name"][name][attribute][measure]
+ )
for name in self.names:
for attribute in self.by_name[name]["attributes"]:
- ret["by_name"][name][attribute]["mae"] = np.mean(
- ret["by_name"][name][attribute]["mae_list"]
- )
- ret["by_name"][name][attribute]["smape"] = np.mean(
- ret["by_name"][name][attribute]["smape_list"]
- )
+ for measure in ("mae", "rmsd", "smape"):
+ ret["by_name"][name][attribute][measure] = np.mean(
+ ret["by_name"][name][attribute][f"{measure}_list"]
+ )
return ret
diff --git a/test/test_ptamodel.py b/test/test_ptamodel.py
index 94ee842..e8905b1 100755
--- a/test/test_ptamodel.py
+++ b/test/test_ptamodel.py
@@ -2,13 +2,464 @@
from dfatool.loader import RawData, pta_trace_to_aggregate
from dfatool.model import PTAModel
+from dfatool.utils import by_name_to_by_param
+from dfatool.validation import CrossValidator
import os
import unittest
import pytest
+import numpy as np
-class TestModels(unittest.TestCase):
- def test_model_singlefile_rf24(self):
+
+class TestSynthetic(unittest.TestCase):
+ def test_model_validation(self):
+ # rng = np.random.default_rng(seed=1312) # requires NumPy >= 1.17
+ np.random.seed(1312)
+ X = np.arange(500) % 50
+ parameter_names = ["p_mod5", "p_linear"]
+
+ s1_duration_base = 70
+ s1_duration_scale = 2
+ s1_power_base = 50
+ s1_power_scale = 7
+ s2_duration_base = 700
+ s2_duration_scale = 1
+ s2_power_base = 1500
+ s2_power_scale = 10
+
+ by_name = {
+ "raw_state_1": {
+ "isa": "state",
+ "param": [(x % 5, x) for x in X],
+ "duration": s1_duration_base
+ + np.random.normal(size=X.size, scale=s1_duration_scale),
+ "power": s1_power_base
+ + X
+ + np.random.normal(size=X.size, scale=s1_power_scale),
+ "attributes": ["duration", "power"],
+ },
+ "raw_state_2": {
+ "isa": "state",
+ "param": [(x % 5, x) for x in X],
+ "duration": s2_duration_base
+ - 2 * X
+ + np.random.normal(size=X.size, scale=s2_duration_scale),
+ "power": s2_power_base
+ + X
+ + np.random.normal(size=X.size, scale=s2_power_scale),
+ "attributes": ["duration", "power"],
+ },
+ }
+ by_param = by_name_to_by_param(by_name)
+ model = PTAModel(by_name, parameter_names, dict())
+ static_model = model.get_static()
+
+ # x ∈ [0, 50] -> mean(X) is 25
+ self.assertAlmostEqual(
+ static_model("raw_state_1", "duration"), s1_duration_base, places=0
+ )
+ self.assertAlmostEqual(
+ static_model("raw_state_1", "power"), s1_power_base + 25, delta=7
+ )
+ self.assertAlmostEqual(
+ static_model("raw_state_2", "duration"), s2_duration_base - 2 * 25, delta=2
+ )
+ self.assertAlmostEqual(
+ static_model("raw_state_2", "power"), s2_power_base + 25, delta=7
+ )
+
+ param_model, param_info = model.get_fitted()
+
+ self.assertAlmostEqual(
+ param_model("raw_state_1", "duration", param=[0, 10]),
+ s1_duration_base,
+ places=0,
+ )
+ self.assertAlmostEqual(
+ param_model("raw_state_1", "duration", param=[0, 50]),
+ s1_duration_base,
+ places=0,
+ )
+ self.assertAlmostEqual(
+ param_model("raw_state_1", "duration", param=[0, 70]),
+ s1_duration_base,
+ places=0,
+ )
+
+ self.assertAlmostEqual(
+ param_model("raw_state_1", "power", param=[0, 10]),
+ s1_power_base + 10,
+ places=0,
+ )
+ self.assertAlmostEqual(
+ param_model("raw_state_1", "power", param=[0, 50]),
+ s1_power_base + 50,
+ places=0,
+ )
+ self.assertAlmostEqual(
+ param_model("raw_state_1", "power", param=[0, 70]),
+ s1_power_base + 70,
+ places=0,
+ )
+
+ self.assertAlmostEqual(
+ param_model("raw_state_2", "duration", param=[0, 10]),
+ s2_duration_base - 2 * 10,
+ places=0,
+ )
+ self.assertAlmostEqual(
+ param_model("raw_state_2", "duration", param=[0, 50]),
+ s2_duration_base - 2 * 50,
+ places=0,
+ )
+ self.assertAlmostEqual(
+ param_model("raw_state_2", "duration", param=[0, 70]),
+ s2_duration_base - 2 * 70,
+ places=0,
+ )
+
+ self.assertAlmostEqual(
+ param_model("raw_state_2", "power", param=[0, 10]),
+ s2_power_base + 10,
+ delta=50,
+ )
+ self.assertAlmostEqual(
+ param_model("raw_state_2", "power", param=[0, 50]),
+ s2_power_base + 50,
+ delta=50,
+ )
+ self.assertAlmostEqual(
+ param_model("raw_state_2", "power", param=[0, 70]),
+ s2_power_base + 70,
+ delta=50,
+ )
+
+ static_quality = model.assess(static_model)
+ param_quality = model.assess(param_model)
+
+ # static quality reflects normal distribution scale for non-parameterized data
+
+ # the Root Mean Square Deviation must not be greater than the scale (i.e., standard deviation) of the normal distribution
+ # Low Mean Absolute Error (< 2)
+ self.assertTrue(static_quality["by_name"]["raw_state_1"]["duration"]["mae"] < 2)
+ # Low Root Mean Square Deviation (< scale == 2)
+ self.assertTrue(
+ static_quality["by_name"]["raw_state_1"]["duration"]["rmsd"] < 2
+ )
+ # Relatively low error percentage (≈ MAE * 100% / s1_duration_base)
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_1"]["duration"]["mape"],
+ static_quality["by_name"]["raw_state_1"]["duration"]["mae"]
+ * 100
+ / s1_duration_base,
+ places=1,
+ )
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_1"]["duration"]["smape"],
+ static_quality["by_name"]["raw_state_1"]["duration"]["mae"]
+ * 100
+ / s1_duration_base,
+ places=1,
+ )
+
+ # static error is high for parameterized data
+
+ # MAE == mean(abs(actual value - model value))
+ # parameter range is [0, 50) -> mean 25, deviation range is [0, 25) -> mean deviation is 12.5 ± Gaussian noise scale
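+ # (mean absolute deviation of a uniform variable with spread 50 from its mean: 50 / 4 = 12.5)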
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_1"]["power"]["mae"], 12.5, delta=1
+ )
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_1"]["power"]["rmsd"], 16, delta=2
+ )
+ # high percentage error due to low s1_power_base
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_1"]["power"]["mape"], 19, delta=2
+ )
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_1"]["power"]["smape"], 19, delta=2
+ )
+
+ # parameter range is [0, 100) -> mean deviation is 25 ± Gaussian noise scale
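+ # (spread 100 -> mean absolute deviation 100 / 4 = 25)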
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["duration"]["mae"], 25, delta=2
+ )
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["duration"]["rmsd"], 30, delta=2
+ )
+
+ # low percentage error due to high s2_duration_base (≈ 3.6 %)
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["duration"]["mape"],
+ 25 * 100 / s2_duration_base,
+ delta=1,
+ )
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["duration"]["smape"],
+ 25 * 100 / s2_duration_base,
+ delta=1,
+ )
+
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["power"]["mae"], 12.5, delta=2
+ )
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["power"]["rmsd"], 17, delta=2
+ )
+
+ # low percentage error due to high s2_power_base (≈ 1.7 %)
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["power"]["mape"],
+ 25 * 100 / s2_power_base,
+ delta=1,
+ )
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["power"]["smape"],
+ 25 * 100 / s2_power_base,
+ delta=1,
+ )
+
+ # raw_state_1/duration does not depend on parameters and delegates to the static model
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_1"]["duration"]["mae"],
+ static_quality["by_name"]["raw_state_1"]["duration"]["mae"],
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_1"]["duration"]["rmsd"],
+ static_quality["by_name"]["raw_state_1"]["duration"]["rmsd"],
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_1"]["duration"]["mape"],
+ static_quality["by_name"]["raw_state_1"]["duration"]["mape"],
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_1"]["duration"]["smape"],
+ static_quality["by_name"]["raw_state_1"]["duration"]["smape"],
+ )
+
+ # fitted param-model quality reflects normal distribution scale for all data
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["power"]["mape"], 0.9, places=1
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["power"]["smape"], 0.9, places=1
+ )
+
+ self.assertTrue(
+ param_quality["by_name"]["raw_state_1"]["power"]["mae"] < s1_power_scale
+ )
+ self.assertTrue(
+ param_quality["by_name"]["raw_state_1"]["power"]["rmsd"] < s1_power_scale
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_1"]["power"]["mape"], 7.5, delta=1
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_1"]["power"]["smape"], 7.5, delta=1
+ )
+
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["duration"]["mae"],
+ s2_duration_scale,
+ delta=0.2,
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["duration"]["rmsd"],
+ s2_duration_scale,
+ delta=0.2,
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["duration"]["mape"],
+ 0.12,
+ delta=0.01,
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["duration"]["smape"],
+ 0.12,
+ delta=0.01,
+ )
+
+ # ... unless the signal-to-noise ratio (parameter range = [0 .. 50] vs. scale = 10) is bad, leading to
+ # increased regression errors
+ self.assertTrue(param_quality["by_name"]["raw_state_2"]["power"]["mae"] < 15)
+ self.assertTrue(param_quality["by_name"]["raw_state_2"]["power"]["rmsd"] < 18)
+
+ # still: low percentage error due to high s2_power_base
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["power"]["mape"], 0.9, places=1
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["power"]["smape"], 0.9, places=1
+ )
+
+ def test_model_crossvalidation_10fold(self):
+ # rng = np.random.default_rng(seed=1312) # requires NumPy >= 1.17
+ np.random.seed(1312)
+ X = np.arange(500) % 50
+ parameter_names = ["p_mod5", "p_linear"]
+
+ s1_duration_base = 70
+ s1_duration_scale = 2
+ s1_power_base = 50
+ s1_power_scale = 7
+ s2_duration_base = 700
+ s2_duration_scale = 1
+ s2_power_base = 1500
+ s2_power_scale = 10
+
+ by_name = {
+ "raw_state_1": {
+ "isa": "state",
+ "param": [(x % 5, x) for x in X],
+ "duration": s1_duration_base
+ + np.random.normal(size=X.size, scale=s1_duration_scale),
+ "power": s1_power_base
+ + X
+ + np.random.normal(size=X.size, scale=s1_power_scale),
+ "attributes": ["duration", "power"],
+ },
+ "raw_state_2": {
+ "isa": "state",
+ "param": [(x % 5, x) for x in X],
+ "duration": s2_duration_base
+ - 2 * X
+ + np.random.normal(size=X.size, scale=s2_duration_scale),
+ "power": s2_power_base
+ + X
+ + np.random.normal(size=X.size, scale=s2_power_scale),
+ "attributes": ["duration", "power"],
+ },
+ }
+ by_param = by_name_to_by_param(by_name)
+ arg_count = dict()
+ model = PTAModel(by_name, parameter_names, arg_count)
+ validator = CrossValidator(PTAModel, by_name, parameter_names, arg_count)
+
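+ # 10-fold cross-validation: quality metrics are computed out-of-sample, on the
+ # fold not used for fitting, so the expectations mirror test_model_validation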
+ static_quality = validator.kfold(lambda m: m.get_static(), 10)
+ param_quality = validator.kfold(lambda m: m.get_fitted()[0], 10)
+
+ print(static_quality)
+
+ # static quality reflects normal distribution scale for non-parameterized data
+
+ # the Root Mean Square Deviation must not be greater than the scale (i.e., standard deviation) of the normal distribution
+ # Low Mean Absolute Error (< 2)
+ self.assertTrue(static_quality["by_name"]["raw_state_1"]["duration"]["mae"] < 2)
+ # Low Root Mean Square Deviation (< scale == 2)
+ self.assertTrue(
+ static_quality["by_name"]["raw_state_1"]["duration"]["rmsd"] < 2
+ )
+ # Relatively low error percentage (≈ MAE * 100% / s1_duration_base)
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_1"]["duration"]["smape"],
+ static_quality["by_name"]["raw_state_1"]["duration"]["mae"]
+ * 100
+ / s1_duration_base,
+ places=1,
+ )
+
+ # static error is high for parameterized data
+
+ # MAE == mean(abs(actual value - model value))
+ # parameter range is [0, 50) -> mean 25, deviation range is [0, 25) -> mean deviation is 12.5 ± Gaussian noise scale
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_1"]["power"]["mae"], 12.5, delta=1
+ )
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_1"]["power"]["rmsd"], 16, delta=2
+ )
+ # high percentage error due to low s1_power_base
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_1"]["power"]["smape"], 19, delta=2
+ )
+
+ # parameter range is [0, 100) -> mean deviation is 25 ± Gaussian noise scale
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["duration"]["mae"], 25, delta=2
+ )
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["duration"]["rmsd"], 30, delta=2
+ )
+
+ # low percentage error due to high s2_duration_base (≈ 3.6 %)
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["duration"]["smape"],
+ 25 * 100 / s2_duration_base,
+ delta=1,
+ )
+
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["power"]["mae"], 12.5, delta=2
+ )
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["power"]["rmsd"], 17, delta=2
+ )
+
+ # low percentage error due to high s2_power_base (≈ 1.7 %)
+ self.assertAlmostEqual(
+ static_quality["by_name"]["raw_state_2"]["power"]["smape"],
+ 25 * 100 / s2_power_base,
+ delta=1,
+ )
+
+ # raw_state_1/duration does not depend on parameters and delegates to the static model
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_1"]["duration"]["mae"],
+ static_quality["by_name"]["raw_state_1"]["duration"]["mae"],
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_1"]["duration"]["rmsd"],
+ static_quality["by_name"]["raw_state_1"]["duration"]["rmsd"],
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_1"]["duration"]["smape"],
+ static_quality["by_name"]["raw_state_1"]["duration"]["smape"],
+ )
+
+ # fitted param-model quality reflects normal distribution scale for all data
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["power"]["smape"], 0.9, places=1
+ )
+
+ self.assertTrue(
+ param_quality["by_name"]["raw_state_1"]["power"]["mae"] < s1_power_scale
+ )
+ self.assertTrue(
+ param_quality["by_name"]["raw_state_1"]["power"]["rmsd"] < s1_power_scale
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_1"]["power"]["smape"], 7.5, delta=1
+ )
+
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["duration"]["mae"],
+ s2_duration_scale,
+ delta=0.2,
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["duration"]["rmsd"],
+ s2_duration_scale,
+ delta=0.2,
+ )
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["duration"]["smape"],
+ 0.12,
+ delta=0.01,
+ )
+
+ # ... unless the signal-to-noise ratio (parameter range = [0 .. 50] vs. scale = 10) is bad, leading to
+ # increased regression errors
+ self.assertTrue(param_quality["by_name"]["raw_state_2"]["power"]["mae"] < 15)
+ self.assertTrue(param_quality["by_name"]["raw_state_2"]["power"]["rmsd"] < 18)
+
+ # still: low percentage error due to high s2_power_base
+ self.assertAlmostEqual(
+ param_quality["by_name"]["raw_state_2"]["power"]["smape"], 0.9, places=1
+ )
+
+
+class TestFromFile(unittest.TestCase):
+ def test_singlefile_rf24(self):
raw_data = RawData(["test-data/20170220_164723_RF24_int_A.tar"])
preprocessed_data = raw_data.get_preprocessed_data()
by_name, parameters, arg_count = pta_trace_to_aggregate(preprocessed_data)
@@ -162,7 +613,7 @@ class TestModels(unittest.TestCase):
param_model("RX", "power", param=[1, None, None]), 48647, places=-1
)
- def test_model_singlefile_mmparam(self):
+ def test_singlefile_mmparam(self):
raw_data = RawData(["test-data/20161221_123347_mmparam.tar"])
preprocessed_data = raw_data.get_preprocessed_data()
by_name, parameters, arg_count = pta_trace_to_aggregate(preprocessed_data)
@@ -201,7 +652,7 @@ class TestModels(unittest.TestCase):
param_lut_model("ON", "power", param=[None, None]), 17866, places=0
)
- def test_model_multifile_lm75x(self):
+ def test_multifile_lm75x(self):
testfiles = [
"test-data/20170116_124500_LM75x.tar",
"test-data/20170116_131306_LM75x.tar",
@@ -243,7 +694,7 @@ class TestModels(unittest.TestCase):
self.assertAlmostEqual(static_model("shutdown", "duration"), 6980, places=0)
self.assertAlmostEqual(static_model("start", "duration"), 6980, places=0)
- def test_model_multifile_sharp(self):
+ def test_multifile_sharp(self):
testfiles = [
"test-data/20170116_145420_sharpLS013B4DN.tar",
"test-data/20170116_151348_sharpLS013B4DN.tar",
@@ -285,7 +736,7 @@ class TestModels(unittest.TestCase):
self.assertAlmostEqual(static_model("sendLine", "duration"), 180, places=0)
self.assertAlmostEqual(static_model("toggleVCOM", "duration"), 30, places=0)
- def test_model_multifile_mmstatic(self):
+ def test_multifile_mmstatic(self):
testfiles = [
"test-data/20170116_143516_mmstatic.tar",
"test-data/20170116_142654_mmstatic.tar",
@@ -325,7 +776,7 @@ class TestModels(unittest.TestCase):
@pytest.mark.skipif(
"TEST_SLOW" not in os.environ, reason="slow test, set TEST_SLOW=1 to run"
)
- def test_model_multifile_cc1200(self):
+ def test_multifile_cc1200(self):
testfiles = [
"test-data/20170125_125433_cc1200.tar",
"test-data/20170125_142420_cc1200.tar",