author     jfalkenhagen <jfalkenhagen@uos.de>  2020-07-16 16:39:19 +0200
committer  jfalkenhagen <jfalkenhagen@uos.de>  2020-07-16 16:39:19 +0200
commit     98d23807e35cc211415c7e0c887f1b1b502f10e5 (patch)
tree       ebb649c585166e546dda704990ed4c5eeb95519f /bin
parent     a00ffc0e32ddc72a8faceec4344432cdbf3b90c7 (diff)
parent     af4cc108b5c5132a991a2b83d258ed55e985936f (diff)
Merge branch 'master' into janis
Diffstat (limited to 'bin')
-rwxr-xr-x  bin/analyze-archive.py             135
-rwxr-xr-x  bin/analyze-timing.py               52
-rwxr-xr-x  bin/cal-hist                        92
-rwxr-xr-x  bin/eval-accounting-overhead.py      2
-rwxr-xr-x  bin/eval-online-model-accuracy.py    2
-rwxr-xr-x  bin/eval-outlier-removal.py         19
-rwxr-xr-x  bin/eval-rel-energy.py              10
-rwxr-xr-x  bin/generate-dfa-benchmark.py       33
-rwxr-xr-x  bin/gptest.py                        5
-rwxr-xr-x  bin/gradient                        14
-rwxr-xr-x  bin/keysightdlog.py                164
-rwxr-xr-x  bin/mim-vs-keysight.py               2
-rwxr-xr-x  bin/mimosa-etv                     163
-rwxr-xr-x  bin/mimplot                         68
-rwxr-xr-x  bin/test_corrcoef.py                13
15 files changed, 508 insertions, 266 deletions
diff --git a/bin/analyze-archive.py b/bin/analyze-archive.py
index 4531d86..8311f5c 100755
--- a/bin/analyze-archive.py
+++ b/bin/analyze-archive.py
@@ -61,18 +61,25 @@ Options:
Specify traces which should be ignored due to bogus data. 1 is the first
trace, 2 the second, and so on.
---discard-outliers=
- not supported at the moment
-
--cross-validate=<method>:<count>
Perform cross validation when computing model quality.
Only works with --show-quality=table at the moment.
+
If <method> is "montecarlo": Randomly divide data into 2/3 training and 1/3
validation, <count> times. Reported model quality is the average of all
validation runs. Data is partitioned without regard for parameter values,
so a specific parameter combination may be present in both training and
validation sets or just one of them.
+ If <method> is "kfold": Perform k-fold cross validation with k=<count>.
+ Data is split into k parts; each fold uses k-1 parts for training and one
+ part for validation, so there are <count> runs in total.
+ In the first fold, items 0, k, 2k, ... are used for validation; in the
+ second fold, items 1, k+1, 2k+1, ..., and so on.
+ Reported model quality is the average of all validation runs. Data is
+ partitioned without regard for parameter values, so a specific parameter
+ combination may be present in both training and validation sets or just
+ one of them.
+
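A minimal sketch of the round-robin k-fold split described above (illustrative only, not the dfatool CrossValidator implementation):

def kfold_round_robin(num_samples, k):
    """Yield (training, validation) index lists for round-robin k-fold CV.

    Fold f validates on items f, f+k, f+2k, ... and trains on the rest.
    """
    for fold in range(k):
        validation = list(range(fold, num_samples, k))
        training = [i for i in range(num_samples) if (i - fold) % k != 0]
        yield training, validation

# Example: list(kfold_round_robin(6, 3))[0] == ([1, 2, 4, 5], [0, 3])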
--function-override=<name attribute function>[;<name> <attribute> <function>;...]
Manually specify the function to fit for <name> <attribute>. A function
specified this way bypasses parameter detection: It is always assigned,
@@ -94,17 +101,22 @@ Options:
--export-energymodel=<model.json>
Export energy model. Works out of the box for v1 and v2 logfiles. Requires --hwmodel for v0 logfiles.
+
+--no-cache
+ Do not load cached measurement results
"""
import getopt
import json
+import logging
import random
import re
import sys
from dfatool import plotter
-from dfatool.dfatool import PTAModel, RawData, pta_trace_to_aggregate
-from dfatool.dfatool import gplearn_to_function
-from dfatool.dfatool import CrossValidator
+from dfatool.loader import RawData, pta_trace_to_aggregate
+from dfatool.functions import gplearn_to_function
+from dfatool.model import PTAModel
+from dfatool.validation import CrossValidator
from dfatool.utils import filter_aggregate_by_param
from dfatool.automata import PTA
@@ -133,6 +145,15 @@ def format_quality_measures(result):
def model_quality_table(result_lists, info_list):
+ print(
+ "{:20s} {:15s} {:19s} {:19s} {:19s}".format(
+ "key",
+ "attribute",
+ "static".center(19),
+ "LUT".center(19),
+ "parameterized".center(19),
+ )
+ )
for state_or_tran in result_lists[0]["by_name"].keys():
for key in result_lists[0]["by_name"][state_or_tran].keys():
buf = "{:20s} {:15s}".format(state_or_tran, key)
@@ -143,7 +164,7 @@ def model_quality_table(result_lists, info_list):
result = results["by_name"][state_or_tran][key]
buf += format_quality_measures(result)
else:
- buf += "{:6}----{:9}".format("", "")
+ buf += "{:7}----{:8}".format("", "")
print(buf)
@@ -279,7 +300,6 @@ def print_html_model_data(model, pm, pq, lm, lq, am, ai, aq):
if __name__ == "__main__":
ignored_trace_indexes = []
- discard_outliers = None
safe_functions_enabled = False
function_override = {}
show_models = []
@@ -292,11 +312,12 @@ if __name__ == "__main__":
try:
optspec = (
- "info "
+ "info no-cache "
"plot-unparam= plot-param= plot-traces= show-models= show-quality= "
- "ignored-trace-indexes= discard-outliers= function-override= "
+ "ignored-trace-indexes= function-override= "
"export-traces= "
"filter-param= "
+ "log-level= "
"cross-validate= "
"with-safe-functions hwmodel= export-energymodel="
)
@@ -313,9 +334,6 @@ if __name__ == "__main__":
if 0 in ignored_trace_indexes:
print("[E] arguments to --ignored-trace-indexes start from 1")
- if "discard-outliers" in opt:
- discard_outliers = float(opt["discard-outliers"])
-
if "function-override" in opt:
for function_desc in opt["function-override"].split(";"):
state_or_tran, attribute, *function_str = function_desc.split(" ")
@@ -344,12 +362,21 @@ if __name__ == "__main__":
if "hwmodel" in opt:
pta = PTA.from_file(opt["hwmodel"])
+ if "log-level" in opt:
+ numeric_level = getattr(logging, opt["log-level"].upper(), None)
+ if not isinstance(numeric_level, int):
+ print(f"Invalid log level: {loglevel}", file=sys.stderr)
+ sys.exit(1)
+ logging.basicConfig(level=numeric_level)
+
except getopt.GetoptError as err:
print(err, file=sys.stderr)
sys.exit(2)
raw_data = RawData(
- args, with_traces=("export-traces" in opt or "plot-traces" in opt)
+ args,
+ with_traces=("export-traces" in opt or "plot-traces" in opt),
+ skip_cache=("no-cache" in opt),
)
if "info" in opt:
@@ -357,9 +384,12 @@ if __name__ == "__main__":
if raw_data.version <= 1:
data_source = "MIMOSA"
elif raw_data.version == 2:
- data_source = "MSP430 EnergyTrace"
- else:
- data_source = "UNKNOWN"
+ if raw_data.ptalog and "sync" in raw_data.ptalog["opt"]["energytrace"]:
+ data_source = "MSP430 EnergyTrace, sync={}".format(
+ raw_data.ptalog["opt"]["energytrace"]["sync"]
+ )
+ else:
+ data_source = "MSP430 EnergyTrace"
print(f" Data source ID: {raw_data.version} ({data_source})")
preprocessed_data = raw_data.get_preprocessed_data()
@@ -434,7 +464,6 @@ if __name__ == "__main__":
parameters,
arg_count,
traces=preprocessed_data,
- discard_outliers=discard_outliers,
function_override=function_override,
pta=pta,
)
@@ -495,21 +524,6 @@ if __name__ == "__main__":
model.stats.param_dependence_ratio(state, "power", param),
)
)
- if model.stats.has_codependent_parameters(state, "power", param):
- print(
- "{:24s} co-dependencies: {:s}".format(
- "",
- ", ".join(
- model.stats.codependent_parameters(
- state, "power", param
- )
- ),
- )
- )
- for param_dict in model.stats.codependent_parameter_value_dicts(
- state, "power", param
- ):
- print("{:24s} parameter-aware for {}".format("", param_dict))
for trans in model.transitions():
# Mean power is not a typical transition attribute, but may be present for debugging or analysis purposes
@@ -551,6 +565,8 @@ if __name__ == "__main__":
if xv_method == "montecarlo":
static_quality = xv.montecarlo(lambda m: m.get_static(), xv_count)
+ elif xv_method == "kfold":
+ static_quality = xv.kfold(lambda m: m.get_static(), xv_count)
else:
static_quality = model.assess(static_model)
@@ -560,6 +576,8 @@ if __name__ == "__main__":
if xv_method == "montecarlo":
lut_quality = xv.montecarlo(lambda m: m.get_param_lut(fallback=True), xv_count)
+ elif xv_method == "kfold":
+ lut_quality = xv.kfold(lambda m: m.get_param_lut(fallback=True), xv_count)
else:
lut_quality = model.assess(lut_model)
@@ -616,21 +634,21 @@ if __name__ == "__main__":
if "param" in show_models or "all" in show_models:
if not model.stats.can_be_fitted():
- print(
- "[!] measurements have insufficient distinct numeric parameters for fitting. A parameter-aware model is not available."
+ logging.warning(
+ "measurements have insufficient distinct numeric parameters for fitting. A parameter-aware model is not available."
)
for state in model.states():
for attribute in model.attributes(state):
if param_info(state, attribute):
print(
"{:10s}: {}".format(
- state, param_info(state, attribute)["function"]._model_str
+ state,
+ param_info(state, attribute)["function"].model_function,
)
)
print(
"{:10s} {}".format(
- "",
- param_info(state, attribute)["function"]._regression_args,
+ "", param_info(state, attribute)["function"].model_args
)
)
for trans in model.transitions():
@@ -640,19 +658,19 @@ if __name__ == "__main__":
"{:10s}: {:10s}: {}".format(
trans,
attribute,
- param_info(trans, attribute)["function"]._model_str,
+ param_info(trans, attribute)["function"].model_function,
)
)
print(
"{:10s} {:10s} {}".format(
- "",
- "",
- param_info(trans, attribute)["function"]._regression_args,
+ "", "", param_info(trans, attribute)["function"].model_args
)
)
if xv_method == "montecarlo":
analytic_quality = xv.montecarlo(lambda m: m.get_fitted()[0], xv_count)
+ elif xv_method == "kfold":
+ analytic_quality = xv.kfold(lambda m: m.get_fitted()[0], xv_count)
else:
analytic_quality = model.assess(param_model)
@@ -686,7 +704,7 @@ if __name__ == "__main__":
)
if "overall" in show_quality or "all" in show_quality:
- print("overall static/param/lut MAE assuming equal state distribution:")
+ print("overall state static/param/lut MAE assuming equal state distribution:")
print(
" {:6.1f} / {:6.1f} / {:6.1f} µW".format(
model.assess_states(static_model),
@@ -694,15 +712,30 @@ if __name__ == "__main__":
model.assess_states(lut_model),
)
)
- print("overall static/param/lut MAE assuming 95% STANDBY1:")
- distrib = {"STANDBY1": 0.95, "POWERDOWN": 0.03, "TX": 0.01, "RX": 0.01}
- print(
- " {:6.1f} / {:6.1f} / {:6.1f} µW".format(
- model.assess_states(static_model, distribution=distrib),
- model.assess_states(param_model, distribution=distrib),
- model.assess_states(lut_model, distribution=distrib),
+ distrib = dict()
+ num_states = len(model.states())
+ p95_state = None
+ for state in model.states():
+ distrib[state] = 1.0 / num_states
+
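+ # If a typical low-power state (STANDBY1 or SLEEP) is present, assume 95%
+ # residence there and split the remaining 5% evenly across the other states.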
+ if "STANDBY1" in model.states():
+ p95_state = "STANDBY1"
+ elif "SLEEP" in model.states():
+ p95_state = "SLEEP"
+
+ if p95_state is not None:
+ for state in distrib.keys():
+ distrib[state] = 0.05 / (num_states - 1)
+ distrib[p95_state] = 0.95
+
+ print(f"overall state static/param/lut MAE assuming 95% {p95_state}:")
+ print(
+ " {:6.1f} / {:6.1f} / {:6.1f} µW".format(
+ model.assess_states(static_model, distribution=distrib),
+ model.assess_states(param_model, distribution=distrib),
+ model.assess_states(lut_model, distribution=distrib),
+ )
)
- )
if "summary" in show_quality or "all" in show_quality:
model_summary_table(
diff --git a/bin/analyze-timing.py b/bin/analyze-timing.py
index 4039f45..ddd49ec 100755
--- a/bin/analyze-timing.py
+++ b/bin/analyze-timing.py
@@ -75,12 +75,14 @@ Options:
import getopt
import json
+import logging
import re
import sys
from dfatool import plotter
-from dfatool.dfatool import AnalyticModel, TimingData, pta_trace_to_aggregate
-from dfatool.dfatool import gplearn_to_function
-from dfatool.dfatool import CrossValidator
+from dfatool.loader import TimingData, pta_trace_to_aggregate
+from dfatool.functions import gplearn_to_function
+from dfatool.model import AnalyticModel
+from dfatool.validation import CrossValidator
from dfatool.utils import filter_aggregate_by_param
from dfatool.parameters import prune_dependent_parameters
@@ -170,7 +172,6 @@ def print_text_model_data(model, pm, pq, lm, lq, am, ai, aq):
if __name__ == "__main__":
ignored_trace_indexes = []
- discard_outliers = None
safe_functions_enabled = False
function_override = {}
show_models = []
@@ -183,8 +184,9 @@ if __name__ == "__main__":
try:
optspec = (
"plot-unparam= plot-param= show-models= show-quality= "
- "ignored-trace-indexes= discard-outliers= function-override= "
+ "ignored-trace-indexes= function-override= "
"filter-param= "
+ "log-level= "
"cross-validate= "
"corrcoef param-info "
"with-safe-functions hwmodel= export-energymodel="
@@ -202,9 +204,6 @@ if __name__ == "__main__":
if 0 in ignored_trace_indexes:
print("[E] arguments to --ignored-trace-indexes start from 1")
- if "discard-outliers" in opt:
- discard_outliers = float(opt["discard-outliers"])
-
if "function-override" in opt:
for function_desc in opt["function-override"].split(";"):
state_or_tran, attribute, *function_str = function_desc.split(" ")
@@ -237,6 +236,13 @@ if __name__ == "__main__":
else:
opt["filter-param"] = list()
+ if "log-level" in opt:
+ numeric_level = getattr(logging, opt["log-level"].upper(), None)
+ if not isinstance(numeric_level, int):
+ print(f"Invalid log level: {loglevel}", file=sys.stderr)
+ sys.exit(1)
+ logging.basicConfig(level=numeric_level)
+
except getopt.GetoptError as err:
print(err)
sys.exit(2)
@@ -297,30 +303,6 @@ if __name__ == "__main__":
model.stats.param_dependence_ratio(trans, "duration", param),
)
)
- if model.stats.has_codependent_parameters(trans, "duration", param):
- print(
- "{:24s} co-dependencies: {:s}".format(
- "",
- ", ".join(
- model.stats.codependent_parameters(
- trans, "duration", param
- )
- ),
- )
- )
- for param_dict in model.stats.codependent_parameter_value_dicts(
- trans, "duration", param
- ):
- print("{:24s} parameter-aware for {}".format("", param_dict))
- # import numpy as np
- # safe_div = np.vectorize(lambda x,y: 0. if x == 0 else 1 - x/y)
- # ratio_by_value = safe_div(model.stats.stats['write']['duration']['lut_by_param_values']['max_retry_count'], model.stats.stats['write']['duration']['std_by_param_values']['max_retry_count'])
- # err_mode = np.seterr('warn')
- # dep_by_value = ratio_by_value > 0.5
- # np.seterr(**err_mode)
- # There should actually be a few True entries here, but there are not...
- # and why is there a non-power-of-two number of True entries in the matrix? 3 of them is odd...
- # print(dep_by_value)
if xv_method == "montecarlo":
static_quality = xv.montecarlo(lambda m: m.get_static(), xv_count)
@@ -423,14 +405,12 @@ if __name__ == "__main__":
"{:10s}: {:10s}: {}".format(
trans,
attribute,
- param_info(trans, attribute)["function"]._model_str,
+ param_info(trans, attribute)["function"].model_function,
)
)
print(
"{:10s} {:10s} {}".format(
- "",
- "",
- param_info(trans, attribute)["function"]._regression_args,
+ "", "", param_info(trans, attribute)["function"].model_args
)
)
diff --git a/bin/cal-hist b/bin/cal-hist
index ba2ff47..a92ae1e 100755
--- a/bin/cal-hist
+++ b/bin/cal-hist
@@ -7,7 +7,7 @@ import struct
import sys
import tarfile
import matplotlib.pyplot as plt
-from dfatool.dfatool import MIMOSA
+from dfatool.loader import MIMOSA
from dfatool.utils import running_mean
voltage = float(sys.argv[1])
@@ -18,50 +18,74 @@ mim = MIMOSA(voltage, shunt)
charges, triggers = mim.load_data(mimfile)
trigidx = mim.trigger_edges(triggers)
-cal_edges = mim.calibration_edges(running_mean(mim.currents_nocal(charges[0:trigidx[0]]), 10))
+cal_edges = mim.calibration_edges(
+ running_mean(mim.currents_nocal(charges[0 : trigidx[0]]), 10)
+)
+
+# charges = charges[charges > 20000]
+# charges = charges[charges < 21000]
-#charges = charges[charges > 20000]
-#charges = charges[charges < 21000]
def show_hist(data):
- bins = np.max(data) - np.min(data)
- if bins == 0:
- bins = 1
- if bins > 1000:
- bins = bins / 10
- #bins = 500
- n, bins, patches = plt.hist(data, bins, normed=0, facecolor='green', alpha=0.75)
- plt.grid(True)
- plt.show()
- print(np.histogram(data, bins=bins))
+ bins = np.max(data) - np.min(data)
+ if bins == 0:
+ bins = 1
+ if bins > 1000:
+ bins = bins // 10
+ # bins = 500
+ n, bins, patches = plt.hist(data, bins, normed=0, facecolor="green", alpha=0.75)
+ plt.grid(True)
+ plt.show()
+ print(np.histogram(data, bins=bins))
+
-#show_hist(charges[cal_edges[0]:cal_edges[1]])
-#show_hist(charges[cal_edges[4]:cal_edges[5]])
-#show_hist(charges[cal_edges[2]:cal_edges[3]])
-#show_hist(charges[trigidx[7]:trigidx[8]])
-#show_hist(np.array(charges))
+# show_hist(charges[cal_edges[0]:cal_edges[1]])
+# show_hist(charges[cal_edges[4]:cal_edges[5]])
+# show_hist(charges[cal_edges[2]:cal_edges[3]])
+# show_hist(charges[trigidx[7]:trigidx[8]])
+# show_hist(np.array(charges))
-#print(charges[cal_edges[0]:cal_edges[1]])
-#print(charges[cal_edges[4]:cal_edges[5]])
-#print(charges[cal_edges[2]:cal_edges[3]])
+# print(charges[cal_edges[0]:cal_edges[1]])
+# print(charges[cal_edges[4]:cal_edges[5]])
+# print(charges[cal_edges[2]:cal_edges[3]])
-plt.hist(mim.charge_to_current_nocal(charges[cal_edges[2]:cal_edges[3]]) * 1e-3, 100, normed=0, facecolor='blue', alpha=0.8)
-plt.xlabel('mA MimosaCMD')
-plt.ylabel('#')
+plt.hist(
+ mim.charge_to_current_nocal(charges[cal_edges[2] : cal_edges[3]]) * 1e-3,
+ 100,
+ normed=0,
+ facecolor="blue",
+ alpha=0.8,
+)
+plt.xlabel("mA MimosaCMD")
+plt.ylabel("#")
plt.grid(True)
plt.show()
-plt.hist(mim.charge_to_current_nocal(charges[cal_edges[4]:cal_edges[5]]) * 1e-3, 100, normed=0, facecolor='blue', alpha=0.8)
-plt.xlabel('mA MimosaCMD')
-plt.ylabel('#')
+plt.hist(
+ mim.charge_to_current_nocal(charges[cal_edges[4] : cal_edges[5]]) * 1e-3,
+ 100,
+ normed=0,
+ facecolor="blue",
+ alpha=0.8,
+)
+plt.xlabel("mA MimosaCMD")
+plt.ylabel("#")
plt.grid(True)
plt.show()
-plt.hist(mim.charge_to_current_nocal(charges[cal_edges[0]:cal_edges[1]]) * 1e-3, 100, normed=0, facecolor='blue', alpha=0.8)
-plt.xlabel('mA MimosaCMD')
-plt.ylabel('#')
+plt.hist(
+ mim.charge_to_current_nocal(charges[cal_edges[0] : cal_edges[1]]) * 1e-3,
+ 100,
+ normed=0,
+ facecolor="blue",
+ alpha=0.8,
+)
+plt.xlabel("mA MimosaCMD")
+plt.ylabel("#")
plt.grid(True)
plt.show()
-plt.hist(charges[cal_edges[0]:cal_edges[1]], 100, normed=0, facecolor='blue', alpha=0.8)
-plt.xlabel('Rohwert MimosaCMD')
-plt.ylabel('#')
+plt.hist(
+ charges[cal_edges[0] : cal_edges[1]], 100, normed=0, facecolor="blue", alpha=0.8
+)
+plt.xlabel("Rohwert MimosaCMD")
+plt.ylabel("#")
plt.grid(True)
plt.show()
diff --git a/bin/eval-accounting-overhead.py b/bin/eval-accounting-overhead.py
index 7ea0807..1c03bf8 100755
--- a/bin/eval-accounting-overhead.py
+++ b/bin/eval-accounting-overhead.py
@@ -13,7 +13,7 @@ providing overhead per transition and getEnergy overhead
"""
-from dfatool.dfatool import AnalyticModel, TimingData, pta_trace_to_aggregate
+from dfatool.loader import AnalyticModel, TimingData, pta_trace_to_aggregate
import json
import sys
diff --git a/bin/eval-online-model-accuracy.py b/bin/eval-online-model-accuracy.py
index 202ac28..97fd8e2 100755
--- a/bin/eval-online-model-accuracy.py
+++ b/bin/eval-online-model-accuracy.py
@@ -28,7 +28,7 @@ import itertools
import yaml
from dfatool.automata import PTA
from dfatool.codegen import get_simulated_accountingmethod
-from dfatool.dfatool import regression_measures
+from dfatool.model import regression_measures
import numpy as np
opt = dict()
diff --git a/bin/eval-outlier-removal.py b/bin/eval-outlier-removal.py
index 14f0e60..c03266d 100755
--- a/bin/eval-outlier-removal.py
+++ b/bin/eval-outlier-removal.py
@@ -3,7 +3,8 @@
import getopt
import re
import sys
-from dfatool.dfatool import PTAModel, RawData, pta_trace_to_aggregate
+from dfatool.loader import RawData, pta_trace_to_aggregate
+from dfatool.model import PTAModel
opt = dict()
@@ -141,12 +142,12 @@ if __name__ == "__main__":
if param_i1(state, attribute):
print(
"{:10s}: {}".format(
- state, param_i1(state, attribute)["function"]._model_str
+ state, param_i1(state, attribute)["function"].model_function
)
)
print(
"{:10s} {}".format(
- "", param_i1(state, attribute)["function"]._regression_args
+ "", param_i1(state, attribute)["function"].model_args
)
)
for trans in m1.transitions():
@@ -162,12 +163,12 @@ if __name__ == "__main__":
"{:10s}: {:10s}: {}".format(
trans,
attribute,
- param_i1(trans, attribute)["function"]._model_str,
+ param_i1(trans, attribute)["function"].model_function,
)
)
print(
"{:10s} {:10s} {}".format(
- "", "", param_i1(trans, attribute)["function"]._regression_args
+ "", "", param_i1(trans, attribute)["function"].model_args
)
)
param_m2, param_i2 = m2.get_fitted()
@@ -176,12 +177,12 @@ if __name__ == "__main__":
if param_i2(state, attribute):
print(
"{:10s}: {}".format(
- state, param_i2(state, attribute)["function"]._model_str
+ state, param_i2(state, attribute)["function"].model_function
)
)
print(
"{:10s} {}".format(
- "", param_i2(state, attribute)["function"]._regression_args
+ "", param_i2(state, attribute)["function"].model_args
)
)
for trans in m2.transitions():
@@ -197,12 +198,12 @@ if __name__ == "__main__":
"{:10s}: {:10s}: {}".format(
trans,
attribute,
- param_i2(trans, attribute)["function"]._model_str,
+ param_i2(trans, attribute)["function"].model_function,
)
)
print(
"{:10s} {:10s} {}".format(
- "", "", param_i2(trans, attribute)["function"]._regression_args
+ "", "", param_i2(trans, attribute)["function"].model_args
)
)
diff --git a/bin/eval-rel-energy.py b/bin/eval-rel-energy.py
index 8a2be13..aeaf88c 100755
--- a/bin/eval-rel-energy.py
+++ b/bin/eval-rel-energy.py
@@ -3,7 +3,8 @@
import getopt
import re
import sys
-from dfatool.dfatool import PTAModel, RawData, pta_trace_to_aggregate
+from dfatool.loader import RawData, pta_trace_to_aggregate
+from dfatool.model import PTAModel
opt = dict()
@@ -22,7 +23,6 @@ def get_file_groups(args):
if __name__ == "__main__":
ignored_trace_indexes = []
- discard_outliers = None
safe_functions_enabled = False
function_override = {}
show_models = []
@@ -31,7 +31,7 @@ if __name__ == "__main__":
try:
optspec = (
"plot-unparam= plot-param= show-models= show-quality= "
- "ignored-trace-indexes= discard-outliers= function-override= "
+ "ignored-trace-indexes= function-override= "
"with-safe-functions"
)
raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(" "))
@@ -47,9 +47,6 @@ if __name__ == "__main__":
if 0 in ignored_trace_indexes:
print("[E] arguments to --ignored-trace-indexes start from 1")
- if "discard-outliers" in opt:
- discard_outliers = float(opt["discard-outliers"])
-
if "function-override" in opt:
for function_desc in opt["function-override"].split(";"):
state_or_tran, attribute, *function_str = function_desc.split(" ")
@@ -88,7 +85,6 @@ if __name__ == "__main__":
arg_count,
traces=preprocessed_data,
ignore_trace_indexes=ignored_trace_indexes,
- discard_outliers=discard_outliers,
function_override=function_override,
verbose=False,
)
diff --git a/bin/generate-dfa-benchmark.py b/bin/generate-dfa-benchmark.py
index 478b221..2c53d9f 100755
--- a/bin/generate-dfa-benchmark.py
+++ b/bin/generate-dfa-benchmark.py
@@ -61,6 +61,10 @@ Options:
--energytrace=[k=v,k=v,...]
Perform energy measurements using MSP430 EnergyTrace hardware. Includes --timing.
+ Additional configuration settings:
+ sync = bar (Barcode mode (default): synchronize measurements via barcodes embedded in the energy trace)
+ sync = la (Logic Analyzer mode (WIP): An external logic analyzer captures transition timing)
+ sync = timing (Timing mode (WIP): The on-board cycle counter captures transition timing)
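This diff does not show how the [k=v,k=v,...] argument string becomes the opt["energytrace"] dict used further below; a minimal sketch of such a parser (hypothetical helper name, assuming bare keys map to True) could look like this:

def parse_energytrace_arg(arg):
    # "sync=la,foo=bar" -> {"sync": "la", "foo": "bar"}; bare keys map to True
    settings = dict()
    for item in filter(None, arg.split(",")):
        if "=" in item:
            key, value = item.split("=", 1)
            settings[key] = value
        else:
            settings[item] = True
    return settings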
--trace-filter=<transition,transition,transition,...>[ <transition,transition,transition,...> ...]
Only consider traces whose beginning matches one of the provided transition sequences.
@@ -219,17 +223,11 @@ def benchmark_from_runs(
)
elif opt["sleep"]:
if "energytrace" in opt:
- outbuf.write(
- "arch.sleep_ms({:d}); // {}\n".format(
- opt["sleep"], transition.destination.name
- )
- )
+ outbuf.write(f"// -> {transition.destination.name}\n")
+ outbuf.write(runner.sleep_ms(opt["sleep"], opt["arch"]))
else:
- outbuf.write(
- "arch.delay_ms({:d}); // {}\n".format(
- opt["sleep"], transition.destination.name
- )
- )
+ outbuf.write(f"// -> {transition.destination.name}\n")
+ outbuf.write("arch.delay_ms({:d});\n".format(opt["sleep"]))
outbuf.write(harness.stop_run(num_traces))
if dummy:
@@ -337,6 +335,7 @@ def run_benchmark(
files = list()
i = 0
while i < opt["repeat"]:
+ print(f"""[RUN] flashing benchmark {i+1}/{opt["repeat"]}""")
runner.flash(arch, app, run_args)
if "mimosa" in opt:
monitor = runner.get_monitor(
@@ -353,7 +352,6 @@ def run_benchmark(
while not harness.done:
# possible race condition: if the benchmark completes at this
# exact point, it sets harness.done and unsets harness.synced.
- # vvv
if (
slept > 30
and slept < 40
@@ -372,11 +370,11 @@ def run_benchmark(
time.sleep(5)
slept += 5
print(
- "[RUN] {:d}/{:d} ({:.0f}%), current benchmark at {:.0f}%".format(
+ "[RUN] {:d}/{:d} ({:.0f}%) at trace {:d}".format(
run_offset,
runs_total,
run_offset * 100 / runs_total,
- slept * 100 / run_timeout,
+ harness.trace_id,
)
)
except KeyboardInterrupt:
@@ -593,6 +591,9 @@ if __name__ == "__main__":
if run_flags is None:
run_flags = opt["run"].split()
+ if "msp430fr" in opt["arch"]:
+ run_flags.append("cpu_freq=8000000")
+
runs = list(
pta.dfs(
opt["depth"],
@@ -630,9 +631,13 @@ if __name__ == "__main__":
post_transition_delay_us=20,
)
elif "energytrace" in opt:
+ # Use barcode sync by default
+ gpio_mode = "bar"
+ if "sync" in opt["energytrace"] and opt["energytrace"]["sync"] != "bar":
+ gpio_mode = "around"
harness = OnboardTimerHarness(
gpio_pin=timer_pin,
- gpio_mode="bar",
+ gpio_mode=gpio_mode,
pta=pta,
counter_limits=runner.get_counter_limits_us(opt["arch"]),
log_return_values=need_return_values,
diff --git a/bin/gptest.py b/bin/gptest.py
index 82b4575..b5012e5 100755
--- a/bin/gptest.py
+++ b/bin/gptest.py
@@ -2,12 +2,11 @@
import sys
import numpy as np
-from dfatool.dfatool import (
- PTAModel,
+from dfatool.loader import (
RawData,
- regression_measures,
pta_trace_to_aggregate,
)
+from dfatool.model import PTAModel, regression_measures
from gplearn.genetic import SymbolicRegressor
from multiprocessing import Pool
diff --git a/bin/gradient b/bin/gradient
index 8280794..ca60949 100755
--- a/bin/gradient
+++ b/bin/gradient
@@ -7,7 +7,7 @@ import struct
import sys
import tarfile
import matplotlib.pyplot as plt
-from dfatool.dfatool import MIMOSA
+from dfatool.loader import MIMOSA
from dfatool.utils import running_mean
voltage = float(sys.argv[1])
@@ -17,17 +17,17 @@ mimfile = sys.argv[3]
mim = MIMOSA(voltage, shunt)
charges, triggers = mim.load_file(mimfile)
-#charges = charges[2000000:3000000]
+# charges = charges[2000000:3000000]
currents = running_mean(mim.charge_to_current_nocal(charges), 10) * 1e-6
xr = np.arange(len(currents)) * 1e-5
threshold = 1e-5
grad = np.gradient(currents, 2)
tp = np.abs(grad) > threshold
-plt.plot( xr, currents, "r-")
-plt.plot( xr, grad, "y-")
-plt.plot( xr[tp], grad[tp], "bo")
-plt.xlabel('Zeit [s]')
-plt.ylabel('Strom [A]')
+plt.plot(xr, currents, "r-")
+plt.plot(xr, grad, "y-")
+plt.plot(xr[tp], grad[tp], "bo")
+plt.xlabel("Zeit [s]")
+plt.ylabel("Strom [A]")
plt.grid(True)
plt.show()
diff --git a/bin/keysightdlog.py b/bin/keysightdlog.py
new file mode 100755
index 0000000..89264b9
--- /dev/null
+++ b/bin/keysightdlog.py
@@ -0,0 +1,164 @@
+#!/usr/bin/env python3
+
+import lzma
+import matplotlib.pyplot as plt
+import numpy as np
+import os
+import struct
+import sys
+import xml.etree.ElementTree as ET
+
+
+def plot_y(Y, **kwargs):
+ plot_xy(np.arange(len(Y)), Y, **kwargs)
+
+
+def plot_xy(X, Y, xlabel=None, ylabel=None, title=None, output=None):
+ fig, ax1 = plt.subplots(figsize=(10, 6))
+ if title != None:
+ fig.canvas.set_window_title(title)
+ if xlabel != None:
+ ax1.set_xlabel(xlabel)
+ if ylabel != None:
+ ax1.set_ylabel(ylabel)
+ plt.subplots_adjust(left=0.1, bottom=0.1, right=0.99, top=0.99)
+ plt.plot(X, Y, "bo", markersize=2)
+ if output:
+ plt.savefig(output)
+ with open("{}.txt".format(output), "w") as f:
+ print("X Y", file=f)
+ for i in range(len(X)):
+ print("{} {}".format(X[i], Y[i]), file=f)
+ else:
+ plt.show()
+
+
+filename = sys.argv[1]
+
+with open(filename, "rb") as logfile:
+ lines = []
+ line = ""
+
+ if ".xz" in filename:
+ f = lzma.open(logfile)
+ else:
+ f = logfile
+
+ while line != "</dlog>\n":
+ line = f.readline().decode()
+ lines.append(line)
+ xml_header = "".join(lines)
+ raw_header = f.read(8)
+ data_offset = f.tell()
+ raw_data = f.read()
+
+ xml_header = xml_header.replace("1ua>", "X1ua>")
+ xml_header = xml_header.replace("2ua>", "X2ua>")
+ dlog = ET.fromstring(xml_header)
+ channels = []
+ for channel in dlog.findall("channel"):
+ channel_id = int(channel.get("id"))
+ sense_curr = channel.find("sense_curr").text
+ sense_volt = channel.find("sense_volt").text
+ model = channel.find("ident").find("model").text
+ if sense_volt == "1":
+ channels.append((channel_id, model, "V"))
+ if sense_curr == "1":
+ channels.append((channel_id, model, "A"))
+
+ num_channels = len(channels)
+ duration = int(dlog.find("frame").find("time").text)
+ interval = float(dlog.find("frame").find("tint").text)
+ real_duration = interval * int(len(raw_data) / (4 * num_channels))
+
+ data = np.ndarray(
+ shape=(num_channels, int(len(raw_data) / (4 * num_channels))), dtype=np.float32
+ )
+
+ iterator = struct.iter_unpack(">f", raw_data)
+ channel_offset = 0
+ measurement_offset = 0
+ for value in iterator:
+ data[channel_offset, measurement_offset] = value[0]
+ if channel_offset + 1 == num_channels:
+ channel_offset = 0
+ measurement_offset += 1
+ else:
+ channel_offset += 1
+
+if int(real_duration) != duration:
+ print(
+ "Measurement duration: {:f} of {:d} seconds at {:f} µs per sample".format(
+ real_duration, duration, interval * 1000000
+ )
+ )
+else:
+ print(
+ "Measurement duration: {:d} seconds at {:f} µs per sample".format(
+ duration, interval * 1000000
+ )
+ )
+
+for i, channel in enumerate(channels):
+ channel_id, channel_model, channel_type = channel
+ print(
+ "channel {:d} ({:s}): min {:f}, max {:f}, mean {:f} {:s}".format(
+ channel_id,
+ channel_model,
+ np.min(data[i]),
+ np.max(data[i]),
+ np.mean(data[i]),
+ channel_type,
+ )
+ )
+
+ if (
+ i > 0
+ and channel_type == "A"
+ and channels[i - 1][2] == "V"
+ and channel_id == channels[i - 1][0]
+ ):
+ power = data[i - 1] * data[i]
+ print(
+ "channel {:d} ({:s}): min {:f}, max {:f}, mean {:f} W".format(
+ channel_id, channel_model, np.min(power), np.max(power), np.mean(power)
+ )
+ )
+ min_power = np.min(power)
+ max_power = np.max(power)
+ power_border = np.mean([min_power, max_power])
+ low_power = power[power < power_border]
+ high_power = power[power >= power_border]
+ plot_y(power)
+ print(
+ " avg low / high power (delta): {:f} / {:f} ({:f}) W".format(
+ np.mean(low_power),
+ np.mean(high_power),
+ np.mean(high_power) - np.mean(low_power),
+ )
+ )
+ # plot_y(low_power)
+ # plot_y(high_power)
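+ # Measure the length of each contiguous high-power phase (one sample
+ # corresponds to "interval" seconds) and report the average duration.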
+ high_power_durations = []
+ current_high_power_duration = 0
+ for is_hpe in power >= power_border:
+ if is_hpe:
+ current_high_power_duration += interval
+ else:
+ if current_high_power_duration > 0:
+ high_power_durations.append(current_high_power_duration)
+ current_high_power_duration = 0
+ print(
+ " avg high-power duration: {:f} µs".format(
+ np.mean(high_power_durations) * 1000000
+ )
+ )
+
+# print(xml_header)
+# print(raw_header)
+# print(channels)
+# print(data)
+# print(np.mean(data[0]))
+# print(np.mean(data[1]))
+# print(np.mean(data[0] * data[1]))
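Side note on the decode loop in keysightdlog.py above: since the payload is a stream of interleaved big-endian 32-bit floats (one value per channel per sample, as implied by struct.iter_unpack(">f", ...)), the same array can be built with a single vectorized numpy call. A sketch under that assumption, not part of the patch:

import numpy as np

def decode_dlog_payload(raw_data, num_channels):
    # One big-endian float32 per channel per sample, channels interleaved.
    num_samples = len(raw_data) // (4 * num_channels)
    values = np.frombuffer(raw_data, dtype=">f4", count=num_samples * num_channels)
    # (samples, channels) -> transpose to (channels, samples), matching "data" above
    return values.reshape(num_samples, num_channels).T.astype(np.float32)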
diff --git a/bin/mim-vs-keysight.py b/bin/mim-vs-keysight.py
index c214f2f..c9a7249 100755
--- a/bin/mim-vs-keysight.py
+++ b/bin/mim-vs-keysight.py
@@ -3,7 +3,7 @@
import numpy as np
import sys
import matplotlib.pyplot as plt
-from dfatool.dfatool import MIMOSA, KeysightCSV
+from dfatool.loader import MIMOSA, KeysightCSV
from dfatool.utils import running_mean
voltage = float(sys.argv[1])
diff --git a/bin/mimosa-etv b/bin/mimosa-etv
index e23b46c..9b6e897 100755
--- a/bin/mimosa-etv
+++ b/bin/mimosa-etv
@@ -8,13 +8,16 @@ import numpy as np
import os
import re
import sys
-from dfatool.dfatool import aggregate_measures, MIMOSA
+from dfatool.loader import MIMOSA
+from dfatool.model import aggregate_measures
from dfatool.utils import running_mean
opt = dict()
+
def show_help():
- print('''mimosa-etv - MIMOSA Analyzer and Visualizer
+ print(
+ """mimosa-etv - MIMOSA Analyzer and Visualizer
USAGE
@@ -41,7 +44,9 @@ OPTIONS
Show power/time plot
--stat
Show mean voltage, current, and power as well as total energy consumption.
- ''')
+ """
+ )
+
def peak_search(data, lower, upper, direction_function):
while upper - lower > 1e-6:
@@ -58,6 +63,7 @@ def peak_search(data, lower, upper, direction_function):
upper = bs_test
return None
+
def peak_search2(data, lower, upper, check_function):
for power in np.arange(lower, upper, 1e-6):
peakcount = itertools.groupby(data, lambda x: x >= power)
@@ -67,38 +73,39 @@ def peak_search2(data, lower, upper, check_function):
return power
return None
-if __name__ == '__main__':
+
+if __name__ == "__main__":
try:
- optspec = ('help skip= threshold= threshold-peakcount= plot stat')
- raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(' '))
+ optspec = "help skip= threshold= threshold-peakcount= plot stat"
+ raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(" "))
for option, parameter in raw_opts:
- optname = re.sub(r'^--', '', option)
+ optname = re.sub(r"^--", "", option)
opt[optname] = parameter
- if 'help' in opt:
+ if "help" in opt:
show_help()
sys.exit(0)
- if 'skip' in opt:
- opt['skip'] = int(opt['skip'])
+ if "skip" in opt:
+ opt["skip"] = int(opt["skip"])
else:
- opt['skip'] = 0
+ opt["skip"] = 0
- if 'threshold' in opt and opt['threshold'] != 'mean':
- opt['threshold'] = float(opt['threshold'])
+ if "threshold" in opt and opt["threshold"] != "mean":
+ opt["threshold"] = float(opt["threshold"])
- if 'threshold-peakcount' in opt:
- opt['threshold-peakcount'] = int(opt['threshold-peakcount'])
+ if "threshold-peakcount" in opt:
+ opt["threshold-peakcount"] = int(opt["threshold-peakcount"])
except getopt.GetoptError as err:
print(err)
sys.exit(2)
except IndexError:
- print('Usage: mimosa-etv <duration>')
+ print("Usage: mimosa-etv <duration>")
sys.exit(2)
except ValueError:
- print('Error: duration or skip is not a number')
+ print("Error: duration or skip is not a number")
sys.exit(2)
voltage, shunt, inputfile = args
@@ -110,7 +117,7 @@ if __name__ == '__main__':
currents = mim.charge_to_current_nocal(charges) * 1e-6
powers = currents * voltage
- if 'threshold-peakcount' in opt:
+ if "threshold-peakcount" in opt:
bs_mean = np.mean(powers)
# Finding the correct threshold is tricky. If #peaks < peakcont, our
@@ -126,42 +133,59 @@ if __name__ == '__main__':
# #peaks != peakcount and threshold >= mean, we go down.
# If that doesn't work, we fall back to a linear search in 1 µW steps
def direction_function(peakcount, power):
- if peakcount == opt['threshold-peakcount']:
+ if peakcount == opt["threshold-peakcount"]:
return 0
if power < bs_mean:
return 1
return -1
+
threshold = peak_search(power, np.min(power), np.max(power), direction_function)
if threshold == None:
- threshold = peak_search2(power, np.min(power), np.max(power), direction_function)
+ threshold = peak_search2(
+ power, np.min(power), np.max(power), direction_function
+ )
if threshold != None:
- print('Threshold set to {:.0f} µW : {:.9f}'.format(threshold * 1e6, threshold))
- opt['threshold'] = threshold
+ print(
+ "Threshold set to {:.0f} µW : {:.9f}".format(
+ threshold * 1e6, threshold
+ )
+ )
+ opt["threshold"] = threshold
else:
- print('Found no working threshold')
+ print("Found no working threshold")
- if 'threshold' in opt:
- if opt['threshold'] == 'mean':
- opt['threshold'] = np.mean(powers)
- print('Threshold set to {:.0f} µW : {:.9f}'.format(opt['threshold'] * 1e6, opt['threshold']))
+ if "threshold" in opt:
+ if opt["threshold"] == "mean":
+ opt["threshold"] = np.mean(powers)
+ print(
+ "Threshold set to {:.0f} µW : {:.9f}".format(
+ opt["threshold"] * 1e6, opt["threshold"]
+ )
+ )
baseline_mean = 0
- if np.any(powers < opt['threshold']):
- baseline_mean = np.mean(powers[powers < opt['threshold']])
- print('Baseline mean: {:.0f} µW : {:.9f}'.format(
- baseline_mean * 1e6, baseline_mean))
- if np.any(powers >= opt['threshold']):
- print('Peak mean: {:.0f} µW : {:.9f}'.format(
- np.mean(powers[powers >= opt['threshold']]) * 1e6,
- np.mean(powers[powers >= opt['threshold']])))
+ if np.any(powers < opt["threshold"]):
+ baseline_mean = np.mean(powers[powers < opt["threshold"]])
+ print(
+ "Baseline mean: {:.0f} µW : {:.9f}".format(
+ baseline_mean * 1e6, baseline_mean
+ )
+ )
+ if np.any(powers >= opt["threshold"]):
+ print(
+ "Peak mean: {:.0f} µW : {:.9f}".format(
+ np.mean(powers[powers >= opt["threshold"]]) * 1e6,
+ np.mean(powers[powers >= opt["threshold"]]),
+ )
+ )
peaks = []
peak_start = -1
for i, dp in enumerate(powers):
- if dp >= opt['threshold'] and peak_start == -1:
+ if dp >= opt["threshold"] and peak_start == -1:
peak_start = i
- elif dp < opt['threshold'] and peak_start != -1:
+ elif dp < opt["threshold"] and peak_start != -1:
peaks.append((peak_start, i))
peak_start = -1
@@ -170,32 +194,55 @@ if __name__ == '__main__':
for peak in peaks:
duration = (peak[1] - peak[0]) * 1e-5
total_energy += np.mean(powers[peak[0] : peak[1]]) * duration
- delta_energy += (np.mean(powers[peak[0] : peak[1]]) - baseline_mean) * duration
+ delta_energy += (
+ np.mean(powers[peak[0] : peak[1]]) - baseline_mean
+ ) * duration
delta_powers = powers[peak[0] : peak[1]] - baseline_mean
- print('{:.2f}ms peak ({:f} -> {:f})'.format(duration * 1000,
- peak[0], peak[1]))
- print(' {:f} µJ / mean {:f} µW'.format(
- np.mean(powers[peak[0] : peak[1]]) * duration * 1e6,
- np.mean(powers[peak[0] : peak[1]]) * 1e6 ))
+ print(
+ "{:.2f}ms peak ({:f} -> {:f})".format(duration * 1000, peak[0], peak[1])
+ )
+ print(
+ " {:f} µJ / mean {:f} µW".format(
+ np.mean(powers[peak[0] : peak[1]]) * duration * 1e6,
+ np.mean(powers[peak[0] : peak[1]]) * 1e6,
+ )
+ )
measures = aggregate_measures(np.mean(delta_powers), delta_powers)
- print(' {:f} µW delta mean = {:0.1f}% / {:f} µW error'.format(np.mean(delta_powers) * 1e6, measures['smape'], measures['rmsd'] * 1e6 ))
- print('Peak energy mean: {:.0f} µJ : {:.9f}'.format(
- total_energy * 1e6 / len(peaks), total_energy / len(peaks)))
- print('Average per-peak energy (delta over baseline): {:.0f} µJ : {:.9f}'.format(
- delta_energy * 1e6 / len(peaks), delta_energy / len(peaks)))
-
-
- if 'stat' in opt:
+ print(
+ " {:f} µW delta mean = {:0.1f}% / {:f} µW error".format(
+ np.mean(delta_powers) * 1e6,
+ measures["smape"],
+ measures["rmsd"] * 1e6,
+ )
+ )
+ print(
+ "Peak energy mean: {:.0f} µJ : {:.9f}".format(
+ total_energy * 1e6 / len(peaks), total_energy / len(peaks)
+ )
+ )
+ print(
+ "Average per-peak energy (delta over baseline): {:.0f} µJ : {:.9f}".format(
+ delta_energy * 1e6 / len(peaks), delta_energy / len(peaks)
+ )
+ )
+
+ if "stat" in opt:
mean_current = np.mean(currents)
mean_power = np.mean(powers)
- print('Mean current: {:.0f} µA : {:.9f}'.format(mean_current * 1e6, mean_current))
- print('Mean power: {:.0f} µW : {:.9f}'.format(mean_power * 1e6, mean_power))
-
- if 'plot' in opt:
+ print(
+ "Mean current: {:.0f} µA : {:.9f}".format(
+ mean_current * 1e6, mean_current
+ )
+ )
+ print(
+ "Mean power: {:.0f} µW : {:.9f}".format(mean_power * 1e6, mean_power)
+ )
+
+ if "plot" in opt:
timestamps = np.arange(len(powers)) * 1e-5
- pwrhandle, = plt.plot(timestamps, powers, 'b-', label='U*I', markersize=1)
+ (pwrhandle,) = plt.plot(timestamps, powers, "b-", label="U*I", markersize=1)
plt.legend(handles=[pwrhandle])
- plt.xlabel('Time [s]')
- plt.ylabel('Power [W]')
+ plt.xlabel("Time [s]")
+ plt.ylabel("Power [W]")
plt.grid(True)
plt.show()
diff --git a/bin/mimplot b/bin/mimplot
index 2a888ee..a55a875 100755
--- a/bin/mimplot
+++ b/bin/mimplot
@@ -9,54 +9,52 @@ import struct
import sys
import tarfile
import matplotlib.pyplot as plt
-from dfatool.dfatool import MIMOSA
+from dfatool.loader import MIMOSA
from dfatool.utils import running_mean
opt = dict()
-if __name__ == '__main__':
+if __name__ == "__main__":
- try:
- optspec = (
- 'export= '
- )
+ try:
+ optspec = "export= "
- raw_opts, args = getopt.getopt(sys.argv[1:], '', optspec.split())
+ raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split())
- for option, parameter in raw_opts:
- optname = re.sub(r'^--', '', option)
- opt[optname] = parameter
+ for option, parameter in raw_opts:
+ optname = re.sub(r"^--", "", option)
+ opt[optname] = parameter
- if 'export' in opt:
- opt['export'] = list(map(int, opt['export'].split(':')))
+ if "export" in opt:
+ opt["export"] = list(map(int, opt["export"].split(":")))
- except getopt.GetoptError as err:
- print(err)
- sys.exit(2)
+ except getopt.GetoptError as err:
+ print(err)
+ sys.exit(2)
- voltage = float(args[0])
- shunt = float(args[1])
- mimfile = args[2]
+ voltage = float(args[0])
+ shunt = float(args[1])
+ mimfile = args[2]
- mim = MIMOSA(voltage, shunt)
+ mim = MIMOSA(voltage, shunt)
- charges, triggers = mim.load_file(mimfile)
- charges = charges[:3000000]
+ charges, triggers = mim.load_file(mimfile)
+ charges = charges[:3000000]
- currents = running_mean(mim.charge_to_current_nocal(charges), 10) * 1e-6
- powers = currents * voltage
- xr = np.arange(len(currents)) * 1e-5
+ currents = running_mean(mim.charge_to_current_nocal(charges), 10) * 1e-6
+ powers = currents * voltage
+ xr = np.arange(len(currents)) * 1e-5
- if 'export' in opt:
- xr = xr[opt['export'][0] : opt['export'][1]]
- currents = currents[opt['export'][0] : opt['export'][1]]
- powers = powers[opt['export'][0] : opt['export'][1]]
+ if "export" in opt:
+ xr = xr[opt["export"][0] : opt["export"][1]]
+ currents = currents[opt["export"][0] : opt["export"][1]]
+ powers = powers[opt["export"][0] : opt["export"][1]]
- for pair in zip(xr, powers):
- print('{} {}'.format(*pair))
+ for pair in zip(xr, powers):
+ print("{} {}".format(*pair))
- plt.plot( xr, powers, "r-")
- plt.xlabel('Time [s]')
- plt.ylabel('Power [W]')
- plt.grid(True)
- plt.show()
+ plt.plot(xr, powers, "r-")
+ plt.xlabel("Time [s]")
+ plt.ylabel("Power [W]")
+ plt.grid(True)
+ plt.show()
diff --git a/bin/test_corrcoef.py b/bin/test_corrcoef.py
index 0b1ca54..ccb3366 100755
--- a/bin/test_corrcoef.py
+++ b/bin/test_corrcoef.py
@@ -4,8 +4,9 @@ import getopt
import re
import sys
from dfatool import plotter
-from dfatool.dfatool import PTAModel, RawData, pta_trace_to_aggregate
-from dfatool.dfatool import gplearn_to_function
+from dfatool.loader import RawData, pta_trace_to_aggregate
+from dfatool.functions import gplearn_to_function
+from dfatool.model import PTAModel
opt = dict()
@@ -110,7 +111,6 @@ def print_text_model_data(model, pm, pq, lm, lq, am, ai, aq):
if __name__ == "__main__":
ignored_trace_indexes = None
- discard_outliers = None
safe_functions_enabled = False
function_override = {}
show_models = []
@@ -119,7 +119,7 @@ if __name__ == "__main__":
try:
optspec = (
"plot-unparam= plot-param= show-models= show-quality= "
- "ignored-trace-indexes= discard-outliers= function-override= "
+ "ignored-trace-indexes= function-override= "
"with-safe-functions"
)
raw_opts, args = getopt.getopt(sys.argv[1:], "", optspec.split(" "))
@@ -135,9 +135,6 @@ if __name__ == "__main__":
if 0 in ignored_trace_indexes:
print("[E] arguments to --ignored-trace-indexes start from 1")
- if "discard-outliers" in opt:
- discard_outliers = float(opt["discard-outliers"])
-
if "function-override" in opt:
for function_desc in opt["function-override"].split(";"):
state_or_tran, attribute, *function_str = function_desc.split(" ")
@@ -169,7 +166,6 @@ if __name__ == "__main__":
arg_count,
traces=preprocessed_data,
ignore_trace_indexes=ignored_trace_indexes,
- discard_outliers=discard_outliers,
function_override=function_override,
use_corrcoef=False,
)
@@ -179,7 +175,6 @@ if __name__ == "__main__":
arg_count,
traces=preprocessed_data,
ignore_trace_indexes=ignored_trace_indexes,
- discard_outliers=discard_outliers,
function_override=function_override,
use_corrcoef=True,
)