-rwxr-xr-x  lib/automata.py              11
-rw-r--r--  lib/data_parameters.py       17
-rw-r--r--  lib/dfatool.py               42
-rw-r--r--  lib/functions.py              8
-rw-r--r--  lib/lex.py                    9
-rw-r--r--  lib/parameters.py            48
-rwxr-xr-x  lib/protocol_benchmarks.py    7
-rw-r--r--  lib/utils.py                  4
8 files changed, 81 insertions, 65 deletions
diff --git a/lib/automata.py b/lib/automata.py
index b3318e0..69b3969 100755
--- a/lib/automata.py
+++ b/lib/automata.py
@@ -3,11 +3,14 @@
from .functions import AnalyticFunction, NormalizationFunction
from .utils import is_numeric
import itertools
+import logging
import numpy as np
import json
import queue
import yaml
+logger = logging.getLogger(__name__)
+
def _dict_to_list(input_dict: dict) -> list:
return [input_dict[x] for x in sorted(input_dict.keys())]
@@ -1305,8 +1308,8 @@ class PTA:
"power"
]
except KeyError:
- print(
- "[W] skipping model update of state {} due to missing data".format(
+ logger.warning(
+ "skipping model update of state {} due to missing data".format(
state.name
)
)
@@ -1353,8 +1356,8 @@ class PTA:
"timeout"
]
except KeyError:
- print(
- "[W] skipping model update of transition {} due to missing data".format(
+ logger.warning(
+ "skipping model update of transition {} due to missing data".format(
transition.name
)
)
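
The change in lib/automata.py establishes the pattern repeated in every file below: the module requests a logger named after itself, so each record carries its origin and can be filtered per module, and the old ad-hoc "[W]" prefix becomes redundant because the record itself carries the WARNING level. A minimal sketch of the pattern in isolation; the helper name and the model structure are illustrative, not taken from this commit:

    import logging

    logger = logging.getLogger(__name__)  # e.g. "lib.automata" when imported as a package module

    def state_power(static_model, state_name):
        # hypothetical helper mirroring the hunk above
        try:
            return static_model[state_name]["power"]
        except KeyError:
            # replaces the former print("[W] ...") call
            logger.warning(
                "skipping model update of state {} due to missing data".format(state_name)
            )
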
diff --git a/lib/data_parameters.py b/lib/data_parameters.py
index 1150b71..84eacfd 100644
--- a/lib/data_parameters.py
+++ b/lib/data_parameters.py
@@ -7,9 +7,12 @@ length of lists, ane more.
from .protocol_benchmarks import codegen_for_lib
from . import cycles_to_energy, size_to_radio_energy, utils
+import logging
import numpy as np
import ubjson
+logger = logging.getLogger(__name__)
+
def _string_value_length(json):
if type(json) == str:
@@ -224,7 +227,7 @@ class Protolog:
except KeyError:
pass
except TypeError as e:
- print(
+ logger.error(
"TypeError in {} {} {} {}: {} -> {}".format(
arch_lib,
benchmark,
@@ -395,7 +398,7 @@ class Protolog:
except KeyError:
pass
except ValueError:
- print(
+ logger.warning(
"cycles_enc is NaN for {} -> {} -> {}".format(
arch, lib, key
)
@@ -410,7 +413,7 @@ class Protolog:
except KeyError:
pass
except ValueError:
- print(
+ logger.warning(
"cycles_ser is NaN for {} -> {} -> {}".format(
arch, lib, key
)
@@ -425,7 +428,7 @@ class Protolog:
except KeyError:
pass
except ValueError:
- print(
+ logger.warning(
"cycles_encser is NaN for {} -> {} -> {}".format(
arch, lib, key
)
@@ -440,7 +443,7 @@ class Protolog:
except KeyError:
pass
except ValueError:
- print(
+ logger.warning(
"cycles_des is NaN for {} -> {} -> {}".format(
arch, lib, key
)
@@ -455,7 +458,7 @@ class Protolog:
except KeyError:
pass
except ValueError:
- print(
+ logger.warning(
"cycles_dec is NaN for {} -> {} -> {}".format(
arch, lib, key
)
@@ -470,7 +473,7 @@ class Protolog:
except KeyError:
pass
except ValueError:
- print(
+ logger.warning(
"cycles_desdec is NaN for {} -> {} -> {}".format(
arch, lib, key
)
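
All converted calls keep their original str.format() messages, which are rendered eagerly whether or not any handler accepts the record. Logger methods also take %-style lazy arguments that are interpolated only when the record is actually handled; a possible refinement, not part of this commit, shown with the message from the hunks above:

    import logging

    logger = logging.getLogger(__name__)

    def warn_nan(field, arch, lib, key):
        # the string is only built if WARNING is enabled for this logger
        logger.warning("%s is NaN for %s -> %s -> %s", field, arch, lib, key)
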
diff --git a/lib/dfatool.py b/lib/dfatool.py
index ef3bac7..1e38907 100644
--- a/lib/dfatool.py
+++ b/lib/dfatool.py
@@ -75,7 +75,7 @@ def gplearn_to_function(function_str: str):
arg_list.append("X{:d}".format(i))
eval_str = "lambda {}, *whatever: {}".format(",".join(arg_list), function_str)
- print(eval_str)
+ logger.debug(eval_str)
return eval(eval_str, eval_globals)
@@ -1371,7 +1371,7 @@ class RawData:
for measurement in measurements:
if "energy_trace" not in measurement:
- logging.warning(
+ logger.warning(
"Skipping {ar:s}/{m:s}: {e:s}".format(
ar=self.filenames[measurement["fileno"]],
m=measurement["info"].name,
@@ -1393,7 +1393,7 @@ class RawData:
self._merge_online_and_offline(measurement)
num_valid += 1
else:
- logging.warning(
+ logger.warning(
"Skipping {ar:s}/{m:s}: {e:s}".format(
ar=self.filenames[measurement["fileno"]],
m=measurement["info"].name,
@@ -1405,14 +1405,14 @@ class RawData:
self._merge_online_and_etlog(measurement)
num_valid += 1
else:
- logging.warning(
+ logger.warning(
"Skipping {ar:s}/{m:s}: {e:s}".format(
ar=self.filenames[measurement["fileno"]],
m=measurement["info"].name,
e=measurement["error"],
),
)
- logging.info(
+ logger.info(
"{num_valid:d}/{num_total:d} measurements are valid".format(
num_valid=num_valid, num_total=len(measurements)
),
@@ -1834,9 +1834,9 @@ class AnalyticModel:
try:
model[name][key] = model_function(elem[key])
except RuntimeWarning:
- logging.warning("Got no data for {} {}".format(name, key))
+ logger.warning("Got no data for {} {}".format(name, key))
except FloatingPointError as fpe:
- logging.warning("Got no data for {} {}: {}".format(name, key, fpe),)
+ logger.warning("Got no data for {} {}: {}".format(name, key, fpe),)
return model
def param_index(self, param_name):
@@ -2230,9 +2230,9 @@ class PTAModel:
try:
model[name][key] = model_function(elem[key])
except RuntimeWarning:
- logging.warning("Got no data for {} {}".format(name, key))
+ logger.warning("Got no data for {} {}".format(name, key))
except FloatingPointError as fpe:
- logging.warning("Got no data for {} {}: {}".format(name, key, fpe),)
+ logger.warning("Got no data for {} {}: {}".format(name, key, fpe),)
return model
def get_static(self, use_mean=False):
@@ -2730,7 +2730,7 @@ class EnergyTraceLog:
self.sample_rate = data_count / (m_duration_us * 1e-6)
- logging.debug(
+ logger.debug(
"got {} samples with {} seconds of log data ({} Hz)".format(
data_count, m_duration_us * 1e-6, self.sample_rate
),
@@ -2837,19 +2837,17 @@ class EnergyTraceLog:
for name, duration in expected_transitions:
bc, start, stop, end = self.find_barcode(next_barcode)
if bc is None:
- print('[!!!] did not find transition "{}"'.format(name))
+ logger.error('did not find transition "{}"'.format(name))
break
next_barcode = end + self.state_duration + duration
- logging.debug(
+ logger.debug(
'{} barcode "{}" area: {:0.2f} .. {:0.2f} / {:0.2f} seconds'.format(
offline_index, bc, start, stop, end
),
)
if bc != name:
- logging.debug(
- '[!!!] mismatch: expected "{}", got "{}"'.format(name, bc),
- )
- logging.debug(
+ logger.error('mismatch: expected "{}", got "{}"'.format(name, bc),)
+ logger.debug(
"{} estimated transition area: {:0.3f} .. {:0.3f} seconds".format(
offline_index, end, end + duration
),
@@ -2862,7 +2860,7 @@ class EnergyTraceLog:
self.ts_to_index(end + duration + self.state_duration) + 1
)
- logging.debug(
+ logger.debug(
"{} estimated transitionindex: {:0.3f} .. {:0.3f} seconds".format(
offline_index,
transition_start_index / self.sample_rate,
@@ -2962,7 +2960,7 @@ class EnergyTraceLog:
+ self.led_power / 3
)
- logging.debug(
+ logger.debug(
"looking for barcode starting at {:0.2f} s, threshold is {:0.1f} mW".format(
start_ts, sync_threshold_power * 1e3
),
@@ -2996,7 +2994,7 @@ class EnergyTraceLog:
barcode_data = self.interval_power[sync_area_start:sync_area_end]
- logging.debug(
+ logger.debug(
"barcode search area: {:0.2f} .. {:0.2f} seconds ({} samples)".format(
sync_start_ts, sync_end_ts, len(barcode_data)
),
@@ -3074,7 +3072,7 @@ class EnergyTraceLog:
return content, sym_start, sym_end, padding_bits
else:
- logging.warning("unable to find barcode")
+ logger.warning("unable to find barcode")
return None, None, None, None
@@ -3300,7 +3298,7 @@ class MIMOSA:
if cal_r2_mean > cal_0_mean:
b_lower = (ua_r2 - 0) / (cal_r2_mean - cal_0_mean)
else:
- logging.warning("0 uA == %.f uA during calibration" % (ua_r2))
+ logger.warning("0 uA == %.f uA during calibration" % (ua_r2))
b_lower = 0
b_upper = (ua_r1 - ua_r2) / (cal_r1_mean - cal_r2_mean)
@@ -3472,7 +3470,7 @@ class MIMOSA:
data["substates"] = substates
ssum = np.sum(list(map(lambda x: x["duration"], substates["states"])))
if ssum != data["us"]:
- logging.warning("duration %d vs %d" % (data["us"], ssum))
+ logger.warning("duration %d vs %d" % (data["us"], ssum))
if isa == "transition":
# subtract average power of previous state
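
lib/dfatool.py already used the logging module, but through its module-level functions (logging.warning(...) and friends). Those delegate to the root logger, tag every record with the name "root", and call basicConfig() as a side effect, so individual modules cannot be singled out for filtering. The switch to the named logger fixes both; a minimal demonstration:

    import logging

    logger = logging.getLogger(__name__)

    logging.warning("via the root logger")   # record.name == "root", implicit basicConfig()
    logger.warning("via the module logger")  # record.name == __name__, filterable per module
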
diff --git a/lib/functions.py b/lib/functions.py
index 359c8d7..0b849bd 100644
--- a/lib/functions.py
+++ b/lib/functions.py
@@ -229,7 +229,7 @@ class AnalyticFunction:
else:
X[i].extend([np.nan] * len(val[model_attribute]))
elif key[0] == state_or_tran and len(key[1]) != dimension:
- logging.warning(
+ logger.warning(
"Invalid parameter key length while gathering fit data for {}/{}. is {}, want {}.".format(
state_or_tran, model_attribute, len(key[1]), dimension
),
@@ -263,7 +263,7 @@ class AnalyticFunction:
error_function, self._regression_args, args=(X, Y), xtol=2e-15
)
except ValueError as err:
- logging.warning(
+ logger.warning(
"Fit failed for {}/{}: {} (function: {})".format(
state_or_tran, model_attribute, err, self._model_str
),
@@ -273,13 +273,13 @@ class AnalyticFunction:
self._regression_args = res.x
self.fit_success = True
else:
- logging.warning(
+ logger.warning(
"Fit failed for {}/{}: {} (function: {})".format(
state_or_tran, model_attribute, res.message, self._model_str
),
)
else:
- logging.warning(
+ logger.warning(
"Insufficient amount of valid parameter keys, cannot fit {}/{}".format(
state_or_tran, model_attribute
),
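
The fit-failure branches log the exception object, but not its traceback. Where the traceback would help, Logger.exception() -- or exc_info=True at any level -- attaches it automatically when called inside an except block; a sketch of that option, with a hypothetical stand-in for the numeric fit above:

    import logging

    logger = logging.getLogger(__name__)

    def fit(run_least_squares):
        # run_least_squares stands in for the regression call above
        try:
            return run_least_squares()
        except ValueError as err:
            # ERROR record with the full traceback appended;
            # logger.warning("...", exc_info=True) would keep the WARNING level
            logger.exception("Fit failed: {}".format(err))
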
diff --git a/lib/lex.py b/lib/lex.py
index 7bb3760..f698e8c 100644
--- a/lib/lex.py
+++ b/lib/lex.py
@@ -1,4 +1,7 @@
from .sly import Lexer, Parser
+import logging
+
+logger = logging.getLogger(__name__)
class TimedWordLexer(Lexer):
@@ -38,7 +41,7 @@ class TimedSequenceLexer(Lexer):
FUNCTIONSEP = r";"
def error(self, t):
- print("Illegal character '%s'" % t.value[0])
+ logger.error("Illegal character '%s'" % t.value[0])
if t.value[0] == "{" and t.value.find("}"):
self.index += 1 + t.value.find("}")
else:
@@ -153,11 +156,11 @@ class TimedSequenceParser(Parser):
def error(self, p):
if p:
- print("Syntax error at token", p.type)
+            logger.error("Syntax error at token %s", p.type)
# Just discard the token and tell the parser it's okay.
self.errok()
else:
- print("Syntax error at EOF")
+ logger.error("Syntax error at EOF")
class TimedWord:
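
Note the placeholder in the parser's error() above: print() joins any number of positional arguments with spaces, but Logger.error() treats extra positional arguments as %-interpolation values for the message string, so the message needs an explicit placeholder. A minimal comparison:

    import logging

    logging.basicConfig()
    logger = logging.getLogger(__name__)

    print("Syntax error at token", "NAME")            # Syntax error at token NAME
    logger.error("Syntax error at token %s", "NAME")  # same text, as an ERROR record
    # logger.error("Syntax error at token", "NAME") would fail to render:
    # "TypeError: not all arguments converted during string formatting"
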
diff --git a/lib/parameters.py b/lib/parameters.py
index bd67cc1..79543a6 100644
--- a/lib/parameters.py
+++ b/lib/parameters.py
@@ -1,6 +1,7 @@
import itertools
import logging
import numpy as np
+import warnings
from collections import OrderedDict
from copy import deepcopy
from multiprocessing import Pool
@@ -163,12 +164,11 @@ def _std_by_param(by_param, all_param_values, state_or_tran, attribute, param_in
# vprint(verbose, '[W] parameter value partition for {} is empty'.format(param_value))
if np.all(np.isnan(stddev_matrix)):
- print(
- "[W] {}/{} parameter #{} has no data partitions -- how did this even happen?".format(
- state_or_tran, attribute, param_index
+ warnings.warn(
+ "{}/{} parameter #{} has no data partitions. stddev_matrix = {}".format(
+ state_or_tran, attribute, param_index, stddev_matrix
)
)
- print("stddev_matrix = {}".format(stddev_matrix))
return stddev_matrix, 0.0
return (
@@ -203,13 +203,13 @@ def _corr_by_param(by_name, state_or_trans, attribute, param_index):
# -> assume no correlation
return 0.0
except ValueError:
- print(
- "[!] Exception in _corr_by_param(by_name, state_or_trans={}, attribute={}, param_index={})".format(
+ logger.error(
+ "ValueError in _corr_by_param(by_name, state_or_trans={}, attribute={}, param_index={})".format(
state_or_trans, attribute, param_index
)
)
- print(
- "[!] while executing np.corrcoef(by_name[{}][{}]={}, {}))".format(
+ logger.error(
+ "while executing np.corrcoef(by_name[{}][{}]={}, {}))".format(
state_or_trans,
attribute,
by_name[state_or_trans][attribute],
@@ -443,8 +443,8 @@ def prune_dependent_parameters(by_name, parameter_names, correlation_threshold=0
correlation != np.nan
and np.abs(correlation) > correlation_threshold
):
- print(
- "[!] Parameters {} <-> {} are correlated with coefficcient {}".format(
+ logger.debug(
+                        "Parameters {} <-> {} are correlated with coefficient {}".format(
parameter_names[index_1],
parameter_names[index_2],
correlation,
@@ -454,7 +454,7 @@ def prune_dependent_parameters(by_name, parameter_names, correlation_threshold=0
index_to_remove = index_1
else:
index_to_remove = index_2
- print(
+ logger.debug(
" Removing parameter {}".format(
parameter_names[index_to_remove]
)
@@ -581,15 +581,17 @@ class ParamStats:
)
> 2
):
- print(
- key,
- param,
- list(
- filter(
- lambda n: is_numeric(n),
- self.distinct_values[key][param],
- )
- ),
+ logger.debug(
+ "{} can be fitted for param {} on {}".format(
+ key,
+ param,
+ list(
+ filter(
+ lambda n: is_numeric(n),
+ self.distinct_values[key][param],
+ )
+ ),
+ )
)
return True
return False
@@ -646,13 +648,15 @@ class ParamStats:
depends_on_a_parameter = False
for param in self._parameter_names:
if self.stats[state_or_tran][attribute]["depends_on_param"][param]:
- print("{}/{} depends on {}".format(state_or_tran, attribute, param))
+ logger.debug(
+ "{}/{} depends on {}".format(state_or_tran, attribute, param)
+ )
depends_on_a_parameter = True
if (
len(self.codependent_parameters(state_or_tran, attribute, param))
== 0
):
- print("has no codependent parameters")
+ logger.debug("... and has no codependent parameters")
# Always depends on this parameter, regardless of other parameters' values
return False
return depends_on_a_parameter
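
lib/parameters.py is the only file that turns a print into warnings.warn rather than a logger call. The two mechanisms differ deliberately: warnings are deduplicated per call site by default and are controlled through -W filters or warnings.filterwarnings(), which also lets a test suite escalate them into failures, while log records are emitted every time and routed through handlers. A sketch of the contrast, with made-up message text:

    import logging
    import warnings

    logging.basicConfig()
    logger = logging.getLogger(__name__)

    for _ in range(3):
        warnings.warn("empty data partition")   # shown once; repeats from this line are suppressed
        logger.warning("empty data partition")  # shown on every iteration

    warnings.simplefilter("error")  # from here on, the warning raises instead
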
diff --git a/lib/protocol_benchmarks.py b/lib/protocol_benchmarks.py
index b42e821..d41979f 100755
--- a/lib/protocol_benchmarks.py
+++ b/lib/protocol_benchmarks.py
@@ -16,8 +16,11 @@ import io
import os
import re
import time
+import logging
from filelock import FileLock
+logger = logging.getLogger(__name__)
+
class DummyProtocol:
def __init__(self):
@@ -1838,14 +1841,14 @@ class Benchmark:
this_result["data"] = data
if value != None:
this_result[key] = {"v": value, "ts": int(time.time())}
- print(
+ logger.debug(
"{} {} {} ({}) :: {} -> {}".format(
libkey, bench_name, bench_index, data, key, value
)
)
else:
this_result[key] = {"e": error, "ts": int(time.time())}
- print(
+ logger.debug(
"{} {} {} ({}) :: {} -> [E] {}".format(
libkey, bench_name, bench_index, data, key, error[:500]
)
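
The per-benchmark result lines are demoted from unconditional print to DEBUG, so they disappear unless debugging is switched on. When composing such a message is itself costly (the hunk above slices error[:500] and formats six fields), the work can be skipped entirely with a level guard; an optional refinement, not part of this commit, shown with a reduced argument list:

    import logging

    logger = logging.getLogger(__name__)

    def record_error(libkey, bench_name, error):
        # both the slice and the interpolation are skipped when DEBUG is off
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug("%s %s -> [E] %s", libkey, bench_name, error[:500])
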
diff --git a/lib/utils.py b/lib/utils.py
index 8186ee7..d28ecda 100644
--- a/lib/utils.py
+++ b/lib/utils.py
@@ -1,7 +1,9 @@
import numpy as np
import re
+import logging
arg_support_enabled = True
+logger = logging.getLogger(__name__)
def running_mean(x: np.ndarray, N: int) -> np.ndarray:
@@ -212,7 +214,7 @@ def filter_aggregate_by_param(aggregate, parameters, parameter_filter):
)
)
if len(indices_to_keep) == 0:
- print("??? {}->{}".format(parameter_filter, name))
+            logger.debug("parameter filter {} removes all entries of {}".format(parameter_filter, name))
names_to_remove.add(name)
else:
for attribute in aggregate[name]["attributes"]:
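
None of the eight modules configure logging themselves, which is the intended division of labor: library code creates loggers and emits records, and the importing application decides what is shown and how. A sketch of the consuming side, assuming these modules are imported as the lib package; the configuration values are examples, not part of this commit:

    import logging

    # one-time setup at the application entry point; every "lib.*" logger inherits it
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(levelname)s %(name)s: %(message)s",
    )

    # per-module override, e.g. to quieten the verbose barcode-search output
    logging.getLogger("lib.dfatool").setLevel(logging.WARNING)
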