author     Birte Kristina Friesel <birte.friesel@uos.de>  2024-03-11 14:08:21 +0100
committer  Birte Kristina Friesel <birte.friesel@uos.de>  2024-03-11 14:08:21 +0100
commit     1e29075c9f625947a82c86d7304ed20eb6f06e64 (patch)
tree       789b3fe8b253c0069b97d5c47e625513ee37a32c
parent     3a6089bc4258e884b5ce909538b2f49a3dc87e78 (diff)
Move Logfile and CSVfile from utils to a new loader class
-rwxr-xr-x  bin/analyze-log.py      |  5
-rw-r--r--  lib/loader/__init__.py  | 13
-rw-r--r--  lib/loader/plain.py     | 97
-rw-r--r--  lib/utils.py            | 93
4 files changed, 107 insertions(+), 101 deletions(-)
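
For downstream code, the move only changes the import path. A minimal
migration sketch, derived from the bin/analyze-log.py hunk below:

    # before this commit
    import dfatool.utils
    loader = dfatool.utils.Logfile()

    # after this commit
    from dfatool.loader import Logfile
    loader = Logfile()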
diff --git a/bin/analyze-log.py b/bin/analyze-log.py
index 1deb438..dd32fab 100755
--- a/bin/analyze-log.py
+++ b/bin/analyze-log.py
@@ -10,6 +10,7 @@ import dfatool.cli
import dfatool.plotter
import dfatool.utils
import dfatool.functions as df
+from dfatool.loader import Logfile, CSVfile
from dfatool.model import AnalyticModel
from dfatool.validation import CrossValidator
from functools import reduce
@@ -22,9 +23,9 @@ import time
def parse_logfile(filename):
if ".csv" in filename:
- loader = dfatool.utils.CSVfile()
+ loader = CSVfile()
else:
- loader = dfatool.utils.Logfile()
+ loader = Logfile()
if filename.endswith("xz"):
import lzma
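
The hunk above is cut off at the lzma import. For context, a plausible
completion of parse_logfile, assembled from the visible lines; the file
handling is an assumption, not necessarily the committed code:

    def parse_logfile(filename):
        if ".csv" in filename:
            loader = CSVfile()
        else:
            loader = Logfile()
        if filename.endswith("xz"):
            import lzma
            # assumption: xz-compressed logs are opened transparently in text mode
            with lzma.open(filename, "rt") as f:
                return loader.load(f)
        with open(filename, "r") as f:
            return loader.load(f)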
diff --git a/lib/loader/__init__.py b/lib/loader/__init__.py
index caa2212..9dc83e3 100644
--- a/lib/loader/__init__.py
+++ b/lib/loader/__init__.py
@@ -20,6 +20,7 @@ from .energytrace import (
EnergyTraceWithTimer,
)
from .keysight import DLog, KeysightCSV
+from .plain import Logfile, CSVfile
from .mimosa import MIMOSA
logger = logging.getLogger(__name__)
@@ -667,9 +668,9 @@ class RawData:
"offline_aggregates", None
)
if offline_aggregates:
- state_or_transition[
- "online_aggregates"
- ] = offline_aggregates
+ state_or_transition["online_aggregates"] = (
+ offline_aggregates
+ )
for j, traces in enumerate(ptalog["traces"]):
self.filenames.append("{}#{}".format(filename, j))
@@ -736,9 +737,9 @@ class RawData:
"offline_aggregates", None
)
if offline_aggregates:
- state_or_transition[
- "online_aggregates"
- ] = offline_aggregates
+ state_or_transition["online_aggregates"] = (
+ offline_aggregates
+ )
for j, traces in enumerate(ptalog["traces"]):
self.filenames.append("{}#{}".format(filename, j))
self.traces_by_fileno.append(traces)
diff --git a/lib/loader/plain.py b/lib/loader/plain.py
new file mode 100644
index 0000000..5aa4293
--- /dev/null
+++ b/lib/loader/plain.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python3
+
+import logging
+import re
+
+from ..utils import soft_cast_int, soft_cast_float
+
+# module-level logger; Logfile.kv_to_param() and Logfile.load() emit
+# their warnings through it
+logger = logging.getLogger(__name__)
+
+
+class CSVfile:
+ def __init__(self):
+ pass
+
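+    # load() expects a CSV whose first column is ignored, whose middle
+    # columns hold parameter values, and whose last column holds the
+    # attribute value. Example with hypothetical data:
+    #   ts,n,k,latency_us
+    #   0,100,2,39.6
+    # yields {"name": "CSVFile", "param": {"n": 100, "k": 2},
+    #         "attribute": {"latency_us": 39.6}}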
+ def load(self, f):
+ observations = list()
+ for lineno, line in enumerate(f):
+ if lineno == 0:
+ param_names = line.split(",")[1:-1]
+ attr_names = line.removesuffix("\n").split(",")[-1:]
+ else:
+ param_values = list(map(soft_cast_int, line.split(",")[1:-1]))
+ attr_values = list(
+ map(soft_cast_float, line.removesuffix("\n").split(",")[-1:])
+ )
+ observations.append(
+ {
+ "name": "CSVFile",
+ "param": dict(zip(param_names, param_values)),
+ "attribute": dict(zip(attr_names, attr_values)),
+ }
+ )
+ return observations
+
+
+class Logfile:
+ def __init__(self):
+ pass
+
+ def kv_to_param(self, kv_str, cast):
+ try:
+ key, value = kv_str.split("=")
+ value = cast(value)
+ return key, value
+ except ValueError:
+ logger.warning(f"Invalid key-value pair: {kv_str}")
+ raise
+
+ def kv_to_param_f(self, kv_str):
+ return self.kv_to_param(kv_str, soft_cast_float)
+
+ def kv_to_param_i(self, kv_str):
+ return self.kv_to_param(kv_str, soft_cast_int)
+
+ def load(self, f):
+ observations = list()
+ for lineno, line in enumerate(f):
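+            # expected line format: [::] NAME | p1=v1 p2=v2 ... | a1=w1 a2=w2 ...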
+ m = re.search(r"\[::\] *([^|]*?) *[|] *([^|]*?) *[|] *(.*)", line)
+ if m:
+ name_str = m.group(1)
+ param_str = m.group(2)
+ attr_str = m.group(3)
+ try:
+ param = dict(map(self.kv_to_param_i, param_str.split()))
+ attr = dict(map(self.kv_to_param_f, attr_str.split()))
+ observations.append(
+ {
+ "name": name_str,
+ "param": param,
+ "attribute": attr,
+ }
+ )
+ except ValueError:
+ logger.warning(
+ f"Error parsing {f}: invalid key-value pair in line {lineno+1}"
+ )
+ logger.warning(f"Offending entry:\n{line}")
+ raise
+
+ return observations
+
+ def dump(self, observations, f):
+ for observation in observations:
+ name = observation["name"]
+ param = observation["param"]
+ attr = observation["attribute"]
+
+ param_str = " ".join(
+ map(
+ lambda kv: f"{kv[0]}={kv[1]}",
+ sorted(param.items(), key=lambda kv: kv[0]),
+ )
+ )
+ attr_str = " ".join(
+ map(
+ lambda kv: f"{kv[0]}={kv[1]}",
+ sorted(attr.items(), key=lambda kv: kv[0]),
+ )
+ )
+
+ print(f"[::] {name} | {param_str} | {attr_str}", file=f)
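
The new module round-trips observations through load() and dump(). A short
usage sketch (file names are made up for illustration):

    from dfatool.loader import Logfile

    with open("benchmark.log") as f:
        observations = Logfile().load(f)

    # re-serializes each observation as "[::] name | params | attributes"
    with open("benchmark-copy.log", "w") as f:
        Logfile().dump(observations, f)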
diff --git a/lib/utils.py b/lib/utils.py
index 61cb6f1..0cc9a7b 100644
--- a/lib/utils.py
+++ b/lib/utils.py
@@ -33,99 +33,6 @@ class NpEncoder(json.JSONEncoder):
return super(NpEncoder, self).default(obj)
-class CSVfile:
- def __init__(self):
- pass
-
- def load(self, f):
- observations = list()
- for lineno, line in enumerate(f):
- if lineno == 0:
- param_names = line.split(",")[1:-1]
- attr_names = line.removesuffix("\n").split(",")[-1:]
- else:
- param_values = list(map(soft_cast_int, line.split(",")[1:-1]))
- attr_values = list(
- map(soft_cast_float, line.removesuffix("\n").split(",")[-1:])
- )
- observations.append(
- {
- "name": "CSVFile",
- "param": dict(zip(param_names, param_values)),
- "attribute": dict(zip(attr_names, attr_values)),
- }
- )
- return observations
-
-
-class Logfile:
- def __init__(self):
- pass
-
- def kv_to_param(self, kv_str, cast):
- try:
- key, value = kv_str.split("=")
- value = cast(value)
- return key, value
- except ValueError:
- logger.warning(f"Invalid key-value pair: {kv_str}")
- raise
-
- def kv_to_param_f(self, kv_str):
- return self.kv_to_param(kv_str, soft_cast_float)
-
- def kv_to_param_i(self, kv_str):
- return self.kv_to_param(kv_str, soft_cast_int)
-
- def load(self, f):
- observations = list()
- for lineno, line in enumerate(f):
- m = re.search(r"\[::\] *([^|]*?) *[|] *([^|]*?) *[|] *(.*)", line)
- if m:
- name_str = m.group(1)
- param_str = m.group(2)
- attr_str = m.group(3)
- try:
- param = dict(map(self.kv_to_param_i, param_str.split()))
- attr = dict(map(self.kv_to_param_f, attr_str.split()))
- observations.append(
- {
- "name": name_str,
- "param": param,
- "attribute": attr,
- }
- )
- except ValueError:
- logger.warning(
- f"Error parsing {f}: invalid key-value pair in line {lineno+1}"
- )
- logger.warning(f"Offending entry:\n{line}")
- raise
-
- return observations
-
- def dump(self, observations, f):
- for observation in observations:
- name = observation["name"]
- param = observation["param"]
- attr = observation["attribute"]
-
- param_str = " ".join(
- map(
- lambda kv: f"{kv[0]}={kv[1]}",
- sorted(param.items(), key=lambda kv: kv[0]),
- )
- )
- attr_str = " ".join(
- map(
- lambda kv: f"{kv[0]}={kv[1]}",
- sorted(attr.items(), key=lambda kv: kv[0]),
- )
- )
-
- print(f"[::] {name} | {param_str} | {attr_str}", file=f)
-
-
def running_mean(x: np.ndarray, N: int) -> np.ndarray:
"""
Compute `N` elements wide running average over `x`.