Diffstat (limited to 'lib')
 lib/functions.py                        |   2
 lib/harness.py                          | 208
 lib/lennart/DataInterface.py            |  24
 lib/lennart/DataProcessor.py            | 415
 lib/lennart/EnergyInterface.py          | 122
 lib/lennart/SigrokAPIInterface.py       | 150
 lib/lennart/SigrokCLIInterface.py       |  89
 lib/lennart/SigrokInterface.py          | 123
 lib/lennart/__init__.py                 |   0
 lib/loader.py                           | 277
 lib/parameters.py                       |   7
 lib/plotter.py (executable)             |   6
 lib/protocol_benchmarks.py (executable) |  30
 lib/runner.py                           |  47
 lib/sly/docparse.py                     |   4
 lib/utils.py                            |  13
 16 files changed, 1420 insertions(+), 97 deletions(-)
diff --git a/lib/functions.py b/lib/functions.py
index 94b1aaf..9d799c7 100644
--- a/lib/functions.py
+++ b/lib/functions.py
@@ -394,7 +394,7 @@ class analytic:
:param safe_functions_enabled: Include "safe" variants of functions with
limited argument range, e.g. a safe
inverse which returns 1 when dividing by 0.
-
+
Returns a dict of functions which are typical for energy/timing
behaviour of embedded hardware, e.g. linear, exponential or inverse
dependency on a configuration setting/runtime variable.
diff --git a/lib/harness.py b/lib/harness.py
index ae9c28c..d1440db 100644
--- a/lib/harness.py
+++ b/lib/harness.py
@@ -33,6 +33,7 @@ class TransitionHarness:
log_return_values=False,
repeat=0,
post_transition_delay_us=0,
+ energytrace_sync=None,
):
"""
Create a new TransitionHarness
@@ -53,6 +54,7 @@ class TransitionHarness:
self.log_return_values = log_return_values
self.repeat = repeat
self.post_transition_delay_us = post_transition_delay_us
+ self.energytrace_sync = energytrace_sync
self.reset()
def copy(self):
@@ -63,6 +65,7 @@ class TransitionHarness:
log_return_values=self.log_return_values,
repeat=self.repeat,
post_transition_delay_us=self.post_transition_delay_us,
+ energytrace_sync=self.energytrace_sync,
)
new_object.traces = self.traces.copy()
new_object.trace_id = self.trace_id
@@ -138,9 +141,7 @@ class TransitionHarness:
def start_trace(self):
"""Prepare a new trace/run in the internal `.traces` structure."""
- self.traces.append(
- {"id": self.trace_id, "trace": list(),}
- )
+ self.traces.append({"id": self.trace_id, "trace": list()})
self.trace_id += 1
def append_state(self, state_name, param):
@@ -151,7 +152,7 @@ class TransitionHarness:
:param param: parameter dict
"""
self.traces[-1]["trace"].append(
- {"name": state_name, "isa": "state", "parameter": param,}
+ {"name": state_name, "isa": "state", "parameter": param}
)
def append_transition(self, transition_name, param, args=[]):
@@ -175,21 +176,16 @@ class TransitionHarness:
"""Return C++ code used to start a new run/trace."""
return "ptalog.reset();\n"
- def _pass_transition_call(self, transition_id):
- if self.gpio_mode == "bar":
- barcode_bits = Code128("T{}".format(transition_id), charset="B").modules
- if len(barcode_bits) % 8 != 0:
- barcode_bits.extend([1] * (8 - (len(barcode_bits) % 8)))
- barcode_bytes = [
- 255 - int("".join(map(str, reversed(barcode_bits[i : i + 8]))), 2)
- for i in range(0, len(barcode_bits), 8)
- ]
- inline_array = "".join(map(lambda s: "\\x{:02x}".format(s), barcode_bytes))
- return 'ptalog.startTransition("{}", {});\n'.format(
- inline_array, len(barcode_bytes)
- )
- else:
- return "ptalog.startTransition();\n"
+ def _get_barcode(self, transition_id):
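+        # Encode "T<transition_id>" as a Code128 barcode, pad the bit pattern
+        # to a byte boundary, and pack each group of eight bits LSB-first into
+        # an inverted byte (255 - x), presumably to match the GPIO polarity.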
+ barcode_bits = Code128("T{}".format(transition_id), charset="B").modules
+ if len(barcode_bits) % 8 != 0:
+ barcode_bits.extend([1] * (8 - (len(barcode_bits) % 8)))
+ barcode_bytes = [
+ 255 - int("".join(map(str, reversed(barcode_bits[i : i + 8]))), 2)
+ for i in range(0, len(barcode_bits), 8)
+ ]
+ inline_array = "".join(map(lambda s: "\\x{:02x}".format(s), barcode_bytes))
+ return inline_array, len(barcode_bytes)
def pass_transition(
self, transition_id, transition_code, transition: object = None
@@ -201,7 +197,12 @@ class TransitionHarness:
`post_transition_delay_us` is set.
"""
ret = "ptalog.passTransition({:d});\n".format(transition_id)
- ret += self._pass_transition_call(transition_id)
+ if self.gpio_mode == "bar":
+ ret += """ptalog.startTransition("{}", {});\n""".format(
+ *self._get_barcode(transition_id)
+ )
+ else:
+ ret += "ptalog.startTransition();\n"
if (
self.log_return_values
and transition
@@ -263,17 +264,17 @@ class TransitionHarness:
transition_name = None
if self.pta:
transition_name = self.pta.transitions[transition_id].name
- print(
- "[HARNESS] benchmark id={:d} trace={:d}: transition #{:d} (ID {:d}, name {}) is out of bounds".format(
+ self.abort = True
+ raise RuntimeError(
+ "Benchmark id={:d} trace={:d}: transition #{:d} (ID {:d}, name {}) is out of bounds. Offending line: {}".format(
0,
self.trace_id,
self.current_transition_in_trace,
transition_id,
transition_name,
+ line,
)
)
- print(" Offending line: {}".format(line))
- return
if log_data_target["isa"] != "transition":
self.abort = True
raise RuntimeError(
@@ -286,8 +287,8 @@ class TransitionHarness:
if transition.name != log_data_target["name"]:
self.abort = True
raise RuntimeError(
- "Log mismatch: Expected transition {:s}, got transition {:s} -- may have been caused by preceding malformed UART output".format(
- log_data_target["name"], transition.name
+ "Log mismatch: Expected transition {:s}, got transition {:s}\nMay have been caused by preceding malformed UART output\nOffending line: {:s}".format(
+ log_data_target["name"], transition.name, line
)
)
if self.log_return_values and len(transition.return_value_handlers):
@@ -354,10 +355,14 @@ class OnboardTimerHarness(TransitionHarness):
the dict `offline_aggregates` with the member `duration`. It contains a list of durations (in us) of the corresponding state/transition for each
benchmark iteration.
I.e. `.traces[*]['trace'][*]['offline_aggregates']['duration'] = [..., ...]`
+    :param remove_nop_from_timings: If true, remove the nop duration from reported timings
+        (i.e., reported timings reflect the estimated transition/state duration with the timer call overhead removed).
+        If false, do not remove nop durations, so the timings more accurately reflect the elapsed wall-clock time during the benchmark.
"""
- def __init__(self, counter_limits, **kwargs):
+ def __init__(self, counter_limits, remove_nop_from_timings=True, **kwargs):
super().__init__(**kwargs)
+ self.remove_nop_from_timings = remove_nop_from_timings
self.trace_length = 0
(
self.one_cycle_in_us,
@@ -368,16 +373,27 @@ class OnboardTimerHarness(TransitionHarness):
def copy(self):
new_harness = __class__(
(self.one_cycle_in_us, self.one_overflow_in_us, self.counter_max_overflow),
+ remove_nop_from_timings=self.remove_nop_from_timings,
gpio_pin=self.gpio_pin,
gpio_mode=self.gpio_mode,
pta=self.pta,
log_return_values=self.log_return_values,
repeat=self.repeat,
+ energytrace_sync=self.energytrace_sync,
)
new_harness.traces = self.traces.copy()
new_harness.trace_id = self.trace_id
return new_harness
+ def reset(self):
+ super().reset()
+ self.trace_length = 0
+
+ def set_trace_start_offset(self, start_offset):
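+        # Records how much time passed before the first transition of the
+        # first trace; used by EnergyTraceWithTimer (see loader.py) to
+        # reconstruct synchronization timestamps.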
+        if "start_offset" not in self.traces[0]:
+ self.traces[0]["start_offset"] = list()
+ self.traces[0]["start_offset"].append(start_offset)
+
def undo(self, undo_from):
"""
Undo all benchmark runs starting with index `undo_from`.
@@ -396,26 +412,63 @@ class OnboardTimerHarness(TransitionHarness):
] = state_or_transition["offline_aggregates"]["duration"][
:undo_from
]
+ if "start_offset" in trace:
+ trace["start_offset"] = trace["start_offset"][:undo_from]
def global_code(self):
- ret = '#include "driver/counter.h"\n'
- ret += "#define PTALOG_TIMING\n"
+ ret = "#define PTALOG_TIMING\n"
ret += super().global_code()
+ if self.energytrace_sync == "led":
+ # TODO Make nicer
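+            # The LED sync block below produces a high-power interval
+            # (both LEDs on for ~1 s), bracketed by GPIO pulses, which
+            # DataProcessor.run() detects as a synchronization point in the
+            # EnergyTrace power data.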
+ ret += """\nvoid runLASync(){
+ // ======================= LED SYNC ================================
+ gpio.write(PTALOG_GPIO, 1);
+ gpio.led_on(0);
+ gpio.led_on(1);
+ gpio.write(PTALOG_GPIO, 0);
+
+ for (unsigned char i = 0; i < 4; i++) {
+ arch.sleep_ms(250);
+ }
+
+ gpio.write(PTALOG_GPIO, 1);
+ gpio.led_off(0);
+ gpio.led_off(1);
+ gpio.write(PTALOG_GPIO, 0);
+ // ======================= LED SYNC ================================
+}\n\n"""
return ret
def start_benchmark(self, benchmark_id=0):
- ret = "counter.start();\n"
- ret += "counter.stop();\n"
- ret += "ptalog.passNop(counter);\n"
+ ret = ""
+ if self.energytrace_sync == "led":
+ ret += "runLASync();\n"
+ ret += "ptalog.passNop();\n"
+ if self.energytrace_sync == "led":
+ ret += "arch.sleep_ms(250);\n"
ret += super().start_benchmark(benchmark_id)
return ret
+ def stop_benchmark(self):
+ ret = ""
+ if self.energytrace_sync == "led":
+ ret += "counter.stop();\n"
+ ret += "runLASync();\n"
+ ret += super().stop_benchmark()
+ if self.energytrace_sync == "led":
+ ret += "arch.sleep_ms(250);\n"
+ return ret
+
def pass_transition(
self, transition_id, transition_code, transition: object = None
):
ret = "ptalog.passTransition({:d});\n".format(transition_id)
- ret += self._pass_transition_call(transition_id)
- ret += "counter.start();\n"
+ if self.gpio_mode == "bar":
+ ret += """ptalog.startTransition("{}", {});\n""".format(
+ *self._get_barcode(transition_id)
+ )
+ else:
+ ret += "ptalog.startTransition();\n"
if (
self.log_return_values
and transition
@@ -424,14 +477,13 @@ class OnboardTimerHarness(TransitionHarness):
ret += "transition_return_value = {}\n".format(transition_code)
else:
ret += "{}\n".format(transition_code)
- ret += "counter.stop();\n"
if (
self.log_return_values
and transition
and len(transition.return_value_handlers)
):
ret += "ptalog.logReturn(transition_return_value);\n"
- ret += "ptalog.stopTransition(counter);\n"
+ ret += "ptalog.stopTransition();\n"
return ret
def _append_nondeterministic_parameter_value(
@@ -453,11 +505,27 @@ class OnboardTimerHarness(TransitionHarness):
res.group(1), res.group(2)
)
)
- if re.match(r"\[PTA\] benchmark stop", line):
+ match = re.match(r"\[PTA\] benchmark stop, cycles=(\S+)/(\S+)", line)
+ if match:
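+            # Example line (hypothetical values):
+            #   [PTA] benchmark stop, cycles=1234/5
+            # -> 1234 timer cycles plus 5 counter overflows for the final state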
self.repetitions += 1
self.synced = False
if self.repeat > 0 and self.repetitions == self.repeat:
self.done = True
+ prev_state_cycles = int(match.group(1))
+ prev_state_overflow = int(match.group(2))
+ prev_state_duration_us = (
+ prev_state_cycles * self.one_cycle_in_us
+ + prev_state_overflow * self.one_overflow_in_us
+ )
+ if self.remove_nop_from_timings:
+ prev_state_duration_us -= self.nop_cycles * self.one_cycle_in_us
+ final_state = self.traces[self.trace_id]["trace"][-1]
+ if "offline_aggregates" not in final_state:
+ final_state["offline_aggregates"] = {"duration": list()}
+ final_state["offline_aggregates"]["duration"].append(
+ prev_state_duration_us
+ )
+
print("[HARNESS] done")
return
# May be repeated, e.g. if the device is reset shortly after start by
@@ -473,14 +541,20 @@ class OnboardTimerHarness(TransitionHarness):
self.current_transition_in_trace = 0
if self.log_return_values:
res = re.match(
- r"\[PTA\] transition=(\S+) cycles=(\S+)/(\S+) return=(\S+)", line
+ r"\[PTA\] transition=(\S+) prevcycles=(\S+)/(\S+) cycles=(\S+)/(\S+) return=(\S+)",
+ line,
)
else:
- res = re.match(r"\[PTA\] transition=(\S+) cycles=(\S+)/(\S+)", line)
+ res = re.match(
+ r"\[PTA\] transition=(\S+) prevcycles=(\S+)/(\S+) cycles=(\S+)/(\S+)",
+ line,
+ )
if res:
transition_id = int(res.group(1))
- cycles = int(res.group(2))
- overflow = int(res.group(3))
+ prev_state_cycles = int(res.group(2))
+ prev_state_overflow = int(res.group(3))
+ cycles = int(res.group(4))
+ overflow = int(res.group(5))
if overflow >= self.counter_max_overflow:
self.abort = True
raise RuntimeError(
@@ -493,11 +567,28 @@ class OnboardTimerHarness(TransitionHarness):
transition_id,
)
)
+ if prev_state_overflow >= self.counter_max_overflow:
+ self.abort = True
+ raise RuntimeError(
+ "Counter overflow ({:d}/{:d}) in benchmark id={:d} trace={:d}: state before transition #{:d} (ID {:d})".format(
+ prev_state_cycles,
+ prev_state_overflow,
+ 0,
+ self.trace_id,
+ self.current_transition_in_trace,
+ transition_id,
+ )
+ )
duration_us = (
- cycles * self.one_cycle_in_us
- + overflow * self.one_overflow_in_us
- - self.nop_cycles * self.one_cycle_in_us
+ cycles * self.one_cycle_in_us + overflow * self.one_overflow_in_us
+ )
+ prev_state_duration_us = (
+ prev_state_cycles * self.one_cycle_in_us
+ + prev_state_overflow * self.one_overflow_in_us
)
+ if self.remove_nop_from_timings:
+ duration_us -= self.nop_cycles * self.one_cycle_in_us
+ prev_state_duration_us -= self.nop_cycles * self.one_cycle_in_us
if duration_us < 0:
duration_us = 0
# self.traces contains transitions and states, UART output only contains transitions -> use index * 2
@@ -505,6 +596,16 @@ class OnboardTimerHarness(TransitionHarness):
log_data_target = self.traces[self.trace_id]["trace"][
self.current_transition_in_trace * 2
]
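+            # The trace entry preceding this transition is the state before
+            # it; for the first transition of a trace, it is the last entry
+            # of the previous trace.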
+ if self.current_transition_in_trace > 0:
+ prev_state_data = self.traces[self.trace_id]["trace"][
+ self.current_transition_in_trace * 2 - 1
+ ]
+ elif self.current_transition_in_trace == 0 and self.trace_id > 0:
+ prev_state_data = self.traces[self.trace_id - 1]["trace"][-1]
+ else:
+ if self.current_transition_in_trace == 0 and self.trace_id == 0:
+ self.set_trace_start_offset(prev_state_duration_us)
+ prev_state_data = None
except IndexError:
transition_name = None
if self.pta:
@@ -531,12 +632,23 @@ class OnboardTimerHarness(TransitionHarness):
log_data_target["isa"],
)
)
+ if prev_state_data and prev_state_data["isa"] != "state":
+ self.abort = True
+ raise RuntimeError(
+ "Log mismatch in benchmark id={:d} trace={:d}: state before transition #{:d} (ID {:d}): Expected state, got {:s}".format(
+ 0,
+ self.trace_id,
+ self.current_transition_in_trace,
+ transition_id,
+ prev_state_data["isa"],
+ )
+ )
if self.pta:
transition = self.pta.transitions[transition_id]
if transition.name != log_data_target["name"]:
self.abort = True
raise RuntimeError(
- "Log mismatch in benchmark id={:d} trace={:d}: transition #{:d} (ID {:d}): Expected transition {:s}, got transition {:s} -- may have been caused by preceding maformed UART output".format(
+                        "Log mismatch in benchmark id={:d} trace={:d}: transition #{:d} (ID {:d}): Expected transition {:s}, got transition {:s}\nMay have been caused by preceding malformed UART output\nOffending line: {:s}".format(
0,
self.trace_id,
self.current_transition_in_trace,
@@ -601,4 +713,10 @@ class OnboardTimerHarness(TransitionHarness):
if "offline_aggregates" not in log_data_target:
log_data_target["offline_aggregates"] = {"duration": list()}
log_data_target["offline_aggregates"]["duration"].append(duration_us)
+ if prev_state_data is not None:
+ if "offline_aggregates" not in prev_state_data:
+ prev_state_data["offline_aggregates"] = {"duration": list()}
+ prev_state_data["offline_aggregates"]["duration"].append(
+ prev_state_duration_us
+ )
self.current_transition_in_trace += 1
diff --git a/lib/lennart/DataInterface.py b/lib/lennart/DataInterface.py
new file mode 100644
index 0000000..4495db2
--- /dev/null
+++ b/lib/lennart/DataInterface.py
@@ -0,0 +1,24 @@
+class DataInterface:
+    def runMeasure(self):
+        """
+        Implemented in subclasses.
+
+        Starts the measurement.
+        """
+        raise NotImplementedError("Method not implemented")
+
+    def getData(self):
+        """
+        Implemented in subclasses.
+
+        :returns: gathered data
+        """
+        raise NotImplementedError("Method not implemented")
+
+    def forceStopMeasure(self):
+        """
+        Implemented in subclasses.
+
+        Force-stops the measurement.
+        """
+        raise NotImplementedError("Method not implemented")
diff --git a/lib/lennart/DataProcessor.py b/lib/lennart/DataProcessor.py
new file mode 100644
index 0000000..b46315a
--- /dev/null
+++ b/lib/lennart/DataProcessor.py
@@ -0,0 +1,415 @@
+import numpy as np
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class DataProcessor:
+ def __init__(self, sync_data, energy_data):
+ """
+ Creates DataProcessor object.
+
+ :param sync_data: input timestamps (SigrokResult)
+ :param energy_data: List of EnergyTrace datapoints
+ """
+ self.reduced_timestamps = []
+ self.modified_timestamps = []
+ self.plot_data_x = []
+ self.plot_data_y = []
+ self.sync_data = sync_data
+ self.energy_data = energy_data
+ self.start_offset = 0
+
+ self.power_sync_watt = 0.011
+ self.power_sync_len = 0.7
+ self.power_sync_max_outliers = 2
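+        # Sync pulses are detected as intervals with power > power_sync_watt
+        # lasting at least power_sync_len seconds, tolerating up to
+        # power_sync_max_outliers below-threshold samples.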
+
+ def run(self):
+ """
+        Main function: remove unwanted data, find synchronization points, and apply offset and drift correction.
+        :return: None
+        """
+        # Remove dirty data from a previously running program (happens if the logic
+        # analyzer measurement starts earlier than the HW reset from energytrace)
+ use_data_after_index = 0
+ for x in range(1, len(self.sync_data.timestamps)):
+ if self.sync_data.timestamps[x] - self.sync_data.timestamps[x - 1] > 1.3:
+ use_data_after_index = x
+ break
+
+ time_stamp_data = self.sync_data.timestamps[use_data_after_index:]
+
+ last_data = [0, 0, 0, 0]
+
+        # clean timestamp data in case spurious timestamps were appended at the end
+ # time_stamp_data = self.removeTooFarDatasets(time_stamp_data)
+
+ self.reduced_timestamps = time_stamp_data
+
+ # NEW
+ datasync_timestamps = []
+ sync_start = 0
+ outliers = 0
+ pre_outliers_ts = None
+ # TODO only consider the first few and the last few seconds for sync points
+ for i, energytrace_dataset in enumerate(self.energy_data):
+ usedtime = energytrace_dataset[0] - last_data[0] # in microseconds
+ timestamp = energytrace_dataset[0]
+ usedenergy = energytrace_dataset[3] - last_data[3]
+ power = usedenergy / usedtime * 1e-3 # in watts
+ if power > 0:
+ if power > self.power_sync_watt:
+ if sync_start is None:
+ sync_start = timestamp
+ outliers = 0
+ else:
+                    # Sync point ended, or this sample is an outlier
+ if outliers == 0:
+ pre_outliers_ts = timestamp
+ outliers += 1
+ if outliers > self.power_sync_max_outliers:
+ if sync_start is not None:
+ if (
+ pre_outliers_ts - sync_start
+ ) / 1_000_000 > self.power_sync_len:
+ datasync_timestamps.append(
+ (
+ sync_start / 1_000_000,
+ pre_outliers_ts / 1_000_000,
+ )
+ )
+ sync_start = None
+
+ last_data = energytrace_dataset
+
+ self.plot_data_x.append(timestamp / 1_000_000)
+ self.plot_data_y.append(power)
+
+ if power > self.power_sync_watt:
+ if (self.energy_data[-1][0] - sync_start) / 1_000_000 > self.power_sync_len:
+ datasync_timestamps.append(
+ (sync_start / 1_000_000, pre_outliers_ts / 1_000_000)
+ )
+
+ # print(datasync_timestamps)
+
+ # time_stamp_data contains an entry for each level change on the Logic Analyzer input.
+ # So, time_stamp_data[0] is the first low-to-high transition, time_stamp_data[2] the second, etc.
+ # -> time_stamp_data[2] is the low-to-high transition indicating the end of the first sync pulse
+ # -> time_stamp_data[-8] is the low-to-high transition indicating the start of the first after-measurement sync pulse
+
+ start_timestamp = datasync_timestamps[0][1]
+ start_offset = start_timestamp - time_stamp_data[2]
+
+ end_timestamp = datasync_timestamps[-2][0]
+ end_offset = end_timestamp - (time_stamp_data[-8] + start_offset)
+ logger.debug(
+ f"Measurement area: ET timestamp range [{start_timestamp}, {end_timestamp}]"
+ )
+ logger.debug(
+ f"Measurement area: LA timestamp range [{time_stamp_data[2]}, {time_stamp_data[-8]}]"
+ )
+ logger.debug(f"Start/End offsets: {start_offset} / {end_offset}")
+
+ if end_offset > 10:
+ logger.warning(
+ f"synchronization end_offset == {end_offset}. It should be no more than a few seconds."
+ )
+
+ with_offset = np.array(time_stamp_data) + start_offset
+ logger.debug(
+ f"Measurement area with offset: LA timestamp range [{with_offset[2]}, {with_offset[-8]}]"
+ )
+
+ with_drift = self.addDrift(
+ with_offset, end_timestamp, end_offset, start_timestamp
+ )
+ logger.debug(
+ f"Measurement area with drift: LA timestamp range [{with_drift[2]}, {with_drift[-8]}]"
+ )
+
+ self.modified_timestamps = with_drift
+
+ def removeTooFarDatasets(self, input_timestamps):
+ """
+        Remove datasets that are too far away at the end.
+
+ :param input_timestamps: List of timestamps (float list)
+ :return: List of modified timestamps (float list)
+ """
+ modified_timestamps = []
+ for i, x in enumerate(input_timestamps):
+ # print(x - input_timestamps[i - 1], x - input_timestamps[i - 1] < 2.5)
+ if x - input_timestamps[i - 1] < 1.6:
+ modified_timestamps.append(x)
+ else:
+ break
+ return modified_timestamps
+
+ def addDrift(self, input_timestamps, end_timestamp, end_offset, start_timestamp):
+ """
+ Add drift to datapoints
+
+ :param input_timestamps: List of timestamps (float list)
+ :param end_timestamp: Timestamp of first EnergyTrace datapoint at the second-to-last sync point
+        :param end_offset: the time between end_timestamp and the timestamp of the synchronization signal
+ :param start_timestamp: Timestamp of last EnergyTrace datapoint at the first sync point
+ :return: List of modified timestamps (float list)
+ """
+ endFactor = 1 + (end_offset / ((end_timestamp - end_offset) - start_timestamp))
+ # print(
+ # f"({end_timestamp} + {end_offset} - {start_timestamp}) / ({end_timestamp} - {start_timestamp}) == {endFactor}"
+ # )
+        # Manually adding endFactor += 0.0001 noticeably improves the fit
+ # print(f"endFactor = {endFactor}")
+ # endFactor assumes that the end of the first sync pulse is at timestamp 0.
+ # Then, timestamps with drift := timestamps * endFactor.
+ # As this is not the case (the first sync pulse ends at start_timestamp > 0), we shift the data by first
+ # removing start_timestamp, then multiplying with endFactor, and then re-adding the start_timestamp.
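+        # Worked example (hypothetical numbers): start_timestamp = 10 s,
+        # end_timestamp = 110 s, end_offset = 0.1 s
+        # -> endFactor = 1 + 0.1 / ((110 - 0.1) - 10) ≈ 1.001;
+        # a timestamp at 60 s is mapped to (60 - 10) * 1.001 + 10 ≈ 60.05 s.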
+ modified_timestamps_with_drift = (
+ input_timestamps - start_timestamp
+ ) * endFactor + start_timestamp
+ return modified_timestamps_with_drift
+
+ def export_sync(self):
+ # [1st trans start, 1st trans stop, 2nd trans start, 2nd trans stop, ...]
+ sync_timestamps = list()
+
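+        # Skip the first four edges (initial sync pulses) and the last eight
+        # (after-measurement sync pulses); see the comments in run() for the
+        # timestamp layout.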
+ for i in range(4, len(self.modified_timestamps) - 8, 2):
+ sync_timestamps.append(
+ (self.modified_timestamps[i], self.modified_timestamps[i + 1])
+ )
+
+ # EnergyTrace timestamps
+ timestamps = self.plot_data_x
+
+ # EnergyTrace power values
+ power = self.plot_data_y
+
+ return {"sync": sync_timestamps, "timestamps": timestamps, "power": power}
+
+ def plot(self, annotateData=None):
+ """
+        Plots the power usage and the logic analyzer timestamps.
+
+        :param annotateData: list of strings with labels, only needed for annotated plots
+ :return: None
+ """
+
+ def calculateRectangleCurve(timestamps, min_value=0, max_value=0.160):
+ data = []
+ for ts in timestamps:
+ data.append(ts)
+ data.append(ts)
+
+ a = np.empty((len(data),))
+ a[0::4] = min_value
+ a[1::4] = max_value
+ a[2::4] = max_value
+ a[3::4] = min_value
+ return data, a # plotting by columns
+
+ import matplotlib.pyplot as plt
+
+ fig, ax = plt.subplots()
+
+ if annotateData:
+ annot = ax.annotate(
+ "",
+ xy=(0, 0),
+ xytext=(20, 20),
+ textcoords="offset points",
+ bbox=dict(boxstyle="round", fc="w"),
+ arrowprops=dict(arrowstyle="->"),
+ )
+ annot.set_visible(True)
+
+ rectCurve_with_drift = calculateRectangleCurve(
+ self.modified_timestamps, max_value=max(self.plot_data_y)
+ )
+
+        plt.plot(self.plot_data_x, self.plot_data_y, label="Power")
+
+ plt.plot(
+ rectCurve_with_drift[0],
+ rectCurve_with_drift[1],
+ "-g",
+            label="Sync signals with drift factor",
+ )
+
+        plt.xlabel("EnergyTrace time [s]")
+        plt.ylabel("Power [W]")
+ leg = plt.legend()
+
+ def getDataText(x):
+ # print(x)
+ dl = len(annotateData)
+ for i, xt in enumerate(self.modified_timestamps):
+ if xt > x and i >= 4 and i - 5 < dl:
+ return f"SoT: {annotateData[i - 5]}"
+
+ def update_annot(x, y, name):
+ annot.xy = (x, y)
+ text = name
+
+ annot.set_text(text)
+ annot.get_bbox_patch().set_alpha(0.4)
+
+ def hover(event):
+ if event.xdata and event.ydata:
+ annot.set_visible(False)
+ update_annot(event.xdata, event.ydata, getDataText(event.xdata))
+ annot.set_visible(True)
+ fig.canvas.draw_idle()
+
+ if annotateData:
+ fig.canvas.mpl_connect("motion_notify_event", hover)
+
+ plt.show()
+
+ def getPowerBetween(self, start, end, state_sleep): # 0.001469
+ """
+        Calculates the power usage in an interval.
+        NOT SIDE-EFFECT FREE, DO NOT USE EVERYWHERE
+
+        :param start: start timestamp of the interval
+        :param end: end timestamp of the interval
+        :param state_sleep: length in seconds of one state, needed to cut out the UART sending cycle
+        :return: power measurements in W
+ """
+ first_index = 0
+ all_power = list()
+ all_ts = list()
+ for ind in range(self.start_offset, len(self.plot_data_x)):
+ first_index = ind
+ if self.plot_data_x[ind] > start:
+ break
+
+ nextIndAfterIndex = None
+ for ind in range(first_index, len(self.plot_data_x)):
+ nextIndAfterIndex = ind
+ if (
+ self.plot_data_x[ind] > end
+ or self.plot_data_x[ind] > start + state_sleep
+ ):
+ self.start_offset = ind - 1
+ break
+ all_power.append(self.plot_data_y[ind])
+ all_ts.append(self.plot_data_x[ind])
+
+ # TODO Idea remove datapoints that are too far away
+ def removeSD_Mean_Values(arr):
+            elements = np.array(arr)
+
+            mean = np.mean(elements, axis=0)
+            sd = np.std(elements, axis=0)
+
+ return [x for x in arr if (mean - 1 * sd < x < mean + 1.5 * sd)]
+
+ if len(all_power) > 10:
+ # all_power = removeSD_Mean_Values(all_power)
+ pass
+ # TODO algorithm relocate datapoint
+
+ pre_fix_len = len(all_power)
+ if len(all_power) == 0:
+ # print("PROBLEM")
+ all_power.append(self.plot_data_y[nextIndAfterIndex])
+ all_ts.append(0)
+ elif len(all_power) == 1:
+ # print("OKAY")
+ pass
+ return np.array(all_power), np.array(all_ts)
+
+ def getStatesdfatool(self, state_sleep, with_traces=False, algorithm=False):
+ """
+ Calculates the length and energy usage of the states
+
+        :param state_sleep: length in seconds of one state, needed to cut out the UART sending cycle
+        :param algorithm: reserved for an accuracy-improving algorithm; not implemented yet
+        :returns: list of states and transitions, starting with a transition and ending with a state.
+            Each element is a dict containing:
+            * `isa`: 'state' or 'transition'
+            * `W_mean`: mean power consumption
+            * `W_std`: standard deviation of the power consumption
+            * `s`: duration
+ """
+ if algorithm:
+ raise NotImplementedError
+ end_transition_ts = None
+ timestamps_sync_start = 0
+ energy_trace_new = list()
+
+ for ts_index in range(
+ 0 + timestamps_sync_start, int(len(self.modified_timestamps) / 2)
+ ):
+ start_transition_ts = self.modified_timestamps[ts_index * 2]
+ start_transition_ts_timing = self.reduced_timestamps[ts_index * 2]
+
+ if end_transition_ts is not None:
+ power, timestamps = self.getPowerBetween(
+ end_transition_ts, start_transition_ts, state_sleep
+ )
+
+ # print("STATE", end_transition_ts * 10 ** 6, start_transition_ts * 10 ** 6, (start_transition_ts - end_transition_ts) * 10 ** 6, power)
+ if (
+ (start_transition_ts - end_transition_ts) * 10 ** 6 > 900_000
+ and np.mean(power) > self.power_sync_watt * 0.9
+ and ts_index > 10
+ ):
+ # remove last transition and stop (upcoming data only sync)
+ del energy_trace_new[-1]
+ break
+
+ state = {
+ "isa": "state",
+ "W_mean": np.mean(power),
+ "W_std": np.std(power),
+ "s": (
+ start_transition_ts_timing - end_transition_ts_timing
+ ), # * 10 ** 6,
+ }
+ if with_traces:
+ state["plot"] = (timestamps - timestamps[0], power)
+ energy_trace_new.append(state)
+
+ energy_trace_new[-2]["W_mean_delta_next"] = (
+ energy_trace_new[-2]["W_mean"] - energy_trace_new[-1]["W_mean"]
+ )
+
+ # get energy end_transition_ts
+ end_transition_ts = self.modified_timestamps[ts_index * 2 + 1]
+ power, timestamps = self.getPowerBetween(
+ start_transition_ts, end_transition_ts, state_sleep
+ )
+
+ # print("TRANS", start_transition_ts * 10 ** 6, end_transition_ts * 10 ** 6, (end_transition_ts - start_transition_ts) * 10 ** 6, power)
+ end_transition_ts_timing = self.reduced_timestamps[ts_index * 2 + 1]
+
+ transition = {
+ "isa": "transition",
+ "W_mean": np.mean(power),
+ "W_std": np.std(power),
+ "s": (
+ end_transition_ts_timing - start_transition_ts_timing
+ ), # * 10 ** 6,
+ "count_dp": len(power),
+ }
+ if with_traces:
+ transition["plot"] = (timestamps - timestamps[0], power)
+
+ if (end_transition_ts - start_transition_ts) * 10 ** 6 > 2_000_000:
+ # TODO Last data set corrupted? HOT FIX!!!!!!!!!!!! REMOVE LATER
+ # for x in range(4):
+ # del energy_trace_new[-1]
+ # break
+ pass
+
+ energy_trace_new.append(transition)
+ # print(start_transition_ts, "-", end_transition_ts, "-", end_transition_ts - start_transition_ts)
+ return energy_trace_new
diff --git a/lib/lennart/EnergyInterface.py b/lib/lennart/EnergyInterface.py
new file mode 100644
index 0000000..19aae84
--- /dev/null
+++ b/lib/lennart/EnergyInterface.py
@@ -0,0 +1,122 @@
+import re
+import subprocess
+
+from dfatool.lennart.DataInterface import DataInterface
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class EnergyInterface(DataInterface):
+ def __init__(
+ self,
+ duration_seconds=10,
+ console_output=False,
+ temp_file="temp/energytrace.log",
+ fake=False,
+ ):
+ """
+        This class is not used when embedded into dfatool.
+
+        :param duration_seconds: seconds the EnergyTrace should be running
+        :param console_output: whether EnergyTrace output should be printed to the user
+        :param temp_file: file path for the temporary file
+        :param fake: whether an already existing file should be used
+ """
+ self.energytrace = None
+ self.duration_seconds = duration_seconds
+ self.console_output = console_output
+ self.temp_file = temp_file
+ self.fake = fake
+
+ def runMeasure(self):
+ """
+        Starts the measurement and waits for it to finish.
+ """
+ if self.fake:
+ return
+ self.runMeasureAsynchronously()
+ self.waitForAsynchronousMeasure()
+
+ def runMeasureAsynchronously(self):
+ """
+        Starts the measurement without waiting for it to finish.
+ """
+ if self.fake:
+ return
+ self.energytrace = subprocess.Popen(
+ "msp430-etv --save %s %s %s"
+ % (
+ self.temp_file,
+ self.duration_seconds,
+ "" if self.console_output else "> /dev/null",
+ ),
+ shell=True,
+ )
+ print(
+ "msp430-etv --save %s %s %s"
+ % (
+ self.temp_file,
+ self.duration_seconds,
+ "" if self.console_output else "> /dev/null",
+ )
+ )
+
+ def waitForAsynchronousMeasure(self):
+ """
+        Wait until the command invocation has finished.
+ """
+ if self.fake:
+ return
+ self.energytrace.wait()
+
+ def getData(self):
+ """
+        Cleans the string data and creates an int list.
+ :return: list of data, in format [[int,int,int,int], [int,int,int,int], ... ]
+ """
+ energytrace_log = open(self.temp_file)
+ lines = energytrace_log.readlines()[21:]
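+        # The first 21 lines are skipped (presumably the msp430-etv header)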
+ data = []
+ for line in lines:
+ if "MSP430_DisableEnergyTrace" in line:
+ break
+ else:
+ data.append([int(i) for i in line.split()])
+ return data
+
+ @classmethod
+ def getDataFromString(cls, string, delimiter="\\n"):
+ """
+ Parsing the data from string
+
+ :param string: input string which will be parsed
+        :param delimiter: for a normal file this is \n
+ :return: list of data, in format [[int,int,int,int], [int,int,int,int], ... ]
+ """
+ lines = string.split(delimiter)[21:]
+ data = []
+ for line in lines:
+ if "MSP430_DisableEnergyTrace" in line:
+ break
+ else:
+ data.append([int(i) for i in line.split()])
+ return data
+
+ def setFile(self, path):
+ """
+        Changes the temporary file.
+
+ :param path: file path of new temp file
+ :return: None
+ """
+ self.temp_file = path
+
+ def forceStopMeasure(self):
+ """
+        Force-stops the measurement by sending SIGINT.
+ :return: None
+ """
+ self.energytrace.send_signal(subprocess.signal.SIGINT)
+ stdout, stderr = self.energytrace.communicate(timeout=15)
diff --git a/lib/lennart/SigrokAPIInterface.py b/lib/lennart/SigrokAPIInterface.py
new file mode 100644
index 0000000..44da678
--- /dev/null
+++ b/lib/lennart/SigrokAPIInterface.py
@@ -0,0 +1,150 @@
+import time
+
+from dfatool.lennart.SigrokInterface import SigrokInterface
+
+import sigrok.core as sr
+from sigrok.core.classes import *
+
+from util.ByteHelper import ByteHelper
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class SigrokAPIInterface(SigrokInterface):
+ def datafeed_changes(self, device, packet):
+ """
+        Callback which analyzes level changes in the incoming data
+ :param device: device object
+ :param packet: data (String with binary data)
+ """
+ data = ByteHelper.rawbytes(self.output.receive(packet))
+ if data:
+ # only using every second byte,
+ # because only every second contains the useful information.
+ for x in data[1::2]:
+ self.analyzeData(x)
+
+ def datafeed_in_all(self, device, packet):
+ """
+ Callback type which writes all data into the array
+ :param device: device object
+ :param packet: data (String with binary data)
+ """
+ data = ByteHelper.rawbytes(self.output.receive(packet))
+ if data:
+ # only using every second byte,
+ # because only every second contains the useful information.
+ self.all_data += data[1::2]
+
+ def datafeed_file(self, device, packet):
+ """
+ Callback type which writes all data into a file
+ :param device: device object
+ :param packet: data (String with binary data)
+ """
+ data = ByteHelper.rawbytes(self.output.receive(packet))
+ if data:
+ # only using every second byte,
+ # because only every second contains the useful information.
+ for x in data[1::2]:
+ self.file.write(str(x) + "\n")
+
+ def __init__(
+ self,
+ driver="fx2lafw",
+ sample_rate=100_000,
+ debug_output=False,
+ used_datafeed=datafeed_changes,
+ fake=False,
+ ):
+ """
+
+ :param driver: Driver that should be used
+ :param sample_rate: The sample rate of the Logic analyzer
+        :param debug_output: True if output should be displayed to the user
+        :param used_datafeed: one of the datafeeds above, used later as a callback
+ :param fake:
+ """
+ super(SigrokAPIInterface, self).__init__(sample_rate)
+ if fake:
+ raise NotImplementedError("Not implemented!")
+ self.used_datafeed = used_datafeed
+
+ self.debug_output = debug_output
+ self.session = None
+
+ def forceStopMeasure(self):
+ """
+ Force stopping the measurement
+ :return: None
+ """
+ self.session.stop()
+
+ def runMeasure(self):
+ """
+ Start the Measurement and set all settings
+ """
+ context = sr.Context_create()
+
+ devs = context.drivers[self.driver].scan()
+ # print(devs)
+ if len(devs) == 0:
+ raise RuntimeError("No device with that driver found!")
+ sigrokDevice = devs[0]
+ if len(devs) > 1:
+ raise Warning(
+ "Attention! Multiple devices with that driver found! Using ",
+ sigrokDevice.connection_id(),
+ )
+
+ sigrokDevice.open()
+ sigrokDevice.config_set(ConfigKey.SAMPLERATE, self.sample_rate)
+
+ enabled_channels = ["D1"]
+ for channel in sigrokDevice.channels:
+ channel.enabled = channel.name in enabled_channels
+
+ self.session = context.create_session()
+ self.session.add_device(sigrokDevice)
+ self.session.start()
+
+ self.output = context.output_formats["binary"].create_output(sigrokDevice)
+
+ print(context.output_formats)
+ self.all_data = b""
+
+ def datafeed(device, packet):
+ self.used_datafeed(self, device, packet)
+
+ self.session.add_datafeed_callback(datafeed)
+ time_running = time.time()
+ self.session.run()
+ total_time = time.time() - time_running
+ print(
+ "Used time: ",
+ total_time * 1_000_000,
+ "µs",
+ )
+ self.session.stop()
+
+ if self.debug_output:
+ # if self.used_datafeed == self.datafeed_in_change:
+ if True:
+ changes = [x / self.sample_rate for x in self.changes]
+ print(changes)
+ is_on = self.start == 0xFF
+ print("0", " - ", changes[0], " # Pin ", "HIGH" if is_on else "LOW")
+ for x in range(len(changes) - 1):
+ is_on = not is_on
+ print(
+ changes[x],
+ " - ",
+ changes[x + 1],
+ " / ",
+ changes[x + 1] - changes[x],
+ " # Pin ",
+ "HIGH" if is_on else "LOW",
+ )
+ elif self.used_datafeed == self.datafeed_in_all:
+ print(self.all_data)
diff --git a/lib/lennart/SigrokCLIInterface.py b/lib/lennart/SigrokCLIInterface.py
new file mode 100644
index 0000000..b28a8a9
--- /dev/null
+++ b/lib/lennart/SigrokCLIInterface.py
@@ -0,0 +1,89 @@
+import subprocess
+import time
+
+from dfatool.lennart.SigrokInterface import SigrokInterface
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class SigrokCLIInterface(SigrokInterface):
+ def __init__(
+ self,
+ bin_temp_file="temp/out.bin",
+ sample_rate=100_000,
+ fake=False,
+ ):
+ """
+ creates SigrokCLIInterface object. Uses the CLI Interface (Command: sigrok-cli)
+
+ :param bin_temp_file: temporary file for binary output
+ :param sample_rate: The sample rate of the Logic analyzer
+        :param fake: whether existing data should be used
+ """
+ super(SigrokCLIInterface, self).__init__(sample_rate)
+ self.fake = fake
+ self.bin_temp_file = bin_temp_file
+ self.sigrok_cli_thread = None
+
+ def forceStopMeasure(self):
+ """
+        Force-stops the measurement; sometimes pkill is needed to kill sigrok-cli for good.
+ :return: None
+ """
+ self.sigrok_cli_thread.terminate()
+
+ try:
+ self.sigrok_cli_thread.wait(timeout=10)
+ except subprocess.TimeoutExpired:
+ logger.warning("sigrok-cli has not stopped. Killing it.")
+ self.sigrok_cli_thread.kill()
+
+ self.sigrok_cli_thread.communicate()
+ self.runOpenAnalyze()
+
+ def runMeasure(self):
+ """
+        Starts the measurement and waits for it to finish.
+ """
+ if not self.fake:
+ self.runMeasureAsynchronous()
+ self.waitForAsynchronousMeasure()
+
+ def runMeasureAsynchronous(self):
+ """
+        Starts the measurement without waiting for it to finish.
+ """
+ shellcommand = [
+ "sigrok-cli",
+ "--output-file",
+ self.bin_temp_file,
+ "--output-format",
+ "binary",
+ "--continuous",
+ "-d",
+ self.driver,
+ "--config",
+ f"samplerate={self.sample_rate} Hz",
+ ]
+ self.sigrok_cli_thread = subprocess.Popen(shellcommand)
+
+ def waitForAsynchronousMeasure(self):
+ """
+        Wait until the command invocation has finished.
+ """
+ if not self.fake:
+ self.sigrok_cli_thread.wait()
+ self.runOpenAnalyze()
+
+ def runOpenAnalyze(self):
+ """
+ Opens the generated binary file and parses it byte by byte
+
+ """
+        in_file = open(self.bin_temp_file, "rb")
+        data = in_file.read()
+ in_file.close()
+
+ for x in data:
+ self.analyzeData(x)
diff --git a/lib/lennart/SigrokInterface.py b/lib/lennart/SigrokInterface.py
new file mode 100644
index 0000000..32e8fe2
--- /dev/null
+++ b/lib/lennart/SigrokInterface.py
@@ -0,0 +1,123 @@
+import json
+import numpy as np
+
+from dfatool.lennart.DataInterface import DataInterface
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+# Adding additional parsing functionality
+class SigrokResult:
+ def __init__(self, timestamps, onbeforefirstchange):
+ """
+ Creates SigrokResult object, struct for timestamps and onBeforeFirstChange.
+
+ :param timestamps: list of changing timestamps
+        :param onbeforefirstchange: True if the signal was already on before the first change (it should normally be off; tracked for data correctness)
+ """
+ self.timestamps = timestamps
+ self.onBeforeFirstChange = onbeforefirstchange
+
+ def __str__(self):
+ """
+ :return: string representation of object
+ """
+ return "<Sigrok Result onBeforeFirstChange=%s timestamps=%s>" % (
+ self.onBeforeFirstChange,
+ self.timestamps,
+ )
+
+ def getDict(self):
+ """
+ :return: dict representation of object
+ """
+ data = {
+ "onBeforeFirstChange": self.onBeforeFirstChange,
+ "timestamps": self.timestamps,
+ }
+ return data
+
+ @classmethod
+ def fromFile(cls, path):
+ """
+ Generates SigrokResult from json_file
+
+ :param path: file path
+ :return: SigrokResult object
+ """
+ with open(path) as json_file:
+ data = json.load(json_file)
+ return SigrokResult(data["timestamps"], data["onBeforeFirstChange"])
+
+ @classmethod
+ def fromString(cls, string):
+ """
+ Generates SigrokResult from string
+
+ :param string: string
+ :return: SigrokResult object
+ """
+ data = json.loads(string)
+ return SigrokResult(data["timestamps"], data["onBeforeFirstChange"])
+
+
+class SigrokInterface(DataInterface):
+ def __init__(self, sample_rate, driver="fx2lafw", filename="temp/sigrok.log"):
+ """
+
+        :param sample_rate: sample rate of the logic analyzer
+        :param driver: for many Saleae logic analyzers, the "fx2lafw" driver should work
+        :param filename: temporary file name
+ """
+ # options
+ self.sample_rate = sample_rate
+ self.file = open(filename, "w+")
+ self.driver = driver
+
+ # internal data
+ self.changes = []
+ self.start = None
+ self.last_val = None
+ self.index = 0
+
+    def runMeasure(self):
+        """
+        Implemented in subclasses.
+        :return: None
+        """
+        raise NotImplementedError("Method not implemented")
+
+    def forceStopMeasure(self):
+        """
+        Implemented in subclasses.
+        :return: None
+        """
+        raise NotImplementedError("Method not implemented")
+
+ def getData(self):
+        """
+        :return: SigrokResult with timestamps of all detected changes
+        """
+ # return sigrok_energy_api_result(self.changes, True if self.start == 0xff else False)
+ return SigrokResult(
+ [x / self.sample_rate for x in self.changes],
+ True if self.start == 0xFF else False,
+ )
+
+ def analyzeData(self, byte):
+ """
+        Analyze one byte; if it differs from the previous byte, the current index is appended to `changes`.
+
+ :param byte: one byte to analyze
+ """
+ if self.start is None:
+ self.start = byte
+ self.last_val = byte
+ if byte != self.last_val:
+ self.changes.append(self.index)
+ self.last_val = byte
+ self.index += 1
diff --git a/lib/lennart/__init__.py b/lib/lennart/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/lennart/__init__.py
diff --git a/lib/loader.py b/lib/loader.py
index 0c3bac7..b9a2930 100644
--- a/lib/loader.py
+++ b/lib/loader.py
@@ -11,7 +11,8 @@ import struct
import tarfile
import hashlib
from multiprocessing import Pool
-from .utils import running_mean, soft_cast_int
+
+from .utils import NpEncoder, running_mean, soft_cast_int
logger = logging.getLogger(__name__)
@@ -107,7 +108,14 @@ def _preprocess_mimosa(measurement):
def _preprocess_etlog(measurement):
setup = measurement["setup"]
- etlog = EnergyTraceLog(
+
+ energytrace_class = EnergyTraceWithBarcode
+ if measurement["sync_mode"] == "la":
+ energytrace_class = EnergyTraceWithLogicAnalyzer
+ elif measurement["sync_mode"] == "timer":
+ energytrace_class = EnergyTraceWithTimer
+
+ etlog = energytrace_class(
float(setup["voltage"]),
int(setup["state_duration"]),
measurement["transition_names"],
@@ -406,7 +414,7 @@ class RawData:
processed_data["error"] = "; ".join(processed_data["datasource_errors"])
return False
- # Note that the low-level parser (EnergyTraceLog) already checks
+ # Note that the low-level parser (EnergyTraceWithBarcode) already checks
# whether the transition count is correct
return True
@@ -570,7 +578,7 @@ class RawData:
# TODO es gibt next_transitions ohne 'plan'
return True
- def _merge_online_and_offline(self, measurement):
+ def _merge_online_and_mimosa(self, measurement):
# Edits self.traces_by_fileno[measurement['fileno']][*]['trace'][*]['offline']
# and self.traces_by_fileno[measurement['fileno']][*]['trace'][*]['offline_aggregates'] in place
# (appends data from measurement['energy_trace'])
@@ -692,7 +700,18 @@ class RawData:
online_datapoints.append((run_idx, trace_part_idx))
for offline_idx, online_ref in enumerate(online_datapoints):
online_run_idx, online_trace_part_idx = online_ref
- offline_trace_part = measurement["energy_trace"][offline_idx]
+ try:
+ offline_trace_part = measurement["energy_trace"][offline_idx]
+ except IndexError:
+ logger.error(
+ f"While handling file #{measurement['fileno']} {measurement['info']}:"
+ )
+                logger.error(f" offline energy_trace data is shorter than online data")
+ logger.error(f" len(online_datapoints) == {len(online_datapoints)}")
+ logger.error(
+ f" len(energy_trace) == {len(measurement['energy_trace'])}"
+ )
+ raise
online_trace_part = traces[online_run_idx]["trace"][online_trace_part_idx]
if "offline" not in online_trace_part:
@@ -901,6 +920,12 @@ class RawData:
}
)
for repeat_id, mim_file in enumerate(ptalog["files"][j]):
+ # MIMOSA benchmarks always use a single .mim file per benchmark run.
+ # However, depending on the dfatool version used to run the
+ # benchmark, ptalog["files"][j] is either "foo.mim" (before Oct 2020)
+ # or ["foo.mim"] (from Oct 2020 onwards).
+ if type(mim_file) is list:
+ mim_file = mim_file[0]
member = tf.getmember(mim_file)
offline_data.append(
{
@@ -920,6 +945,10 @@ class RawData:
new_filenames = list()
with tarfile.open(filename) as tf:
ptalog = self.ptalog
+ if "sync" in ptalog["opt"]["energytrace"]:
+ sync_mode = ptalog["opt"]["energytrace"]["sync"]
+ else:
+ sync_mode = "bar"
# Benchmark code may be too large to be executed in a single
# run, so benchmarks (a benchmark is basically a list of DFA runs)
@@ -974,16 +1003,19 @@ class RawData:
"state_duration": ptalog["opt"]["sleep"],
}
)
- for repeat_id, etlog_file in enumerate(ptalog["files"][j]):
- member = tf.getmember(etlog_file)
+ for repeat_id, etlog_files in enumerate(ptalog["files"][j]):
+ members = list(map(tf.getmember, etlog_files))
offline_data.append(
{
- "content": tf.extractfile(member).read(),
+ "content": list(
+ map(lambda f: tf.extractfile(f).read(), members)
+ ),
+ "sync_mode": sync_mode,
"fileno": j,
- "info": member,
+ "info": members[0],
"setup": self.setup_by_fileno[j],
"repeat_id": repeat_id,
- "expected_trace": ptalog["traces"][j],
+ "expected_trace": traces,
"with_traces": self.with_traces,
"transition_names": list(
map(
@@ -1029,7 +1061,7 @@ class RawData:
if version == 0 or version == 1:
if self._measurement_is_valid_01(measurement):
- self._merge_online_and_offline(measurement)
+ self._merge_online_and_mimosa(measurement)
num_valid += 1
else:
logger.warning(
@@ -1105,7 +1137,7 @@ def _add_trace_data_to_aggregate(aggregate, key, element):
def pta_trace_to_aggregate(traces, ignore_trace_indexes=[]):
- u"""
+ """
Convert preprocessed DFA traces from peripherals/drivers to by_name aggregate for PTAModel.
arguments:
@@ -1176,7 +1208,7 @@ def pta_trace_to_aggregate(traces, ignore_trace_indexes=[]):
return by_name, parameter_names, arg_count
-class EnergyTraceLog:
+class EnergyTraceWithBarcode:
"""
EnergyTrace log loader for DFA traces.
@@ -1199,7 +1231,7 @@ class EnergyTraceLog:
with_traces=False,
):
"""
- Create a new EnergyTraceLog object.
+ Create a new EnergyTraceWithBarcode object.
:param voltage: supply voltage [V], usually 3.3 V
:param state_duration: state duration [ms]
@@ -1241,7 +1273,7 @@ class EnergyTraceLog:
)
return list()
- lines = log_data.decode("ascii").split("\n")
+ lines = log_data[0].decode("ascii").split("\n")
data_count = sum(map(lambda x: len(x) > 0 and x[0] != "#", lines))
data_lines = filter(lambda x: len(x) > 0 and x[0] != "#", lines)
@@ -1311,7 +1343,7 @@ class EnergyTraceLog:
return self._ts_to_index(timestamp, mid_index, right_index)
def analyze_states(self, traces, offline_index: int):
- u"""
+ """
Split log data into states and transitions and return duration, energy, and mean power for each element.
:param traces: expected traces, needed to synchronize with the measurement.
@@ -1420,7 +1452,13 @@ class EnergyTraceLog:
}
if self.with_traces:
- transition["uW"] = transition_power_W * 1e6
+ timestamps = (
+ self.interval_start_timestamp[
+ transition_start_index:transition_done_index
+ ]
+ - self.interval_start_timestamp[transition_start_index]
+ )
+ transition["plot"] = (timestamps, transition_power_W)
energy_trace.append(transition)
@@ -1440,7 +1478,11 @@ class EnergyTraceLog:
}
if self.with_traces:
- state["uW"] = state_power_W * 1e6
+ timestamps = (
+ self.interval_start_timestamp[state_start_index:state_done_index]
+ - self.interval_start_timestamp[state_start_index]
+ )
+ state["plot"] = (timestamps, state_power_W)
energy_trace.append(state)
@@ -1614,6 +1656,184 @@ class EnergyTraceLog:
return None, None, None, None
+class EnergyTraceWithLogicAnalyzer:
+ def __init__(
+ self,
+ voltage: float,
+ state_duration: int,
+ transition_names: list,
+ with_traces=False,
+ ):
+
+ """
+ Create a new EnergyTraceWithLogicAnalyzer object.
+
+ :param voltage: supply voltage [V], usually 3.3 V
+ :param state_duration: state duration [ms]
+ :param transition_names: list of transition names in PTA transition order.
+ Needed to map barcode synchronization numbers to transitions.
+ """
+ self.voltage = voltage
+ self.state_duration = state_duration * 1e-3
+ self.transition_names = transition_names
+ self.with_traces = with_traces
+ self.errors = list()
+
+ def load_data(self, log_data):
+ from dfatool.lennart.SigrokInterface import SigrokResult
+ from dfatool.lennart.EnergyInterface import EnergyInterface
+
+        # Load data
+ self.sync_data = SigrokResult.fromString(log_data[0])
+ self.energy_data = EnergyInterface.getDataFromString(str(log_data[1]))
+
+ def analyze_states(self, traces, offline_index: int):
+ """
+ Split log data into states and transitions and return duration, energy, and mean power for each element.
+
+ :param traces: expected traces, needed to synchronize with the measurement.
+ traces is a list of runs, traces[*]['trace'] is a single run
+ (i.e. a list of states and transitions, starting with a transition
+ and ending with a state).
+ :param offline_index: This function uses traces[*]['trace'][*]['online_aggregates']['duration'][offline_index] to find sync codes
+
+        :returns: list of states and transitions, starting with a transition and ending with a state.
+            Each element is a dict containing:
+            * `isa`: 'state' or 'transition'
+            * `W_mean`: mean power consumption
+            * `W_std`: standard deviation of the power consumption
+            * `s`: duration
+            if isa == 'transition', it also contains:
+            * `W_mean_delta_prev`: difference between W_mean and the W_mean of the previous state
+            * `W_mean_delta_next`: difference between W_mean and the W_mean of the following state
+ """
+
+ names = []
+ for trace_number, trace in enumerate(traces):
+ for state_or_transition in trace["trace"]:
+ names.append(state_or_transition["name"])
+ # print(names[:15])
+ from dfatool.lennart.DataProcessor import DataProcessor
+
+ dp = DataProcessor(sync_data=self.sync_data, energy_data=self.energy_data)
+ dp.run()
+ energy_trace_new = dp.getStatesdfatool(
+ state_sleep=self.state_duration, with_traces=self.with_traces
+ )
+        # Plot traces if requested via the DFATOOL_PLOT_LASYNC environment variable
+        if offline_index == 0 and os.getenv("DFATOOL_PLOT_LASYNC") is not None:
+            dp.plot()  # <- plot traces with sync annotations
+            # dp.plot(names)  # <- plot annotated traces (with state/transition names)
+ if os.getenv("DFATOOL_EXPORT_LASYNC") is not None:
+ filename = os.getenv("DFATOOL_EXPORT_LASYNC") + f"_{offline_index}.json"
+ with open(filename, "w") as f:
+ json.dump(dp.export_sync(), f, cls=NpEncoder)
+            logger.info(f"Exported data and LA sync timestamps to {filename}")
+ energy_trace_new = energy_trace_new[4:]
+
+ energy_trace = list()
+ expected_transitions = list()
+
+ # Print for debug purposes
+ # for number, name in enumerate(names):
+ # if "P15_8MW" in name:
+ # print(name, energy_trace_new[number]["W_mean"])
+
+ # add next/prev state W_mean_delta
+ for number, item in enumerate(energy_trace_new):
+            if item["isa"] == "transition" and 0 < number < len(energy_trace_new) - 1:
+                item["W_mean_delta_prev"] = (
+                    item["W_mean"] - energy_trace_new[number - 1]["W_mean"]
+                )
+                item["W_mean_delta_next"] = (
+                    item["W_mean"] - energy_trace_new[number + 1]["W_mean"]
+                )
+
+ # st = ""
+ # for i, x in enumerate(energy_trace_new[-10:]):
+ # #st += "(%s|%s|%s)" % (energy_trace[i-10]["name"],x['W_mean'],x['s'])
+ # st += "(%s|%s|%s)\n" % (energy_trace[i-10]["s"], x['s'], x['W_mean'])
+
+ # print(st, "\n_______________________")
+ # print(len(self.sync_data.timestamps), " - ", len(energy_trace_new), " - ", len(energy_trace), " - ", ",".join([str(x["s"]) for x in energy_trace_new[-6:]]), " - ", ",".join([str(x["s"]) for x in energy_trace[-6:]]))
+ # if len(energy_trace_new) < len(energy_trace):
+ # return None
+
+ return energy_trace_new
+
+
+class EnergyTraceWithTimer(EnergyTraceWithLogicAnalyzer):
+ def __init__(
+ self,
+ voltage: float,
+ state_duration: int,
+ transition_names: list,
+ with_traces=False,
+ ):
+
+ """
+        Create a new EnergyTraceWithTimer object.
+
+ :param voltage: supply voltage [V], usually 3.3 V
+ :param state_duration: state duration [ms]
+ :param transition_names: list of transition names in PTA transition order.
+ Needed to map barcode synchronization numbers to transitions.
+ """
+
+ self.voltage = voltage
+ self.state_duration = state_duration * 1e-3
+ self.transition_names = transition_names
+ self.with_traces = with_traces
+ self.errors = list()
+
+ super().__init__(voltage, state_duration, transition_names, with_traces)
+
+ def load_data(self, log_data):
+ from dfatool.lennart.SigrokInterface import SigrokResult
+ from dfatool.lennart.EnergyInterface import EnergyInterface
+
+        # Load data
+ self.sync_data = None
+ self.energy_data = EnergyInterface.getDataFromString(str(log_data[0]))
+
+
+ def analyze_states(self, traces, offline_index: int):
+
+ # Start "Synchronization pulse"
+ timestamps = [0, 10, 1e6, 1e6 + 10]
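+        # These are the four edges a logic analyzer would have recorded for
+        # the initial synchronization: two 10 µs pulses, one second apart.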
+
+        # The first trace doesn't start immediately; append the offset saved by OnboardTimerHarness
+ timestamps.append(timestamps[-1] + traces[0]["start_offset"][offline_index])
+ for tr in traces:
+ for t in tr["trace"]:
+ # print(t["online_aggregates"]["duration"][offline_index])
+ try:
+ timestamps.append(
+ timestamps[-1]
+ + t["online_aggregates"]["duration"][offline_index]
+ )
+ except IndexError:
+ self.errors.append(
+ f"""offline_index {offline_index} missing in trace {tr["id"]}"""
+ )
+ return list()
+
+ # print(timestamps)
+
+ # Stop "Synchronization pulses". The first one has already started.
+ timestamps.extend(np.array([10, 1e6, 1e6 + 10]) + timestamps[-1])
+ timestamps.extend(np.array([0, 10, 1e6, 1e6 + 10]) + 250e3 + timestamps[-1])
+
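+        # All timestamps so far are in µs; SigrokResult expects seconds.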
+ timestamps = list(np.array(timestamps) * 1e-6)
+
+ from dfatool.lennart.SigrokInterface import SigrokResult
+
+ self.sync_data = SigrokResult(timestamps, False)
+ return super().analyze_states(traces, offline_index)
+
+
class MIMOSA:
"""
MIMOSA log loader for DFA traces with auto-calibration.
@@ -1645,7 +1865,7 @@ class MIMOSA:
self.errors = list()
def charge_to_current_nocal(self, charge):
- u"""
+ """
Convert charge per 10µs (in pJ) to mean currents (in µA) without accounting for calibration.
:param charge: numpy array of charges (pJ per 10µs) as returned by `load_data` or `load_file`
@@ -1657,7 +1877,7 @@ class MIMOSA:
return charge * ua_step
def _load_tf(self, tf):
- u"""
+ """
Load MIMOSA log data from an open `tarfile` instance.
:param tf: `tarfile` instance
@@ -1678,7 +1898,7 @@ class MIMOSA:
return charges, triggers
def load_data(self, raw_data):
- u"""
+ """
Load MIMOSA log data from a MIMOSA log file passed as raw byte string
:param raw_data: MIMOSA log file, passed as raw byte string
@@ -1690,7 +1910,7 @@ class MIMOSA:
return self._load_tf(tf)
def load_file(self, filename):
- u"""
+ """
Load MIMOSA log data from a MIMOSA log file
:param filename: MIMOSA log file
@@ -1701,7 +1921,7 @@ class MIMOSA:
return self._load_tf(tf)
def currents_nocal(self, charges):
- u"""
+ """
Convert charges (pJ per 10µs) to mean currents without accounting for calibration.
:param charges: numpy array of charges (pJ per 10µs)
@@ -1758,7 +1978,7 @@ class MIMOSA:
return trigidx
def calibration_edges(self, currents):
- u"""
+ """
Return start/stop indexes of calibration measurements.
:param currents: uncalibrated currents as reported by MIMOSA. For best results,
@@ -1795,7 +2015,7 @@ class MIMOSA:
)
def calibration_function(self, charges, cal_edges):
- u"""
+ """
Calculate calibration function from previously determined calibration edges.
:param charges: raw charges from MIMOSA
@@ -1885,7 +2105,7 @@ class MIMOSA:
return calfunc, caldata
def analyze_states(self, charges, trigidx, ua_func):
- u"""
+ """
Split log data into states and transitions and return duration, energy, and mean power for each element.
:param charges: raw charges (each element describes the charge in pJ transferred during 10 µs)
@@ -1934,7 +2154,10 @@ class MIMOSA:
}
if self.with_traces:
- data["uW"] = range_ua * self.voltage
+ data["plot"] = (
+ np.arange(len(range_ua)) * 1e-5,
+ range_ua * self.voltage * 1e-6,
+ )
if isa == "transition":
# subtract average power of previous state
diff --git a/lib/parameters.py b/lib/parameters.py
index 5c6b978..fa966a3 100644
--- a/lib/parameters.py
+++ b/lib/parameters.py
@@ -434,7 +434,12 @@ class ParamStats:
"""
def __init__(
- self, by_name, by_param, parameter_names, arg_count, use_corrcoef=False,
+ self,
+ by_name,
+ by_param,
+ parameter_names,
+ arg_count,
+ use_corrcoef=False,
):
"""
Compute standard deviation and correlation coefficient on parameterized data partitions.
diff --git a/lib/plotter.py b/lib/plotter.py
index 16c0145..929ceb9 100755
--- a/lib/plotter.py
+++ b/lib/plotter.py
@@ -136,7 +136,11 @@ def plot_xy(X, Y, xlabel=None, ylabel=None, title=None, output=None, family=Fals
if family:
cm = plt.get_cmap("brg", len(Y))
for i, YY in enumerate(Y):
- plt.plot(np.arange(len(YY)), YY, "-", markersize=2, color=cm(i))
+ if X:
+ XX = X[i]
+ else:
+ XX = np.arange(len(YY))
+ plt.plot(XX, YY, "-", markersize=2, color=cm(i))
else:
plt.plot(X, Y, "bo", markersize=2)
if output:
diff --git a/lib/protocol_benchmarks.py b/lib/protocol_benchmarks.py
index d41979f..7f3e2f2 100755
--- a/lib/protocol_benchmarks.py
+++ b/lib/protocol_benchmarks.py
@@ -328,8 +328,10 @@ class ArduinoJSON(DummyProtocol):
child = enc_node + "l"
while child in self.children:
child += "_"
- self.enc_buf += "ArduinoJson::JsonArray& {} = {}.createNestedArray();\n".format(
- child, enc_node
+ self.enc_buf += (
+ "ArduinoJson::JsonArray& {} = {}.createNestedArray();\n".format(
+ child, enc_node
+ )
)
self.children.add(child)
self.from_json(value, child)
@@ -338,8 +340,10 @@ class ArduinoJSON(DummyProtocol):
child = enc_node + "o"
while child in self.children:
child += "_"
- self.enc_buf += "ArduinoJson::JsonObject& {} = {}.createNestedObject();\n".format(
- child, enc_node
+ self.enc_buf += (
+ "ArduinoJson::JsonObject& {} = {}.createNestedObject();\n".format(
+ child, enc_node
+ )
)
self.children.add(child)
self.from_json(value, child)
@@ -616,11 +620,15 @@ class CapnProtoC(DummyProtocol):
[len(value)],
)
for i, elem in enumerate(value):
- self.enc_buf += "capn_set{:d}({}.{}, {:d}, capn_from_f{:d}({:f}));\n".format(
- self.float_bits, self.name, key, i, self.float_bits, elem
+ self.enc_buf += (
+ "capn_set{:d}({}.{}, {:d}, capn_from_f{:d}({:f}));\n".format(
+ self.float_bits, self.name, key, i, self.float_bits, elem
+ )
)
- self.dec_buf += "kout << capn_to_f{:d}(capn_get{:d}({}.{}, {:d}));\n".format(
- self.float_bits, self.float_bits, self.name, key, i
+ self.dec_buf += (
+ "kout << capn_to_f{:d}(capn_get{:d}({}.{}, {:d}));\n".format(
+ self.float_bits, self.float_bits, self.name, key, i
+ )
)
self.assign_and_kout(
self.float_type,
@@ -1196,8 +1204,10 @@ class NanoPB(DummyProtocol):
self.cc_encoders += (
"if (!pb_encode_tag_for_field(stream, field)) return false;\n"
)
- self.cc_encoders += 'return pb_encode_string(stream, (uint8_t*)"{}", {:d});\n'.format(
- value, len(value)
+ self.cc_encoders += (
+ 'return pb_encode_string(stream, (uint8_t*)"{}", {:d});\n'.format(
+ value, len(value)
+ )
)
self.cc_encoders += "}\n"
self.enc_buf += "msg.{}{}.funcs.encode = encode_{};\n".format(
diff --git a/lib/runner.py b/lib/runner.py
index 71ca799..72d222d 100644
--- a/lib/runner.py
+++ b/lib/runner.py
@@ -9,7 +9,7 @@ Functions:
get_monitor -- return Monitor class suitable for the selected multipass arch
get_counter_limits -- return arch-specific multipass counter limits (max value, max overflow)
"""
-
+import json
import os
import re
import serial
@@ -17,6 +17,7 @@ import serial.threaded
import subprocess
import sys
import time
+from dfatool.lennart.SigrokCLIInterface import SigrokCLIInterface
class SerialReader(serial.threaded.Protocol):
@@ -156,6 +157,7 @@ class EnergyTraceMonitor(SerialMonitor):
self._start_energytrace()
def _start_energytrace(self):
+ print("[%s] Starting Measurement" % type(self).__name__)
cmd = ["msp430-etv", "--save", self._output, "0"]
self._logger = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
@@ -166,17 +168,19 @@ class EnergyTraceMonitor(SerialMonitor):
super().close()
self._logger.send_signal(subprocess.signal.SIGINT)
stdout, stderr = self._logger.communicate(timeout=15)
+ print("[%s] Stopped Measurement" % type(self).__name__)
     # Additional files that should be stored alongside the benchmark log and plan
     # (here: the log files generated by msp430-etv)
def get_files(self) -> list:
+ print("[%s] Getting files" % type(self).__name__)
return [self._output]
- #
+    # Benchmark configuration. Here: the (constant) supply voltage.
+    # MSP430FR5969: 3.6 V (currently not supported)
+    # MSP430FR5994: 3.3 V (default)
def get_config(self) -> dict:
- return {
- "voltage": self._voltage,
- }
+ return {"voltage": self._voltage}
class EnergyTraceLogicAnalyzerMonitor(EnergyTraceMonitor):
@@ -185,6 +189,33 @@ class EnergyTraceLogicAnalyzerMonitor(EnergyTraceMonitor):
def __init__(self, port: str, baud: int, callback=None, voltage=3.3):
super().__init__(port=port, baud=baud, callback=callback, voltage=voltage)
+ options = {"fake": False, "sample_rate": 1_000_000}
+ self.log_file = "logic_output_log_%s.json" % (time.strftime("%Y%m%d-%H%M%S"))
+
+ # Initialization of Interfaces
+ self.sig = SigrokCLIInterface(
+ sample_rate=options["sample_rate"],
+ fake=options["fake"],
+ )
+
+ # Start Measurements
+ self.sig.runMeasureAsynchronous()
+
+ def close(self):
+ super().close()
+ # Read measured data
+ # self.sig.waitForAsynchronousMeasure()
+ self.sig.forceStopMeasure()
+ time.sleep(0.2)
+ sync_data = self.sig.getData()
+ with open(self.log_file, "w") as fp:
+ json.dump(sync_data.getDict(), fp)
+
+ def get_files(self) -> list:
+ files = [self.log_file]
+ files.extend(super().get_files())
+ return files
+
class MIMOSAMonitor(SerialMonitor):
"""MIMOSAMonitor captures serial output and MIMOSA energy data for a specific amount of time."""
@@ -261,11 +292,7 @@ class MIMOSAMonitor(SerialMonitor):
return [self.mim_file]
def get_config(self) -> dict:
- return {
- "offset": self._offset,
- "shunt": self._shunt,
- "voltage": self._voltage,
- }
+ return {"offset": self._offset, "shunt": self._shunt, "voltage": self._voltage}
class ShellMonitor:
diff --git a/lib/sly/docparse.py b/lib/sly/docparse.py
index 0f35c97..6a60eaf 100644
--- a/lib/sly/docparse.py
+++ b/lib/sly/docparse.py
@@ -9,7 +9,7 @@ class DocParseMeta(type):
'''
Metaclass that processes the class docstring through a parser and
incorporates the result into the resulting class definition. This
- allows Python classes to be defined with alternative syntax.
+ allows Python classes to be defined with alternative syntax.
To use this class, you first need to define a lexer and parser:
from sly import Lexer, Parser
@@ -39,7 +39,7 @@ class DocParseMeta(type):
...
"""
- It is expected that the MyParser() class would return a dictionary.
+ It is expected that the MyParser() class would return a dictionary.
This dictionary is used to create the final class Spam in this example.
'''
diff --git a/lib/utils.py b/lib/utils.py
index e537525..31fedcf 100644
--- a/lib/utils.py
+++ b/lib/utils.py
@@ -1,3 +1,4 @@
+import json
import numpy as np
import re
import logging
@@ -6,6 +7,18 @@ arg_support_enabled = True
logger = logging.getLogger(__name__)
+class NpEncoder(json.JSONEncoder):
+ def default(self, obj):
+ if isinstance(obj, np.integer):
+ return int(obj)
+ elif isinstance(obj, np.floating):
+ return float(obj)
+ elif isinstance(obj, np.ndarray):
+ return obj.tolist()
+ else:
+ return super(NpEncoder, self).default(obj)
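+
+    # Usage sketch (hypothetical): json.dumps({"x": np.arange(3)}, cls=NpEncoder)
+    # serializes numpy scalars and arrays that the stock JSONEncoder rejects.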
+
+
def running_mean(x: np.ndarray, N: int) -> np.ndarray:
"""
Compute `N` elements wide running average over `x`.