Diffstat (limited to 'bin/workload.py')
-rwxr-xr-x  bin/workload.py  161
1 file changed, 81 insertions(+), 80 deletions(-)
diff --git a/bin/workload.py b/bin/workload.py
index 19a7378..72b66bb 100755
--- a/bin/workload.py
+++ b/bin/workload.py
@@ -1,92 +1,93 @@
#!/usr/bin/env python3
+import argparse
+import json
+import logging
import sys
-from dfatool.automata import PTA
-from dfatool.utils import human_readable
-from dfatool.lex import TimedSequence, TimedWord, Workload
+import dfatool.cli
+import dfatool.utils
+from dfatool.behaviour import EventSequenceModel
+from dfatool.model import AnalyticModel
-args = sys.argv[1:]
-
-loops = dict()
-ptafiles = list()
-loop_names = set()
+def main():
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__
+ )
+ parser.add_argument("--aggregate", choices=["sum"], default="sum")
+ parser.add_argument("--aggregate-unit", choices=["s", "B/s"], default="s")
+ parser.add_argument(
+ "--aggregate-init",
+ default=0,
+ type=float,
+ )
+ parser.add_argument(
+ "--log-level",
+ metavar="LEVEL",
+ choices=["debug", "info", "warning", "error"],
+ default="warning",
+ help="Set log level",
+ )
+ parser.add_argument("--normalize-output", type=str)
+ parser.add_argument(
+ "--info",
+ action="store_true",
+ help="Show benchmark information (number of measurements, parameter values, ...)",
+ )
+ parser.add_argument(
+ "--models",
+ nargs="+",
+ type=str,
+ help="Path to model file (.json or .json.xz)",
+ )
+ parser.add_argument(
+ "--use-lut",
+ action="store_true",
+ help="Use LUT rather than performance model for prediction",
+ )
+ parser.add_argument("event", nargs="+", type=str)
+ args = parser.parse_args()
-def simulate_word(timedword):
- prev_state = "UNINITIALIZED"
- prev_param = None
- ret = dict()
- for trace_part in timedword:
- print("Trace Part {}".format(trace_part))
- if type(trace_part) is TimedWord:
- result = pta.simulate(
- trace_part, orig_state=prev_state, orig_param=prev_param
- )
- elif type(trace_part) is Workload:
- result = pta.simulate(
- trace_part.word, orig_state=prev_state, orig_param=prev_param
- )
- if prev_state != result.end_state:
- print(
- "Warning: loop starts in state {}, but terminates in {}".format(
- prev_state, result.end_state.name
- )
- )
- if prev_param != result.parameters:
- print(
- "Warning: loop starts with parameters {}, but terminates with {}".format(
- prev_param, result.parameters
- )
- )
- ret[trace_part.name] = result
- loop_names.add(trace_part.name)
+ if args.log_level:
+ numeric_level = getattr(logging, args.log_level.upper(), None)
+ if not isinstance(numeric_level, int):
+ print(f"Invalid log level: {args.log_level}", file=sys.stderr)
+ sys.exit(1)
+ logging.basicConfig(
+ level=numeric_level,
+ format="{asctime} {levelname}:{name}:{message}",
+ style="{",
+ )
- print(" Duration: " + human_readable(result.duration, "s"))
- if result.duration_mae:
- print(
- u" ± {} / {:.0f}%".format(
- human_readable(result.duration_mae, "s"), result.duration_mape
- )
- )
- print(" Energy: " + human_readable(result.energy, "J"))
- if result.energy_mae:
- print(
- u" ± {} / {:.0f}%".format(
- human_readable(result.energy_mae, "J"), result.energy_mape
- )
- )
- print(" Mean Power: " + human_readable(result.mean_power, "W"))
- print("")
+ models = list()
+ for model_file in args.models:
+ with open(model_file, "r") as f:
+ models.append(AnalyticModel.from_json(json.load(f)))
- prev_state = result.end_state
- prev_param = result.parameters
+ if args.info:
+ for i in range(len(models)):
+ print(f"""{args.models[i]}: {" ".join(models[i].parameters)}""")
+ _, param_info = models[i].get_fitted()
+ for name in models[i].names:
+ for attr in models[i].attributes(name):
+ print(f" {name}.{attr} {param_info(name, attr)}")
- return ret
+ workload = EventSequenceModel(models)
+ aggregate = workload.eval_strs(
+ args.event,
+ aggregate=args.aggregate,
+ aggregate_init=args.aggregate_init,
+ use_lut=args.use_lut,
+ )
+ if args.normalize_output:
+ sf = dfatool.cli.parse_shift_function(
+ "--normalize-output", args.normalize_output
+ )
+ print(dfatool.utils.human_readable(sf(aggregate), args.aggregate_unit))
+ else:
+ print(dfatool.utils.human_readable(aggregate, args.aggregate_unit))
-for i in range(len(args) // 2):
- ptafile, raw_word = args[i * 2], args[i * 2 + 1]
- ptafiles.append(ptafile)
- pta = PTA.from_file(ptafile)
- timedword = TimedSequence(raw_word)
- print("Input: {}\n".format(timedword))
- loops[ptafile] = simulate_word(timedword)
-for loop_name in sorted(loop_names):
- result_set = list()
- total_power = 0
- for ptafile in sorted(ptafiles):
- if loop_name in loops[ptafile]:
- result_set.append(loops[ptafile][loop_name])
- total_power += loops[ptafile][loop_name].mean_power
- print(
- "{}: total mean power is {}".format(loop_name, human_readable(total_power, "W"))
- )
- for i, result in enumerate(result_set):
- print(
- " {:.0f}% {} (period: {})".format(
- result.mean_power * 100 / total_power,
- ptafiles[i],
- human_readable(result.duration, "s"),
- )
- )
+if __name__ == "__main__":
+ main()
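
The rewritten script is essentially a thin CLI wrapper around EventSequenceModel. The snippet below is a minimal sketch of calling the same API directly, mirroring what main() above does; it is not part of the commit. The model file name and the event strings are invented placeholders, and the exact event syntax accepted by eval_strs is an assumption rather than something this diff documents.

# Minimal sketch, not part of the commit: drives the new EventSequenceModel
# API directly, mirroring main() above. "nrf24l01.json" and the event strings
# are hypothetical; the event syntax accepted by eval_strs is an assumption.
import json

import dfatool.utils
from dfatool.behaviour import EventSequenceModel
from dfatool.model import AnalyticModel

# Load a previously generated performance model (path is a placeholder).
with open("nrf24l01.json") as f:
    model = AnalyticModel.from_json(json.load(f))

# Evaluate an event sequence and sum up the predicted values,
# just like the "event" positional arguments handled in main() above.
workload = EventSequenceModel([model])
total = workload.eval_strs(
    ["TX(txbytes=32)", "sleep(1000)"],  # assumed event syntax
    aggregate="sum",
    aggregate_init=0,
    use_lut=False,
)
print(dfatool.utils.human_readable(total, "s"))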