Diffstat (limited to 'bin/eval-kconfig.py')
-rwxr-xr-x  bin/eval-kconfig.py  85
1 file changed, 85 insertions, 0 deletions
diff --git a/bin/eval-kconfig.py b/bin/eval-kconfig.py
new file mode 100755
index 0000000..1f44b9e
--- /dev/null
+++ b/bin/eval-kconfig.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python3
+
+"""eval-kconfig - tbd
+"""
+
+import argparse
+import json
+import logging
+import sys
+
+import numpy as np
+
+from dfatool import validation
+from dfatool.loader import KConfigAttributes
+from dfatool.model import KConfigModel
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__
+ )
+ parser.add_argument(
+ "--log-level",
+ default=logging.INFO,
+ type=lambda level: getattr(logging, level.upper()),
+ help="Set log level",
+ )
+ parser.add_argument(
+ "--attribute", choices=["rom", "ram"], default="rom", help="Model attribute"
+ )
+ parser.add_argument("kconfig_path", type=str, help="Path to Kconfig file")
+ parser.add_argument(
+ "experiment_root", type=str, help="Experiment results directory"
+ )
+ parser.add_argument("model", type=str, help="JSON model", nargs="?")
+
+ args = parser.parse_args()
+
+    if isinstance(args.log_level, int):
+        logging.basicConfig(level=args.log_level)
+    else:
+        print("Invalid log level. Setting log level to INFO.", file=sys.stderr)
+        logging.basicConfig(level=logging.INFO)
+
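+    # load the Kconfig file and the per-configuration measurements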
+ data = KConfigAttributes(args.kconfig_path, args.experiment_root)
+
+ k = 10
+
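+    # split the configurations into k disjoint training/validation partitions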
+ partition_pairs = validation._xv_partitions_kfold(len(data.data), k)
+ measures = list()
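+    # train a model on each training set and assess it on the held-out fold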
+ for training_set, validation_set in partition_pairs:
+ model = KConfigModel.from_benchmark(data, args.attribute, indices=training_set)
+ model.build_tree()
+ measures.append(model.assess_benchmark(data, indices=validation_set))
+
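+    # aggregate across folds: mean error measures, summed unpredictable count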
+    aggregate = dict()
+    for measure in measures[0].keys():
+        aggregate[measure] = np.mean([m[measure] for m in measures])
+    aggregate["unpredictable_count"] = np.sum(
+        [m["unpredictable_count"] for m in measures]
+    )
+
+ print("10-fold Cross Validation:")
+ print(f"MAE: {aggregate['mae']:.0f} B")
+ print(f"SMAPE: {aggregate['smape']:.0f} %")
+ print(f"Unpredictable Configurations: {aggregate['unpredictable_count']}")
+
+ print(aggregate)
+
+ """
+ if args.model:
+ with open(args.model, "r") as f:
+ model = KConfigModel.from_json(json.load(f))
+ else:
+ model = KConfigModel.from_benchmark(data, args.attribute)
+ model.build_tree()
+ """
+
+
+if __name__ == "__main__":
+ main()