-rwxr-xr-x bin/eval-kconfig.py | 92
-rw-r--r-- lib/loader.py       |  4
-rw-r--r-- lib/model.py        | 49
3 files changed, 142 insertions(+), 3 deletions(-)
diff --git a/bin/eval-kconfig.py b/bin/eval-kconfig.py
new file mode 100755
index 0000000..1f44b9e
--- /dev/null
+++ b/bin/eval-kconfig.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python3
+
+"""eval-kconfig - tbd
+"""
+
+import argparse
+import json
+import kconfiglib
+import logging
+import numpy as np
+import os
+import sys
+
+from dfatool import kconfig, validation
+from dfatool.loader import KConfigAttributes
+from dfatool.model import KConfigModel
+
+from versuchung.experiment import Experiment
+from versuchung.types import String, Bool, Integer
+from versuchung.files import File, Directory
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__
+    )
+    parser.add_argument(
+        "--log-level",
+        default=logging.INFO,
+        type=lambda level: getattr(logging, level.upper()),
+        help="Set log level",
+    )
+    parser.add_argument(
+        "--attribute", choices=["rom", "ram"], default="rom", help="Model attribute"
+    )
+    parser.add_argument("kconfig_path", type=str, help="Path to Kconfig file")
+    parser.add_argument(
+        "experiment_root", type=str, help="Experiment results directory"
+    )
+    parser.add_argument("model", type=str, help="JSON model", nargs="?")
+
+    args = parser.parse_args()
+
+    if isinstance(args.log_level, int):
+        logging.basicConfig(level=args.log_level)
+    else:
+        print("Invalid log level. Setting log level to INFO.", file=sys.stderr)
+        logging.basicConfig(level=logging.INFO)
+
+    data = KConfigAttributes(args.kconfig_path, args.experiment_root)
+
+    k = 10
+
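+    # Partition the benchmark observations into k disjoint folds; each fold
+    # serves as validation set for a model trained on the remaining k-1 folds.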
+    partition_pairs = validation._xv_partitions_kfold(len(data.data), k)
+    measures = list()
+    for training_set, validation_set in partition_pairs:
+        model = KConfigModel.from_benchmark(data, args.attribute, indices=training_set)
+        model.build_tree()
+        measures.append(model.assess_benchmark(data, indices=validation_set))
+
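+    # Average each error measure over the k folds; unpredictable configurations
+    # are summed rather than averaged.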
+    aggregate = dict()
+    for measure in measures[0].keys():
+        aggregate[measure] = np.mean([m[measure] for m in measures])
+    aggregate["unpredictable_count"] = np.sum(
+        [m["unpredictable_count"] for m in measures]
+    )
+
+    print(f"{k}-fold Cross Validation:")
+    print(f"MAE: {aggregate['mae']:.0f} B")
+    print(f"SMAPE: {aggregate['smape']:.0f} %")
+    print(f"Unpredictable Configurations: {aggregate['unpredictable_count']}")
+
+    print(aggregate)
+
+    """
+    if args.model:
+        with open(args.model, "r") as f:
+            model = KConfigModel.from_json(json.load(f))
+    else:
+        model = KConfigModel.from_benchmark(data, args.attribute)
+        model.build_tree()
+    """
+
+
+if __name__ == "__main__":
+    main()
diff --git a/lib/loader.py b/lib/loader.py
index 14b7853..5e9b20a 100644
--- a/lib/loader.py
+++ b/lib/loader.py
@@ -1974,8 +1974,12 @@ class KConfigAttributes:
            self.choice[choice.name] = choice

        self.data = list()
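+        # remember each .config path so that KConfigModel.assess_benchmark
+        # can re-load and re-evaluate individual configurations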
+        self.configs = list()
        for config_path, attr_path in experiments:
+            self.configs.append(config_path)
            kconf.load_config(config_path)
            with open(attr_path, "r") as f:
                attr = json.load(f)
diff --git a/lib/model.py b/lib/model.py
index f330327..f422204 100644
--- a/lib/model.py
+++ b/lib/model.py
@@ -1158,7 +1158,11 @@ class PTAModel:
class KConfigModel:
-    """Decision-Tree Model for a specific system attribute such as ROM or RAM usage"""
+    """
+    Decision-Tree Model for a specific system attribute such as ROM or RAM usage.
+
+    See Guo et al., "Data-efficient performance learning for configurable systems", 2017.
+    """

    class Node:
        pass
@@ -1280,6 +1284,10 @@
            kconf_choice = next(
                filter(lambda choice: choice.name == self.symbol, kconf.choices)
            )
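+            # a choice may have no active selection in the loaded configuration;
+            # returning None marks the configuration as unpredictable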
+            if kconf_choice.selection is None:
+                return None
            selection = kconf_choice.selection.name
            if selection in self.choice:
                return self.choice[selection].model(kconf)
@@ -1299,9 +1307,16 @@
        return ret

    @classmethod
-    def from_benchmark(cls, kconfig_benchmark, attribute):
+    def from_benchmark(cls, kconfig_benchmark, attribute, indices=None):
        self = cls()
-        self.data = kconfig_benchmark.data
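+        # when indices is given, use only the corresponding subset of the
+        # benchmark observations for training (k-fold cross-validation)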
+        if indices is None:
+            self.data = kconfig_benchmark.data
+        else:
+            self.data = list()
+            for i in indices:
+                self.data.append(kconfig_benchmark.data[i])
        self.symbols = kconfig_benchmark.symbol_names
        self.choices = kconfig_benchmark.choice_names
        self.symbol = kconfig_benchmark.symbol
@@ -1459,3 +1474,31 @@
            )
        return node
+
+    def assess_benchmark(self, kconfig_benchmark, indices=None):
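+        """Assess model quality by comparing its predictions with the
+        measurements in kconfig_benchmark, optionally restricted to indices."""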
+        if indices is None:
+            indices = range(len(kconfig_benchmark.data))
+
+        kconf = kconfig_benchmark.kconf
+
+        predictions = list()
+        values = list()
+        unpredictable_count = 0
+
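+        # load each benchmarked configuration and query the tree model;
+        # configurations the model cannot resolve count as unpredictable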
+        for i in indices:
+            kconf.load_config(kconfig_benchmark.configs[i])
+            prediction = self.model.model(kconf)
+
+            if prediction is None:
+                unpredictable_count += 1
+            else:
+                predictions.append(prediction)
+                values.append(self.attr_function(kconfig_benchmark.data[i]))
+
+        measures = regression_measures(np.array(predictions), np.array(values))
+        measures["unpredictable_count"] = unpredictable_count
+        return measures