author    Daniel Friesel <daniel.friesel@uos.de>  2021-10-28 08:31:05 +0200
committer Daniel Friesel <daniel.friesel@uos.de>  2021-10-28 08:31:05 +0200
commit    e3d8c083154d8e9c0c725ce5e5b8619e5158148d (patch)
tree      99f00b8ab179447218c491c6153fd887109118bc /bin
parent    5da5fb2067f42a1ca183e892cfe02bc2073e7e61 (diff)
analyze-kconfig: add dataref export
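Illustrative invocations of the new flags (the script's existing positional arguments are elided as "...", and the file names are placeholders):

    ./bin/analyze-kconfig.py ... --export-observations observations.json.xz --export-observations-only
    ./bin/analyze-kconfig.py ... --export-dref model-quality.tex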
Diffstat (limited to 'bin')
-rwxr-xr-x  bin/analyze-kconfig.py  64
1 file changed, 63 insertions(+), 1 deletion(-)
diff --git a/bin/analyze-kconfig.py b/bin/analyze-kconfig.py
index 27540cf..56b2925 100755
--- a/bin/analyze-kconfig.py
+++ b/bin/analyze-kconfig.py
@@ -53,12 +53,23 @@ def main():
help="Export observations (intermediate and generic benchmark data representation) to FILE",
)
parser.add_argument(
+ "--export-observations-only",
+ action="store_true",
+ help="Exit after exporting observations",
+ )
+ parser.add_argument(
"--export-model",
type=str,
help="Export kconfig-webconf NFP model to file",
metavar="FILE",
)
parser.add_argument(
+ "--export-dref",
+ type=str,
+ help="Export model and model quality to LaTeX dataref file",
+ metavar="FILE",
+ )
+ parser.add_argument(
"--config",
type=str,
help="Show model results for symbols in .config file",
@@ -129,8 +140,13 @@ def main():
if args.export_observations:
import lzma
+ print(
+ f"Exporting {len(observations)} observations to {args.export_observations}"
+ )
with lzma.open(args.export_observations, "wt") as f:
json.dump(observations, f)
+ if args.export_observations_only:
+ return
else:
# show-failing-symbols, show-nop-symbols, DFATOOL_KCONF_WITH_CHOICE_NODES, DFATOOL_KCONF_IGNORE_NUMERIC, and DFATOOL_KCONF_IGNORE_STRING have no effect
# in this branch.
@@ -142,7 +158,7 @@ def main():
by_name, parameter_names = dfatool.utils.observations_to_by_name(observations)
# Release memory
- observations = None
+ del observations
if args.max_std:
max_std = dict()
@@ -183,14 +199,41 @@ def main():
else:
xv_method = None
+ static_model = model.get_static()
+ try:
+ lut_model = model.get_param_lut()
+ except RuntimeError as e:
+ if args.force_tree:
+ # this is to be expected
+ logging.debug(f"Skipping LUT model: {e}")
+ else:
+ logging.warning(f"Skipping LUT model: {e}")
+ lut_model = None
param_model, param_info = model.get_fitted()
if xv_method == "montecarlo":
+ static_quality = xv.montecarlo(lambda m: m.get_static(), xv_count)
analytic_quality = xv.montecarlo(lambda m: m.get_fitted()[0], xv_count)
+ if lut_model:
+ lut_quality = xv.montecarlo(
+ lambda m: m.get_param_lut(fallback=True), xv_count
+ )
+ else:
+ lut_quality = None
elif xv_method == "kfold":
+ static_quality = xv.kfold(lambda m: m.get_static(), xv_count)
analytic_quality = xv.kfold(lambda m: m.get_fitted()[0], xv_count)
+ if lut_model:
+ lut_quality = xv.kfold(lambda m: m.get_param_lut(fallback=True), xv_count)
+ else:
+ lut_quality = None
else:
+ static_quality = model.assess(static_model)
analytic_quality = model.assess(param_model)
+ if lut_model:
+ lut_quality = model.assess(lut_model)
+ else:
+ lut_quality = None
print("Model Error on Training Data:")
for name in model.names:
@@ -221,6 +264,25 @@ def main():
with open(args.export_model, "w") as f:
json.dump(json_model, f, sort_keys=True, cls=dfatool.utils.NpEncoder)
+ if xv_method == "montecarlo":
+ static_quality = xv.montecarlo(lambda m: m.get_static(), xv_count)
+ elif xv_method == "kfold":
+ static_quality = xv.kfold(lambda m: m.get_static(), xv_count)
+ else:
+ static_quality = model.assess(static_model)
+
+ if args.export_dref:
+ dref = model.to_dref(static_quality, lut_quality, analytic_quality)
+ with open(args.export_dref, "w") as f:
+ for k, v in dref.items():
+ if type(v) is not tuple:
+ v = (v, None)
+ if v[1] is None:
+ prefix = r"\drefset{"
+ else:
+ prefix = r"\drefset" + f"[unit={v[1]}]" + "{"
+ print(f"{prefix}/{k}" + "}{" + str(v[0]) + "}", file=f)
+
if args.config:
kconf = kconfiglib.Kconfig(args.kconfig_path)
kconf.load_config(args.config)
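For reference, the dataref serialization added above boils down to the following standalone sketch. The dict contents are hypothetical; per the code above, model.to_dref() yields plain values or (value, unit) tuples, and each entry becomes one \drefset line that LaTeX's dataref package can look up.

    # Minimal sketch of the \drefset export, assuming a dict in the shape
    # produced by model.to_dref(): plain values or (value, unit) tuples.
    dref = {
        "static model/mean error": 12.5,  # hypothetical key and value
        "param model/power/mean error": (3.2, r"\milli\watt"),  # hypothetical key, value, unit
    }
    with open("model-quality.tex", "w") as f:
        for k, v in dref.items():
            if type(v) is not tuple:
                # entries without a unit are normalized to (value, None)
                v = (v, None)
            if v[1] is None:
                prefix = r"\drefset{"
            else:
                prefix = r"\drefset" + f"[unit={v[1]}]" + "{"
            # emits e.g. \drefset[unit=\milli\watt]{/param model/power/mean error}{3.2}
            print(f"{prefix}/{k}" + "}{" + str(v[0]) + "}", file=f)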