path: root/bin/eval-kconfig.py
blob: 1f44b9e92552328f23369e85499374ed402be1d6
#!/usr/bin/env python3

"""eval-kconfig - tbd
"""

import argparse
import json
import logging
import sys

import numpy as np

from dfatool import validation
from dfatool.loader import KConfigAttributes
from dfatool.model import KConfigModel


def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__
    )
    parser.add_argument(
        "--log-level",
        default=logging.INFO,
        type=lambda level: getattr(logging, level.upper()),
        help="Set log level",
    )
    parser.add_argument(
        "--attribute", choices=["rom", "ram"], default="rom", help="Model attribute"
    )
    parser.add_argument("kconfig_path", type=str, help="Path to Kconfig file")
    parser.add_argument(
        "experiment_root", type=str, help="Experiment results directory"
    )
    parser.add_argument("model", type=str, help="JSON model", nargs="?")

    args = parser.parse_args()

    if isinstance(args.log_level, int):
        logging.basicConfig(level=args.log_level)
    else:
        print("Invalid log level. Setting log level to INFO.", file=sys.stderr)
        logging.basicConfig(level=logging.INFO)

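    # Load benchmark data: attribute values (ROM/RAM usage) for each
    # configuration found in the experiment results directory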
    data = KConfigAttributes(args.kconfig_path, args.experiment_root)

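    # number of cross-validation folds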
    k = 10

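    # Split the observation indices into k (training, validation) pairs. Each
    # fold builds a fresh decision-tree model on the training subset and
    # assesses its predictions on the held-out validation subset.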
    partition_pairs = validation._xv_partitions_kfold(len(data.data), k)
    measures = list()
    for training_set, validation_set in partition_pairs:
        model = KConfigModel.from_benchmark(data, args.attribute, indices=training_set)
        model.build_tree()
        measures.append(model.assess_benchmark(data, indices=validation_set))

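    # Average the error measures (e.g. MAE, SMAPE) across folds; the number of
    # unpredictable configurations is summed instead, as it is a count.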
    aggregate = dict()
    for measure in measures[0].keys():
        aggregate[measure] = np.mean([m[measure] for m in measures])
    aggregate["unpredictable_count"] = np.sum(
        [m["unpredictable_count"] for m in measures]
    )

    print("10-fold Cross Validation:")
    print(f"MAE: {aggregate['mae']:.0f} B")
    print(f"SMAPE: {aggregate['smape']:.0f} %")
    print(f"Unpredictable Configurations: {aggregate['unpredictable_count']}")

    print(aggregate)

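    # The block below is disabled: it sketches how the optional "model"
    # argument would load a previously serialized model instead of
    # re-training from the benchmark data.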
    """
    if args.model:
        with open(args.model, "r") as f:
            model = KConfigModel.from_json(json.load(f))
    else:
        model = KConfigModel.from_benchmark(data, args.attribute)
        model.build_tree()
    """


if __name__ == "__main__":
    main()