author     Birte Kristina Friesel <birte.friesel@uos.de>   2024-10-24 16:54:23 +0200
committer  Birte Kristina Friesel <birte.friesel@uos.de>   2024-10-24 16:54:23 +0200
commit     06c4a5f52d9a58afac2f5a7d60bd4cc6e8b4ae58
tree       30ca9820c7125708aea278acd156ba2868ef7aa2
parent     a0933fef969c4555452fcbf70e6183eddf141956
Add --add-total-observation support to non-xv quality metrics
-rwxr-xr-x  bin/analyze-log.py |  6
-rw-r--r--  lib/model.py       | 22
2 files changed, 24 insertions(+), 4 deletions(-)
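This commit threads the --add-total-observation flag, which the cross-validation path already honors (note the pre-existing with_sum=args.add_total_observation in the context lines of the diff below), through the non-xv code path by giving AnalyticModel.assess a with_sum keyword. A minimal sketch of the resulting call pattern; the entry name "UART", the attribute "power", and the "smape" metric key are illustrative assumptions (the docstring only promises "MAE, SMAPE, etc."):

    # Sketch only, not part of this commit: expected assess() usage afterwards.
    quality = model.assess(param_model, with_sum=True)

    # Per-attribute metrics are reported as before; "UART", "power", and
    # "smape" are assumed names for illustration.
    print(quality["UART"]["power"]["smape"])

    # with_sum=True additionally reports a synthetic "TOTAL" attribute whose
    # metrics are computed on the element-wise sum across all attributes.
    print(quality["UART"]["TOTAL"]["smape"])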
diff --git a/bin/analyze-log.py b/bin/analyze-log.py
index fc7fc0d..9e48afb 100755
--- a/bin/analyze-log.py
+++ b/bin/analyze-log.py
@@ -235,13 +235,15 @@ def main():
             lambda m: m.get_fitted()[0], xv_count, with_sum=args.add_total_observation
         )
     else:
-        static_quality = model.assess(static_model)
+        static_quality = model.assess(static_model, with_sum=args.add_total_observation)
     if args.export_raw_predictions:
         analytic_quality, raw_results = model.assess(param_model, return_raw=True)
         with open(args.export_raw_predictions, "w") as f:
             json.dump(raw_results, f, cls=dfatool.utils.NpEncoder)
     else:
-        analytic_quality = model.assess(param_model)
+        analytic_quality = model.assess(
+            param_model, with_sum=args.add_total_observation
+        )
     timing["assess model"] = time.time() - ts
 
     if "static" in args.show_model or "all" in args.show_model:
diff --git a/lib/model.py b/lib/model.py
index 2815830..f22173c 100644
--- a/lib/model.py
+++ b/lib/model.py
@@ -449,7 +449,7 @@ class AnalyticModel:
 
         return model_getter, info_getter
 
-    def assess(self, model_function, ref=None, return_raw=False):
+    def assess(self, model_function, ref=None, return_raw=False, with_sum=False):
         """
         Calculate MAE, SMAPE, etc. of model_function for each by_name entry.
 
@@ -479,12 +479,30 @@
                     )
                     measures = regression_measures(predicted_data, elem[attribute])
                     detailed_results[name][attribute] = measures
-                    if return_raw:
+                    if return_raw or with_sum:
                         raw_results[name]["attribute"][attribute] = {
                             "groundTruth": elem[attribute],
                             "modelOutput": predicted_data,
                         }
+        if with_sum:
+            for name in ref.keys():
+                attr_0 = ref[name]["attributes"][0]
+                gt_sum = np.zeros(
+                    len(raw_results[name]["attribute"][attr_0]["groundTruth"])
+                )
+                mo_sum = np.zeros(
+                    len(raw_results[name]["attribute"][attr_0]["modelOutput"])
+                )
+                for attribute in ref[name]["attributes"]:
+                    gt_sum += np.array(
+                        raw_results[name]["attribute"][attribute]["groundTruth"]
+                    )
+                    mo_sum += np.array(
+                        raw_results[name]["attribute"][attribute]["modelOutput"]
+                    )
+                detailed_results[name]["TOTAL"] = regression_measures(mo_sum, gt_sum)
+
         if return_raw:
             return detailed_results, raw_results
 
         return detailed_results
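To make the new with_sum branch concrete, here is a self-contained sketch of the "TOTAL" computation; the attribute names and values are invented, and mae is a deliberately simplified stand-in for dfatool's regression_measures (which reports SMAPE and further metrics as well):

    import numpy as np

    def mae(predicted, actual):
        # Simplified stand-in for regression_measures: mean absolute error only.
        return np.mean(np.abs(np.asarray(predicted) - np.asarray(actual)))

    # Hypothetical raw data for one by_name entry with two attributes, shaped
    # like the {"groundTruth": ..., "modelOutput": ...} records the diff collects.
    raw = {
        "power": {"groundTruth": [10.0, 12.0, 11.0], "modelOutput": [11.0, 11.5, 10.5]},
        "duration": {"groundTruth": [3.0, 4.0, 5.0], "modelOutput": [2.5, 4.5, 5.0]},
    }

    # Per-attribute error, as assess() has always reported it.
    for attribute, data in raw.items():
        print(attribute, mae(data["modelOutput"], data["groundTruth"]))

    # The new "TOTAL" observation: sum ground truth and predictions
    # element-wise across attributes, then score the summed series.
    gt_sum = np.zeros(3)
    mo_sum = np.zeros(3)
    for data in raw.values():
        gt_sum += np.asarray(data["groundTruth"])
        mo_sum += np.asarray(data["modelOutput"])
    print("TOTAL", mae(mo_sum, gt_sum))

Because the series are summed before scoring, per-attribute errors with opposite signs can cancel, so the TOTAL row gauges how well the model captures the aggregate observation rather than each attribute in isolation.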