From feafeb9f619d426201b98d05e4feb77c8b1cf4a3 Mon Sep 17 00:00:00 2001 From: Daniel Friesel Date: Thu, 2 Jul 2020 09:29:01 +0200 Subject: Use logging module for debug output --- lib/parameters.py | 21 +++++---------------- 1 file changed, 5 insertions(+), 16 deletions(-) (limited to 'lib/parameters.py') diff --git a/lib/parameters.py b/lib/parameters.py index 8b562b6..bd67cc1 100644 --- a/lib/parameters.py +++ b/lib/parameters.py @@ -1,4 +1,5 @@ import itertools +import logging import numpy as np from collections import OrderedDict from copy import deepcopy @@ -6,6 +7,8 @@ from multiprocessing import Pool from .utils import remove_index_from_tuple, is_numeric from .utils import filter_aggregate_by_param, by_name_to_by_param +logger = logging.getLogger(__name__) + def distinct_param_values(by_name, state_or_tran): """ @@ -94,9 +97,7 @@ def _codependent_parameters(param, lut_by_param_values, std_by_param_values): return influencer_parameters -def _std_by_param( - by_param, all_param_values, state_or_tran, attribute, param_index, verbose=False -): +def _std_by_param(by_param, all_param_values, state_or_tran, attribute, param_index): u""" Calculate standard deviations for a static model where all parameters but `param_index` are constant. @@ -229,7 +230,6 @@ def _compute_param_statistics( attribute, distinct_values, distinct_values_by_param_index, - verbose=False, ): """ Compute standard deviation and correlation coefficient for various data partitions. @@ -252,7 +252,6 @@ def _compute_param_statistics( :param arg_count: dict providing the number of functions args ("local parameters") for each function. :param state_or_trans: state or transition name, e.g. 'send' or 'TX' :param attribute: model attribute, e.g. 'power' or 'duration' - :param verbose: print warning if some parameter partitions are too small for fitting :returns: a dict with the following content: std_static -- static parameter-unaware model error: stddev of by_name[state_or_trans][attribute] @@ -299,7 +298,6 @@ def _compute_param_statistics( state_or_trans, attribute, param_idx, - verbose, ) ret["std_by_param"][param] = mean_std ret["std_by_param_values"][param] = std_matrix @@ -351,7 +349,6 @@ def _compute_param_statistics( state_or_trans, attribute, param_idx, - verbose, ) ret["param_data"][param]["depends_for_codependent_value"][ combi @@ -365,7 +362,6 @@ def _compute_param_statistics( state_or_trans, attribute, len(parameter_names) + arg_index, - verbose, ) ret["std_by_arg"].append(mean_std) ret["std_by_arg_values"].append(std_matrix) @@ -495,13 +491,7 @@ class ParamStats: """ def __init__( - self, - by_name, - by_param, - parameter_names, - arg_count, - use_corrcoef=False, - verbose=False, + self, by_name, by_param, parameter_names, arg_count, use_corrcoef=False, ): """ Compute standard deviation and correlation coefficient on parameterized data partitions. @@ -556,7 +546,6 @@ class ParamStats: attribute, self.distinct_values[state_or_tran], self.distinct_values_by_param_index[state_or_tran], - verbose, ], } ) -- cgit v1.2.3 From e42f3541d9e16264f79e090ddd87b864f5c2a837 Mon Sep 17 00:00:00 2001 From: Daniel Friesel Date: Fri, 3 Jul 2020 09:27:46 +0200 Subject: more consistent logging; use logger. instead of logging. 
where appropriate --- lib/automata.py | 11 +++++++---- lib/data_parameters.py | 17 +++++++++------- lib/dfatool.py | 42 +++++++++++++++++++--------------------- lib/functions.py | 8 ++++---- lib/lex.py | 9 ++++++--- lib/parameters.py | 48 +++++++++++++++++++++++++--------------------- lib/protocol_benchmarks.py | 7 +++++-- lib/utils.py | 4 +++- 8 files changed, 81 insertions(+), 65 deletions(-) (limited to 'lib/parameters.py') diff --git a/lib/automata.py b/lib/automata.py index b3318e0..69b3969 100755 --- a/lib/automata.py +++ b/lib/automata.py @@ -3,11 +3,14 @@ from .functions import AnalyticFunction, NormalizationFunction from .utils import is_numeric import itertools +import logging import numpy as np import json import queue import yaml +logger = logging.getLogger(__name__) + def _dict_to_list(input_dict: dict) -> list: return [input_dict[x] for x in sorted(input_dict.keys())] @@ -1305,8 +1308,8 @@ class PTA: "power" ] except KeyError: - print( - "[W] skipping model update of state {} due to missing data".format( + logger.warning( + "skipping model update of state {} due to missing data".format( state.name ) ) @@ -1353,8 +1356,8 @@ class PTA: "timeout" ] except KeyError: - print( - "[W] skipping model update of transition {} due to missing data".format( + logger.warning( + "skipping model update of transition {} due to missing data".format( transition.name ) ) diff --git a/lib/data_parameters.py b/lib/data_parameters.py index 1150b71..84eacfd 100644 --- a/lib/data_parameters.py +++ b/lib/data_parameters.py @@ -7,9 +7,12 @@ length of lists, ane more. from .protocol_benchmarks import codegen_for_lib from . import cycles_to_energy, size_to_radio_energy, utils +import logging import numpy as np import ubjson +logger = logging.getLogger(__name__) + def _string_value_length(json): if type(json) == str: @@ -224,7 +227,7 @@ class Protolog: except KeyError: pass except TypeError as e: - print( + logger.error( "TypeError in {} {} {} {}: {} -> {}".format( arch_lib, benchmark, @@ -395,7 +398,7 @@ class Protolog: except KeyError: pass except ValueError: - print( + logger.warning( "cycles_enc is NaN for {} -> {} -> {}".format( arch, lib, key ) @@ -410,7 +413,7 @@ class Protolog: except KeyError: pass except ValueError: - print( + logger.warning( "cycles_ser is NaN for {} -> {} -> {}".format( arch, lib, key ) @@ -425,7 +428,7 @@ class Protolog: except KeyError: pass except ValueError: - print( + logger.warning( "cycles_encser is NaN for {} -> {} -> {}".format( arch, lib, key ) @@ -440,7 +443,7 @@ class Protolog: except KeyError: pass except ValueError: - print( + logger.warning( "cycles_des is NaN for {} -> {} -> {}".format( arch, lib, key ) @@ -455,7 +458,7 @@ class Protolog: except KeyError: pass except ValueError: - print( + logger.warning( "cycles_dec is NaN for {} -> {} -> {}".format( arch, lib, key ) @@ -470,7 +473,7 @@ class Protolog: except KeyError: pass except ValueError: - print( + logger.warning( "cycles_desdec is NaN for {} -> {} -> {}".format( arch, lib, key ) diff --git a/lib/dfatool.py b/lib/dfatool.py index ef3bac7..1e38907 100644 --- a/lib/dfatool.py +++ b/lib/dfatool.py @@ -75,7 +75,7 @@ def gplearn_to_function(function_str: str): arg_list.append("X{:d}".format(i)) eval_str = "lambda {}, *whatever: {}".format(",".join(arg_list), function_str) - print(eval_str) + logger.debug(eval_str) return eval(eval_str, eval_globals) @@ -1371,7 +1371,7 @@ class RawData: for measurement in measurements: if "energy_trace" not in measurement: - logging.warning( + logger.warning( "Skipping 
{ar:s}/{m:s}: {e:s}".format( ar=self.filenames[measurement["fileno"]], m=measurement["info"].name, @@ -1393,7 +1393,7 @@ class RawData: self._merge_online_and_offline(measurement) num_valid += 1 else: - logging.warning( + logger.warning( "Skipping {ar:s}/{m:s}: {e:s}".format( ar=self.filenames[measurement["fileno"]], m=measurement["info"].name, @@ -1405,14 +1405,14 @@ class RawData: self._merge_online_and_etlog(measurement) num_valid += 1 else: - logging.warning( + logger.warning( "Skipping {ar:s}/{m:s}: {e:s}".format( ar=self.filenames[measurement["fileno"]], m=measurement["info"].name, e=measurement["error"], ), ) - logging.info( + logger.info( "{num_valid:d}/{num_total:d} measurements are valid".format( num_valid=num_valid, num_total=len(measurements) ), @@ -1834,9 +1834,9 @@ class AnalyticModel: try: model[name][key] = model_function(elem[key]) except RuntimeWarning: - logging.warning("Got no data for {} {}".format(name, key)) + logger.warning("Got no data for {} {}".format(name, key)) except FloatingPointError as fpe: - logging.warning("Got no data for {} {}: {}".format(name, key, fpe),) + logger.warning("Got no data for {} {}: {}".format(name, key, fpe),) return model def param_index(self, param_name): @@ -2230,9 +2230,9 @@ class PTAModel: try: model[name][key] = model_function(elem[key]) except RuntimeWarning: - logging.warning("Got no data for {} {}".format(name, key)) + logger.warning("Got no data for {} {}".format(name, key)) except FloatingPointError as fpe: - logging.warning("Got no data for {} {}: {}".format(name, key, fpe),) + logger.warning("Got no data for {} {}: {}".format(name, key, fpe),) return model def get_static(self, use_mean=False): @@ -2730,7 +2730,7 @@ class EnergyTraceLog: self.sample_rate = data_count / (m_duration_us * 1e-6) - logging.debug( + logger.debug( "got {} samples with {} seconds of log data ({} Hz)".format( data_count, m_duration_us * 1e-6, self.sample_rate ), @@ -2837,19 +2837,17 @@ class EnergyTraceLog: for name, duration in expected_transitions: bc, start, stop, end = self.find_barcode(next_barcode) if bc is None: - print('[!!!] did not find transition "{}"'.format(name)) + logger.error('did not find transition "{}"'.format(name)) break next_barcode = end + self.state_duration + duration - logging.debug( + logger.debug( '{} barcode "{}" area: {:0.2f} .. {:0.2f} / {:0.2f} seconds'.format( offline_index, bc, start, stop, end ), ) if bc != name: - logging.debug( - '[!!!] mismatch: expected "{}", got "{}"'.format(name, bc), - ) - logging.debug( + logger.error('mismatch: expected "{}", got "{}"'.format(name, bc),) + logger.debug( "{} estimated transition area: {:0.3f} .. {:0.3f} seconds".format( offline_index, end, end + duration ), @@ -2862,7 +2860,7 @@ class EnergyTraceLog: self.ts_to_index(end + duration + self.state_duration) + 1 ) - logging.debug( + logger.debug( "{} estimated transitionindex: {:0.3f} .. {:0.3f} seconds".format( offline_index, transition_start_index / self.sample_rate, @@ -2962,7 +2960,7 @@ class EnergyTraceLog: + self.led_power / 3 ) - logging.debug( + logger.debug( "looking for barcode starting at {:0.2f} s, threshold is {:0.1f} mW".format( start_ts, sync_threshold_power * 1e3 ), @@ -2996,7 +2994,7 @@ class EnergyTraceLog: barcode_data = self.interval_power[sync_area_start:sync_area_end] - logging.debug( + logger.debug( "barcode search area: {:0.2f} .. 
{:0.2f} seconds ({} samples)".format( sync_start_ts, sync_end_ts, len(barcode_data) ), @@ -3074,7 +3072,7 @@ class EnergyTraceLog: return content, sym_start, sym_end, padding_bits else: - logging.warning("unable to find barcode") + logger.warning("unable to find barcode") return None, None, None, None @@ -3300,7 +3298,7 @@ class MIMOSA: if cal_r2_mean > cal_0_mean: b_lower = (ua_r2 - 0) / (cal_r2_mean - cal_0_mean) else: - logging.warning("0 uA == %.f uA during calibration" % (ua_r2)) + logger.warning("0 uA == %.f uA during calibration" % (ua_r2)) b_lower = 0 b_upper = (ua_r1 - ua_r2) / (cal_r1_mean - cal_r2_mean) @@ -3472,7 +3470,7 @@ class MIMOSA: data["substates"] = substates ssum = np.sum(list(map(lambda x: x["duration"], substates["states"]))) if ssum != data["us"]: - logging.warning("duration %d vs %d" % (data["us"], ssum)) + logger.warning("duration %d vs %d" % (data["us"], ssum)) if isa == "transition": # subtract average power of previous state diff --git a/lib/functions.py b/lib/functions.py index 359c8d7..0b849bd 100644 --- a/lib/functions.py +++ b/lib/functions.py @@ -229,7 +229,7 @@ class AnalyticFunction: else: X[i].extend([np.nan] * len(val[model_attribute])) elif key[0] == state_or_tran and len(key[1]) != dimension: - logging.warning( + logger.warning( "Invalid parameter key length while gathering fit data for {}/{}. is {}, want {}.".format( state_or_tran, model_attribute, len(key[1]), dimension ), @@ -263,7 +263,7 @@ class AnalyticFunction: error_function, self._regression_args, args=(X, Y), xtol=2e-15 ) except ValueError as err: - logging.warning( + logger.warning( "Fit failed for {}/{}: {} (function: {})".format( state_or_tran, model_attribute, err, self._model_str ), @@ -273,13 +273,13 @@ class AnalyticFunction: self._regression_args = res.x self.fit_success = True else: - logging.warning( + logger.warning( "Fit failed for {}/{}: {} (function: {})".format( state_or_tran, model_attribute, res.message, self._model_str ), ) else: - logging.warning( + logger.warning( "Insufficient amount of valid parameter keys, cannot fit {}/{}".format( state_or_tran, model_attribute ), diff --git a/lib/lex.py b/lib/lex.py index 7bb3760..f698e8c 100644 --- a/lib/lex.py +++ b/lib/lex.py @@ -1,4 +1,7 @@ from .sly import Lexer, Parser +import logging + +logger = logging.getLogger(__name__) class TimedWordLexer(Lexer): @@ -38,7 +41,7 @@ class TimedSequenceLexer(Lexer): FUNCTIONSEP = r";" def error(self, t): - print("Illegal character '%s'" % t.value[0]) + logger.error("Illegal character '%s'" % t.value[0]) if t.value[0] == "{" and t.value.find("}"): self.index += 1 + t.value.find("}") else: @@ -153,11 +156,11 @@ class TimedSequenceParser(Parser): def error(self, p): if p: - print("Syntax error at token", p.type) + logger.error("Syntax error at token", p.type) # Just discard the token and tell the parser it's okay. 
self.errok() else: - print("Syntax error at EOF") + logger.error("Syntax error at EOF") class TimedWord: diff --git a/lib/parameters.py b/lib/parameters.py index bd67cc1..79543a6 100644 --- a/lib/parameters.py +++ b/lib/parameters.py @@ -1,6 +1,7 @@ import itertools import logging import numpy as np +import warnings from collections import OrderedDict from copy import deepcopy from multiprocessing import Pool @@ -163,12 +164,11 @@ def _std_by_param(by_param, all_param_values, state_or_tran, attribute, param_in # vprint(verbose, '[W] parameter value partition for {} is empty'.format(param_value)) if np.all(np.isnan(stddev_matrix)): - print( - "[W] {}/{} parameter #{} has no data partitions -- how did this even happen?".format( - state_or_tran, attribute, param_index + warnings.warn( + "{}/{} parameter #{} has no data partitions. stddev_matrix = {}".format( + state_or_tran, attribute, param_index, stddev_matrix ) ) - print("stddev_matrix = {}".format(stddev_matrix)) return stddev_matrix, 0.0 return ( @@ -203,13 +203,13 @@ def _corr_by_param(by_name, state_or_trans, attribute, param_index): # -> assume no correlation return 0.0 except ValueError: - print( - "[!] Exception in _corr_by_param(by_name, state_or_trans={}, attribute={}, param_index={})".format( + logger.error( + "ValueError in _corr_by_param(by_name, state_or_trans={}, attribute={}, param_index={})".format( state_or_trans, attribute, param_index ) ) - print( - "[!] while executing np.corrcoef(by_name[{}][{}]={}, {}))".format( + logger.error( + "while executing np.corrcoef(by_name[{}][{}]={}, {}))".format( state_or_trans, attribute, by_name[state_or_trans][attribute], @@ -443,8 +443,8 @@ def prune_dependent_parameters(by_name, parameter_names, correlation_threshold=0 correlation != np.nan and np.abs(correlation) > correlation_threshold ): - print( - "[!] Parameters {} <-> {} are correlated with coefficcient {}".format( + logger.debug( + "Parameters {} <-> {} are correlated with coefficcient {}".format( parameter_names[index_1], parameter_names[index_2], correlation, @@ -454,7 +454,7 @@ def prune_dependent_parameters(by_name, parameter_names, correlation_threshold=0 index_to_remove = index_1 else: index_to_remove = index_2 - print( + logger.debug( " Removing parameter {}".format( parameter_names[index_to_remove] ) @@ -581,15 +581,17 @@ class ParamStats: ) > 2 ): - print( - key, - param, - list( - filter( - lambda n: is_numeric(n), - self.distinct_values[key][param], - ) - ), + logger.debug( + "{} can be fitted for param {} on {}".format( + key, + param, + list( + filter( + lambda n: is_numeric(n), + self.distinct_values[key][param], + ) + ), + ) ) return True return False @@ -646,13 +648,15 @@ class ParamStats: depends_on_a_parameter = False for param in self._parameter_names: if self.stats[state_or_tran][attribute]["depends_on_param"][param]: - print("{}/{} depends on {}".format(state_or_tran, attribute, param)) + logger.debug( + "{}/{} depends on {}".format(state_or_tran, attribute, param) + ) depends_on_a_parameter = True if ( len(self.codependent_parameters(state_or_tran, attribute, param)) == 0 ): - print("has no codependent parameters") + logger.debug("... 
and has no codependent parameters") # Always depends on this parameter, regardless of other parameters' values return False return depends_on_a_parameter diff --git a/lib/protocol_benchmarks.py b/lib/protocol_benchmarks.py index b42e821..d41979f 100755 --- a/lib/protocol_benchmarks.py +++ b/lib/protocol_benchmarks.py @@ -16,8 +16,11 @@ import io import os import re import time +import logging from filelock import FileLock +logger = logging.getLogger(__name__) + class DummyProtocol: def __init__(self): @@ -1838,14 +1841,14 @@ class Benchmark: this_result["data"] = data if value != None: this_result[key] = {"v": value, "ts": int(time.time())} - print( + logger.debug( "{} {} {} ({}) :: {} -> {}".format( libkey, bench_name, bench_index, data, key, value ) ) else: this_result[key] = {"e": error, "ts": int(time.time())} - print( + logger.debug( "{} {} {} ({}) :: {} -> [E] {}".format( libkey, bench_name, bench_index, data, key, error[:500] ) diff --git a/lib/utils.py b/lib/utils.py index 8186ee7..d28ecda 100644 --- a/lib/utils.py +++ b/lib/utils.py @@ -1,7 +1,9 @@ import numpy as np import re +import logging arg_support_enabled = True +logger = logging.getLogger(__name__) def running_mean(x: np.ndarray, N: int) -> np.ndarray: @@ -212,7 +214,7 @@ def filter_aggregate_by_param(aggregate, parameters, parameter_filter): ) ) if len(indices_to_keep) == 0: - print("??? {}->{}".format(parameter_filter, name)) + logger.debug("??? {}->{}".format(parameter_filter, name)) names_to_remove.add(name) else: for attribute in aggregate[name]["attributes"]: -- cgit v1.2.3 From 95d635df4b3daa1df1b66c360f38e4d52ee721eb Mon Sep 17 00:00:00 2001 From: Daniel Friesel Date: Mon, 6 Jul 2020 14:01:08 +0200 Subject: Remove co-dependent parameter detection code It doesn't work and is not methodically sound. 
Decision/Regression Trees seem to be the way to go --- bin/analyze-archive.py | 15 ---- bin/analyze-timing.py | 24 ------- lib/model.py | 25 ------- lib/parameters.py | 188 ------------------------------------------------- 4 files changed, 252 deletions(-) (limited to 'lib/parameters.py') diff --git a/bin/analyze-archive.py b/bin/analyze-archive.py index e9d70f6..80ebd78 100755 --- a/bin/analyze-archive.py +++ b/bin/analyze-archive.py @@ -513,21 +513,6 @@ if __name__ == "__main__": model.stats.param_dependence_ratio(state, "power", param), ) ) - if model.stats.has_codependent_parameters(state, "power", param): - print( - "{:24s} co-dependencies: {:s}".format( - "", - ", ".join( - model.stats.codependent_parameters( - state, "power", param - ) - ), - ) - ) - for param_dict in model.stats.codependent_parameter_value_dicts( - state, "power", param - ): - print("{:24s} parameter-aware for {}".format("", param_dict)) for trans in model.transitions(): # Mean power is not a typical transition attribute, but may be present for debugging or analysis purposes diff --git a/bin/analyze-timing.py b/bin/analyze-timing.py index 6761e7b..e27acbf 100755 --- a/bin/analyze-timing.py +++ b/bin/analyze-timing.py @@ -307,30 +307,6 @@ if __name__ == "__main__": model.stats.param_dependence_ratio(trans, "duration", param), ) ) - if model.stats.has_codependent_parameters(trans, "duration", param): - print( - "{:24s} co-dependencies: {:s}".format( - "", - ", ".join( - model.stats.codependent_parameters( - trans, "duration", param - ) - ), - ) - ) - for param_dict in model.stats.codependent_parameter_value_dicts( - trans, "duration", param - ): - print("{:24s} parameter-aware for {}".format("", param_dict)) - # import numpy as np - # safe_div = np.vectorize(lambda x,y: 0. if x == 0 else 1 - x/y) - # ratio_by_value = safe_div(model.stats.stats['write']['duration']['lut_by_param_values']['max_retry_count'], model.stats.stats['write']['duration']['std_by_param_values']['max_retry_count']) - # err_mode = np.seterr('warn') - # dep_by_value = ratio_by_value > 0.5 - # np.seterr(**err_mode) - # Eigentlich sollte hier ein paar mal True stehen, ist aber nicht so... - # und warum ist da eine non-power-of-two Zahl von True-Einträgen in der Matrix? 3 stück ist komisch... - # print(dep_by_value) if xv_method == "montecarlo": static_quality = xv.montecarlo(lambda m: m.get_static(), xv_count) diff --git a/lib/model.py b/lib/model.py index d83c12c..e908af4 100644 --- a/lib/model.py +++ b/lib/model.py @@ -866,19 +866,6 @@ class PTAModel: parameter_name, safe_functions_enabled, ) - for ( - codependent_param_dict - ) in self.stats.codependent_parameter_value_dicts( - state_or_tran, model_attribute, parameter_name - ): - paramfit.enqueue( - state_or_tran, - model_attribute, - parameter_index, - parameter_name, - safe_functions_enabled, - codependent_param_dict, - ) if ( arg_support_enabled and self.by_name[state_or_tran]["isa"] == "transition" @@ -906,18 +893,6 @@ class PTAModel: for model_attribute in self.by_name[state_or_tran]["attributes"]: fit_results = paramfit.get_result(state_or_tran, model_attribute) - for parameter_name in self._parameter_names: - if self.depends_on_param( - state_or_tran, model_attribute, parameter_name - ): - for ( - codependent_param_dict - ) in self.stats.codependent_parameter_value_dicts( - state_or_tran, model_attribute, parameter_name - ): - pass - # FIXME paramfit.get_result hat ja gar keinen Parameter als Argument... 
- if (state_or_tran, model_attribute) in self.function_override: function_str = self.function_override[ (state_or_tran, model_attribute) diff --git a/lib/parameters.py b/lib/parameters.py index 79543a6..81649f2 100644 --- a/lib/parameters.py +++ b/lib/parameters.py @@ -82,22 +82,6 @@ def _reduce_param_matrix(matrix: np.ndarray, parameter_names: list) -> list: return list() -def _codependent_parameters(param, lut_by_param_values, std_by_param_values): - """ - Return list of parameters which affect whether a parameter affects a model attribute or not. - """ - return list() - safe_div = np.vectorize(lambda x, y: 0.0 if x == 0 else 1 - x / y) - ratio_by_value = safe_div(lut_by_param_values, std_by_param_values) - err_mode = np.seterr("ignore") - dep_by_value = ratio_by_value > 0.5 - np.seterr(**err_mode) - - other_param_list = list(filter(lambda x: x != param, self._parameter_names)) - influencer_parameters = _reduce_param_matrix(dep_by_value, other_param_list) - return influencer_parameters - - def _std_by_param(by_param, all_param_values, state_or_tran, attribute, param_index): u""" Calculate standard deviations for a static model where all parameters but `param_index` are constant. @@ -312,48 +296,6 @@ def _compute_param_statistics( ret["std_param_lut"], ) - if ret["depends_on_param"][param]: - ret["param_data"][param] = { - "codependent_parameters": _codependent_parameters( - param, lut_matrix, std_matrix - ), - "depends_for_codependent_value": dict(), - } - - # calculate parameter dependence for individual values of codependent parameters - codependent_param_values = list() - for codependent_param in ret["param_data"][param]["codependent_parameters"]: - codependent_param_values.append(distinct_values[codependent_param]) - for combi in itertools.product(*codependent_param_values): - by_name_part = deepcopy(by_name) - filter_list = list( - zip(ret["param_data"][param]["codependent_parameters"], combi) - ) - filter_aggregate_by_param(by_name_part, parameter_names, filter_list) - by_param_part = by_name_to_by_param(by_name_part) - # there may be no data for this specific parameter value combination - if state_or_trans in by_name_part: - part_corr = _corr_by_param( - by_name_part, state_or_trans, attribute, param_idx - ) - part_std_lut = np.mean( - [ - np.std(by_param_part[x][attribute]) - for x in by_param_part.keys() - if x[0] == state_or_trans - ] - ) - _, part_std_param, _ = _std_by_param( - by_param_part, - distinct_values_by_param_index, - state_or_trans, - attribute, - param_idx, - ) - ret["param_data"][param]["depends_for_codependent_value"][ - combi - ] = _depends_on_param(part_corr, part_std_param, part_std_lut) - if state_or_trans in arg_count: for arg_index in range(arg_count[state_or_trans]): std_matrix, mean_std, lut_matrix = _std_by_param( @@ -596,136 +538,6 @@ class ParamStats: return True return False - def static_submodel_params(self, state_or_tran, attribute): - """ - Return the union of all parameter values which decide whether another parameter influences the model or not. - - I.e., the returned list of dicts contains one entry for each parameter value combination which (probably) does not have any parameter influencing the model. - If the current parameters matches one of these, a static sub-model built based on this subset of parameters can likely be used. 
- """ - # TODO - pass - - def has_codependent_parameters( - self, state_or_tran: str, attribute: str, param: str - ) -> bool: - """ - Return whether there are parameters which determine whether `param` influences `state_or_tran` `attribute` or not. - - :param state_or_tran: model state or transition - :param attribute: model attribute - :param param: parameter name - """ - if len(self.codependent_parameters(state_or_tran, attribute, param)): - return True - return False - - def codependent_parameters( - self, state_or_tran: str, attribute: str, param: str - ) -> list: - """ - Return list of parameters which determine whether `param` influences `state_or_tran` `attribute` or not. - - :param state_or_tran: model state or transition - :param attribute: model attribute - :param param: parameter name - """ - if self.stats[state_or_tran][attribute]["depends_on_param"][param]: - return self.stats[state_or_tran][attribute]["param_data"][param][ - "codependent_parameters" - ] - return list() - - def has_codependent_parameters_union( - self, state_or_tran: str, attribute: str - ) -> bool: - """ - Return whether there is a subset of parameters which decides whether `state_or_tran` `attribute` is static or parameter-dependent - - :param state_or_tran: model state or transition - :param attribute: model attribute - """ - depends_on_a_parameter = False - for param in self._parameter_names: - if self.stats[state_or_tran][attribute]["depends_on_param"][param]: - logger.debug( - "{}/{} depends on {}".format(state_or_tran, attribute, param) - ) - depends_on_a_parameter = True - if ( - len(self.codependent_parameters(state_or_tran, attribute, param)) - == 0 - ): - logger.debug("... and has no codependent parameters") - # Always depends on this parameter, regardless of other parameters' values - return False - return depends_on_a_parameter - - def codependent_parameters_union(self, state_or_tran: str, attribute: str) -> list: - """ - Return list of parameters which determine whether any parameter influences `state_or_tran` `attribute`. - - :param state_or_tran: model state or transition - :param attribute: model attribute - """ - codependent_parameters = set() - for param in self._parameter_names: - if self.stats[state_or_tran][attribute]["depends_on_param"][param]: - if ( - len(self.codependent_parameters(state_or_tran, attribute, param)) - == 0 - ): - return list(self._parameter_names) - for codependent_param in self.codependent_parameters( - state_or_tran, attribute, param - ): - codependent_parameters.add(codependent_param) - return sorted(codependent_parameters) - - def codependence_by_codependent_param_values( - self, state_or_tran: str, attribute: str, param: str - ) -> dict: - """ - Return dict mapping codependent parameter values to a boolean indicating whether `param` influences `state_or_tran` `attribute`. - - If a dict value is true, `attribute` depends on `param` for the corresponding codependent parameter values, otherwise it does not. 
- - :param state_or_tran: model state or transition - :param attribute: model attribute - :param param: parameter name - """ - if self.stats[state_or_tran][attribute]["depends_on_param"][param]: - return self.stats[state_or_tran][attribute]["param_data"][param][ - "depends_for_codependent_value" - ] - return dict() - - def codependent_parameter_value_dicts( - self, state_or_tran: str, attribute: str, param: str, kind="dynamic" - ): - """ - Return dicts of codependent parameter key-value mappings for which `param` influences (or does not influence) `state_or_tran` `attribute`. - - :param state_or_tran: model state or transition - :param attribute: model attribute - :param param: parameter name: - :param kind: 'static' or 'dynamic'. If 'dynamic' (the default), returns codependent parameter values for which `param` influences `attribute`. If 'static', returns codependent parameter values for which `param` does not influence `attribute` - """ - codependent_parameters = self.stats[state_or_tran][attribute]["param_data"][ - param - ]["codependent_parameters"] - codependence_info = self.stats[state_or_tran][attribute]["param_data"][param][ - "depends_for_codependent_value" - ] - if len(codependent_parameters) == 0: - return - else: - for param_values, is_dynamic in codependence_info.items(): - if (is_dynamic and kind == "dynamic") or ( - not is_dynamic and kind == "static" - ): - yield dict(zip(codependent_parameters, param_values)) - def _generic_param_independence_ratio(self, state_or_trans, attribute): """ Return the heuristic ratio of parameter independence for state_or_trans and attribute. -- cgit v1.2.3 From 2f5fa87125ccf44f1c6e208ed736070274ea6e2e Mon Sep 17 00:00:00 2001 From: Daniel Friesel Date: Thu, 9 Jul 2020 13:37:49 +0200 Subject: parameters: documentation, remove unused dict key --- lib/parameters.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'lib/parameters.py') diff --git a/lib/parameters.py b/lib/parameters.py index 81649f2..5c6b978 100644 --- a/lib/parameters.py +++ b/lib/parameters.py @@ -250,6 +250,8 @@ def _compute_param_statistics( corr_by_param -- correlation coefficient corr_by_arg -- same, but ignoring a single function argument Only set if state_or_trans appears in arg_count, empty dict otherwise. + depends_on_param -- dict(parameter_name -> Bool). True if /attribute/ behaviour probably depends on /parameter_name/ + depends_on_arg -- list(bool). Same, but for function arguments, if any. """ ret = { "std_static": np.std(by_name[state_or_trans][attribute]), @@ -270,7 +272,6 @@ def _compute_param_statistics( "corr_by_arg": [], "depends_on_param": {}, "depends_on_arg": [], - "param_data": {}, } np.seterr("raise") -- cgit v1.2.3
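Editor's note (not part of the patches above): the commit removing the co-dependent parameter detection code states that decision/regression trees seem to be the way to go for detecting parameter dependence. As a rough, hypothetical illustration of that idea only -- this is not code from this repository, it assumes scikit-learn is available and that parameter values are numeric, and all function and variable names are made up -- a regression tree's feature importances could serve as such a dependence heuristic:

    from sklearn.tree import DecisionTreeRegressor

    def estimate_param_dependence(
        param_matrix, attribute_values, parameter_names, threshold=0.1
    ):
        """Return the parameters a model attribute appears to depend on.

        param_matrix: (n_measurements, n_parameters) array of numeric parameter values
        attribute_values: (n_measurements,) observed attribute values, e.g. power or duration
        parameter_names: list of n_parameters parameter names
        threshold: minimum feature importance for a parameter to count as influential
        """
        # Fit a shallow regression tree; its feature importances indicate how much
        # each parameter contributes to explaining the attribute's variance.
        tree = DecisionTreeRegressor(max_depth=4, random_state=0)
        tree.fit(param_matrix, attribute_values)
        return [
            name
            for name, importance in zip(parameter_names, tree.feature_importances_)
            if importance > threshold
        ]

    # Usage sketch (X and y are measurement data in the shapes described above):
    # influential = estimate_param_dependence(X, y, ["datarate", "txpower"])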