From e5ca5fc02f5a114596b1eacab101d7337a32ce29 Mon Sep 17 00:00:00 2001
From: Daniele Nerini
Date: Fri, 1 Nov 2024 09:35:51 +0100
Subject: [PATCH] Fix formatting

---
 mlpp_lib/__init__.py             |  1 +
 mlpp_lib/datasets.py             |  4 +-
 mlpp_lib/losses.py               |  4 +-
 mlpp_lib/normalizers.py          |  4 +-
 mlpp_lib/probabilistic_layers.py | 66 ++++++++++++--------------------
 mlpp_lib/train.py                |  2 +
 tests/__init__.py                |  1 +
 tests/conftest.py                | 10 ++++-
 tests/test_normalizers.py        | 28 +++++++-------
 9 files changed, 59 insertions(+), 61 deletions(-)

diff --git a/mlpp_lib/__init__.py b/mlpp_lib/__init__.py
index 3130e2e..ef2d419 100644
--- a/mlpp_lib/__init__.py
+++ b/mlpp_lib/__init__.py
@@ -1,4 +1,5 @@
 __version__ = "0.1.0"
 
 import os
+
 os.environ["TF_USE_LEGACY_KERAS"] = "1"
diff --git a/mlpp_lib/datasets.py b/mlpp_lib/datasets.py
index 20797bf..45ec201 100644
--- a/mlpp_lib/datasets.py
+++ b/mlpp_lib/datasets.py
@@ -451,7 +451,9 @@ def drop_nans(self, group_size: int = 1):
         event_axes = [self.dims.index(dim) for dim in self.dims if dim != "s"]
         mask = da.any(~da.isfinite(da.from_array(x, name="x")), axis=event_axes)
         if y is not None:
-            mask = mask | da.any(~da.isfinite(da.from_array(y, name="y")), axis=event_axes)
+            mask = mask | da.any(
+                ~da.isfinite(da.from_array(y, name="y")), axis=event_axes
+            )
         mask = (~mask).compute()
 
         # with grouped samples, nans have to be removed in blocks:
diff --git a/mlpp_lib/losses.py b/mlpp_lib/losses.py
index ad1cd1a..c61d833 100644
--- a/mlpp_lib/losses.py
+++ b/mlpp_lib/losses.py
@@ -432,9 +432,7 @@ class MultivariateLoss(tf.keras.losses.Loss):
     """
 
     def mse_metric(y_true, y_pred):
-        return tf.reduce_mean(
-            tf.square(y_true - y_pred), axis=0
-        )
+        return tf.reduce_mean(tf.square(y_true - y_pred), axis=0)
 
     def mae_metric(y_true, y_pred):
         return tf.reduce_mean(tf.abs(y_true - y_pred), axis=0)
diff --git a/mlpp_lib/normalizers.py b/mlpp_lib/normalizers.py
index a188e5d..8a36d95 100644
--- a/mlpp_lib/normalizers.py
+++ b/mlpp_lib/normalizers.py
@@ -211,7 +211,9 @@ def f(ds: xr.Dataset) -> xr.Dataset:
                 ds = ds.fillna(self.fillvalue)
             else:
                 if ds.isnull().any():
-                    raise ValueError("Missing values found in the data. Please provide a fill value.")
+                    raise ValueError(
+                        "Missing values found in the data. Please provide a fill value."
+                    )
             return ds.astype("float32")
 
         return tuple(f(ds) for ds in datasets)
diff --git a/mlpp_lib/probabilistic_layers.py b/mlpp_lib/probabilistic_layers.py
index af092db..34e14b1 100644
--- a/mlpp_lib/probabilistic_layers.py
+++ b/mlpp_lib/probabilistic_layers.py
@@ -88,9 +88,7 @@ def new_from_t(t):
             return IndependentBeta.new(t, event_shape, validate_args)
 
         super(IndependentBeta, self).__init__(
-            new_from_t,
-            convert_to_tensor_fn,
-            **kwargs
+            new_from_t, convert_to_tensor_fn, **kwargs
         )
 
         self._event_shape = event_shape
@@ -200,9 +198,7 @@ def new_from_t(t):
             return Independent4ParamsBeta.new(t, event_shape, validate_args)
 
         super(Independent4ParamsBeta, self).__init__(
-            new_from_t,
-            convert_to_tensor_fn,
-            **kwargs
+            new_from_t, convert_to_tensor_fn, **kwargs
         )
 
         self._event_shape = event_shape
@@ -313,14 +309,10 @@ def __init__(
         kwargs.pop("make_distribution_fn", None)
 
         def new_from_t(t):
-            return IndependentDoublyCensoredNormal.new(
-                t, event_shape, validate_args
-            )
+            return IndependentDoublyCensoredNormal.new(t, event_shape, validate_args)
 
         super(IndependentDoublyCensoredNormal, self).__init__(
-            new_from_t,
-            convert_to_tensor_fn,
-            **kwargs
+            new_from_t, convert_to_tensor_fn, **kwargs
         )
 
         self._event_shape = event_shape
@@ -390,9 +382,11 @@ def _mean(self):
                 cdf = lambda x: tfd.Normal(0, 1).cdf(x)
                 pdf = lambda x: tfd.Normal(0, 1).prob(x)
 
-                return 1 * (1 - cdf(high_bound_standard)) + mu * (
-                    cdf(high_bound_standard) - cdf(low_bound_standard)) + sigma * (
-                    pdf(low_bound_standard) - pdf(high_bound_standard))
+                return (
+                    1 * (1 - cdf(high_bound_standard))
+                    + mu * (cdf(high_bound_standard) - cdf(low_bound_standard))
+                    + sigma * (pdf(low_bound_standard) - pdf(high_bound_standard))
+                )
 
             def _log_prob(self, value):
 
@@ -402,9 +396,15 @@ def _log_prob(self, value):
                 cdf = lambda x: tfd.Normal(0, 1).cdf(x)
                 logprob_left = lambda x: tf.math.log(cdf(-mu / sigma) + 1e-3)
                 logprob_middle = lambda x: self.normal.log_prob(x)
-                logprob_right = lambda x: tf.math.log(1 - cdf((1 - mu) / sigma) + 1e-3)
+                logprob_right = lambda x: tf.math.log(
+                    1 - cdf((1 - mu) / sigma) + 1e-3
+                )
 
-                return logprob_left(value) + logprob_middle(value) + logprob_right(value)
+                return (
+                    logprob_left(value)
+                    + logprob_middle(value)
+                    + logprob_right(value)
+                )
 
         return independent_lib.Independent(
             CustomCensored(normal_dist),
@@ -486,9 +486,7 @@ def new_from_t(t):
             return IndependentConcaveBeta.new(t, event_shape, validate_args)
 
         super(IndependentConcaveBeta, self).__init__(
-            new_from_t,
-            convert_to_tensor_fn,
-            **kwargs
+            new_from_t, convert_to_tensor_fn, **kwargs
         )
 
         self._event_shape = event_shape
@@ -602,9 +600,7 @@ def new_from_t(t):
             return IndependentGamma.new(t, event_shape, validate_args)
 
         super(IndependentGamma, self).__init__(
-            new_from_t,
-            convert_to_tensor_fn,
-            **kwargs
+            new_from_t, convert_to_tensor_fn, **kwargs
         )
 
         self._event_shape = event_shape
@@ -715,9 +711,7 @@ def new_from_t(t):
             return IndependentLogNormal.new(t, event_shape, validate_args)
 
         super(IndependentLogNormal, self).__init__(
-            new_from_t,
-            convert_to_tensor_fn,
-            **kwargs
+            new_from_t, convert_to_tensor_fn, **kwargs
         )
 
         self._event_shape = event_shape
@@ -826,9 +820,7 @@ def new_from_t(t):
             return IndependentLogitNormal.new(t, event_shape, validate_args)
 
         super(IndependentLogitNormal, self).__init__(
-            new_from_t,
-            convert_to_tensor_fn,
-            **kwargs
+            new_from_t, convert_to_tensor_fn, **kwargs
         )
 
         self._event_shape = event_shape
@@ -940,9 +932,7 @@ def new_from_t(t):
             return IndependentMixtureNormal.new(t, event_shape, validate_args)
 
         super(IndependentMixtureNormal, self).__init__(
-            new_from_t,
-            convert_to_tensor_fn,
-            **kwargs
+            new_from_t, convert_to_tensor_fn, **kwargs
         )
 
         self._event_shape = event_shape
@@ -1116,9 +1106,7 @@ def new_from_t(t):
             return IndependentTruncatedNormal.new(t, event_shape, validate_args)
 
         super(IndependentTruncatedNormal, self).__init__(
-            new_from_t,
-            convert_to_tensor_fn,
-            **kwargs
+            new_from_t, convert_to_tensor_fn, **kwargs
         )
 
         self._event_shape = event_shape
@@ -1229,9 +1217,7 @@ def new_from_t(t):
             return IndependentWeibull.new(t, event_shape, validate_args)
 
         super(IndependentWeibull, self).__init__(
-            new_from_t,
-            convert_to_tensor_fn,
-            **kwargs
+            new_from_t, convert_to_tensor_fn, **kwargs
         )
 
         self._event_shape = event_shape
@@ -1346,9 +1332,7 @@ def new_from_t(t):
             return MultivariateNormalDiag.new(t, event_size, validate_args)
 
         super(MultivariateNormalDiag, self).__init__(
-            new_from_t,
-            convert_to_tensor_fn,
-            **kwargs
+            new_from_t, convert_to_tensor_fn, **kwargs
         )
 
         self._event_size = event_size
diff --git a/mlpp_lib/train.py b/mlpp_lib/train.py
index 214a49c..519394a 100644
--- a/mlpp_lib/train.py
+++ b/mlpp_lib/train.py
@@ -39,8 +39,10 @@ def get_log_params(param_run: dict) -> dict:
 
 def get_lr(optimizer: tf.keras.optimizers.Optimizer) -> float:
     """Get the learning rate of the optimizer"""
+
     def lr(y_true, y_pred):
         return optimizer.lr
+
     return lr
 
 
diff --git a/tests/__init__.py b/tests/__init__.py
index d6665fb..7c96756 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -1,2 +1,3 @@
 import os
+
 os.environ["TF_USE_LEGACY_KERAS"] = "1"
diff --git a/tests/conftest.py b/tests/conftest.py
index 6b1a773..13be07d 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -76,7 +76,9 @@ def datatransformations() -> list:
     import mlpp_lib.normalizers as no
 
     datatransformations = [
-        no.create_transformation_from_str(n.name, inputs={"fillvalue": -5} if n.name == "Identity" else {})  # temporary fix, do we want to let the user define different fillvalue for each transformation ?
+        no.create_transformation_from_str(
+            n.name, inputs={"fillvalue": -5} if n.name == "Identity" else {}
+        )  # temporary fix, do we want to let the user define different fillvalue for each transformation ?
         for n in no.DataTransformation.__subclasses__()
     ]
 
@@ -96,7 +98,11 @@ def data_transformer() -> xr.Dataset:
         for i, transformation in enumerate(transformations_list)
     }
     data_transformer = no.DataTransformer(method_var_dict)
-    data_transformer.transformers['Identity'][0].fillvalue = -5  # temporary fix, do we want to let the user define different fillvalue for each transformation ?
+    data_transformer.transformers["Identity"][
+        0
+    ].fillvalue = (
+        -5
+    )  # temporary fix, do we want to let the user define different fillvalue for each transformation ?
 
     return data_transformer
 
diff --git a/tests/test_normalizers.py b/tests/test_normalizers.py
index 7e03e23..420f7e3 100644
--- a/tests/test_normalizers.py
+++ b/tests/test_normalizers.py
@@ -165,20 +165,22 @@ def test_retro_compatibility(self, standardizer, features_multi):
         data_transformer = DataTransformer.from_dict(dict_stand)
 
         assert all(
-            [
-                np.allclose(
-                    getattr(data_transformer.transformers["Standardizer"][0], attr)[
-                        var
-                    ].values,
-                    getattr(standardizer, attr)[var].values,
-                    equal_nan=True,
+            (
+                [
+                    np.allclose(
+                        getattr(data_transformer.transformers["Standardizer"][0], attr)[
+                            var
+                        ].values,
+                        getattr(standardizer, attr)[var].values,
+                        equal_nan=True,
+                    )
+                    for var in getattr(standardizer, attr).data_vars
+                ]
+                if isinstance(getattr(standardizer, attr), xr.Dataset)
+                else np.allclose(
+                    getattr(data_transformer.transformers["Standardizer"][0], attr),
+                    getattr(standardizer, attr),
                 )
-                for var in getattr(standardizer, attr).data_vars
-            ]
-            if isinstance(getattr(standardizer, attr), xr.Dataset)
-            else np.allclose(
-                getattr(data_transformer.transformers["Standardizer"][0], attr),
-                getattr(standardizer, attr),
             )
             for attr in get_class_attributes(standardizer)
         )
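Note (appended for this review, not part of the patch to apply): the train.py hunk above only reflows whitespace, but the closure it touches deserves a word. Keras accepts any callable with a (y_true, y_pred) signature as a metric, so get_lr returns a closure that logs the optimizer's current learning rate alongside the other metrics. A minimal usage sketch follows; the toy model and data are hypothetical, and it assumes the legacy-Keras optimizer.lr attribute, consistent with the TF_USE_LEGACY_KERAS=1 flag this patch formats in mlpp_lib/__init__.py:

    import numpy as np
    import tensorflow as tf

    from mlpp_lib.train import get_lr

    # Toy model and data, for illustration only.
    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
    optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)

    # get_lr(optimizer) returns a (y_true, y_pred) closure, so Keras treats it
    # as a metric and records the current learning rate at every epoch.
    model.compile(optimizer=optimizer, loss="mse", metrics=[get_lr(optimizer)])

    x, y = np.random.rand(32, 4), np.random.rand(32, 1)
    history = model.fit(x, y, epochs=2, verbose=0)
    print(history.history["lr"])  # metric is named after the closure, i.e. "lr"

Style-wise, every hunk matches the output of the black formatter; running black over mlpp_lib/ and tests/ should reproduce the patch (an inference from the formatting, as the commit message does not name the tool).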