From 0033359697c9955a33276af599794aab0b85482d Mon Sep 17 00:00:00 2001
From: Niclas Rieger
Date: Tue, 15 Aug 2023 01:39:14 +0200
Subject: [PATCH 01/43] refactor: add class for MultiIndex conversion

---
 xeofs/models/rotator_factory.py | 4 +-
 xeofs/preprocessing/_base_stacker.py | 5 +-
 xeofs/preprocessing/factory.py | 56 ++++++
 xeofs/preprocessing/multi_index_converter.py | 113 +++++++++++
 xeofs/preprocessing/preprocessor.py | 35 +++-
 xeofs/preprocessing/scaler_factory.py | 20 --
 xeofs/preprocessing/stacker.py | 185 +++++++++++--------
 xeofs/preprocessing/stacker_factory.py | 20 --
 xeofs/utils/data_types.py | 3 +-
 xeofs/utils/xarray_utils.py | 4 +-
 10 files changed, 308 insertions(+), 137 deletions(-)
 create mode 100644 xeofs/preprocessing/factory.py
 create mode 100644 xeofs/preprocessing/multi_index_converter.py
 delete mode 100644 xeofs/preprocessing/scaler_factory.py
 delete mode 100644 xeofs/preprocessing/stacker_factory.py

diff --git a/xeofs/models/rotator_factory.py b/xeofs/models/rotator_factory.py
index e2b91cf..e6d6139 100644
--- a/xeofs/models/rotator_factory.py
+++ b/xeofs/models/rotator_factory.py
@@ -2,14 +2,12 @@
 import xarray as xr
 from typing import Optional, Union, List, Tuple
 
-from xeofs.utils.data_types import DataArrayList, XarrayData
-
 from .eof import EOF, ComplexEOF
 from .mca import MCA, ComplexMCA
 from .eof_rotator import EOFRotator, ComplexEOFRotator
 from .mca_rotator import MCARotator, ComplexMCARotator
 from ..utils.rotation import promax
-from ..utils.data_types import XarrayData, DataArrayList, Dataset, DataArray
+from ..utils.data_types import DataArrayList, Dataset, DataArray
 
 
 class RotatorFactory:
diff --git a/xeofs/preprocessing/_base_stacker.py b/xeofs/preprocessing/_base_stacker.py
index 7cd742c..54c4958 100644
--- a/xeofs/preprocessing/_base_stacker.py
+++ b/xeofs/preprocessing/_base_stacker.py
@@ -24,8 +24,9 @@ class _BaseStacker(ABC):
 
     """
 
-    def __init__(self):
-        pass
+    def __init__(self, sample_name: str = "sample", feature_name: str = "feature"):
+        self.sample_name = sample_name
+        self.feature_name = feature_name
 
     def fit(
         self,
diff --git a/xeofs/preprocessing/factory.py b/xeofs/preprocessing/factory.py
new file mode 100644
index 0000000..34f1f10
--- /dev/null
+++ b/xeofs/preprocessing/factory.py
@@ -0,0 +1,56 @@
+import xarray as xr
+
+from ._base_scaler import _BaseScaler
+from ._base_stacker import _BaseStacker
+from .scaler import SingleDataArrayScaler, SingleDatasetScaler, ListDataArrayScaler
+from .stacker import SingleDataArrayStacker, SingleDatasetStacker, ListDataArrayStacker
+from .multi_index_converter import (
+    MultiIndexConverter,
+    ListMultiIndexConverter,
+)
+from ..utils.data_types import AnyDataObject
+
+
+class ScalerFactory:
+    @staticmethod
+    def create_scaler(data: AnyDataObject, **kwargs) -> _BaseScaler:
+        if isinstance(data, xr.DataArray):
+            return SingleDataArrayScaler(**kwargs)
+        elif isinstance(data, xr.Dataset):
+            return SingleDatasetScaler(**kwargs)
+        elif isinstance(data, list) and all(
+            isinstance(da, xr.DataArray) for da in data
+        ):
+            return ListDataArrayScaler(**kwargs)
+        else:
+            raise ValueError("Invalid data type")
+
+
+class MultiIndexConverterFactory:
+    @staticmethod
+    def create_converter(
+        data: AnyDataObject, **kwargs
+    ) -> MultiIndexConverter | ListMultiIndexConverter:
+        if isinstance(data, (xr.DataArray, xr.Dataset)):
+            return MultiIndexConverter(**kwargs)
+        elif isinstance(data, list) and all(
+            isinstance(da, xr.DataArray) for da in data
+        ):
+            return ListMultiIndexConverter(**kwargs)
+        else:
+            raise ValueError("Invalid data type")
+
+
+class StackerFactory:
+    @staticmethod
+    def create_stacker(data: AnyDataObject, **kwargs) -> _BaseStacker:
+        if isinstance(data, xr.DataArray):
+            return SingleDataArrayStacker(**kwargs)
+        elif isinstance(data, xr.Dataset):
+            return SingleDatasetStacker(**kwargs)
+        elif isinstance(data, list) and all(
+            isinstance(da, xr.DataArray) for da in data
+        ):
+            return ListDataArrayStacker(**kwargs)
+        else:
+            raise ValueError("Invalid data type")
diff --git a/xeofs/preprocessing/multi_index_converter.py b/xeofs/preprocessing/multi_index_converter.py
new file mode 100644
index 0000000..f937114
--- /dev/null
+++ b/xeofs/preprocessing/multi_index_converter.py
@@ -0,0 +1,113 @@
+from typing import Dict, TypeVar, List
+
+
+import xarray as xr
+import pandas as pd
+from sklearn.base import BaseEstimator, TransformerMixin
+
+from ..utils.data_types import DataArray, XArrayData, AnyDataObject, DataArrayList
+
+
+class MultiIndexConverter(BaseEstimator, TransformerMixin):
+    def __init__(self, return_copy=False):
+        self.original_indexes = {}
+        self.modified_dimensions = []
+        self.return_copy = return_copy
+
+    def fit(self, X: XArrayData, y=None):
+        # Check if input is a DataArray or Dataset
+        if not isinstance(X, (xr.DataArray, xr.Dataset)):
+            raise ValueError("Input must be an xarray DataArray or Dataset")
+
+        # Store original MultiIndexes and replace with simple index
+        for dim in X.dims:
+            index = X.indexes[dim]
+            if isinstance(index, pd.MultiIndex):
+                self.original_indexes[dim] = X.coords[dim]
+                self.modified_dimensions.append(dim)
+
+        return self
+
+    def transform(self, X: XArrayData) -> XArrayData:
+        # Check if input is a DataArray or Dataset
+        if not isinstance(X, (xr.DataArray, xr.Dataset)):
+            raise ValueError("Input must be an xarray DataArray or Dataset")
+
+        # Make a copy if return_copy is True
+        X_transformed = X.copy(deep=True) if self.return_copy else X
+
+        # Replace MultiIndexes with simple index
+        for dim in self.modified_dimensions:
+            size = X_transformed.coords[dim].size
+            X_transformed = X_transformed.drop_vars(dim)
+            X_transformed.coords[dim] = range(size)
+
+        return X_transformed
+
+    def inverse_transform(self, X: XArrayData) -> XArrayData:
+        # Check if input is a DataArray or Dataset
+        if not isinstance(X, (xr.DataArray, xr.Dataset)):
+            raise ValueError("Input must be an xarray DataArray or Dataset")
+
+        # Make a copy if return_copy is True
+        X_inverse_transformed = X.copy() if self.return_copy else X
+
+        # Restore original MultiIndexes
+        for dim, original_index in self.original_indexes.items():
+            if dim in X_inverse_transformed.dims:
+                X_inverse_transformed.coords[dim] = original_index
+                # Set indexes to original MultiIndexes
+                indexes = [
+                    idx
+                    for idx in self.original_indexes[dim].indexes.keys()
+                    if idx != dim
+                ]
+                X_inverse_transformed = X_inverse_transformed.set_index({dim: indexes})
+
+        return X_inverse_transformed
+
+    def fit_transform(self, X: XArrayData, y=None) -> XArrayData:
+        return self.fit(X, y).transform(X)
+
+
+class ListMultiIndexConverter(BaseEstimator, TransformerMixin):
+    def __init__(self, return_copy=False):
+        self.converters: List[MultiIndexConverter] = []
+        self.return_copy = return_copy
+
+    def fit(self, X: DataArrayList, y=None):
+        # Check if input is a List of DataArrays
+        if not isinstance(X, list) or not all(isinstance(x, xr.DataArray) for x in X):
+            raise ValueError("Input must be a list of xarray DataArray")
+
+        for x in X:
+            converter = MultiIndexConverter(return_copy=self.return_copy)
+            converter.fit(x)
+            self.converters.append(converter)
+
+        return self
+
+    def transform(self, X: DataArrayList) -> DataArrayList:
+        # Check if input is a List of DataArrays
+        if not isinstance(X, list) or not all(isinstance(x, xr.DataArray) for x in X):
+            raise ValueError("Input must be a list of xarray DataArray")
+
+        X_transformed: List[DataArray] = []
+        for x, converter in zip(X, self.converters):
+            X_transformed.append(converter.transform(x))
+
+        return X_transformed
+
+    def inverse_transform(self, X: DataArrayList) -> DataArrayList:
+        # Check if input is a List of DataArrays
+        if not isinstance(X, list) or not all(isinstance(x, xr.DataArray) for x in X):
+            raise ValueError("Input must be a list of xarray DataArray")
+
+        X_inverse_transformed: List[DataArray] = []
+        for x, converter in zip(X, self.converters):
+            X_inverse_transformed.append(converter.inverse_transform(x))
+
+        return X_inverse_transformed
+
+    def fit_transform(self, X: DataArrayList, y=None) -> DataArrayList:
+        return self.fit(X, y).transform(X)
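As a quick illustration of the intended round trip (hypothetical coordinates; a sketch of the behaviour the class above is designed for):

    import numpy as np
    import pandas as pd
    import xarray as xr

    from xeofs.preprocessing.multi_index_converter import MultiIndexConverter

    midx = pd.MultiIndex.from_product(
        [[2020, 2021], [1, 2]], names=("year", "month")
    )
    da = xr.DataArray(np.arange(4.0), dims="sample", coords={"sample": midx})

    converter = MultiIndexConverter()
    flat = converter.fit_transform(da)            # "sample" is now a plain range index
    restored = converter.inverse_transform(flat)  # MultiIndex (year, month) restored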
diff --git a/xeofs/preprocessing/preprocessor.py b/xeofs/preprocessing/preprocessor.py
index 9a7137d..59eb2b5 100644
--- a/xeofs/preprocessing/preprocessor.py
+++ b/xeofs/preprocessing/preprocessor.py
@@ -1,7 +1,6 @@
 from typing import Optional, Sequence, Hashable, List
 
-from .scaler_factory import ScalerFactory
-from .stacker_factory import StackerFactory
+from .factory import StackerFactory, ScalerFactory, MultiIndexConverterFactory
 from ..utils.xarray_utils import get_dims
 from ..utils.data_types import AnyDataObject, DataArray
 
@@ -26,7 +25,17 @@ class Preprocessor:
 
     """
 
-    def __init__(self, with_std=True, with_coslat=False, with_weights=False):
+    def __init__(
+        self,
+        sample_name="sample",
+        feature_name="feature",
+        with_std=True,
+        with_coslat=False,
+        with_weights=False,
+    ):
+        self.sample_name = sample_name
+        self.feature_name = feature_name
+
         # Define model parameters
         self._params = {
             "with_std": with_std,
@@ -68,14 +77,22 @@ def fit_transform(
         """
         # Set sample and feature dimensions
         sample_dims, feature_dims = get_dims(data, sample_dims=dim)
-        self.dims = {"sample": sample_dims, "feature": feature_dims}
+        self.dims = {self.sample_name: sample_dims, self.feature_name: feature_dims}
 
         # Scale the data
         self.scaler = ScalerFactory.create_scaler(data, **self._params)
         data = self.scaler.fit_transform(data, sample_dims, feature_dims, weights)
 
+        # Convert MultiIndex to single index
+        self.converter = MultiIndexConverterFactory.create_converter(data)
+        data = self.converter.fit_transform(data)  # type: ignore
+
         # Stack the data
-        self.stacker = StackerFactory.create_stacker(data)
+        stacker_kwargs = {
+            "sample_name": self.sample_name,
+            "feature_name": self.feature_name,
+        }
+        self.stacker = StackerFactory.create_stacker(data, **stacker_kwargs)
         return self.stacker.fit_transform(data, sample_dims, feature_dims)
 
     def transform(self, data: AnyDataObject) -> DataArray:
@@ -93,6 +110,7 @@ def transform(self, data: AnyDataObject) -> DataArray:
 
         """
         data = self.scaler.transform(data)
+        data = self.converter.transform(data)  # type: ignore
         return self.stacker.transform(data)
 
     def inverse_transform_data(self, data: DataArray) -> AnyDataObject:
@@ -110,6 +128,7 @@ def inverse_transform_data(self, data: DataArray) -> AnyDataObject:
 
         """
         data = self.stacker.inverse_transform_data(data)
+        data = self.converter.inverse_transform(data)  # type: ignore
         return self.scaler.inverse_transform(data)
 
     def inverse_transform_components(self, data: DataArray) -> AnyDataObject:
@@ -126,7 +145,8 @@ def inverse_transform_components(self, data: DataArray) -> AnyDataObject:
             The inverse transformed components.
 
         """
-        return self.stacker.inverse_transform_components(data)
+        data = self.stacker.inverse_transform_components(data)
+        return self.converter.inverse_transform_components(data)  # type: ignore
 
     def inverse_transform_scores(self, data: DataArray) -> AnyDataObject:
         """Inverse transform the scores.
@@ -142,4 +162,5 @@ def inverse_transform_scores(self, data: DataArray) -> AnyDataObject:
             The inverse transformed scores.
 
         """
-        return self.stacker.inverse_transform_scores(data)
+        data = self.stacker.inverse_transform_scores(data)
+        return self.converter.inverse_transform_scores(data)  # type: ignore
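The preprocessing pipeline is thus scale → convert MultiIndex → stack. A rough sketch of the intended flow, assuming a hypothetical 3D input with labelled coordinates:

    import numpy as np
    import pandas as pd
    import xarray as xr

    from xeofs.preprocessing.preprocessor import Preprocessor

    da = xr.DataArray(
        np.random.rand(20, 6, 8),
        dims=("time", "lat", "lon"),
        coords={
            "time": pd.date_range("2000-01-01", periods=20),
            "lat": np.arange(6),
            "lon": np.arange(8),
        },
    )
    prep = Preprocessor(with_std=True)
    # Returns a 2D DataArray with dims ("sample", "feature")
    stacked = prep.fit_transform(da, dim="time")
    # Round trip back to the original layout
    unstacked = prep.inverse_transform_data(stacked)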
self.dims_out_["sample"], self.dims_out_["feature"] + data, self.dims_out_[self.sample_name], self.dims_out_[self.feature_name] ) - da = da.dropna("feature", how="all") - da = da.dropna("sample", how="all") + da = da.dropna(self.feature_name, how="all") + da = da.dropna(self.sample_name, how="all") # Ensure that no NaNs are present in the data if da.isnull().any(): @@ -233,8 +234,7 @@ class SingleDataArrayStacker(SingleDataStacker): """ - @staticmethod - def _validate_dimensions(sample_dims: Tuple[str], feature_dims: Tuple[str]): + def _validate_dimensions(self, sample_dims: Tuple[str], feature_dims: Tuple[str]): """Verify the dimensions are correctly specified. For example, valid input dimensions (sample, feature) are: @@ -245,35 +245,37 @@ def _validate_dimensions(sample_dims: Tuple[str], feature_dims: Tuple[str]): (("sample",), ("feature",)), <-- special case only valid for DataArrays """ + sample_name = self.sample_name + feature_name = self.feature_name # Check for `sample` and `feature` special cases - if sample_dims == ("sample",) and feature_dims != ("feature",): + if sample_dims == (sample_name,) and feature_dims != (feature_name,): err_msg = """Due to the internal logic of this package, when using the 'sample' dimension in sample_dims, it should only be paired with the 'feature' dimension in feature_dims. Please rename or remove other dimensions.""" raise ValueError(err_msg) - if feature_dims == ("feature",) and sample_dims != ("sample",): + if feature_dims == (feature_name,) and sample_dims != (sample_name,): err_msg = """Invalid combination: 'feature' dimension in feature_dims should only be paired with 'sample' dimension in sample_dims.""" raise ValueError(err_msg) - if "sample" in sample_dims and len(sample_dims) > 1: + if sample_name in sample_dims and len(sample_dims) > 1: err_msg = """Invalid combination: 'sample' dimension should not be combined with other dimensions in sample_dims.""" raise ValueError(err_msg) - if "feature" in feature_dims and len(feature_dims) > 1: + if feature_name in feature_dims and len(feature_dims) > 1: err_msg = """Invalid combination: 'feature' dimension should not be combined with other dimensions in feature_dims.""" raise ValueError(err_msg) - if "sample" in feature_dims: + if sample_name in feature_dims: err_msg = """Invalid combination: 'sample' dimension should not appear in feature_dims.""" raise ValueError(err_msg) - if "feature" in sample_dims: + if feature_name in sample_dims: err_msg = """Invalid combination: 'feature' dimension should not appear in sample_dims.""" raise ValueError(err_msg) @@ -294,6 +296,9 @@ def _stack(self, data: DataArray, sample_dims, feature_dims) -> DataArray: data_stacked : DataArray The reshaped 2d-data. """ + sample_name = self.sample_name + feature_name = self.feature_name + self._validate_dimensions(sample_dims, feature_dims) # 3 cases: # 1. 
uni-dimensional with correct feature/sample name ==> do nothing @@ -303,28 +308,28 @@ def _stack(self, data: DataArray, sample_dims, feature_dims) -> DataArray: # - FEATURE - if len(feature_dims) == 1: # Case 1 - if feature_dims[0] == "feature": + if feature_dims[0] == feature_name: pass # Case 2 else: - data = data.rename({feature_dims[0]: "feature"}) + data = data.rename({feature_dims[0]: feature_name}) # Case 3 else: - data = data.stack(feature=feature_dims) + data = data.stack({feature_name: feature_dims}) # - SAMPLE - if len(sample_dims) == 1: # Case 1 - if sample_dims[0] == "sample": + if sample_dims[0] == sample_name: pass # Case 2 else: - data = data.rename({sample_dims[0]: "sample"}) + data = data.rename({sample_dims[0]: sample_name}) # Case 3 else: - data = data.stack(sample=sample_dims) + data = data.stack({sample_name: sample_dims}) - return data.transpose("sample", "feature") + return data.transpose(sample_name, feature_name) def _unstack(self, data: DataArray) -> DataArray: """Unstack `sample` and `feature` dimension of an DataArray to its original dimensions. @@ -339,22 +344,24 @@ def _unstack(self, data: DataArray) -> DataArray: data_unstacked : DataArray The unstacked data. """ + sample_name = self.sample_name + feature_name = self.feature_name # pass if feature/sample dimensions do not exist in data - if "feature" in data.dims: + if feature_name in data.dims: # If sample dimensions is one dimensional, rename is sufficient, otherwise unstack - if len(self.dims_out_["feature"]) == 1: - if self.dims_out_["feature"][0] != "feature": - data = data.rename({"feature": self.dims_out_["feature"][0]}) + if len(self.dims_out_[feature_name]) == 1: + if self.dims_out_[feature_name][0] != feature_name: + data = data.rename({feature_name: self.dims_out_[feature_name][0]}) else: - data = data.unstack("feature") + data = data.unstack(feature_name) - if "sample" in data.dims: + if sample_name in data.dims: # If sample dimensions is one dimensional, rename is sufficient, otherwise unstack - if len(self.dims_out_["sample"]) == 1: - if self.dims_out_["sample"][0] != "sample": - data = data.rename({"sample": self.dims_out_["sample"][0]}) + if len(self.dims_out_[sample_name]) == 1: + if self.dims_out_[sample_name][0] != sample_name: + data = data.rename({sample_name: self.dims_out_[sample_name][0]}) else: - data = data.unstack("sample") + data = data.unstack(sample_name) # Reorder dimensions to original order data = self._reorder_dims(data) @@ -381,8 +388,8 @@ def inverse_transform_data(self, data: DataArray) -> DataArray: data = self._unstack(data) # Reindex data to original coordinates in case that some features at the boundaries were dropped - data = self._reindex_dim(data, "feature") - data = self._reindex_dim(data, "sample") + data = self._reindex_dim(data, self.feature_name) + data = self._reindex_dim(data, self.sample_name) return data @@ -392,7 +399,7 @@ def inverse_transform_components(self, data: DataArray) -> DataArray: data = self._unstack(data) # Reindex data to original coordinates in case that some features at the boundaries were dropped - data = self._reindex_dim(data, "feature") + data = self._reindex_dim(data, self.feature_name) return data @@ -418,8 +425,7 @@ class SingleDatasetStacker(SingleDataStacker): """ - @staticmethod - def _validate_dimensions(sample_dims: Tuple[str], feature_dims: Tuple[str]): + def _validate_dimensions(self, sample_dims: Tuple[str], feature_dims: Tuple[str]): """Verify the dimensions are correctly specified. 
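The three stacking cases above reduce to plain xarray operations. A minimal sketch of the multi-dimensional case (hypothetical dimension names), mirroring _stack and _unstack:

    import numpy as np
    import xarray as xr

    da = xr.DataArray(
        np.random.rand(3, 4, 5),
        dims=("year", "lat", "lon"),
        coords={"year": [2000, 2001, 2002], "lat": np.arange(4), "lon": np.arange(5)},
    )
    # Case 2 for the sample dim (rename), case 3 for the feature dims (stack)
    stacked = da.rename({"year": "sample"}).stack({"feature": ("lat", "lon")})
    stacked = stacked.transpose("sample", "feature")  # -> shape (3, 20)
    # The inverse is an unstack plus a rename back
    restored = stacked.unstack("feature").rename({"sample": "year"})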
@@ -418,8 +425,7 @@ class SingleDatasetStacker(SingleDataStacker):
 
     """
 
-    @staticmethod
-    def _validate_dimensions(sample_dims: Tuple[str], feature_dims: Tuple[str]):
+    def _validate_dimensions(self, sample_dims: Tuple[str], feature_dims: Tuple[str]):
         """Verify the dimensions are correctly specified.
 
         For example, valid input dimensions (sample, feature) are:
@@ -434,15 +440,14 @@ def _validate_dimensions(sample_dims: Tuple[str], feature_dims: Tuple[str]):
         any combination that contains 'sample' and/or 'feature' dimension
 
         """
-        if "sample" in sample_dims or "sample" in feature_dims:
-            err_msg = (
-                "The dimension 'sample' is reserved for internal use. Please rename."
-            )
+        sample_name = self.sample_name
+        feature_name = self.feature_name
+
+        if sample_name in sample_dims or sample_name in feature_dims:
+            err_msg = f"The dimension {sample_name} is reserved for internal use. Please rename."
             raise ValueError(err_msg)
-        if "feature" in sample_dims or "feature" in feature_dims:
-            err_msg = (
-                "The dimension 'feature' is reserved for internal use. Please rename."
-            )
+        if feature_name in sample_dims or feature_name in feature_dims:
+            err_msg = f"The dimension {feature_name} is reserved for internal use. Please rename."
             raise ValueError(err_msg)
 
@@ -462,6 +467,9 @@ def _stack(self, data: Dataset, sample_dims, feature_dims) -> DataArray:
         data_stacked : DataArray | Dataset
             The reshaped 2d-data.
         """
+        sample_name = self.sample_name
+        feature_name = self.feature_name
+
         self._validate_dimensions(sample_dims, feature_dims)
         # 2 cases:
         # 1. uni-dimensional with name different from feature/sample ==> rename
         # 2. multi-dimensional with names different from feature/sample ==> stack
 
@@ -470,34 +478,38 @@ def _stack(self, data: Dataset, sample_dims, feature_dims) -> DataArray:
         # - FEATURE -
         # Convert Dataset -> DataArray, stacking all non-sample dimensions to feature dimension, including data variables
         # Case 1 & 2
-        da = data.to_stacked_array(new_dim="feature", sample_dims=sample_dims)
+        da = data.to_stacked_array(new_dim=feature_name, sample_dims=sample_dims)
 
         # Rename if the sample dimension is one-dimensional, otherwise stack
         # Case 1
         if len(sample_dims) == 1:
-            da = da.rename({sample_dims[0]: "sample"})
+            da = da.rename({sample_dims[0]: sample_name})
         # Case 2
         else:
-            da = da.stack(sample=sample_dims)
+            da = da.stack({sample_name: sample_dims})
 
-        return da.transpose("sample", "feature")
+        return da.transpose(sample_name, feature_name)
 
     def _unstack_data(self, data: DataArray) -> Dataset:
         """Unstack `sample` and `feature` dimension of a DataArray to its original dimensions."""
-        if len(self.dims_out_["sample"]) == 1:
-            data = data.rename({"sample": self.dims_out_["sample"][0]})
-        ds: Dataset = data.to_unstacked_dataset("feature", "variable").unstack()
+        sample_name = self.sample_name
+        feature_name = self.feature_name
+        if len(self.dims_out_[sample_name]) == 1:
+            data = data.rename({sample_name: self.dims_out_[sample_name][0]})
+        ds: Dataset = data.to_unstacked_dataset(feature_name, "variable").unstack()
         ds = self._reorder_dims(ds)
         return ds
 
     def _unstack_components(self, data: DataArray) -> Dataset:
-        ds: Dataset = data.to_unstacked_dataset("feature", "variable").unstack()
+        feature_name = self.feature_name
+        ds: Dataset = data.to_unstacked_dataset(feature_name, "variable").unstack()
         ds = self._reorder_dims(ds)
         return ds
 
     def _unstack_scores(self, data: DataArray) -> DataArray:
-        if len(self.dims_out_["sample"]) == 1:
-            data = data.rename({"sample": self.dims_out_["sample"][0]})
+        sample_name = self.sample_name
+        if len(self.dims_out_[sample_name]) == 1:
+            data = data.rename({sample_name: self.dims_out_[sample_name][0]})
         data = data.unstack()
         data = self._reorder_dims(data)
         return data
 
@@ -521,8 +533,8 @@ def inverse_transform_data(self, data: DataArray) -> Dataset:
         data_ds: Dataset = self._unstack_data(data)
 
         # Reindex data to original coordinates in case that some features at the boundaries were dropped
-        data_ds = self._reindex_dim(data_ds, "feature")
-        data_ds = self._reindex_dim(data_ds, "sample")
+        data_ds = self._reindex_dim(data_ds, self.feature_name)
+        data_ds = self._reindex_dim(data_ds, self.sample_name)
 
         return data_ds
 
@@ -531,7 +543,7 @@ def inverse_transform_components(self, data: DataArray) -> Dataset:
         data_ds: Dataset = self._unstack_components(data)
 
         # Reindex data to original coordinates in case that some features at the boundaries were dropped
-        data_ds = self._reindex_dim(data_ds, "feature")
+        data_ds = self._reindex_dim(data_ds, self.feature_name)
 
         return data_ds
 
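For Datasets, the feature dimension additionally absorbs the data variables via to_stacked_array. A small sketch with hypothetical variables, mirroring _stack and _unstack_data above:

    import numpy as np
    import xarray as xr

    ds = xr.Dataset(
        {
            "t2m": (("time", "lon"), np.random.rand(5, 4)),
            "slp": (("time", "lon"), np.random.rand(5, 4)),
        }
    )
    # Variables and all non-sample dims are folded into one "feature" dimension
    da = ds.to_stacked_array(new_dim="feature", sample_dims=("time",))
    da = da.rename({"time": "sample"}).transpose("sample", "feature")  # (5, 8)
    # Inverse: back to a Dataset, keyed by the "variable" level of the MultiIndex
    ds_restored = da.to_unstacked_dataset("feature", "variable")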
""" + feature_name = self.feature_name + stacked_data_list = [] # Stack individual DataArrays for i, (stacker, da) in enumerate(zip(self.stackers, data)): stacked_data = stacker.transform(da) - stacked_data = stacked_data.drop("feature") + stacked_data = stacked_data.drop(feature_name) # Replace original feature coordiantes with dummy coordinates - stacked_data.coords.update({"feature": self._dummy_feature_coords[i]}) + stacked_data.coords.update({feature_name: self._dummy_feature_coords[i]}) stacked_data_list.append(stacked_data) - return xr.concat(stacked_data_list, dim="feature") + return xr.concat(stacked_data_list, dim=feature_name) def inverse_transform_data(self, data: DataArray) -> DataArrayList: """Reshape the 2D data (sample x feature) back into its original shape.""" + feature_name = self.feature_name dalist = [] for stacker, features in zip(self.stackers, self._dummy_feature_coords): # Select the features corresponding to the current DataArray subda = data.sel(feature=features) # Replace dummy feature coordinates with original feature coordinates - subda = subda.assign_coords(feature=stacker.coords_out_["feature"]) + subda = subda.assign_coords(feature=stacker.coords_out_[feature_name]) # In case of MultiIndex we have to set the index to the feature dimension again - if isinstance(subda.indexes["feature"], pd.MultiIndex): - subda = subda.set_index(feature=stacker.dims_out_["feature"]) + if isinstance(subda.indexes[feature_name], pd.MultiIndex): + subda = subda.set_index(feature=stacker.dims_out_[feature_name]) else: # NOTE: This is a workaround for the case where the feature dimension is a tuple of length 1 # the problem is described here: https://github.com/pydata/xarray/discussions/7958 - subda = subda.rename(feature=stacker.dims_out_["feature"][0]) + subda = subda.rename(feature=stacker.dims_out_[feature_name][0]) # Inverse transform the data using the corresponding stacker subda = stacker.inverse_transform_data(subda) @@ -691,20 +713,21 @@ def inverse_transform_data(self, data: DataArray) -> DataArrayList: def inverse_transform_components(self, data: DataArray) -> DataArrayList: """Reshape the 2D data (mode x feature) back into its original shape.""" + feature_name = self.feature_name dalist = [] for stacker, features in zip(self.stackers, self._dummy_feature_coords): # Select the features corresponding to the current DataArray subda = data.sel(feature=features) # Replace dummy feature coordinates with original feature coordinates - subda = subda.assign_coords(feature=stacker.coords_out_["feature"]) + subda = subda.assign_coords(feature=stacker.coords_out_[feature_name]) # In case of MultiIndex we have to set the index to the feature dimension again - if isinstance(subda.indexes["feature"], pd.MultiIndex): - subda = subda.set_index(feature=stacker.dims_out_["feature"]) + if isinstance(subda.indexes[feature_name], pd.MultiIndex): + subda = subda.set_index(feature=stacker.dims_out_[feature_name]) else: # NOTE: This is a workaround for the case where the feature dimension is a tuple of length 1 # the problem is described here: https://github.com/pydata/xarray/discussions/7958 - subda = subda.rename(feature=stacker.dims_out_["feature"][0]) + subda = subda.rename(feature=stacker.dims_out_[feature_name][0]) # Inverse transform the data using the corresponding stacker subda = stacker.inverse_transform_components(subda) diff --git a/xeofs/preprocessing/stacker_factory.py b/xeofs/preprocessing/stacker_factory.py deleted file mode 100644 index ce73b25..0000000 --- 
diff --git a/xeofs/preprocessing/stacker_factory.py b/xeofs/preprocessing/stacker_factory.py
deleted file mode 100644
index ce73b25..0000000
--- a/xeofs/preprocessing/stacker_factory.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import xarray as xr
-
-from ._base_stacker import _BaseStacker
-from .stacker import SingleDataArrayStacker, SingleDatasetStacker, ListDataArrayStacker
-from ..utils.data_types import AnyDataObject
-
-
-class StackerFactory:
-    @staticmethod
-    def create_stacker(data: AnyDataObject, **kwargs) -> _BaseStacker:
-        if isinstance(data, xr.DataArray):
-            return SingleDataArrayStacker(**kwargs)
-        elif isinstance(data, xr.Dataset):
-            return SingleDatasetStacker(**kwargs)
-        elif isinstance(data, list) and all(
-            isinstance(da, xr.DataArray) for da in data
-        ):
-            return ListDataArrayStacker(**kwargs)
-        else:
-            raise ValueError("Invalid data type")
diff --git a/xeofs/utils/data_types.py b/xeofs/utils/data_types.py
index 379ff2a..01cea83 100644
--- a/xeofs/utils/data_types.py
+++ b/xeofs/utils/data_types.py
@@ -6,9 +6,8 @@
 Dataset: TypeAlias = xr.Dataset
 DataArrayList: TypeAlias = List[DataArray]
 SingleDataObject = TypeVar("SingleDataObject", DataArray, Dataset)
+XArrayData = TypeVar("XArrayData", DataArray, Dataset)
 AnyDataObject = TypeVar("AnyDataObject", DataArray, Dataset, DataArrayList)
-
-XarrayData: TypeAlias = DataArray | Dataset
 
 # Model dimensions are always 2-dimensional: sample and feature
 Dims: TypeAlias = Tuple[str]
 DimsList: TypeAlias = List[Dims]
diff --git a/xeofs/utils/xarray_utils.py b/xeofs/utils/xarray_utils.py
index 9d209ee..ddb711e 100644
--- a/xeofs/utils/xarray_utils.py
+++ b/xeofs/utils/xarray_utils.py
@@ -5,7 +5,7 @@
 from scipy.signal import hilbert  # type: ignore
 
 from .sanity_checks import ensure_tuple
-from .data_types import XarrayData, DataArray, Dataset, SingleDataObject
+from .data_types import DataArray, Dataset, SingleDataObject, XArrayData
 from .constants import VALID_LATITUDE_NAMES
 
 
@@ -92,7 +92,7 @@ def get_dims(
     return sample_dims, feature_dims  # type: ignore
 
 
-def _get_feature_dims(data: XarrayData, sample_dims: Tuple[str]) -> Tuple[Hashable]:
+def _get_feature_dims(data: XArrayData, sample_dims: Tuple[str]) -> Tuple[Hashable]:
     """Extracts the dimensions of a DataArray that are not included in the sample dimensions.

From 79b43cf874905c531fe0974a0255d95905101eda Mon Sep 17 00:00:00 2001
From: Niclas Rieger
Date: Wed, 16 Aug 2023 10:52:11 +0200
Subject: [PATCH 02/43] refactor: remove dim checks

Checks were hard-coded to ensure specific dimensions, e.g. sample and
feature. However, we want to allow more general data, with sample and
feature names provided by the user.
---
 xeofs/data_container/_base_cross_model_data_container.py | 7 -------
 xeofs/data_container/_base_model_data_container.py | 4 ----
 xeofs/data_container/eof_data_container.py | 4 +---
 xeofs/data_container/eof_rotator_data_container.py | 3 ---
 xeofs/data_container/mca_data_container.py | 4 ----
 xeofs/data_container/mca_rotator_data_container.py | 3 ---
 xeofs/data_container/opa_data_container.py | 2 --
 7 files changed, 1 insertion(+), 26 deletions(-)

diff --git a/xeofs/data_container/_base_cross_model_data_container.py b/xeofs/data_container/_base_cross_model_data_container.py
index a53a9fa..c81dedc 100644
--- a/xeofs/data_container/_base_cross_model_data_container.py
+++ b/xeofs/data_container/_base_cross_model_data_container.py
@@ -40,13 +40,6 @@ def set_data(
         scores1: DataArray,
         scores2: DataArray,
     ):
-        self._verify_dims(input_data1, ("sample", "feature"))
-        self._verify_dims(input_data2, ("sample", "feature"))
-        self._verify_dims(components1, ("feature", "mode"))
-        self._verify_dims(components2, ("feature", "mode"))
-        self._verify_dims(scores1, ("sample", "mode"))
-        self._verify_dims(scores2, ("sample", "mode"))
-
         components1.name = "left_components"
         components2.name = "right_components"
         scores1.name = "left_scores"
diff --git a/xeofs/data_container/_base_model_data_container.py b/xeofs/data_container/_base_model_data_container.py
index 2dc00f3..59ffc71 100644
--- a/xeofs/data_container/_base_model_data_container.py
+++ b/xeofs/data_container/_base_model_data_container.py
@@ -29,10 +29,6 @@ def _sanity_check(data) -> DataArray:
         return data
 
     def set_data(self, input_data: DataArray, components: DataArray, scores: DataArray):
-        self._verify_dims(input_data, ("sample", "feature"))
-        self._verify_dims(components, ("feature", "mode"))
-        self._verify_dims(scores, ("sample", "mode"))
-
         components.name = "components"
         scores.name = "scores"
diff --git a/xeofs/data_container/eof_data_container.py b/xeofs/data_container/eof_data_container.py
index b22a7da..2448966 100644
--- a/xeofs/data_container/eof_data_container.py
+++ b/xeofs/data_container/eof_data_container.py
@@ -28,14 +28,12 @@ def set_data(
     ):
         super().set_data(input_data=input_data, components=components, scores=scores)
 
-        self._verify_dims(explained_variance, ("mode",))
         self._explained_variance = explained_variance
         self._explained_variance.name = "explained_variance"
 
         self._total_variance = total_variance
         self._total_variance.name = "total_variance"
 
-        self._verify_dims(idx_modes_sorted, ("mode",))
         self._idx_modes_sorted = idx_modes_sorted
         self._idx_modes_sorted.name = "idx_modes_sorted"
 
@@ -68,7 +66,7 @@ def idx_modes_sorted(self) -> DataArray:
     @property
     def singular_values(self) -> DataArray:
         """Get the explained variance."""
-        svals = np.sqrt((self.input_data.sample.size - 1) * self.explained_variance)
+        svals = (self.explained_variance * (self.input_data.shape[0] - 1)) ** 0.5
         svals.attrs.update(self.explained_variance.attrs)
         svals.name = "singular_values"
         return svals
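The rewritten singular_values property avoids the hard-coded sample dimension name by using the array shape instead. Numerically both forms agree: with explained variances lambda_k and n samples, sigma_k = sqrt(lambda_k * (n - 1)). A tiny check (hypothetical numbers):

    import numpy as np

    n_samples = 100
    explained_variance = np.array([4.0, 1.0])               # lambda_k
    svals = (explained_variance * (n_samples - 1)) ** 0.5   # new form
    assert np.allclose(svals, np.sqrt((n_samples - 1) * explained_variance))  # old form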
diff --git a/xeofs/data_container/eof_rotator_data_container.py b/xeofs/data_container/eof_rotator_data_container.py
index 2b18d4d..cb0d334 100644
--- a/xeofs/data_container/eof_rotator_data_container.py
+++ b/xeofs/data_container/eof_rotator_data_container.py
@@ -39,15 +39,12 @@ def set_data(
             idx_modes_sorted=idx_modes_sorted,
         )
 
-        self._verify_dims(rotation_matrix, ("mode_m", "mode_n"))
         self._rotation_matrix = rotation_matrix
         self._rotation_matrix.name = "rotation_matrix"
 
-        self._verify_dims(phi_matrix, ("mode_m", "mode_n"))
         self._phi_matrix = phi_matrix
         self._phi_matrix.name = "phi_matrix"
 
-        self._verify_dims(modes_sign, ("mode",))
         self._modes_sign = modes_sign
         self._modes_sign.name = "modes_sign"
diff --git a/xeofs/data_container/mca_data_container.py b/xeofs/data_container/mca_data_container.py
index 68fe1f5..c24169c 100644
--- a/xeofs/data_container/mca_data_container.py
+++ b/xeofs/data_container/mca_data_container.py
@@ -42,22 +42,18 @@ def set_data(
             scores2=scores2,
         )
 
-        self._verify_dims(squared_covariance, ("mode",))
         self._squared_covariance = squared_covariance
         self._squared_covariance.name = "squared_covariance"
 
         self._total_squared_covariance = total_squared_covariance
         self._total_squared_covariance.name = "total_squared_covariance"
 
-        self._verify_dims(idx_modes_sorted, ("mode",))
         self._idx_modes_sorted = idx_modes_sorted
         self._idx_modes_sorted.name = "idx_modes_sorted"
 
-        self._verify_dims(norm1, ("mode",))
         self._norm1 = norm1
         self._norm1.name = "left_norm"
 
-        self._verify_dims(norm2, ("mode",))
         self._norm2 = norm2
         self._norm2.name = "right_norm"
diff --git a/xeofs/data_container/mca_rotator_data_container.py b/xeofs/data_container/mca_rotator_data_container.py
index fa051f2..4727ab7 100644
--- a/xeofs/data_container/mca_rotator_data_container.py
+++ b/xeofs/data_container/mca_rotator_data_container.py
@@ -50,15 +50,12 @@ def set_data(
             norm2=norm2,
         )
 
-        self._verify_dims(rotation_matrix, ("mode_m", "mode_n"))
         self._rotation_matrix = rotation_matrix
         self._rotation_matrix.name = "rotation_matrix"
 
-        self._verify_dims(phi_matrix, ("mode_m", "mode_n"))
         self._phi_matrix = phi_matrix
         self._phi_matrix.name = "phi_matrix"
 
-        self._verify_dims(modes_sign, ("mode",))
         self._modes_sign = modes_sign
         self._modes_sign.name = "modes_sign"
diff --git a/xeofs/data_container/opa_data_container.py b/xeofs/data_container/opa_data_container.py
index 7f6930e..035c529 100644
--- a/xeofs/data_container/opa_data_container.py
+++ b/xeofs/data_container/opa_data_container.py
@@ -28,11 +28,9 @@ def set_data(
     ):
         super().set_data(input_data=input_data, components=components, scores=scores)
 
-        self._verify_dims(decorrelation_time, ("mode",))
         self._decorrelation_time = decorrelation_time
         self._decorrelation_time.name = "decorrelation_time"
 
-        self._verify_dims(filter_patterns, ("feature", "mode"))
         self._filter_patterns = filter_patterns
         self._filter_patterns.name = "filter_patterns"

From 983e0adf7cf46dcea148d2f24a31b650054f4570 Mon Sep 17 00:00:00 2001
From: Niclas Rieger
Date: Wed, 16 Aug 2023 10:53:18 +0200
Subject: [PATCH 03/43] refactor: generalize hilbert transform over dims

Dimensions were hard-coded, but we want the Hilbert transform to be
applicable to arbitrary 2D arrays.
---
 xeofs/utils/xarray_utils.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/xeofs/utils/xarray_utils.py b/xeofs/utils/xarray_utils.py
index ddb711e..03df0c2 100644
--- a/xeofs/utils/xarray_utils.py
+++ b/xeofs/utils/xarray_utils.py
@@ -156,7 +156,7 @@ def total_variance(data: DataArray, dim) -> DataArray:
 
 
 def hilbert_transform(
-    data: DataArray, dim, padding="exp", decay_factor=0.2
+    data: DataArray, dims, padding="exp", decay_factor=0.2
 ) -> DataArray:
     """Hilbert transform with optional padding to mitigate spectral leakage.
 
@@ -180,8 +180,8 @@ def hilbert_transform(
     return xr.apply_ufunc(
         _hilbert_transform_with_padding,
         data,
-        input_core_dims=[["sample", "feature"]],
-        output_core_dims=[["sample", "feature"]],
+        input_core_dims=[dims],
+        output_core_dims=[dims],
         kwargs={"padding": padding, "decay_factor": decay_factor},
         dask="parallelized",
         dask_gufunc_kwargs={"allow_rechunk": True},
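With the hard-coded core dimensions removed, the caller now passes the two array dimensions explicitly. Schematically (hypothetical dimension names):

    import numpy as np
    import xarray as xr

    from xeofs.utils.xarray_utils import hilbert_transform

    da = xr.DataArray(np.random.rand(50, 8), dims=("time", "station"))
    # Core dims no longer need to be literally ("sample", "feature")
    analytic = hilbert_transform(da, dims=("time", "station"))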
From 39ee3fbc77b1386f92586e699cffe3aa54ad17cd Mon Sep 17 00:00:00 2001
From: Niclas Rieger
Date: Wed, 16 Aug 2023 11:14:14 +0200
Subject: [PATCH 04/43] refactor: add MultiIndexConverter to preprocessor

---
 xeofs/preprocessing/multi_index_converter.py | 21 ++++++++++----------
 xeofs/preprocessing/preprocessor.py | 4 ++--
 2 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/xeofs/preprocessing/multi_index_converter.py b/xeofs/preprocessing/multi_index_converter.py
index f937114..2680b95 100644
--- a/xeofs/preprocessing/multi_index_converter.py
+++ b/xeofs/preprocessing/multi_index_converter.py
@@ -98,16 +98,17 @@ def transform(self, X: DataArrayList) -> DataArrayList:
 
         return X_transformed
 
-    def inverse_transform(self, X: DataArrayList) -> DataArrayList:
-        # Check if input is a List of DataArrays
-        if not isinstance(X, list) or not all(isinstance(x, xr.DataArray) for x in X):
-            raise ValueError("Input must be a list of xarray DataArray")
-
-        X_inverse_transformed: List[DataArray] = []
-        for x, converter in zip(X, self.converters):
-            X_inverse_transformed.append(converter.inverse_transform(x))
-
-        return X_inverse_transformed
+    def inverse_transform(self, X: DataArrayList) -> DataArrayList | DataArray:
+        # Data & components are stored in a list of DataArrays
+        if isinstance(X, list):
+            X_inverse_transformed: List[DataArray] = []
+            for x, converter in zip(X, self.converters):
+                X_inverse_transformed.append(converter.inverse_transform(x))
+
+            return X_inverse_transformed
+        # Scores are stored as a DataArray
+        else:
+            return self.converters[0].inverse_transform(X)
 
     def fit_transform(self, X: DataArrayList, y=None) -> DataArrayList:
         return self.fit(X, y).transform(X)
diff --git a/xeofs/preprocessing/preprocessor.py b/xeofs/preprocessing/preprocessor.py
index 59eb2b5..46209ad 100644
--- a/xeofs/preprocessing/preprocessor.py
+++ b/xeofs/preprocessing/preprocessor.py
@@ -146,7 +146,7 @@ def inverse_transform_components(self, data: DataArray) -> AnyDataObject:
 
         """
         data = self.stacker.inverse_transform_components(data)
-        return self.converter.inverse_transform_components(data)  # type: ignore
+        return self.converter.inverse_transform(data)  # type: ignore
 
     def inverse_transform_scores(self, data: DataArray) -> AnyDataObject:
         """Inverse transform the scores.
@@ -163,4 +163,4 @@ def inverse_transform_scores(self, data: DataArray) -> AnyDataObject:
 
         """
         data = self.stacker.inverse_transform_scores(data)
-        return self.converter.inverse_transform(data)  # type: ignore
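The single inverse_transform now dispatches on input type: lists (data and components, one converter per DataArray) are mapped element-wise, while a bare DataArray (scores) falls through to the first converter. A toy illustration of the same pattern, not the library code itself:

    def _inverse(converters, X):
        # Mirrors ListMultiIndexConverter.inverse_transform
        if isinstance(X, list):
            return [c.inverse_transform(x) for c, x in zip(converters, X)]
        return converters[0].inverse_transform(X)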
From 4d521f83dea46c558202fc9ce9cfe35c5126453c Mon Sep 17 00:00:00 2001
From: Niclas Rieger
Date: Wed, 16 Aug 2023 11:15:16 +0200
Subject: [PATCH 05/43] feat: choose name of sample and feature dim

---
 xeofs/models/_base_cross_model.py | 14 ++++++++
 xeofs/models/_base_model.py | 16 +++++++++-
 xeofs/models/eof.py | 30 +++++++++++++----
 xeofs/models/eof_rotator.py | 10 +++---
 xeofs/models/mca.py | 53 +++++++++++++++++--------------
 xeofs/models/mca_rotator.py | 27 +++++++++-------
 xeofs/models/opa.py | 17 +++++++---
 7 files changed, 116 insertions(+), 51 deletions(-)

diff --git a/xeofs/models/_base_cross_model.py b/xeofs/models/_base_cross_model.py
index a18b8a8..db349ee 100644
--- a/xeofs/models/_base_cross_model.py
+++ b/xeofs/models/_base_cross_model.py
@@ -27,6 +27,10 @@ class _BaseCrossModel(ABC):
         Whether to use weights.
     n_pca_modes: int, default=None
         Number of PCA modes to calculate.
+    sample_name: str, default="sample"
+        Name of the new sample dimension.
+    feature_name: str, default="feature"
+        Name of the new feature dimension.
     solver: {"auto", "full", "randomized"}, default="auto"
         Solver to use for the SVD computation.
     solver_kwargs: dict, default={}
@@ -41,9 +45,14 @@ def __init__(
         self,
         n_modes=10,
         standardize=False,
         use_coslat=False,
         use_weights=False,
         n_pca_modes=None,
+        sample_name="sample",
+        feature_name="feature",
         solver="auto",
         solver_kwargs={},
     ):
+        self.sample_name = sample_name
+        self.feature_name = feature_name
+
         # Define model parameters
         self._params = {
             "n_modes": n_modes,
@@ -54,6 +63,9 @@ def __init__(
             "solver": solver,
         }
         self._solver_kwargs = solver_kwargs
+        self._preprocessor_kwargs = dict(
+            sample_name=sample_name, feature_name=feature_name
+        )
 
         # Define analysis-relevant meta data
         self.attrs = {"model": "BaseCrossModel"}
@@ -71,11 +83,13 @@ def __init__(
             with_std=standardize,
             with_coslat=use_coslat,
             with_weights=use_weights,
+            **self._preprocessor_kwargs,
         )
         self.preprocessor2 = Preprocessor(
             with_std=standardize,
             with_coslat=use_coslat,
             with_weights=use_weights,
+            **self._preprocessor_kwargs,
         )
         # Initialize the data container only to avoid type errors
         # The actual data container will be initialized in respective subclasses
diff --git a/xeofs/models/_base_model.py b/xeofs/models/_base_model.py
index 8963f40..b9adcc5 100644
--- a/xeofs/models/_base_model.py
+++ b/xeofs/models/_base_model.py
@@ -27,6 +27,10 @@ class _BaseModel(ABC):
         Whether to use cosine of latitude for scaling.
     use_weights: bool, default=False
         Whether to use weights.
+    sample_name: str, default="sample"
+        Name of the sample dimension.
+    feature_name: str, default="feature"
+        Name of the feature dimension.
     solver: {"auto", "full", "randomized"}, default="auto"
         Solver to use for the SVD computation.
     solver_kwargs: dict, default={}
@@ -40,9 +44,13 @@ def __init__(
         self,
         n_modes=10,
         standardize=False,
         use_coslat=False,
         use_weights=False,
+        sample_name="sample",
+        feature_name="feature",
         solver="auto",
         solver_kwargs={},
     ):
+        self.sample_name = sample_name
+        self.feature_name = feature_name
         # Define model parameters
         self._params = {
             "n_modes": n_modes,
@@ -52,6 +60,9 @@ def __init__(
             "solver": solver,
         }
         self._solver_kwargs = solver_kwargs
+        self._preprocessor_kwargs = dict(
+            sample_name=sample_name, feature_name=feature_name
+        )
 
         # Define analysis-relevant meta data
         self.attrs = {"model": "BaseModel"}
@@ -66,7 +77,10 @@ def __init__(
 
         # Initialize the Preprocessor to scale and stack the data
         self.preprocessor = Preprocessor(
-            with_std=standardize, with_coslat=use_coslat, with_weights=use_weights
+            with_std=standardize,
+            with_coslat=use_coslat,
+            with_weights=use_weights,
+            **self._preprocessor_kwargs
        )
         # Initialize the data container only to avoid type errors
         # The actual data container will be initialized in respective subclasses
diff --git a/xeofs/models/eof.py b/xeofs/models/eof.py
index ad29064..43658ee 100644
--- a/xeofs/models/eof.py
+++ b/xeofs/models/eof.py
@@ -44,6 +44,7 @@ def __init__(
         use_weights=False,
         solver="auto",
         solver_kwargs={},
+        **kwargs,
     ):
         super().__init__(
             n_modes=n_modes,
@@ -52,6 +53,7 @@ def __init__(
             use_weights=use_weights,
             solver=solver,
             solver_kwargs=solver_kwargs,
+            **kwargs,
         )
         self.attrs.update({"model": "EOF analysis"})
 
@@ -59,11 +61,14 @@ def __init__(
         self.data: EOFDataContainer = EOFDataContainer()
 
     def fit(self, data: AnyDataObject, dim, weights=None):
+        sample_name = self.sample_name
+        feature_name = self.feature_name
+
         # Preprocess the data
         input_data: DataArray = self.preprocessor.fit_transform(data, dim, weights)
 
         # Compute the total variance
-        total_variance = compute_total_variance(input_data, dim="sample")
+        total_variance = compute_total_variance(input_data, dim=sample_name)
 
         # Decompose the data
         n_modes = self._params["n_modes"]
@@ -71,14 +76,15 @@ def fit(self, data: AnyDataObject, dim, weights=None):
         decomposer = Decomposer(
             n_modes=n_modes, solver=self._params["solver"], **self._solver_kwargs
         )
-        decomposer.fit(input_data, dims=("sample", "feature"))
+        decomposer.fit(input_data, dims=(sample_name, feature_name))
 
         singular_values = decomposer.s_
         components = decomposer.V_
         scores = decomposer.U_
 
         # Compute the explained variance
-        explained_variance = singular_values**2 / (input_data.sample.size - 1)
+        n_samples = input_data.coords[sample_name].size
+        explained_variance = singular_values**2 / (n_samples - 1)
 
         # Index of the sorted explained variance
         # It's already sorted, we just need to assign it to the DataContainer
@@ -111,6 +117,7 @@ def transform(self, data: AnyDataObject) -> DataArray:
             Projections of the new data onto the components.
 
         """
+        feature_name = self.preprocessor.feature_name
         # Preprocess the data
         data_stacked: DataArray = self.preprocessor.transform(data)
 
@@ -118,7 +125,9 @@ def transform(self, data: AnyDataObject) -> DataArray:
         singular_values = self.data.singular_values
 
         # Project the data
-        projections = xr.dot(data_stacked, components, dims="feature") / singular_values
+        projections = (
+            xr.dot(data_stacked, components, dims=feature_name) / singular_values
+        )
         projections.name = "scores"
 
         # Unstack the projections
@@ -294,6 +303,9 @@ def __init__(self, padding="exp", decay_factor=0.2, **kwargs):
         self.data: ComplexEOFDataContainer = ComplexEOFDataContainer()
 
     def fit(self, data: AnyDataObject, dim, weights=None):
+        sample_name = self.sample_name
+        feature_name = self.feature_name
+
         # Preprocess the data
         input_data: DataArray = self.preprocessor.fit_transform(data, dim, weights)
 
@@ -301,11 +313,14 @@ def fit(self, data: AnyDataObject, dim, weights=None):
         padding = self._params["padding"]
         decay_factor = self._params["decay_factor"]
         input_data = hilbert_transform(
-            input_data, dim="sample", padding=padding, decay_factor=decay_factor
+            input_data,
+            dims=(sample_name, feature_name),
+            padding=padding,
+            decay_factor=decay_factor,
         )
 
         # Compute the total variance
-        total_variance = compute_total_variance(input_data, dim="sample")
+        total_variance = compute_total_variance(input_data, dim=sample_name)
 
         # Decompose the complex data
         n_modes = self._params["n_modes"]
@@ -320,7 +335,8 @@ def fit(self, data: AnyDataObject, dim, weights=None):
         scores = decomposer.U_
 
         # Compute the explained variance
-        explained_variance = singular_values**2 / (input_data.sample.size - 1)
+        n_samples = input_data.coords[sample_name].size
+        explained_variance = singular_values**2 / (n_samples - 1)
 
         # Index of the sorted explained variance
         # It's already sorted, we just need to assign it to the DataContainer
diff --git a/xeofs/models/eof_rotator.py b/xeofs/models/eof_rotator.py
index ab5d0f5..45419cc 100644
--- a/xeofs/models/eof_rotator.py
+++ b/xeofs/models/eof_rotator.py
@@ -88,6 +88,8 @@ def __init__(
     def fit(self, model):
         self.model = model
         self.preprocessor = model.preprocessor
+        sample_name = model.sample_name
+        feature_name = model.feature_name
 
         n_modes = self._params.get("n_modes")
         power = self._params.get("power")
@@ -104,9 +106,9 @@ def fit(self, model):
             promax,
             loadings,
             power,
-            input_core_dims=[["feature", "mode"], []],
+            input_core_dims=[[feature_name, "mode"], []],
             output_core_dims=[
-                ["feature", "mode"],
+                [feature_name, "mode"],
                 ["mode_m", "mode_n"],
                 ["mode_m", "mode_n"],
             ],
@@ -124,7 +126,7 @@ def fit(self, model):
         )
 
         # Reorder according to variance
-        expvar = (abs(rot_loadings) ** 2).sum("feature")
+        expvar = (abs(rot_loadings) ** 2).sum(feature_name)
         # NOTE: For delayed objects, the index must be computed.
         # NOTE: The index must be computed before sorting since argsort is not (yet) implemented in dask
         idx_sort = expvar.compute().argsort()[::-1]
@@ -152,7 +154,7 @@ def fit(self, model):
         scores = scores.isel(mode=idx_sort.values).assign_coords(mode=scores.mode)
 
         # Ensure consistent signs for deterministic output
-        idx_max_value = abs(rot_loadings).argmax("feature").compute()
+        idx_max_value = abs(rot_loadings).argmax(feature_name).compute()
         modes_sign = xr.apply_ufunc(
             np.sign, rot_loadings.isel(feature=idx_max_value), dask="allowed"
         )
diff --git a/xeofs/models/mca.py b/xeofs/models/mca.py
index 42f1037..cf628c7 100644
--- a/xeofs/models/mca.py
+++ b/xeofs/models/mca.py
@@ -1,4 +1,4 @@
-from typing import Tuple
+from typing import Tuple, Optional
 
 import numpy as np
 import xarray as xr
@@ -75,20 +75,25 @@ def _compute_cross_covariance_matrix(self, X1, X2):
 
         Note: It is assumed that the data objects are centered.
 
         """
-        if X1.sample.size != X2.sample.size:
+        sample_name = self.sample_name
+        n_samples = X1.coords[sample_name].size
+        if X1.coords[sample_name].size != X2.coords[sample_name].size:
             err_msg = "The two data objects must have the same number of samples."
             raise ValueError(err_msg)
 
-        return xr.dot(X1.conj(), X2, dims="sample") / (X1.sample.size - 1)
+        return xr.dot(X1.conj(), X2, dims=sample_name) / (n_samples - 1)
 
     def fit(
         self,
         data1: AnyDataObject,
         data2: AnyDataObject,
         dim,
-        weights1=None,
-        weights2=None,
+        weights1: Optional[AnyDataObject] = None,
+        weights2: Optional[AnyDataObject] = None,
     ):
+        sample_name = self.sample_name
+        feature_name = self.feature_name
+
         # Preprocess the data
         data1_processed: DataArray = self.preprocessor1.fit_transform(
             data1, dim, weights1
@@ -107,8 +112,8 @@ def fit(
         # Perform SVD on PCA-reduced data
         if (self.pca1 is not None) and (self.pca2 is not None):
             # Fit the PCA models
-            self.pca1.fit(data1_processed, "sample")
-            self.pca2.fit(data2_processed, "sample")
+            self.pca1.fit(data1_processed, dim=sample_name)
+            self.pca2.fit(data2_processed, dim=sample_name)
             # Get the PCA scores
             pca_scores1 = self.pca1.data.scores * self.pca1.data.singular_values
             pca_scores2 = self.pca2.data.scores * self.pca2.data.singular_values
@@ -134,8 +139,8 @@ def fit(
         # Perform SVD directly on data
         else:
             # Rename feature and associated dimensions of data objects to avoid index conflicts
-            dim_renamer1 = DimensionRenamer("feature", "1")
-            dim_renamer2 = DimensionRenamer("feature", "2")
+            dim_renamer1 = DimensionRenamer(feature_name, "1")
+            dim_renamer2 = DimensionRenamer(feature_name, "2")
             data1_processed_temp = dim_renamer1.fit_transform(data1_processed)
             data2_processed_temp = dim_renamer2.fit_transform(data2_processed)
             # Compute the cross-covariance matrix
@@ -167,8 +172,8 @@ def fit(
         idx_sorted_modes.coords.update(squared_covariance.coords)
 
         # Project the data onto the singular vectors
-        scores1 = xr.dot(data1_processed, singular_vectors1, dims="feature") / norm1
-        scores2 = xr.dot(data2_processed, singular_vectors2, dims="feature") / norm2
+        scores1 = xr.dot(data1_processed, singular_vectors1, dims=feature_name) / norm1
+        scores2 = xr.dot(data2_processed, singular_vectors2, dims=feature_name) / norm2
 
         self.data.set_data(
             input_data1=data1_processed,
@@ -576,8 +581,8 @@ def fit(
         data1: AnyDataObject,
         data2: AnyDataObject,
         dim,
-        weights1=None,
-        weights2=None,
+        weights1: Optional[AnyDataObject] = None,
+        weights2: Optional[AnyDataObject] = None,
     ):
         """Fit the model.
 
         Parameters
         ----------
         data1: AnyDataObject
             The left input data.
         data2: AnyDataObject
             The right input data.
         dim:
             Define the sample dimensions. The remaining dimensions are feature dimensions.
         weights1: Optional[AnyDataObject]
             If specified, the left input data will be weighted by this array.
         weights2: Optional[AnyDataObject]
             If specified, the right input data will be weighted by this array.
 
         """
+        sample_name = self.sample_name
+        feature_name = self.feature_name
 
         data1_processed: DataArray = self.preprocessor1.fit_transform(
             data1, dim, weights1
@@ -609,13 +616,13 @@ def fit(
         decay_factor = self._params["decay_factor"]
         data1_processed = hilbert_transform(
             data1_processed,
-            dim="sample",
+            dims=(sample_name, feature_name),
             padding=padding,
             decay_factor=decay_factor,
         )
         data2_processed = hilbert_transform(
             data2_processed,
-            dim="sample",
+            dims=(sample_name, feature_name),
             padding=padding,
             decay_factor=decay_factor,
         )
@@ -630,27 +637,27 @@ def fit(
         # Perform SVD on PCA-reduced data
         if (self.pca1 is not None) and (self.pca2 is not None):
             # Fit the PCA models
-            self.pca1.fit(data1_processed, "sample")
-            self.pca2.fit(data2_processed, "sample")
+            self.pca1.fit(data1_processed, sample_name)
+            self.pca2.fit(data2_processed, sample_name)
             # Get the PCA scores
             pca_scores1 = self.pca1.data.scores * self.pca1.data.singular_values
             pca_scores2 = self.pca2.data.scores * self.pca2.data.singular_values
             # Apply hilbert transform
             pca_scores1 = hilbert_transform(
                 pca_scores1,
-                dim="sample",
+                dims=(sample_name, feature_name),
                 padding=padding,
                 decay_factor=decay_factor,
             )
             pca_scores2 = hilbert_transform(
                 pca_scores2,
-                dim="sample",
+                dims=(sample_name, feature_name),
                 padding=padding,
                 decay_factor=decay_factor,
             )
             # Compute the cross-covariance matrix of the PCA scores
-            pca_scores1 = pca_scores1.rename({"mode": "feature"})
-            pca_scores2 = pca_scores2.rename({"mode": "feature"})
+            pca_scores1 = pca_scores1.rename({"mode": feature_name})
+            pca_scores2 = pca_scores2.rename({"mode": feature_name})
             cov_matrix = self._compute_cross_covariance_matrix(pca_scores1, pca_scores2)
 
             # Perform the SVD
@@ -670,8 +677,8 @@ def fit(
         # Perform SVD directly on data
         else:
             # Rename feature and associated dimensions of data objects to avoid index conflicts
-            dim_renamer1 = DimensionRenamer("feature", "1")
-            dim_renamer2 = DimensionRenamer("feature", "2")
+            dim_renamer1 = DimensionRenamer(feature_name, "1")
+            dim_renamer2 = DimensionRenamer(feature_name, "2")
             data1_processed_temp = dim_renamer1.fit_transform(data1_processed)
             data2_processed_temp = dim_renamer2.fit_transform(data2_processed)
             # Compute the cross-covariance matrix
comps1_rot = rot_loadings.isel( + {feature_name: slice(0, comps1.coords[feature_name].size)} + ) comps2_rot = rot_loadings.isel( - feature=slice(comps1.coords["feature"].size, None) + {feature_name: slice(comps1.coords[feature_name].size, None)} ) # Normalization factor of singular vectors norm1_rot = xr.apply_ufunc( np.linalg.norm, comps1_rot, - input_core_dims=[["feature", "mode"]], + input_core_dims=[[feature_name, "mode"]], output_core_dims=[["mode"]], - exclude_dims={"feature"}, + exclude_dims={feature_name}, kwargs={"axis": 0}, vectorize=False, dask="allowed", @@ -199,9 +204,9 @@ def fit(self, model: MCA | ComplexMCA): norm2_rot = xr.apply_ufunc( np.linalg.norm, comps2_rot, - input_core_dims=[["feature", "mode"]], + input_core_dims=[[feature_name, "mode"]], output_core_dims=[["mode"]], - exclude_dims={"feature"}, + exclude_dims={feature_name}, kwargs={"axis": 0}, vectorize=False, dask="allowed", @@ -266,9 +271,9 @@ def fit(self, model: MCA | ComplexMCA): ) # Ensure consitent signs for deterministic output - idx_max_value = abs(rot_loadings).argmax("feature").compute() + idx_max_value = abs(rot_loadings).argmax(feature_name).compute() modes_sign = xr.apply_ufunc( - np.sign, rot_loadings.isel(feature=idx_max_value), dask="allowed" + np.sign, rot_loadings.isel({feature_name: idx_max_value}), dask="allowed" ) # Drop all dimensions except 'mode' so that the index is clean for dim, coords in modes_sign.coords.items(): diff --git a/xeofs/models/opa.py b/xeofs/models/opa.py index 0222ed7..e1e5a5f 100644 --- a/xeofs/models/opa.py +++ b/xeofs/models/opa.py @@ -62,12 +62,15 @@ def __init__(self, n_modes, tau_max, n_pca_modes, **kwargs): def _Ctau(self, X, tau: int) -> DataArray: """Compute the time-lage covariance matrix C(tau) of the data X.""" + sample_name = self.preprocessor.sample_name X0 = X.copy(deep=True) - Xtau = X.shift(sample=-tau).dropna("sample") + Xtau = X.shift({sample_name: -tau}).dropna(sample_name) X0 = X0.rename({"mode": "feature1"}) Xtau = Xtau.rename({"mode": "feature2"}) - return xr.dot(X0, Xtau, dims=["sample"]) / (Xtau.sample.size - 1) + + n_samples = Xtau[sample_name].size + return xr.dot(X0, Xtau, dims=[sample_name]) / (n_samples - 1) @staticmethod def _compute_matrix_inverse(X, dims): @@ -82,12 +85,15 @@ def _compute_matrix_inverse(X, dims): ) def fit(self, data: AnyDataObject, dim, weights: Optional[AnyDataObject] = None): + sample_name = self.sample_name + feature_name = self.feature_name + # Preprocess the data input_data: DataArray = self.preprocessor.fit_transform(data, dim, weights) # Perform PCA as a pre-processing step pca = EOF(n_modes=self._params["n_pca_modes"], use_coslat=False) - pca.fit(input_data, dim="sample") + pca.fit(input_data, dim=sample_name) svals = pca.data.singular_values expvar = pca.data.explained_variance comps = pca.data.components * svals / np.sqrt(expvar) @@ -177,14 +183,15 @@ def fit(self, data: AnyDataObject, dim, weights: Optional[AnyDataObject] = None) # -> W (feature x mode2) # Rename dimensions - U = U.rename({"feature1": "feature"}) # -> (feature x mode) + U = U.rename({"feature1": feature_name}) # -> (feature x mode) V = V.rename({"mode2": "mode"}) # -> (feature x mode) W = W.rename({"mode2": "mode"}) # -> (feature x mode) P = P.rename({"mode2": "mode"}) # -> (sample x mode) + scores = scores.rename({"mode": feature_name}) # -> (sample x feature) # Store the results self.data.set_data( - input_data=scores.rename({"mode": "feature"}), + input_data=scores, components=W, scores=P, filter_patterns=V, From 
a737127c75531b52e37634d90efb7635d21c42b4 Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Sat, 16 Sep 2023 12:11:13 +0200 Subject: [PATCH 06/43] test: provide method to create synthetic data Added three methods for creating synthetic DataArray, Dataset and list of DataArrays. These methods will slowly replace mock data currently used. --- tests/conftest.py | 196 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 196 insertions(+) diff --git a/tests/conftest.py b/tests/conftest.py index 6bdac1b..eeae05b 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -4,11 +4,207 @@ import pytest import warnings import xarray as xr +import pandas as pd warnings.filterwarnings("ignore", message="numpy.dtype size changed") warnings.filterwarnings("ignore", message="numpy.ufunc size changed") +# ============================================================================= +# Synthetic data +# ============================================================================= +def generate_synthetic_dataarray( + n_sample=1, n_feature=1, has_multiindex=False, has_nan=None, is_dask=False, seed=0 +): + """Create synthetic DataArray. + + Parameters: + ------------ + n_sample: int + Number of sample dimensions. + n_dims_feature: int + Number of feature dimensions. + has_multiindex: bool, default=False + If True, the data will have a multiindex. + has_nan: [None, "isolated", "fulldim"], default=None + If specified, the data will contain NaNs. + is_dask: bool, default=False + If True, the data will be a dask array. + seed: int, default=0 + Seed for the random number generator. + + Returns: + --------- + data: xr.DataArray + Synthetic data. + + """ + rng = np.random.default_rng(seed) + + # Create dimensions + sample_dims = [f"sample{i}" for i in range(n_sample)] + feature_dims = [f"feature{i}" for i in range(n_feature)] + all_dims = feature_dims + sample_dims + + # Create coordinates + coords = {} + for i, dim in enumerate(all_dims): + if has_multiindex: + coords[dim] = pd.MultiIndex.from_arrays( + [np.arange(6 - i), np.arange(6 - i)], + names=[f"index{i}a", f"index{i}b"], + ) + else: + coords[dim] = np.arange(6 + i) + + # Get data shape + shape = tuple([len(coords[dim]) for dim in all_dims]) + + # Create data + noise = rng.normal(5, 3, size=shape) + signal = 2 * np.sin(np.linspace(0, 2 * np.pi, shape[-1])) + signal = np.broadcast_to(signal, shape) + data = signal + noise + data = xr.DataArray(data, dims=all_dims, coords=coords) + + # Add NaNs + if has_nan is not None: + if has_nan == "isolated": + isolated_point = {dim: 0 for dim in all_dims} + data.loc[isolated_point] = np.nan + elif has_nan == "fulldim": + fulldim_point = {dim: 0 for dim in feature_dims} + data.loc[fulldim_point] = np.nan + else: + raise ValueError(f"Invalid value for has_nan: {has_nan}") + + if is_dask: + data = data.chunk({"sample_0": 1}) + + return data + + +def generate_synthetic_dataset( + n_variables=1, + n_sample=1, + n_feature=1, + has_multiindex=False, + has_nan=None, + is_dask=False, + seed=0, +): + """Create synthetic Dataset. + + Parameters: + ------------ + n_variables: int + Number of variables. + n_sample: int + Number of sample dimensions. + n_dims_feature: int + Number of feature dimensions. + has_multiindex: bool, default=False + If True, the data will have a multiindex. + has_nan: [None, "isolated", "fulldim"], default=None + If specified, the data will contain NaNs. + is_dask: bool, default=False + If True, the data will be a dask array. + seed: int, default=0 + Seed for the random number generator. 
+ + Returns: + --------- + data: xr.Dataset + Synthetic data. + + """ + data = generate_synthetic_dataarray( + n_sample, n_feature, has_multiindex, has_nan, is_dask, seed + ) + dataset = xr.Dataset({"var0": data}) + seed += 1 + + for n in range(1, n_variables): + data_n = generate_synthetic_dataarray( + n_sample=n_sample, + n_feature=n_feature, + has_multiindex=has_multiindex, + has_nan=has_nan, + is_dask=is_dask, + seed=seed, + ) + dataset[f"var{n}"] = data_n + seed += 1 + return dataset + + +def generate_list_of_synthetic_dataarrays( + n_arrays=1, + n_sample=1, + n_feature=1, + has_multiindex=False, + has_nan=None, + is_dask=False, + seed=0, +): + """Create synthetic Dataset. + + Parameters: + ------------ + n_arrays: int + Number of DataArrays. + n_sample: int + Number of sample dimensions. + n_dims_feature: int + Number of feature dimensions. + has_multiindex: bool, default=False + If True, the data will have a multiindex. + has_nan: [None, "isolated", "fulldim"], default=None + If specified, the data will contain NaNs. + is_dask: bool, default=False + If True, the data will be a dask array. + seed: int, default=0 + Seed for the random number generator. + + Returns: + --------- + data: xr.Dataset + Synthetic data. + + """ + data_arrays = [] + for n in range(n_arrays): + data_n = generate_synthetic_dataarray( + n_sample=n_sample, + n_feature=n_feature, + has_multiindex=has_multiindex, + has_nan=has_nan, + is_dask=is_dask, + seed=seed, + ) + data_arrays.append(data_n) + seed += 1 + return data_arrays + + +@pytest.fixture +def synthetic_dataarray(request): + data = generate_synthetic_dataarray(*request.param) + return data + + +@pytest.fixture +def synthetic_dataset(request): + data = generate_synthetic_dataset(*request.param) + return data + + +@pytest.fixture +def synthetic_dataarray_list(request): + data = generate_list_of_synthetic_dataarrays(*request.param) + return data + + # ============================================================================= # Input data # ============================================================================= From b51f36ac2a4235aebafac972547bf14c02f39c7a Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Sun, 24 Sep 2023 15:47:15 +0200 Subject: [PATCH 07/43] style: add and streamline type hints --- xeofs/utils/data_types.py | 34 ++++++++++---- xeofs/utils/sanity_checks.py | 4 +- xeofs/utils/xarray_utils.py | 87 +++++++++++++++++++++--------------- 3 files changed, 79 insertions(+), 46 deletions(-) diff --git a/xeofs/utils/data_types.py b/xeofs/utils/data_types.py index 01cea83..0e8ad59 100644 --- a/xeofs/utils/data_types.py +++ b/xeofs/utils/data_types.py @@ -1,17 +1,35 @@ -from typing import List, TypeAlias, TypedDict, Optional, Tuple, TypeVar +from typing import ( + List, + TypeAlias, + Sequence, + TypedDict, + Optional, + Tuple, + TypeVar, + Hashable, +) import xarray as xr +import dask.array as da DataArray: TypeAlias = xr.DataArray +DataSet: TypeAlias = xr.Dataset +DataList: TypeAlias = List[xr.DataArray] +DaskArray: TypeAlias = da.Array # type: ignore +DataObject: TypeAlias = DataArray | DataSet | DataList +DataX2 = TypeVar("DataX2", DataArray, DataSet) +DataX3 = TypeVar("DataX3", DataArray, DataSet, DataList) + + +Dims: TypeAlias = Sequence[Hashable] +DimsTuple: TypeAlias = Tuple[Dims, ...] +DimsList: TypeAlias = List[Dims] +DimsListTuple: TypeAlias = Tuple[DimsList, ...] 
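As a point of reference for how the new aliases are meant to be consumed, here is a minimal sketch (not part of the patch; count_inputs is a hypothetical helper used purely for illustration):

import xarray as xr

from xeofs.utils.data_types import DataObject, Dims


def count_inputs(data: DataObject, sample_dims: Dims) -> int:
    # DataObject covers a single DataArray, a Dataset, or a list of
    # DataArrays; Dims is any sequence of dimension names.
    return len(data) if isinstance(data, list) else 1


da = xr.DataArray([[1.0, 2.0]], dims=("sample0", "feature0"))
assert count_inputs(da, ("sample0",)) == 1
assert count_inputs([da, da], ("sample0",)) == 2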
+ + +# Replace this with the above Dataset: TypeAlias = xr.Dataset DataArrayList: TypeAlias = List[DataArray] SingleDataObject = TypeVar("SingleDataObject", DataArray, Dataset) XArrayData = TypeVar("XArrayData", DataArray, Dataset) AnyDataObject = TypeVar("AnyDataObject", DataArray, Dataset, DataArrayList) -# Model dimensions are always 2-dimensional: sample and feature -Dims: TypeAlias = Tuple[str] -DimsList: TypeAlias = List[Dims] -SampleDims: TypeAlias = Dims -FeatureDims: TypeAlias = Dims | DimsList -# can be either like ('lat', 'lon') (1 DataArray) or (('lat', 'lon'), ('lon')) (multiple DataArrays) -ModelDims = TypedDict("ModelDims", {"sample": SampleDims, "feature": FeatureDims}) diff --git a/xeofs/utils/sanity_checks.py b/xeofs/utils/sanity_checks.py index 5023655..6104056 100644 --- a/xeofs/utils/sanity_checks.py +++ b/xeofs/utils/sanity_checks.py @@ -2,6 +2,8 @@ import xarray as xr +from xeofs.utils.data_types import DataArray, Dataset, DataArrayList, Dims + def assert_single_dataarray(da, name): """Check if the given object is a DataArray. @@ -61,7 +63,7 @@ def assert_dataarray_or_dataset(da, name): raise TypeError(f"{name} must be either a DataArray or Dataset") -def ensure_tuple(arg: Any) -> Tuple[str]: +def convert_to_dim_type(arg: Any) -> Dims: # Check for invalid types if not isinstance(arg, (str, tuple, list)): raise TypeError(f"Invalid input type: {type(arg).__name__}") diff --git a/xeofs/utils/xarray_utils.py b/xeofs/utils/xarray_utils.py index 03df0c2..9d3dab1 100644 --- a/xeofs/utils/xarray_utils.py +++ b/xeofs/utils/xarray_utils.py @@ -1,63 +1,77 @@ -from typing import List, Sequence, Hashable, Tuple +from typing import Sequence, Hashable, Tuple import numpy as np import xarray as xr from scipy.signal import hilbert # type: ignore -from .sanity_checks import ensure_tuple -from .data_types import DataArray, Dataset, SingleDataObject, XArrayData +from .sanity_checks import convert_to_dim_type +from .data_types import ( + Dims, + DataArray, + DataSet, + DataList, +) from .constants import VALID_LATITUDE_NAMES def compute_sqrt_cos_lat_weights( - data: SingleDataObject, dim: Hashable | Sequence[Hashable] -) -> SingleDataObject: + data: DataArray | DataSet, dim: Hashable | Sequence[Hashable] +) -> DataArray: """Compute the square root of cosine of latitude weights. Parameters ---------- - data : xarray.DataArray or xarray.Dataset + data : xr.DataArray Data to be scaled. dim : sequence of hashable Dimensions along which the data is considered to be a feature. Returns ------- - xarray.DataArray or xarray.Dataset + xr.DataArray Square root of cosine of latitude weights. """ - dim = ensure_tuple(dim) - # Find latitude coordinate - is_lat_coord = np.isin(np.array(dim), VALID_LATITUDE_NAMES) + if isinstance(data, (xr.DataArray, xr.Dataset)): + dim = convert_to_dim_type(dim) + # Find latitude coordinate + is_lat_coord = np.isin(np.array(dim), VALID_LATITUDE_NAMES) - # Select latitude coordinate and compute coslat weights - lat_coord = np.array(dim)[is_lat_coord] + # Select latitude coordinate and compute coslat weights + lat_coord = np.array(dim)[is_lat_coord] - if len(lat_coord) > 1: - raise ValueError( - f"{lat_coord} are ambiguous latitude coordinates. Only ONE of the following is allowed for computing coslat weights: {VALID_LATITUDE_NAMES}" - ) + if len(lat_coord) > 1: + raise ValueError( + f"{lat_coord} are ambiguous latitude coordinates. 
Only ONE of the following is allowed for computing coslat weights: {VALID_LATITUDE_NAMES}" + ) + + if len(lat_coord) == 1: + latitudes: DataArray = data.coords[lat_coord[0]] + assert isinstance(latitudes, xr.DataArray) + weights = sqrt_cos_lat_weights(latitudes) + # Features that cannot be associated to a latitude receive a weight of 1 + weights = weights.where(weights.notnull(), 1) + else: + raise ValueError( + "No latitude coordinate was found to compute coslat weights. Must be one of the following: {:}".format( + VALID_LATITUDE_NAMES + ) + ) + weights.name = "coslat_weights" + return weights - if len(lat_coord) == 1: - latitudes = data.coords[lat_coord[0]] - weights = sqrt_cos_lat_weights(latitudes) - # Features that cannot be associated to a latitude receive a weight of 1 - weights = weights.where(weights.notnull(), 1) else: - raise ValueError( - "No latitude coordinate was found to compute coslat weights. Must be one of the following: {:}".format( - VALID_LATITUDE_NAMES + raise TypeError( + "Invalid input type: {:}. Expected one of the following: DataArray".format( + type(data).__name__ ) ) - weights.name = "coslat_weights" - return weights def get_dims( - data: DataArray | Dataset | List[DataArray], - sample_dims: Hashable | Sequence[Hashable] | List[Sequence[Hashable]], + data: DataArray | DataSet | DataList, + sample_dims: Hashable | Sequence[Hashable], ) -> Tuple[Hashable, Hashable]: """Extracts the dimensions of a DataArray or Dataset that are not included in the sample dimensions. @@ -78,11 +92,11 @@ def get_dims( """ # Check for invalid types if isinstance(data, (xr.DataArray, xr.Dataset)): - sample_dims = ensure_tuple(sample_dims) + sample_dims = convert_to_dim_type(sample_dims) feature_dims = _get_feature_dims(data, sample_dims) elif isinstance(data, list): - sample_dims = ensure_tuple(sample_dims) + sample_dims = convert_to_dim_type(sample_dims) feature_dims = [_get_feature_dims(da, sample_dims) for da in data] else: err_message = f"Invalid input type: {type(data).__name__}. Expected one of " @@ -92,7 +106,7 @@ def get_dims( return sample_dims, feature_dims # type: ignore -def _get_feature_dims(data: XArrayData, sample_dims: Tuple[str]) -> Tuple[Hashable]: +def _get_feature_dims(data: DataArray | DataSet, sample_dims: Dims) -> Dims: """Extracts the dimensions of a DataArray that are not included in the sample dimensions. @@ -109,21 +123,20 @@ def _get_feature_dims(data: XArrayData, sample_dims: Tuple[str]) -> Tuple[Hashab Feature dimensions. """ - feature_dims = tuple(dim for dim in data.dims if dim not in sample_dims) - return feature_dims + return tuple(dim for dim in data.dims if dim not in sample_dims) -def sqrt_cos_lat_weights(data: SingleDataObject) -> SingleDataObject: +def sqrt_cos_lat_weights(data: DataArray) -> DataArray: """Compute the square root of the cosine of the latitude. Parameters: ------------ - data: xr.DataArray or xr.Dataset + data: xr.DataArray Input data. Returns: --------- - sqrt_cos_lat: xr.DataArray or xr.Dataset + sqrt_cos_lat: xr.DataArray Square root of the cosine of the latitude. """ @@ -202,7 +215,7 @@ def _np_sqrt_cos_lat_weights(data): Square root of the cosine of the latitude. 
""" - return np.sqrt(np.cos(np.deg2rad(data))).clip(0, 1) + return np.sqrt(np.cos(np.deg2rad(data)).clip(0, 1)) def _hilbert_transform_with_padding(y, padding="exp", decay_factor=0.2): From 15f01947ee430138dad6a301dd04ab84c2e534c4 Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Sun, 24 Sep 2023 15:50:26 +0200 Subject: [PATCH 08/43] test: add more flexible data generation classes Support generation of test cases (resolve #55) --- tests/conftest.py | 115 ++++++++++++++++++++++++++-------------------- 1 file changed, 65 insertions(+), 50 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index eeae05b..7927083 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -6,6 +6,8 @@ import xarray as xr import pandas as pd +from xeofs.utils.data_types import DataArray, DataSet, DataList + warnings.filterwarnings("ignore", message="numpy.dtype size changed") warnings.filterwarnings("ignore", message="numpy.ufunc size changed") @@ -14,8 +16,13 @@ # Synthetic data # ============================================================================= def generate_synthetic_dataarray( - n_sample=1, n_feature=1, has_multiindex=False, has_nan=None, is_dask=False, seed=0 -): + n_sample=1, + n_feature=1, + index_policy="index", + nan_policy="no_nan", + dask_policy="no_dask", + seed=0, +) -> DataArray: """Create synthetic DataArray. Parameters: @@ -24,12 +31,12 @@ def generate_synthetic_dataarray( Number of sample dimensions. n_dims_feature: int Number of feature dimensions. - has_multiindex: bool, default=False - If True, the data will have a multiindex. - has_nan: [None, "isolated", "fulldim"], default=None + index_policy: ["index", "multiindex"], default="index" + If "multiindex", the data will have a multiindex. + nan_policy: ["no_nan", "isolated", "fulldim"], default="no_nan" If specified, the data will contain NaNs. - is_dask: bool, default=False - If True, the data will be a dask array. + dsak_policy: ["no_dask", "dask"], default="no_dask" + If "dask", the data will be a dask array. seed: int, default=0 Seed for the random number generator. 
@@ -46,16 +53,18 @@ def generate_synthetic_dataarray( feature_dims = [f"feature{i}" for i in range(n_feature)] all_dims = feature_dims + sample_dims - # Create coordinates + # Create coordinates/indices coords = {} for i, dim in enumerate(all_dims): - if has_multiindex: + if index_policy == "multiindex": coords[dim] = pd.MultiIndex.from_arrays( [np.arange(6 - i), np.arange(6 - i)], names=[f"index{i}a", f"index{i}b"], ) - else: + elif index_policy == "index": coords[dim] = np.arange(6 + i) + else: + raise ValueError(f"Invalid value for index_policy: {index_policy}") # Get data shape shape = tuple([len(coords[dim]) for dim in all_dims]) @@ -68,18 +77,24 @@ def generate_synthetic_dataarray( data = xr.DataArray(data, dims=all_dims, coords=coords) # Add NaNs - if has_nan is not None: - if has_nan == "isolated": - isolated_point = {dim: 0 for dim in all_dims} - data.loc[isolated_point] = np.nan - elif has_nan == "fulldim": - fulldim_point = {dim: 0 for dim in feature_dims} - data.loc[fulldim_point] = np.nan - else: - raise ValueError(f"Invalid value for has_nan: {has_nan}") - - if is_dask: - data = data.chunk({"sample_0": 1}) + if nan_policy == "no_nan": + pass + elif nan_policy == "isolated": + isolated_point = {dim: 0 for dim in all_dims} + data.loc[isolated_point] = np.nan + elif nan_policy == "fulldim": + fulldim_point = {dim: 0 for dim in feature_dims} + data.loc[fulldim_point] = np.nan + else: + raise ValueError(f"Invalid value for nan_policy: {nan_policy}") + + # Convert to dask array + if dask_policy == "no_dask": + pass + elif dask_policy == "dask": + data = data.chunk({"sample0": 1}) + else: + raise ValueError(f"Invalid value for dask_policy: {dask_policy}") return data @@ -88,11 +103,11 @@ def generate_synthetic_dataset( n_variables=1, n_sample=1, n_feature=1, - has_multiindex=False, - has_nan=None, - is_dask=False, + index_policy="index", + nan_policy="no_nan", + dask_policy="no_dask", seed=0, -): +) -> DataSet: """Create synthetic Dataset. Parameters: @@ -103,12 +118,12 @@ def generate_synthetic_dataset( Number of sample dimensions. n_dims_feature: int Number of feature dimensions. - has_multiindex: bool, default=False - If True, the data will have a multiindex. - has_nan: [None, "isolated", "fulldim"], default=None + index_policy: ["index", "multiindex"], default="index" + If "multiindex", the data will have a multiindex. + nan_policy: ["no_nan", "isolated", "fulldim"], default="no_nan" If specified, the data will contain NaNs. - is_dask: bool, default=False - If True, the data will be a dask array. + dask_policy: ["no_dask", "dask"], default="no_dask" + If "dask", the data will be a dask array. seed: int, default=0 Seed for the random number generator. 
@@ -119,7 +134,7 @@ def generate_synthetic_dataset( """ data = generate_synthetic_dataarray( - n_sample, n_feature, has_multiindex, has_nan, is_dask, seed + n_sample, n_feature, index_policy, nan_policy, dask_policy, seed ) dataset = xr.Dataset({"var0": data}) seed += 1 @@ -128,9 +143,9 @@ def generate_synthetic_dataset( data_n = generate_synthetic_dataarray( n_sample=n_sample, n_feature=n_feature, - has_multiindex=has_multiindex, - has_nan=has_nan, - is_dask=is_dask, + index_policy=index_policy, + nan_policy=nan_policy, + dask_policy=dask_policy, seed=seed, ) dataset[f"var{n}"] = data_n @@ -142,11 +157,11 @@ def generate_list_of_synthetic_dataarrays( n_arrays=1, n_sample=1, n_feature=1, - has_multiindex=False, - has_nan=None, - is_dask=False, + index_policy="index", + nan_policy="no_nan", + dask_policy="no_dask", seed=0, -): +) -> DataList: """Create synthetic Dataset. Parameters: @@ -157,12 +172,12 @@ def generate_list_of_synthetic_dataarrays( Number of sample dimensions. n_dims_feature: int Number of feature dimensions. - has_multiindex: bool, default=False - If True, the data will have a multiindex. - has_nan: [None, "isolated", "fulldim"], default=None + index_policy: ["index", "multiindex"], default="index" + If "multiindex", the data will have a multiindex. + nan_policy: ["no_nan", "isolated", "fulldim"], default="no_nan" If specified, the data will contain NaNs. - is_dask: bool, default=False - If True, the data will be a dask array. + dask_policy: ["no_dask", "dask"], default="no_dask" + If "dask", the data will be a dask array. seed: int, default=0 Seed for the random number generator. @@ -177,9 +192,9 @@ def generate_list_of_synthetic_dataarrays( data_n = generate_synthetic_dataarray( n_sample=n_sample, n_feature=n_feature, - has_multiindex=has_multiindex, - has_nan=has_nan, - is_dask=is_dask, + index_policy=index_policy, + nan_policy=nan_policy, + dask_policy=dask_policy, seed=seed, ) data_arrays.append(data_n) @@ -188,19 +203,19 @@ def generate_list_of_synthetic_dataarrays( @pytest.fixture -def synthetic_dataarray(request): +def synthetic_dataarray(request) -> DataArray: data = generate_synthetic_dataarray(*request.param) return data @pytest.fixture -def synthetic_dataset(request): +def synthetic_dataset(request) -> DataSet: data = generate_synthetic_dataset(*request.param) return data @pytest.fixture -def synthetic_dataarray_list(request): +def synthetic_datalist(request) -> DataList: data = generate_list_of_synthetic_dataarrays(*request.param) return data From 52935a0c1e09484046276bedd102ea92d65690d8 Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Sun, 24 Sep 2023 15:51:37 +0200 Subject: [PATCH 09/43] refactor: add MultiIndexConvert & tests --- .../test_dataarray_multiindex_converter.py | 81 +++++++++ .../test_datalist_multiindex_converter.py | 93 ++++++++++ .../test_dataset_multiindex_converter.py | 88 +++++++++ tests/utilities.py | 172 ++++++++++++++++++ xeofs/preprocessing/multi_index_converter.py | 117 ++++++------ 5 files changed, 499 insertions(+), 52 deletions(-) create mode 100644 tests/preprocessing/test_dataarray_multiindex_converter.py create mode 100644 tests/preprocessing/test_datalist_multiindex_converter.py create mode 100644 tests/preprocessing/test_dataset_multiindex_converter.py create mode 100644 tests/utilities.py diff --git a/tests/preprocessing/test_dataarray_multiindex_converter.py b/tests/preprocessing/test_dataarray_multiindex_converter.py new file mode 100644 index 0000000..d0fa9f6 --- /dev/null +++ 
b/tests/preprocessing/test_dataarray_multiindex_converter.py @@ -0,0 +1,81 @@ +import pytest +import pandas as pd + +from xeofs.preprocessing.multi_index_converter import ( + DataArrayMultiIndexConverter, +) +from ..conftest import generate_synthetic_dataarray +from xeofs.utils.data_types import DataArray +from ..utilities import assert_expected_dims, data_is_dask, data_has_multiindex + +# ============================================================================= +# GENERALLY VALID TEST CASES +# ============================================================================= +N_SAMPLE_DIMS = [1, 2] +N_FEATURE_DIMS = [1, 2] +INDEX_POLICY = ["index", "multiindex"] +NAN_POLICY = ["no_nan"] +DASK_POLICY = ["no_dask", "dask"] +SEED = [0] + +VALID_TEST_DATA = [ + (ns, nf, index, nan, dask) + for ns in N_SAMPLE_DIMS + for nf in N_FEATURE_DIMS + for index in INDEX_POLICY + for nan in NAN_POLICY + for dask in DASK_POLICY +] + + +@pytest.mark.parametrize( + "synthetic_dataarray", + VALID_TEST_DATA, + indirect=["synthetic_dataarray"], +) +def test_transform(synthetic_dataarray): + converter = DataArrayMultiIndexConverter() + converter.fit(synthetic_dataarray) + transformed_data = converter.transform(synthetic_dataarray) + + is_dask_before = data_is_dask(synthetic_dataarray) + is_dask_after = data_is_dask(transformed_data) + + # Transforming doesn't change the dask-ness of the data + assert is_dask_before == is_dask_after + + # Transforming removes MultiIndex + assert data_has_multiindex(transformed_data) is False + + # Result is robust to calling the method multiple times + transformed_data = converter.transform(synthetic_dataarray) + assert data_has_multiindex(transformed_data) is False + + # Transforming data twice won't change the data + transformed_data2 = converter.transform(transformed_data) + assert data_has_multiindex(transformed_data2) is False + assert transformed_data.identical(transformed_data2) + + +@pytest.mark.parametrize( + "synthetic_dataarray", + VALID_TEST_DATA, + indirect=["synthetic_dataarray"], +) +def test_inverse_transform_data(synthetic_dataarray): + converter = DataArrayMultiIndexConverter() + converter.fit(synthetic_dataarray) + transformed_data = converter.transform(synthetic_dataarray) + inverse_transformed_data = converter.inverse_transform_data(transformed_data) + + is_dask_before = data_is_dask(synthetic_dataarray) + is_dask_after = data_is_dask(transformed_data) + + # Transforming doesn't change the dask-ness of the data + assert is_dask_before == is_dask_after + + has_multiindex_before = data_has_multiindex(synthetic_dataarray) + has_multiindex_after = data_has_multiindex(inverse_transformed_data) + + assert inverse_transformed_data.identical(synthetic_dataarray) + assert has_multiindex_before == has_multiindex_after diff --git a/tests/preprocessing/test_datalist_multiindex_converter.py b/tests/preprocessing/test_datalist_multiindex_converter.py new file mode 100644 index 0000000..6c90101 --- /dev/null +++ b/tests/preprocessing/test_datalist_multiindex_converter.py @@ -0,0 +1,93 @@ +import pytest +import pandas as pd + +from xeofs.preprocessing.multi_index_converter import ( + DataListMultiIndexConverter, +) +from xeofs.utils.data_types import DataArray +from ..utilities import assert_expected_dims, data_is_dask, data_has_multiindex + +# ============================================================================= +# GENERALLY VALID TEST CASES +# ============================================================================= +N_ARRAYS = [1, 2] +N_SAMPLE_DIMS = [1, 2] 
+N_FEATURE_DIMS = [1, 2] +INDEX_POLICY = ["index"] +NAN_POLICY = ["no_nan"] +DASK_POLICY = ["no_dask", "dask"] +SEED = [0] + +VALID_TEST_DATA = [ + (na, ns, nf, index, nan, dask) + for na in N_ARRAYS + for ns in N_SAMPLE_DIMS + for nf in N_FEATURE_DIMS + for index in INDEX_POLICY + for nan in NAN_POLICY + for dask in DASK_POLICY +] + + +# TESTS +# ============================================================================= +@pytest.mark.parametrize( + "synthetic_datalist", + VALID_TEST_DATA, + indirect=["synthetic_datalist"], +) +def test_transform(synthetic_datalist): + converter = DataListMultiIndexConverter() + converter.fit(synthetic_datalist) + transformed_data = converter.transform(synthetic_datalist) + + is_dask_before = data_is_dask(synthetic_datalist) + is_dask_after = data_is_dask(transformed_data) + + # Transforming does not affect dimensions + assert_expected_dims(transformed_data, synthetic_datalist, policy="all") + + # Transforming doesn't change the dask-ness of the data + assert is_dask_before == is_dask_after + + # Transforming removes MultiIndex + assert data_has_multiindex(transformed_data) is False + + # Result is robust to calling the method multiple times + transformed_data = converter.transform(synthetic_datalist) + assert data_has_multiindex(transformed_data) is False + + # Transforming data twice won't change the data + transformed_data2 = converter.transform(transformed_data) + assert data_has_multiindex(transformed_data2) is False + assert all( + trans.identical(data) + for trans, data in zip(transformed_data, transformed_data2) + ) + + +@pytest.mark.parametrize( + "synthetic_datalist", + VALID_TEST_DATA, + indirect=["synthetic_datalist"], +) +def test_inverse_transform(synthetic_datalist): + converter = DataListMultiIndexConverter() + converter.fit(synthetic_datalist) + transformed_data = converter.transform(synthetic_datalist) + inverse_transformed_data = converter.inverse_transform_data(transformed_data) + + is_dask_before = data_is_dask(synthetic_datalist) + is_dask_after = data_is_dask(transformed_data) + + # Transforming doesn't change the dask-ness of the data + assert is_dask_before == is_dask_after + + has_multiindex_before = data_has_multiindex(synthetic_datalist) + has_multiindex_after = data_has_multiindex(inverse_transformed_data) + + assert all( + trans.identical(data) + for trans, data in zip(inverse_transformed_data, synthetic_datalist) + ) + assert has_multiindex_before == has_multiindex_after diff --git a/tests/preprocessing/test_dataset_multiindex_converter.py b/tests/preprocessing/test_dataset_multiindex_converter.py new file mode 100644 index 0000000..d7d1aa3 --- /dev/null +++ b/tests/preprocessing/test_dataset_multiindex_converter.py @@ -0,0 +1,88 @@ +import pytest +import pandas as pd + +from xeofs.preprocessing.multi_index_converter import ( + DataSetMultiIndexConverter, +) +from ..conftest import generate_synthetic_dataset +from xeofs.utils.data_types import DataArray +from ..utilities import assert_expected_dims, data_is_dask, data_has_multiindex + +# ============================================================================= +# GENERALLY VALID TEST CASES +# ============================================================================= +N_VARIABLES = [1, 2] +N_SAMPLE_DIMS = [1, 2] +N_FEATURE_DIMS = [1, 2] +INDEX_POLICY = ["index"] +NAN_POLICY = ["no_nan"] +DASK_POLICY = ["no_dask", "dask"] +SEED = [0] + +VALID_TEST_DATA = [ + (nv, ns, nf, index, nan, dask) + for nv in N_VARIABLES + for ns in N_SAMPLE_DIMS + for nf in N_FEATURE_DIMS + 
for index in INDEX_POLICY + for nan in NAN_POLICY + for dask in DASK_POLICY +] + + +# TESTS +# ============================================================================= +@pytest.mark.parametrize( + "synthetic_dataset", + VALID_TEST_DATA, + indirect=["synthetic_dataset"], +) +def test_transform(synthetic_dataset): + converter = DataSetMultiIndexConverter() + converter.fit(synthetic_dataset) + transformed_data = converter.transform(synthetic_dataset) + + is_dask_before = data_is_dask(synthetic_dataset) + is_dask_after = data_is_dask(transformed_data) + + # Transforming does not affect dimensions + assert_expected_dims(transformed_data, synthetic_dataset, policy="all") + + # Transforming doesn't change the dask-ness of the data + assert is_dask_before == is_dask_after + + # Transforming removes MultiIndex + assert data_has_multiindex(transformed_data) is False + + # Result is robust to calling the method multiple times + transformed_data = converter.transform(synthetic_dataset) + assert data_has_multiindex(transformed_data) is False + + # Transforming data twice won't change the data + transformed_data2 = converter.transform(transformed_data) + assert data_has_multiindex(transformed_data2) is False + assert transformed_data.identical(transformed_data2) + + +@pytest.mark.parametrize( + "synthetic_dataset", + VALID_TEST_DATA, + indirect=["synthetic_dataset"], +) +def test_inverse_transform(synthetic_dataset): + converter = DataSetMultiIndexConverter() + converter.fit(synthetic_dataset) + transformed_data = converter.transform(synthetic_dataset) + inverse_transformed_data = converter.inverse_transform_data(transformed_data) + + is_dask_before = data_is_dask(synthetic_dataset) + is_dask_after = data_is_dask(transformed_data) + + # Transforming doesn't change the dask-ness of the data + assert is_dask_before == is_dask_after + + has_multiindex_before = data_has_multiindex(synthetic_dataset) + has_multiindex_after = data_has_multiindex(inverse_transformed_data) + + assert inverse_transformed_data.identical(synthetic_dataset) + assert has_multiindex_before == has_multiindex_after diff --git a/tests/utilities.py b/tests/utilities.py new file mode 100644 index 0000000..1609f35 --- /dev/null +++ b/tests/utilities.py @@ -0,0 +1,172 @@ +from typing import Tuple, List, Hashable +import numpy as np +import pandas as pd +import xarray as xr +import dask.array as da +from xeofs.utils.data_types import ( + DataArray, + DataSet, + DataList, + DaskArray, + Dims, + DimsList, + DimsTuple, + DimsListTuple, +) + + +def is_xdata(data): + return isinstance(data, (DataArray, DataSet)) + + +def get_dims_from_data(data: DataArray | DataSet) -> DimsTuple: + # If data is DataArray/Dataset + if is_xdata(data): + data_dims: Dims = tuple(data.dims) + sample_dims: Dims = tuple([dim for dim in data.dims if "sample" in str(dim)]) + feature_dims: Dims = tuple([dim for dim in data.dims if "feature" in str(dim)]) + return data_dims, sample_dims, feature_dims + else: + raise ValueError("unrecognized input type") + + +def get_dims_from_data_list(data_list: DataList) -> DimsListTuple: + # If data is list + if isinstance(data_list, list): + data_dims: DimsList = [data.dims for data in data_list] + sample_dims: DimsList = [] + feature_dims: DimsList = [] + for data in data_list: + sdims = tuple([dim for dim in data.dims if "sample" in str(dim)]) + fdims = tuple([dim for dim in data.dims if "feature" in str(dim)]) + sample_dims.append(sdims) + feature_dims.append(fdims) + return data_dims, sample_dims, feature_dims + + else: + raise 
ValueError("unrecognized input type") + + +def data_has_multiindex(data: DataArray | DataSet | DataList) -> bool: + """Check if the given data object has any MultiIndex.""" + if isinstance(data, DataArray) or isinstance(data, DataSet): + return any(isinstance(index, pd.MultiIndex) for index in data.indexes.values()) + elif isinstance(data, list): + return all(data_has_multiindex(da) for da in data) + else: + raise ValueError("unrecognized input type") + + +def data_is_dask(data: DataArray | DataSet | DataList) -> bool: + """Check if the given data is backed by a dask array.""" + + # If data is a DataArray, check its underlying data type + if isinstance(data, DataArray): + return isinstance(data.data, DaskArray) + + # If data is a DataSet, recursively check all contained DataArrays + if isinstance(data, DataSet): + return all(data_is_dask(da) for da in data.data_vars.values()) + + # If data is a list, recursively check each element in the list + if isinstance(data, list): + return all(data_is_dask(da) for da in data) + + # If none of the above, the data type is unrecognized + raise ValueError("unrecognized data type.") + + +def assert_expected_dims(data1, data2, policy="all"): + """ + Check if dimensions of two data objects matches. + + Parameters: + - data1: Reference data object (either a DataArray, DataSet, or list of DataArray) + - data2: Test data object (same type as data1) + - policy: Policy to check the dimensions. Can be either "all", "feature" or "sample" + + """ + + if is_xdata(data1) and is_xdata(data2): + all_dims1, sample_dims1, feature_dims1 = get_dims_from_data(data1) + all_dims2, sample_dims2, feature_dims2 = get_dims_from_data(data2) + + if policy == "all": + err_msg = "Dimensions do not match: {:} vs {:}".format(all_dims1, all_dims2) + assert set(all_dims1) == set(all_dims2), err_msg + elif policy == "feature": + err_msg = "Dimensions do not match: {:} vs {:}".format( + feature_dims1, feature_dims2 + ) + assert set(feature_dims1) == set(feature_dims2), err_msg + assert len(sample_dims2) == 0, "Sample dimensions should be empty" + assert "mode" in all_dims2, "Mode dimension is missing" + + elif policy == "sample": + err_msg = "Dimensions do not match: {:} vs {:}".format( + sample_dims1, sample_dims2 + ) + assert set(sample_dims1) == set(sample_dims2), err_msg + assert len(feature_dims2) == 0, "Feature dimensions should be empty" + assert "mode" in all_dims2, "Mode dimension is missing" + else: + raise ValueError("Unrecognized policy: {:}".format(policy)) + + elif isinstance(data1, list) and isinstance(data2, list): + for da1, da2 in zip(data1, data2): + assert_expected_dims(da1, da2, policy=policy) + + # If neither of the above conditions are met, raise an error + else: + raise ValueError( + "Cannot check coordinates. Unrecognized data type. data1: {:}, data2: {:}".format( + type(data1), type(data2) + ) + ) + + +def assert_expected_coords(data1, data2, policy="all") -> None: + """ + Check if coordinates of the data objects matches. + + Parameters: + - data1: Reference data object (either a DataArray, DataSet, or list of DataArray) + - data2: Test data object (same type as data1) + - policy: Policy to check the dimensions. 
Can be either "all", "feature" or "sample" + + """ + + # Data objects is either DataArray or DataSet + if is_xdata(data1) and is_xdata(data2): + all_dims1, sample_dims1, feature_dims1 = get_dims_from_data(data1) + all_dims2, sample_dims2, feature_dims2 = get_dims_from_data(data2) + if policy == "all": + assert all( + np.all(data1.coords[dim].values == data2.coords[dim].values) + for dim in all_dims1 + ) + elif policy == "feature": + assert all( + np.all(data1.coords[dim].values == data2.coords[dim].values) + for dim in feature_dims1 + ) + elif policy == "sample": + assert all( + np.all(data1.coords[dim].values == data2.coords[dim].values) + for dim in sample_dims1 + ) + else: + raise ValueError("Unrecognized policy: {:}".format(policy)) + + # Data object is list + elif isinstance(data1, list) and isinstance(data2, list): + for da1, da2 in zip(data1, data2): + assert_expected_coords(da1, da2, policy=policy) + + # If neither of the above conditions are met, raise an error + else: + raise ValueError( + "Cannot check coordinates. Unrecognized data type. data1: {:}, data2: {:}".format( + type(data1), type(data2) + ) + ) diff --git a/xeofs/preprocessing/multi_index_converter.py b/xeofs/preprocessing/multi_index_converter.py index 2680b95..db70e1d 100644 --- a/xeofs/preprocessing/multi_index_converter.py +++ b/xeofs/preprocessing/multi_index_converter.py @@ -1,24 +1,22 @@ -from typing import Dict, TypeVar, List - +from typing import List, Self import xarray as xr import pandas as pd from sklearn.base import BaseEstimator, TransformerMixin -from ..utils.data_types import DataArray, XArrayData, AnyDataObject, DataArrayList +from xeofs.utils.data_types import DataArray + +from ..utils.data_types import DataArray, DataSet, DataList + +class DataArrayMultiIndexConverter(BaseEstimator, TransformerMixin): + """Convert MultiIndexes of a ND DataArray to regular indexes.""" -class MultiIndexConverter(BaseEstimator, TransformerMixin): - def __init__(self, return_copy=False): + def __init__(self): self.original_indexes = {} self.modified_dimensions = [] - self.return_copy = return_copy - - def fit(self, X: XArrayData, y=None): - # Check if input is a DataArray or Dataset - if not isinstance(X, (xr.DataArray, xr.Dataset)): - raise ValueError("Input must be an xarray DataArray or Dataset") + def fit(self, X: DataArray, y=None) -> Self: # Store original MultiIndexes and replace with simple index for dim in X.dims: index = X.indexes[dim] @@ -28,13 +26,8 @@ def fit(self, X: XArrayData, y=None): return self - def transform(self, X: XArrayData) -> XArrayData: - # Check if input is a DataArray or Dataset - if not isinstance(X, (xr.DataArray, xr.Dataset)): - raise ValueError("Input must be an xarray DataArray or Dataset") - - # Make a copy if return_copy is True - X_transformed = X.copy(deep=True) if self.return_copy else X + def transform(self, X: DataArray) -> DataArray: + X_transformed = X.copy(deep=True) # Replace MultiIndexes with simple index for dim in self.modified_dimensions: @@ -44,13 +37,11 @@ def transform(self, X: XArrayData) -> XArrayData: return X_transformed - def inverse_transform(self, X: XArrayData) -> XArrayData: - # Check if input is a DataArray or Dataset - if not isinstance(X, (xr.DataArray, xr.Dataset)): - raise ValueError("Input must be an xarray DataArray or Dataset") + def fit_transform(self, X: DataArray, y=None) -> DataArray: + return self.fit(X, y).transform(X) - # Make a copy if return_copy is True - X_inverse_transformed = X.copy() if self.return_copy else X + def 
_inverse_transform(self, X: DataArray) -> DataArray: + X_inverse_transformed = X.copy(deep=True) # Restore original MultiIndexes for dim, original_index in self.original_indexes.items(): @@ -66,49 +57,71 @@ def inverse_transform(self, X: XArrayData) -> XArrayData: return X_inverse_transformed - def fit_transform(self, X: XArrayData, y=None) -> XArrayData: - return self.fit(X, y).transform(X) + def inverse_transform_data(self, X: DataArray) -> DataArray: + return self._inverse_transform(X) + + def inverse_transform_components(self, X: DataArray) -> DataArray: + return self._inverse_transform(X) + + def inverse_transform_scores(self, X: DataArray) -> DataArray: + return self._inverse_transform(X) + + +class DataSetMultiIndexConverter(DataArrayMultiIndexConverter): + """Converts MultiIndexes to simple indexes and vice versa.""" + + def fit(self, X: DataSet, y=None) -> Self: + return super().fit(X, y) # type: ignore + def transform(self, X: DataSet) -> DataSet: + return super().transform(X) # type: ignore -class ListMultiIndexConverter(BaseEstimator, TransformerMixin): - def __init__(self, return_copy=False): - self.converters: List[MultiIndexConverter] = [] - self.return_copy = return_copy + def fit_transform(self, X: DataSet, y=None) -> DataSet: + return super().fit_transform(X, y) # type: ignore - def fit(self, X: DataArrayList, y=None): - # Check if input is a List of DataArrays - if not isinstance(X, list) or not all(isinstance(x, xr.DataArray) for x in X): - raise ValueError("Input must be a list of xarray DataArray") + def inverse_transform_data(self, X: DataSet) -> DataSet: + return super().inverse_transform_data(X) # type: ignore + def inverse_transform_components(self, X: DataSet) -> DataSet: + return super().inverse_transform_components(X) # type: ignore + + +class DataListMultiIndexConverter(BaseEstimator, TransformerMixin): + """Converts MultiIndexes to simple indexes and vice versa.""" + + def __init__(self): + self.converters: List[DataArrayMultiIndexConverter] = [] + + def fit(self, X: DataList, y=None): for x in X: - converter = MultiIndexConverter(return_copy=self.return_copy) + converter = DataArrayMultiIndexConverter() converter.fit(x) self.converters.append(converter) return self - def transform(self, X: DataArrayList) -> DataArrayList: - # Check if input is a List of DataArrays - if not isinstance(X, list) or not all(isinstance(x, xr.DataArray) for x in X): - raise ValueError("Input must be a list of xarray DataArray") - + def transform(self, X: DataList) -> DataList: X_transformed: List[DataArray] = [] for x, converter in zip(X, self.converters): X_transformed.append(converter.transform(x)) return X_transformed - def inverse_transform(self, X: DataArrayList) -> DataArrayList | DataArray: - # Data & components are stored in a list of DataArrays - if isinstance(X, list): - X_inverse_transformed: List[DataArray] = [] - for x, converter in zip(X, self.converters): - X_inverse_transformed.append(converter.inverse_transform(x)) + def fit_transform(self, X: DataList, y=None) -> DataList: + return self.fit(X, y).transform(X) + + def _inverse_transform(self, X: DataList) -> DataList: + X_inverse_transformed: List[DataArray] = [] + for x, converter in zip(X, self.converters): + X_inverse_transformed.append(converter._inverse_transform(x)) + + return X_inverse_transformed - return X_inverse_transformed - # Scores are stored as a DataArray - else: - return self.converters[0].inverse_transform(X) + def inverse_transform_data(self, X: DataList) -> DataList: + return 
self._inverse_transform(X) - def fit_transform(self, X: DataArrayList, y=None) -> DataArrayList: - return self.fit(X, y).transform(X) + def inverse_transform_components(self, X: DataList) -> DataList: + return self._inverse_transform(X) + + def inverse_transform_scores(self, X: DataArray) -> DataArray: + return self.converters[0].inverse_transform_scores(X) From 508d742dc026b97f28be7af20ac41b12357a4f08 Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Sun, 24 Sep 2023 15:52:05 +0200 Subject: [PATCH 10/43] refactor: add Sanitizer & tests --- .../preprocessing/test_dataarray_sanitizer.py | 246 ++++++++++++++++++ 1 file changed, 246 insertions(+) create mode 100644 tests/preprocessing/test_dataarray_sanitizer.py diff --git a/tests/preprocessing/test_dataarray_sanitizer.py b/tests/preprocessing/test_dataarray_sanitizer.py new file mode 100644 index 0000000..7b46622 --- /dev/null +++ b/tests/preprocessing/test_dataarray_sanitizer.py @@ -0,0 +1,246 @@ +import pytest +import numpy as np +import xarray as xr + +from xeofs.preprocessing.sanitizer import DataArraySanitizer +from xeofs.utils.data_types import DataArray +from ..conftest import generate_synthetic_dataarray +from ..utilities import ( + data_is_dask, + assert_expected_dims, + assert_expected_coords, +) + +# ============================================================================= +# VALID TEST CASES +# ============================================================================= +N_SAMPLE_DIMS = [1] +N_FEATURE_DIMS = [1] +INDEX_POLICY = ["index"] +NAN_POLICY = ["no_nan", "isolated", "fulldim"] +DASK_POLICY = ["no_dask", "dask"] +SEED = [0] + +VALID_TEST_DATA = [ + (ns, nf, index, nan, dask) + for ns in N_SAMPLE_DIMS + for nf in N_FEATURE_DIMS + for index in INDEX_POLICY + for nan in NAN_POLICY + for dask in DASK_POLICY +] + +# ============================================================================= +# INVALID TEST CASES +# ============================================================================= +N_SAMPLE_DIMS = [2] +N_FEATURE_DIMS = [2] +INDEX_POLICY = ["index", "multiindex"] +NAN_POLICY = ["no_nan", "isolated", "fulldim"] +DASK_POLICY = ["no_dask", "dask"] +SEED = [0] + +INVALID_TEST_DATA = [ + (ns, nf, index, nan, dask) + for ns in N_SAMPLE_DIMS + for nf in N_FEATURE_DIMS + for index in INDEX_POLICY + for nan in NAN_POLICY + for dask in DASK_POLICY +] + + +# TESTS +# ============================================================================= +@pytest.mark.parametrize( + "sample_name, feature_name, data_params", + [ + ("sample", "feature", (1, 1)), + ("another_sample", "another_feature", (1, 1)), + ], +) +def test_fit_valid_dimension_names(sample_name, feature_name, data_params): + data = generate_synthetic_dataarray(*data_params) + data = data.rename({"sample0": sample_name, "feature0": feature_name}) + + sanitizer = DataArraySanitizer(sample_name=sample_name, feature_name=feature_name) + sanitizer.fit(data) + data_clean = sanitizer.transform(data) + reconstructed_data = sanitizer.inverse_transform_data(data_clean) + + assert data_clean.ndim == 2 + assert set(data_clean.dims) == set((sample_name, feature_name)) + assert set(reconstructed_data.dims) == set(data.dims) + + +@pytest.mark.parametrize( + "sample_name, feature_name, data_params", + [ + ("sample1", "feature", (1, 1)), + ("sample", "feature1", (1, 1)), + ("sample1", "feature1", (1, 1)), + ], +) +def test_fit_invalid_dimension_names(sample_name, feature_name, data_params): + data = generate_synthetic_dataarray(*data_params) + + sanitizer = 
DataArraySanitizer(sample_name=sample_name, feature_name=feature_name)
+
+    with pytest.raises(ValueError):
+        sanitizer.fit(data)
+
+
+@pytest.mark.parametrize(
+    "synthetic_dataarray",
+    INVALID_TEST_DATA,
+    indirect=["synthetic_dataarray"],
+)
+def test_fit(synthetic_dataarray):
+    data = synthetic_dataarray
+    data = data.rename({"sample0": "sample", "feature0": "feature"})
+
+    sanitizer = DataArraySanitizer()
+    with pytest.raises(ValueError):
+        sanitizer.fit(data)
+
+
+@pytest.mark.parametrize(
+    "synthetic_dataarray",
+    VALID_TEST_DATA,
+    indirect=["synthetic_dataarray"],
+)
+def test_transform(synthetic_dataarray):
+    data = synthetic_dataarray
+    data = data.rename({"sample0": "sample", "feature0": "feature"})
+
+    sanitizer = DataArraySanitizer()
+    sanitizer.fit(data)
+    transformed_data = sanitizer.transform(data)
+    transformed_data2 = sanitizer.transform(data)
+
+    is_dask_before = data_is_dask(data)
+    is_dask_after = data_is_dask(transformed_data)
+
+    assert transformed_data.notnull().all()
+    assert isinstance(transformed_data, DataArray)
+    assert transformed_data.ndim == 2
+    assert transformed_data.dims == data.dims
+    assert is_dask_before == is_dask_after
+    assert transformed_data.identical(transformed_data2)
+
+
+@pytest.mark.parametrize(
+    "synthetic_dataarray",
+    VALID_TEST_DATA,
+    indirect=["synthetic_dataarray"],
+)
+def test_transform_invalid(synthetic_dataarray):
+    data = synthetic_dataarray
+    data = data.rename({"sample0": "sample", "feature0": "feature"})
+
+    sanitizer = DataArraySanitizer()
+    sanitizer.fit(data)
+    with pytest.raises(ValueError):
+        sanitizer.transform(data.isel(feature=slice(0, 2)))
+
+
+@pytest.mark.parametrize(
+    "synthetic_dataarray",
+    VALID_TEST_DATA,
+    indirect=["synthetic_dataarray"],
+)
+def test_fit_transform(synthetic_dataarray):
+    data = synthetic_dataarray
+    data = data.rename({"sample0": "sample", "feature0": "feature"})
+
+    sanitizer = DataArraySanitizer()
+    transformed_data = sanitizer.fit_transform(data)
+
+    is_dask_before = data_is_dask(data)
+    is_dask_after = data_is_dask(transformed_data)
+
+    assert isinstance(transformed_data, DataArray)
+    assert transformed_data.notnull().all()
+    assert transformed_data.ndim == 2
+    assert transformed_data.dims == data.dims
+    assert is_dask_before == is_dask_after
+
+
+@pytest.mark.parametrize(
+    "synthetic_dataarray",
+    VALID_TEST_DATA,
+    indirect=["synthetic_dataarray"],
+)
+def test_inverse_transform_data(synthetic_dataarray):
+    data = synthetic_dataarray
+    data = data.rename({"sample0": "sample", "feature0": "feature"})
+
+    sanitizer = DataArraySanitizer()
+    sanitizer.fit(data)
+    cleaned_data = sanitizer.transform(data)
+    uncleaned_data = sanitizer.inverse_transform_data(cleaned_data)
+
+    is_dask_before = data_is_dask(data)
+    is_dask_after = data_is_dask(uncleaned_data)
+
+    # inverse transform is only identical if nan_policy={"no_nan", "fulldim"}
+    # in case of "isolated" the inverse transform will set the entire feature column
+    # to NaNs, which is not identical to the original data
+    # assert data.identical(uncleaned_data)
+
+    # inverse transform should not change dask-ness
+    assert is_dask_before == is_dask_after
+
+
+@pytest.mark.parametrize(
+    "synthetic_dataarray",
+    VALID_TEST_DATA,
+    indirect=["synthetic_dataarray"],
+)
+def test_inverse_transform_components(synthetic_dataarray):
+    data: DataArray = synthetic_dataarray
+    data = data.rename({"sample0": "sample", "feature0": "feature"})
+
+    sanitizer = DataArraySanitizer()
+    sanitizer.fit(data)
+
+    stacked_data = sanitizer.transform(data)
+    components = stacked_data.rename({"sample": "mode"})
+    unstacked_data = sanitizer.inverse_transform_components(components)
+
+    is_dask_before = data_is_dask(data)
+    is_dask_after = data_is_dask(unstacked_data)
+
+    # Unstacked components have correct feature dimensions
+    assert_expected_dims(data, unstacked_data, policy="feature")
+    # Unstacked data has coordinates of original data
+    assert_expected_coords(data, unstacked_data, policy="feature")
+    # inverse transform should not change dask-ness
+    assert is_dask_before == is_dask_after
+
+
+@pytest.mark.parametrize(
+    "synthetic_dataarray",
+    VALID_TEST_DATA,
+    indirect=["synthetic_dataarray"],
+)
+def test_inverse_transform_scores(synthetic_dataarray):
+    data: DataArray = synthetic_dataarray
+    data = data.rename({"sample0": "sample", "feature0": "feature"})
+
+    sanitizer = DataArraySanitizer()
+    sanitizer.fit(data)
+
+    stacked_data = sanitizer.transform(data)
+    scores = stacked_data.rename({"feature": "mode"})
+    unstacked_data = sanitizer.inverse_transform_scores(scores)
+
+    is_dask_before = data_is_dask(data)
+    is_dask_after = data_is_dask(unstacked_data)
+
+    # Unstacked scores have correct sample dimensions
+    assert_expected_dims(data, unstacked_data, policy="sample")
+    # Unstacked data has coordinates of original data
+    assert_expected_coords(data, unstacked_data, policy="sample")
+    # inverse transform should not change dask-ness
+    assert is_dask_before == is_dask_after

From 9f39f8fccacb03ea4e1f1f32f2082a0a525d7589 Mon Sep 17 00:00:00 2001
From: Niclas Rieger
Date: Sun, 24 Sep 2023 15:54:12 +0200
Subject: [PATCH 11/43] refactor: Stacker focuses on stacking

Other actions like converting MultiIndexes or removing NaNs are handled by
separate classes
---
 .../test_dataarray_list_stacker.py            | 124 ---
 tests/preprocessing/test_dataarray_stacker.py | 378 ++++----
 .../test_dataarray_stacker_stack.py           | 132 ---
 tests/preprocessing/test_datalist_stacker.py  | 236 +++++
 tests/preprocessing/test_dataset_stacker.py   | 321 ++++---
 .../test_dataset_stacker_stack.py             | 171 ----
 xeofs/preprocessing/stacker.py                | 903 +++++++++---------
 7 files changed, 1055 insertions(+), 1210 deletions(-)
 delete mode 100644 tests/preprocessing/test_dataarray_list_stacker.py
 delete mode 100644 tests/preprocessing/test_dataarray_stacker_stack.py
 create mode 100644 tests/preprocessing/test_datalist_stacker.py
 delete mode 100644 tests/preprocessing/test_dataset_stacker_stack.py

diff --git a/tests/preprocessing/test_dataarray_list_stacker.py b/tests/preprocessing/test_dataarray_list_stacker.py
deleted file mode 100644
index a9c4eaa..0000000
--- a/tests/preprocessing/test_dataarray_list_stacker.py
+++ /dev/null
@@ -1,124 +0,0 @@
-import pytest
-import xarray as xr
-import numpy as np
-
-from xeofs.preprocessing.stacker import ListDataArrayStacker
-
-
-@pytest.mark.parametrize(
-    "dim_sample, dim_feature",
-    [
-        (("time",), ("lat", "lon")),
-        (("time",), ("lon", "lat")),
-        (("lat", "lon"), ("time",)),
-        (("lon", "lat"), ("time",)),
-    ],
-)
-def test_fit_transform(dim_sample, dim_feature, mock_data_array_list):
-    """
-    Test that ListDataArrayStacker correctly stacks a list of DataArrays and
-    fit_transform returns DataArray with 'sample' and 'feature' dimensions.
- """ - stacker = ListDataArrayStacker() - feature_dims_list = [dim_feature] * len( - mock_data_array_list - ) # Assume that all DataArrays have the same feature dimensions - stacked_data = stacker.fit_transform( - mock_data_array_list, dim_sample, feature_dims_list - ) - - # Check if the output is a DataArray - assert isinstance(stacked_data, xr.DataArray) - # Check if the dimensions are correct - assert set(stacked_data.dims) == set(("sample", "feature")) - # Check if the data is preserved - assert stacked_data.size == sum([da.size for da in mock_data_array_list]) - - # Check if the transform function returns the same result - transformed_data = stacker.transform(mock_data_array_list) - [ - xr.testing.assert_equal(stacked, transformed) - for stacked, transformed in zip(stacked_data, transformed_data) - ] - - # Check if the stacker dimensions are correct - for stckr, da in zip(stacker.stackers, mock_data_array_list): - assert set(stckr.dims_in_) == set(da.dims) - assert set(stckr.dims_out_) == set(("sample", "feature")) - # test that coordinates are preserved - for dim, coords in da.coords.items(): - assert ( - stckr.coords_in_[dim].size == coords.size - ), "Dimension {} has different size.".format(dim) - assert stckr.coords_out_["sample"].size == np.prod( - [coords.size for dim, coords in da.coords.items() if dim in dim_sample] - ), "Sample dimension has different size." - assert stckr.coords_out_["feature"].size == np.prod( - [coords.size for dim, coords in da.coords.items() if dim in dim_feature] - ), "Feature dimension has different size." - - # Check that invalid input raises an error in transform - with pytest.raises(ValueError): - stacker.transform( - [ - xr.DataArray(np.random.rand(2, 4, 5), dims=("a", "y", "x")) - for _ in range(3) - ] - ) - - -@pytest.mark.parametrize( - "dim_sample, dim_feature", - [ - (("time",), ("lat", "lon")), - (("time",), ("lon", "lat")), - (("lat", "lon"), ("time",)), - (("lon", "lat"), ("time",)), - ], -) -def test_unstack_data(dim_sample, dim_feature, mock_data_array_list): - """Test if the inverse transformed DataArrays are identical to the original DataArrays.""" - stacker_list = ListDataArrayStacker() - feature_dims_list = [dim_feature] * len( - mock_data_array_list - ) # Assume that all DataArrays have the same feature dimensions - stacked = stacker_list.fit_transform(mock_data_array_list, dim_sample, feature_dims_list) # type: ignore - unstacked = stacker_list.inverse_transform_data(stacked) - - for da_test, da_ref in zip(unstacked, mock_data_array_list): - xr.testing.assert_equal(da_test, da_ref) - - -@pytest.mark.parametrize( - "dim_sample, dim_feature", - [ - (("time",), ("lat", "lon")), - (("time",), ("lon", "lat")), - (("lat", "lon"), ("time",)), - (("lon", "lat"), ("time",)), - ], -) -def test_unstack_components(dim_sample, dim_feature, mock_data_array_list): - """Test if the inverse transformed components are identical to the original components.""" - stacker_list = ListDataArrayStacker() - feature_dims_list = [dim_feature] * len(mock_data_array_list) - stacked = stacker_list.fit_transform( - mock_data_array_list, dim_sample, feature_dims_list - ) - - components = xr.DataArray( - np.random.normal(size=(stacker_list.coords_out_["feature"].size, 10)), - dims=("feature", "mode"), - coords={"feature": stacker_list.coords_out_["feature"]}, - ) - unstacked = stacker_list.inverse_transform_components(components) - - for da_test, da_ref in zip(unstacked, mock_data_array_list): - # Check if the dimensions are correct - assert set(da_test.dims) == 
set(dim_feature + ("mode",)) - # Check if the coordinates are preserved - for dim, coords in da_ref.coords.items(): - if dim in dim_feature: - assert ( - da_test.coords[dim].size == coords.size - ), "Dimension {} has different size.".format(dim) diff --git a/tests/preprocessing/test_dataarray_stacker.py b/tests/preprocessing/test_dataarray_stacker.py index 14f697a..5882f13 100644 --- a/tests/preprocessing/test_dataarray_stacker.py +++ b/tests/preprocessing/test_dataarray_stacker.py @@ -1,204 +1,230 @@ import pytest -import xarray as xr import numpy as np +import xarray as xr -from xeofs.preprocessing.stacker import SingleDataArrayStacker - +from xeofs.preprocessing.stacker import DataArrayStacker +from xeofs.utils.data_types import DataArray +from ..conftest import generate_synthetic_dataarray +from ..utilities import ( + get_dims_from_data, + data_is_dask, + assert_expected_dims, + assert_expected_coords, +) +# ============================================================================= +# GENERALLY VALID TEST CASES +# ============================================================================= +N_SAMPLE_DIMS = [1, 2] +N_FEATURE_DIMS = [1, 2] +INDEX_POLICY = ["index"] +NAN_POLICY = ["no_nan"] +DASK_POLICY = ["no_dask", "dask"] +SEED = [0] + +VALID_TEST_DATA = [ + (ns, nf, index, nan, dask) + for ns in N_SAMPLE_DIMS + for nf in N_FEATURE_DIMS + for index in INDEX_POLICY + for nan in NAN_POLICY + for dask in DASK_POLICY +] + + +# TESTS +# ============================================================================= @pytest.mark.parametrize( - "dim_sample, dim_feature", + "sample_name, feature_name, data_params", [ - (("time",), ("lat", "lon")), - (("time",), ("lon", "lat")), - (("lat", "lon"), ("time",)), - (("lon", "lat"), ("time",)), + ("sample", "feature", (1, 1)), + ("sample0", "feature0", (1, 1)), + ("sample0", "feature", (1, 2)), + ("sample", "feature0", (2, 1)), + ("sample", "feature", (2, 2)), + ("another_sample", "another_feature", (1, 1)), + ("another_sample", "another_feature", (2, 2)), ], ) -def test_fit_transform( - dim_sample, - dim_feature, - mock_data_array, - mock_data_array_isolated_nans, - mock_data_array_full_dimensional_nans, - mock_data_array_boundary_nans, -): - # Test basic functionality - stacker = SingleDataArrayStacker() - stacked = stacker.fit_transform(mock_data_array, dim_sample, dim_feature) - assert stacked.ndim == 2 - assert set(stacked.dims) == {"sample", "feature"} - assert not stacked.isnull().any() - - # Test that the operation is reversible - unstacked = stacker.inverse_transform_data(stacked) - xr.testing.assert_equal(unstacked, mock_data_array) - - # Test that isolated NaNs raise an error - with pytest.raises(ValueError): - stacker.fit_transform(mock_data_array_isolated_nans, dim_sample, dim_feature) - - # Test that NaNs across a full dimension are handled correctly - stacked = stacker.fit_transform( - mock_data_array_full_dimensional_nans, dim_sample, dim_feature - ) - unstacked = stacker.inverse_transform_data(stacked) - xr.testing.assert_equal(unstacked, mock_data_array_full_dimensional_nans) - - # Test that NaNs on the boundary are handled correctly - stacked = stacker.fit_transform( - mock_data_array_boundary_nans, dim_sample, dim_feature - ) - unstacked = stacker.inverse_transform_data(stacked) - xr.testing.assert_equal(unstacked, mock_data_array_boundary_nans) - - # Test that the same stacker cannot be used with data of different shapes - with pytest.raises(ValueError): - other_data = mock_data_array.isel(time=slice(None, -1), lon=slice(None, 
-1)) - stacker.transform(other_data) +def test_fit_valid_dimension_names(sample_name, feature_name, data_params): + data = generate_synthetic_dataarray(*data_params) + all_dims, sample_dims, feature_dims = get_dims_from_data(data) + + stacker = DataArrayStacker(sample_name=sample_name, feature_name=feature_name) + stacker.fit(data, sample_dims, feature_dims) + stacked_data = stacker.transform(data) + reconstructed_data = stacker.inverse_transform_data(stacked_data) + + assert stacked_data.ndim == 2 + assert set(stacked_data.dims) == set((sample_name, feature_name)) + assert set(reconstructed_data.dims) == set(data.dims) @pytest.mark.parametrize( - "dim_sample, dim_feature", + "sample_name, feature_name, data_params", [ - (("time",), ("lat", "lon")), - (("time",), ("lon", "lat")), - (("lat", "lon"), ("time",)), - (("lon", "lat"), ("time",)), + ("sample1", "feature", (2, 1)), + ("sample", "feature1", (1, 2)), + ("sample1", "feature1", (3, 3)), ], ) -def test_transform(mock_data_array, dim_sample, dim_feature): - # Test basic functionality - stacker = SingleDataArrayStacker() - stacker.fit_transform(mock_data_array, dim_sample, dim_feature) - other_data = mock_data_array.copy(deep=True) - transformed = stacker.transform(other_data) - - # Test that transformed data has the correct dimensions - assert transformed.ndim == 2 - assert set(transformed.dims) == {"sample", "feature"} - assert not transformed.isnull().any() - - # Invalid data raises an error +def test_fit_invalid_dimension_names(sample_name, feature_name, data_params): + data = generate_synthetic_dataarray(*data_params) + all_dims, sample_dims, feature_dims = get_dims_from_data(data) + + stacker = DataArrayStacker(sample_name=sample_name, feature_name=feature_name) + with pytest.raises(ValueError): - stacker.transform(mock_data_array.isel(lon=slice(None, 2), time=slice(None, 2))) + stacker.fit(data, sample_dims, feature_dims) @pytest.mark.parametrize( - "dim_sample, dim_feature", - [ - (("time",), ("lat", "lon")), - (("time",), ("lon", "lat")), - (("lat", "lon"), ("time",)), - (("lon", "lat"), ("time",)), - ], + "synthetic_dataarray", + VALID_TEST_DATA, + indirect=["synthetic_dataarray"], ) -def test_inverse_transform_data(mock_data_array, dim_sample, dim_feature): - # Test inverse transform - stacker = SingleDataArrayStacker() - stacker.fit_transform(mock_data_array, dim_sample, dim_feature) - stacked = stacker.transform(mock_data_array) - unstacked = stacker.inverse_transform_data(stacked) - xr.testing.assert_equal(unstacked, mock_data_array) +def test_fit(synthetic_dataarray): + data = synthetic_dataarray + all_dims, sample_dims, feature_dims = get_dims_from_data(data) - # Test that the operation is reversible - restacked = stacker.transform(unstacked) - xr.testing.assert_equal(restacked, stacked) + stacker = DataArrayStacker() + stacker.fit(data, sample_dims, feature_dims) @pytest.mark.parametrize( - "dim_sample, dim_feature", - [ - (("time",), ("lat", "lon")), - (("time",), ("lon", "lat")), - (("lat", "lon"), ("time",)), - (("lon", "lat"), ("time",)), - ], + "synthetic_dataarray", + VALID_TEST_DATA, + indirect=["synthetic_dataarray"], ) -def test_inverse_transform_components(mock_data_array, dim_sample, dim_feature): - # Test basic functionality - stacker = SingleDataArrayStacker() - stacker.fit_transform(mock_data_array, dim_sample, dim_feature) - components = xr.DataArray( - np.random.normal(size=(len(stacker.coords_out_["feature"]), 10)), - dims=("feature", "mode"), - coords={"feature": stacker.coords_out_["feature"]}, - ) - 
unstacked = stacker.inverse_transform_components(components) - - # Test that feature dimensions are preserved - assert set(unstacked.dims) == set(dim_feature + ("mode",)) - - # Test that feature coordinates are preserved - for dim, coords in mock_data_array.coords.items(): - if dim in dim_feature: - assert ( - unstacked.coords[dim].size == coords.size - ), "Dimension {} has different size.".format(dim) +def test_transform(synthetic_dataarray): + data = synthetic_dataarray + all_dims, sample_dims, feature_dims = get_dims_from_data(data) + + stacker = DataArrayStacker() + stacker.fit(data, sample_dims, feature_dims) + transformed_data = stacker.transform(data) + transformed_data2 = stacker.transform(data) + + is_dask_before = data_is_dask(data) + is_dask_after = data_is_dask(transformed_data) + + assert isinstance(transformed_data, DataArray) + assert transformed_data.ndim == 2 + assert transformed_data.dims == ("sample", "feature") + assert is_dask_before == is_dask_after + assert transformed_data.identical(transformed_data2) @pytest.mark.parametrize( - "dim_sample, dim_feature", - [ - (("time",), ("lat", "lon")), - (("time",), ("lon", "lat")), - (("lat", "lon"), ("time",)), - (("lon", "lat"), ("time",)), - ], + "synthetic_dataarray", + VALID_TEST_DATA, + indirect=["synthetic_dataarray"], +) +def test_transform_invalid(synthetic_dataarray): + data = synthetic_dataarray + all_dims, sample_dims, feature_dims = get_dims_from_data(data) + + stacker = DataArrayStacker() + stacker.fit(data, sample_dims, feature_dims) + with pytest.raises(ValueError): + stacker.transform(data.isel(feature0=slice(0, 2))) + + +@pytest.mark.parametrize( + "synthetic_dataarray", + VALID_TEST_DATA, + indirect=["synthetic_dataarray"], +) +def test_fit_transform(synthetic_dataarray): + data = synthetic_dataarray + all_dims, sample_dims, feature_dims = get_dims_from_data(data) + + stacker = DataArrayStacker() + transformed_data = stacker.fit_transform(data, sample_dims, feature_dims) + + is_dask_before = data_is_dask(data) + is_dask_after = data_is_dask(transformed_data) + + assert isinstance(transformed_data, DataArray) + assert transformed_data.ndim == 2 + assert transformed_data.dims == ("sample", "feature") + assert is_dask_before == is_dask_after + + +@pytest.mark.parametrize( + "synthetic_dataarray", + VALID_TEST_DATA, + indirect=["synthetic_dataarray"], +) +def test_invserse_transform_data(synthetic_dataarray): + data = synthetic_dataarray + all_dims, sample_dims, feature_dims = get_dims_from_data(data) + + stacker = DataArrayStacker() + stacker.fit(data, sample_dims, feature_dims) + stacked_data = stacker.transform(data) + unstacked_data = stacker.inverse_transform_data(stacked_data) + + is_dask_before = data_is_dask(data) + is_dask_after = data_is_dask(unstacked_data) + + # Unstacked data has dimensions of original data + assert_expected_dims(data, unstacked_data, policy="all") + # Unstacked data has coordinates of original data + assert_expected_coords(data, unstacked_data, policy="all") + # inverse transform should not change dask-ness + assert is_dask_before == is_dask_after + + +@pytest.mark.parametrize( + "synthetic_dataarray", + VALID_TEST_DATA, + indirect=["synthetic_dataarray"], +) +def test_invserse_transform_components(synthetic_dataarray): + data: DataArray = synthetic_dataarray + all_dims, sample_dims, feature_dims = get_dims_from_data(data) + + stacker = DataArrayStacker() + stacker.fit(data, sample_dims, feature_dims) + + stacked_data = stacker.transform(data) + components = 
stacked_data.rename({"sample": "mode"}) + unstacked_data = stacker.inverse_transform_components(components) + + is_dask_before = data_is_dask(data) + is_dask_after = data_is_dask(unstacked_data) + + # Unstacked components has correct feature dimensions + assert_expected_dims(data, unstacked_data, policy="feature") + # Unstacked data has coordinates of original data + assert_expected_coords(data, unstacked_data, policy="feature") + # inverse transform should not change dask-ness + assert is_dask_before == is_dask_after + + +@pytest.mark.parametrize( + "synthetic_dataarray", + VALID_TEST_DATA, + indirect=["synthetic_dataarray"], ) -def test_inverse_transform_scores(mock_data_array, dim_sample, dim_feature): - # Test basic functionality - stacker = SingleDataArrayStacker() - stacker.fit_transform(mock_data_array, dim_sample, dim_feature) - scores = xr.DataArray( - np.random.rand(len(stacker.coords_out_["sample"]), 10), - dims=("sample", "mode"), - coords={"sample": stacker.coords_out_["sample"]}, - ) - unstacked = stacker.inverse_transform_scores(scores) - - # Test that sample dimensions are preserved - assert set(unstacked.dims) == set(dim_sample + ("mode",)) - - # Test that sample coordinates are preserved - for dim, coords in mock_data_array.coords.items(): - if dim in dim_sample: - assert ( - unstacked.coords[dim].size == coords.size - ), "Dimension {} has different size.".format(dim) - - -def test_fit_transform_sample_feature_data(): - """Test fit_transform with sample and feature data.""" - # Create sample and feature data - np.random.seed(5) - simple_data = xr.DataArray( - np.random.rand(10, 5), - dims=("sample", "feature"), - coords={"sample": np.arange(10), "feature": np.arange(5)}, - ) - np.random.seed(5) - more_simple_data = xr.DataArray( - np.random.rand(10, 5), - dims=("sample", "feature"), - coords={"sample": np.arange(10), "feature": np.arange(5)}, - ) - - # Create stacker and fit_transform - stacker = SingleDataArrayStacker() - stacked = stacker.fit_transform(simple_data, ("sample",), ("feature")) - - # Test that the dimensions are correct - assert stacked.ndim == 2 - assert set(stacked.dims) == {"sample", "feature"} - assert not stacked.isnull().any() - - # Test that fitting new data yields the same results - more_stacked = stacker.transform(more_simple_data) - xr.testing.assert_equal(more_stacked, stacked) - - # Test that the operation is reversible - unstacked = stacker.inverse_transform_data(stacked) - xr.testing.assert_equal(unstacked, simple_data) - - more_unstacked = stacker.inverse_transform_data(more_stacked) - xr.testing.assert_equal(more_unstacked, more_simple_data) +def test_invserse_transform_scores(synthetic_dataarray): + data: DataArray = synthetic_dataarray + all_dims, sample_dims, feature_dims = get_dims_from_data(data) + + stacker = DataArrayStacker() + stacker.fit(data, sample_dims, feature_dims) + + stacked_data = stacker.transform(data) + components = stacked_data.rename({"feature": "mode"}) + unstacked_data = stacker.inverse_transform_scores(components) + + is_dask_before = data_is_dask(data) + is_dask_after = data_is_dask(unstacked_data) + + # Unstacked components has correct feature dimensions + assert_expected_dims(data, unstacked_data, policy="sample") + # Unstacked data has coordinates of original data + assert_expected_coords(data, unstacked_data, policy="sample") + # inverse transform should not change dask-ness + assert is_dask_before == is_dask_after diff --git a/tests/preprocessing/test_dataarray_stacker_stack.py 
b/tests/preprocessing/test_dataarray_stacker_stack.py deleted file mode 100644 index 7389559..0000000 --- a/tests/preprocessing/test_dataarray_stacker_stack.py +++ /dev/null @@ -1,132 +0,0 @@ -import pytest -import xarray as xr -import numpy as np - -from xeofs.preprocessing.stacker import SingleDataArrayStacker - - -def create_da(dim_sample, dim_feature, seed=None): - n_dims = len(dim_sample) + len(dim_feature) - size = n_dims * [3] - rng = np.random.default_rng(seed) - dims = dim_sample + dim_feature - coords = {d: np.arange(i, i + 3) for i, d in enumerate(dims)} - return xr.DataArray(rng.normal(0, 1, size=size), dims=dims, coords=coords) - - -# Valid input -# ============================================================================= -valid_input_dims = [ - (("year", "month"), ("lon", "lat")), - (("year",), ("lat", "lon")), - (("year", "month"), ("lon",)), - (("year",), ("lon",)), - (("sample",), ("feature",)), -] - -valid_input = [] -for dim_sample, dim_feature in valid_input_dims: - da = create_da(dim_sample, dim_feature) - valid_input.append((da, dim_sample, dim_feature)) - - -# Invalid input -# ============================================================================= -invalid_input_dims = [ - (("sample",), ("feature", "lat")), - (("sample",), ("month", "feature")), - (("sample", "month"), ("lon", "lat")), - (("sample",), ("lon", "lat")), - (("year",), ("month", "sample")), - (("year",), ("sample",)), - (("sample",), ("lon",)), - (("year", "month"), ("lon", "feature")), - (("year", "month"), ("feature",)), - (("year",), ("feature",)), - (("feature",), ("lon", "lat")), - (("feature",), ("lon",)), - (("feature",), ("sample",)), -] -invalid_input = [] -for dim_sample, dim_feature in invalid_input_dims: - da = create_da(dim_sample, dim_feature) - invalid_input.append((da, dim_sample, dim_feature)) - - -# Test stacking -# ============================================================================= -@pytest.mark.parametrize("da, dim_sample, dim_feature", valid_input) -def test_fit_transform(da, dim_sample, dim_feature): - """Test fit_transform with valid input.""" - stacker = SingleDataArrayStacker() - da_stacked = stacker.fit_transform(da, dim_sample, dim_feature) - - # Stacked data has dimensions (sample, feature) - err_msg = f"In: {da.dims}; Out: {da_stacked.dims}" - assert set(da_stacked.dims) == { - "sample", - "feature", - }, err_msg - - -@pytest.mark.parametrize("da, dim_sample, dim_feature", invalid_input) -def test_fit_transform_invalid_input(da, dim_sample, dim_feature): - """Test fit_transform with invalid input.""" - stacker = SingleDataArrayStacker() - with pytest.raises(ValueError): - da_stacked = stacker.fit_transform(da, dim_sample, dim_feature) - - -@pytest.mark.parametrize("da, dim_sample, dim_feature", valid_input) -def test_inverse_transform_data(da, dim_sample, dim_feature): - """Test inverse transform with valid input.""" - stacker = SingleDataArrayStacker() - da_stacked = stacker.fit_transform(da, dim_sample, dim_feature) - da_unstacked = stacker.inverse_transform_data(da_stacked) - - # Unstacked data has dimensions of original data - err_msg = f"Original: {da.dims}; Recovered: {da_unstacked.dims}" - assert set(da_unstacked.dims) == set(da.dims), err_msg - # Unstacked data has coordinates of original data - for d in da.dims: - assert np.all(da_unstacked.coords[d].values == da.coords[d].values) - - -@pytest.mark.parametrize("da, dim_sample, dim_feature", valid_input) -def test_inverse_transform_components(da, dim_sample, dim_feature): - """Test inverse 
transform components with valid input.""" - stacker = SingleDataArrayStacker() - da_stacked = stacker.fit_transform(da, dim_sample, dim_feature) - # Mock components by dropping sampling dim from data - comps_stacked = da_stacked.drop_vars("sample").rename({"sample": "mode"}) - comps_stacked.coords.update({"mode": range(comps_stacked.mode.size)}) - - comps_unstacked = stacker.inverse_transform_components(comps_stacked) - - # Unstacked components has correct feature dimensions - expected_dims = dim_feature + ("mode",) - err_msg = f"Expected: {expected_dims}; Recovered: {comps_unstacked.dims}" - assert set(comps_unstacked.dims) == set(expected_dims), err_msg - # Unstacked data has coordinates of original data - for d in dim_feature: - assert np.all(comps_unstacked.coords[d].values == da.coords[d].values) - - -@pytest.mark.parametrize("da, dim_sample, dim_feature", valid_input) -def test_inverse_transform_scores(da, dim_sample, dim_feature): - """Test inverse transform scores with valid input.""" - stacker = SingleDataArrayStacker() - da_stacked = stacker.fit_transform(da, dim_sample, dim_feature) - # Mock scores by dropping feature dim from data - scores_stacked = da_stacked.drop_vars("feature").rename({"feature": "mode"}) - scores_stacked.coords.update({"mode": range(scores_stacked.mode.size)}) - - scores_unstacked = stacker.inverse_transform_scores(scores_stacked) - - # Unstacked components has correct feature dimensions - expected_dims = dim_sample + ("mode",) - err_msg = f"Expected: {expected_dims}; Recovered: {scores_unstacked.dims}" - assert set(scores_unstacked.dims) == set(expected_dims), err_msg - # Unstacked data has coordinates of original data - for d in dim_sample: - assert np.all(scores_unstacked.coords[d].values == da.coords[d].values) diff --git a/tests/preprocessing/test_datalist_stacker.py b/tests/preprocessing/test_datalist_stacker.py new file mode 100644 index 0000000..5ae91f4 --- /dev/null +++ b/tests/preprocessing/test_datalist_stacker.py @@ -0,0 +1,236 @@ +import pytest +import numpy as np +import xarray as xr + +from xeofs.preprocessing.stacker import DataListStacker +from xeofs.utils.data_types import DataArray, DataList +from ..conftest import generate_list_of_synthetic_dataarrays +from ..utilities import ( + get_dims_from_data_list, + data_is_dask, + assert_expected_dims, + assert_expected_coords, +) + +# ============================================================================= +# GENERALLY VALID TEST CASES +# ============================================================================= +N_ARRAYS = [1, 2] +N_SAMPLE_DIMS = [1, 2] +N_FEATURE_DIMS = [1, 2] +INDEX_POLICY = ["index"] +NAN_POLICY = ["no_nan"] +DASK_POLICY = ["no_dask", "dask"] +SEED = [0] + +VALID_TEST_DATA = [ + (na, ns, nf, index, nan, dask) + for na in N_ARRAYS + for ns in N_SAMPLE_DIMS + for nf in N_FEATURE_DIMS + for index in INDEX_POLICY + for nan in NAN_POLICY + for dask in DASK_POLICY +] + + +# TESTS +# ============================================================================= +@pytest.mark.parametrize( + "sample_name, feature_name, data_params", + [ + ("sample", "feature", (2, 1, 1)), + ("sample0", "feature0", (2, 1, 1)), + ("sample0", "feature", (2, 1, 2)), + ("sample", "feature0", (2, 2, 1)), + ("sample", "feature", (2, 2, 2)), + ("another_sample", "another_feature", (2, 1, 1)), + ("another_sample", "another_feature", (2, 2, 2)), + ], +) +def test_fit_valid_dimension_names(sample_name, feature_name, data_params): + data_list = generate_list_of_synthetic_dataarrays(*data_params) + 
all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) + + stacker = DataListStacker(sample_name=sample_name, feature_name=feature_name) + stacker.fit(data_list, sample_dims[0], feature_dims) + stacked_data = stacker.transform(data_list) + reconstructed_data_list = stacker.inverse_transform_data(stacked_data) + + assert stacked_data.ndim == 2 + assert set(stacked_data.dims) == set((sample_name, feature_name)) + for reconstructed_data, data in zip(reconstructed_data_list, data_list): + assert set(reconstructed_data.dims) == set(data.dims) + + +@pytest.mark.parametrize( + "sample_name, feature_name, data_params", + [ + ("sample1", "feature", (2, 2, 1)), + ("sample", "feature1", (2, 1, 2)), + ("sample1", "feature1", (2, 3, 3)), + ], +) +def test_fit_invalid_dimension_names(sample_name, feature_name, data_params): + data_list = generate_list_of_synthetic_dataarrays(*data_params) + all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) + + stacker = DataListStacker(sample_name=sample_name, feature_name=feature_name) + + with pytest.raises(ValueError): + stacker.fit(data_list, sample_dims[0], feature_dims) + + +@pytest.mark.parametrize( + "synthetic_datalist", + VALID_TEST_DATA, + indirect=["synthetic_datalist"], +) +def test_fit(synthetic_datalist): + data_list = synthetic_datalist + all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) + + stacker = DataListStacker() + stacker.fit(data_list, sample_dims[0], feature_dims) + + +@pytest.mark.parametrize( + "synthetic_datalist", + VALID_TEST_DATA, + indirect=["synthetic_datalist"], +) +def test_transform(synthetic_datalist): + data_list = synthetic_datalist + all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) + + stacker = DataListStacker() + stacker.fit(data_list, sample_dims[0], feature_dims) + transformed_data = stacker.transform(data_list) + transformed_data2 = stacker.transform(data_list) + + is_dask_before = data_is_dask(data_list) + is_dask_after = data_is_dask(transformed_data) + + assert isinstance(transformed_data, DataArray) + assert transformed_data.ndim == 2 + assert transformed_data.dims == ("sample", "feature") + assert is_dask_before == is_dask_after + assert transformed_data.identical(transformed_data2) + + +@pytest.mark.parametrize( + "synthetic_datalist", + VALID_TEST_DATA, + indirect=["synthetic_datalist"], +) +def test_transform_invalid(synthetic_datalist): + data_list = synthetic_datalist + all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) + + stacker = DataListStacker() + stacker.fit(data_list, sample_dims[0], feature_dims) + + data_list = [da.isel(feature0=slice(0, 2)) for da in data_list] + with pytest.raises(ValueError): + stacker.transform(data_list) + + +@pytest.mark.parametrize( + "synthetic_datalist", + VALID_TEST_DATA, + indirect=["synthetic_datalist"], +) +def test_fit_transform(synthetic_datalist): + data_list = synthetic_datalist + all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) + + stacker = DataListStacker() + transformed_data = stacker.fit_transform(data_list, sample_dims[0], feature_dims) + + is_dask_before = data_is_dask(data_list) + is_dask_after = data_is_dask(transformed_data) + + assert isinstance(transformed_data, DataArray) + assert transformed_data.ndim == 2 + assert transformed_data.dims == ("sample", "feature") + assert is_dask_before == is_dask_after + + +@pytest.mark.parametrize( + "synthetic_datalist", + VALID_TEST_DATA, + indirect=["synthetic_datalist"], +) +def 
test_invserse_transform_data(synthetic_datalist): + data_list = synthetic_datalist + all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) + + stacker = DataListStacker() + stacker.fit(data_list, sample_dims[0], feature_dims) + stacked_data = stacker.transform(data_list) + unstacked_data = stacker.inverse_transform_data(stacked_data) + + is_dask_before = data_is_dask(data_list) + is_dask_after = data_is_dask(unstacked_data) + + # Unstacked data has dimensions of original data + assert_expected_dims(data_list, unstacked_data, policy="all") + # Unstacked data has coordinates of original data + assert_expected_coords(data_list, unstacked_data, policy="all") + # inverse transform should not change dask-ness + assert is_dask_before == is_dask_after + + +@pytest.mark.parametrize( + "synthetic_datalist", + VALID_TEST_DATA, + indirect=["synthetic_datalist"], +) +def test_invserse_transform_components(synthetic_datalist): + data_list = synthetic_datalist + all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) + + stacker = DataListStacker() + stacker.fit(data_list, sample_dims[0], feature_dims) + + stacked_data = stacker.transform(data_list) + components = stacked_data.rename({"sample": "mode"}) + components.coords.update({"mode": range(components.mode.size)}) + unstacked_data = stacker.inverse_transform_components(components) + + is_dask_before = data_is_dask(data_list) + is_dask_after = data_is_dask(unstacked_data) + + # Unstacked components has correct feature dimensions + assert_expected_dims(data_list, unstacked_data, policy="feature") + # Unstacked data has feature coordinates of original data + assert_expected_coords(data_list, unstacked_data, policy="feature") + # inverse transform should not change dask-ness + assert is_dask_before == is_dask_after + + +@pytest.mark.parametrize( + "synthetic_datalist", + VALID_TEST_DATA, + indirect=["synthetic_datalist"], +) +def test_invserse_transform_scores(synthetic_datalist): + data_list = synthetic_datalist + all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) + + stacker = DataListStacker() + stacker.fit(data_list, sample_dims[0], feature_dims) + + stacked_data = stacker.transform(data_list) + scores = stacked_data.rename({"feature": "mode"}) + unstacked_data = stacker.inverse_transform_scores(scores) + + is_dask_before = data_is_dask(data_list) + is_dask_after = data_is_dask(unstacked_data) + + # Unstacked scores has correct feature dimensions + assert_expected_dims(data_list[0], unstacked_data, policy="sample") + # Unstacked data has coordinates of original data + assert_expected_coords(data_list[0], unstacked_data, policy="sample") + # inverse transform should not change dask-ness + assert is_dask_before == is_dask_after diff --git a/tests/preprocessing/test_dataset_stacker.py b/tests/preprocessing/test_dataset_stacker.py index cfc53f9..e35d0e1 100644 --- a/tests/preprocessing/test_dataset_stacker.py +++ b/tests/preprocessing/test_dataset_stacker.py @@ -2,177 +2,240 @@ import xarray as xr import numpy as np -from xeofs.preprocessing.stacker import SingleDatasetStacker - +from xeofs.preprocessing.stacker import DataSetStacker +from xeofs.utils.data_types import DataSet, DataArray +from ..conftest import generate_synthetic_dataset +from ..utilities import ( + get_dims_from_data, + data_is_dask, + assert_expected_dims, + assert_expected_coords, +) +# ============================================================================= +# GENERALLY VALID TEST CASES +# 
============================================================================= +N_VARIABLES = [1, 2] +N_SAMPLE_DIMS = [1, 2] +N_FEATURE_DIMS = [1, 2] +INDEX_POLICY = ["index"] +NAN_POLICY = ["no_nan"] +DASK_POLICY = ["no_dask", "dask"] +SEED = [0] + +VALID_TEST_DATA = [ + (nv, ns, nf, index, nan, dask) + for nv in N_VARIABLES + for ns in N_SAMPLE_DIMS + for nf in N_FEATURE_DIMS + for index in INDEX_POLICY + for nan in NAN_POLICY + for dask in DASK_POLICY +] + + +# TESTS +# ============================================================================= @pytest.mark.parametrize( - "dim_sample, dim_feature", + "sample_name, feature_name, data_params", [ - (("time",), ("lat", "lon")), - (("time",), ("lon", "lat")), - (("lat", "lon"), ("time",)), - (("lon", "lat"), ("time",)), + ("sample", "feature", (1, 1, 1)), + ("sample", "feature", (2, 1, 1)), + ("sample", "feature", (1, 2, 2)), + ("sample", "feature", (2, 2, 2)), + ("sample0", "feature", (1, 1, 1)), + ("sample0", "feature", (1, 1, 2)), + ("sample0", "feature", (2, 1, 2)), + ("another_sample", "another_feature", (1, 1, 1)), + ("another_sample", "another_feature", (1, 2, 2)), + ("another_sample", "another_feature", (2, 1, 1)), + ("another_sample", "another_feature", (2, 2, 2)), ], ) -def test_fit_transform(mock_dataset, dim_sample, dim_feature): - stacker = SingleDatasetStacker() - stacked = stacker.fit_transform(mock_dataset, dim_sample, dim_feature) +def test_fit_valid_dimension_names(sample_name, feature_name, data_params): + data = generate_synthetic_dataset(*data_params) + all_dims, sample_dims, feature_dims = get_dims_from_data(data) - # check output type and dimensions - assert isinstance(stacked, xr.DataArray) - assert set(stacked.dims) == {"sample", "feature"} + stacker = DataSetStacker(sample_name=sample_name, feature_name=feature_name) + stacker.fit(data, sample_dims, feature_dims) + stacked_data = stacker.transform(data) + reconstructed_data = stacker.inverse_transform_data(stacked_data) - # check if all NaN rows or columns have been dropped - assert not stacked.isnull().any() - - # check the size of the output data - assert stacked.size > 0 + assert stacked_data.ndim == 2 + assert set(stacked_data.dims) == set((sample_name, feature_name)) + assert set(reconstructed_data.dims) == set(data.dims) @pytest.mark.parametrize( - "dim_sample, dim_feature", + "sample_name, feature_name, data_params", [ - (("time",), ("lat", "lon")), - (("time",), ("lon", "lat")), - (("lat", "lon"), ("time",)), - (("lon", "lat"), ("time",)), + ("sample", "feature0", (1, 1, 1)), + ("sample0", "feature", (1, 2, 1)), + ("sample1", "feature1", (1, 3, 3)), + ("sample", "feature0", (2, 1, 1)), + ("sample0", "feature", (2, 2, 1)), + ("sample1", "feature1", (2, 3, 3)), ], ) -def test_transform(mock_dataset, dim_sample, dim_feature): - stacker = SingleDatasetStacker() - stacker.fit_transform(mock_dataset, dim_sample, dim_feature) +def test_fit_invalid_dimension_names(sample_name, feature_name, data_params): + data = generate_synthetic_dataset(*data_params) + all_dims, sample_dims, feature_dims = get_dims_from_data(data) - # create a new dataset for testing the transform function - new_data = mock_dataset.copy() - transformed = stacker.transform(new_data) + stacker = DataSetStacker(sample_name=sample_name, feature_name=feature_name) - assert isinstance(transformed, xr.DataArray) - assert set(transformed.dims) == {"sample", "feature"} - assert not transformed.isnull().any() - assert transformed.size > 0 + with pytest.raises(ValueError): + stacker.fit(data, sample_dims, 
feature_dims) @pytest.mark.parametrize( - "dim_sample, dim_feature", - [ - (("time",), ("lat", "lon")), - (("time",), ("lon", "lat")), - (("lat", "lon"), ("time",)), - (("lon", "lat"), ("time",)), - ], + "synthetic_dataset", + VALID_TEST_DATA, + indirect=["synthetic_dataset"], ) -def test_inverse_transform_data(mock_dataset, dim_sample, dim_feature): - stacker = SingleDatasetStacker() - stacked = stacker.fit_transform(mock_dataset, dim_sample, dim_feature) +def test_fit(synthetic_dataset): + data = synthetic_dataset + all_dims, sample_dims, feature_dims = get_dims_from_data(data) - inverse_transformed = stacker.inverse_transform_data(stacked) - assert isinstance(inverse_transformed, xr.Dataset) + stacker = DataSetStacker() + stacker.fit(data, sample_dims, feature_dims) - for var in inverse_transformed.data_vars: - xr.testing.assert_equal(inverse_transformed[var], mock_dataset[var]) + +@pytest.mark.parametrize( + "synthetic_dataset", + VALID_TEST_DATA, + indirect=["synthetic_dataset"], +) +def test_transform(synthetic_dataset): + data = synthetic_dataset + all_dims, sample_dims, feature_dims = get_dims_from_data(data) + + stacker = DataSetStacker() + stacker.fit(data, sample_dims, feature_dims) + transformed_data = stacker.transform(data) + transformed_data2 = stacker.transform(data) + + is_dask_before = data_is_dask(data) + is_dask_after = data_is_dask(transformed_data) + + assert isinstance(transformed_data, DataArray) + assert transformed_data.ndim == 2 + assert transformed_data.dims == ("sample", "feature") + assert is_dask_before == is_dask_after + assert transformed_data.identical(transformed_data2) @pytest.mark.parametrize( - "dim_sample, dim_feature", - [ - (("time",), ("lat", "lon")), - (("time",), ("lon", "lat")), - (("lat", "lon"), ("time",)), - (("lon", "lat"), ("time",)), - ], + "synthetic_dataset", + VALID_TEST_DATA, + indirect=["synthetic_dataset"], +) +def test_transform_invalid(synthetic_dataset): + data = synthetic_dataset + all_dims, sample_dims, feature_dims = get_dims_from_data(data) + + stacker = DataSetStacker() + stacker.fit(data, sample_dims, feature_dims) + with pytest.raises(ValueError): + stacker.transform(data.isel(feature0=slice(0, 2))) + + +@pytest.mark.parametrize( + "synthetic_dataset", + VALID_TEST_DATA, + indirect=["synthetic_dataset"], ) -def test_inverse_transform_components(mock_dataset, dim_sample, dim_feature): - stacker = SingleDatasetStacker() - stacked = stacker.fit_transform(mock_dataset, dim_sample, dim_feature) +def test_fit_transform(synthetic_dataset): + data = synthetic_dataset + all_dims, sample_dims, feature_dims = get_dims_from_data(data) - # dummy components - components = xr.DataArray( - np.random.normal(size=(len(stacker.coords_out_["feature"]), 10)), - dims=("feature", "mode"), - coords={"feature": stacker.coords_out_["feature"]}, - ) - inverse_transformed = stacker.inverse_transform_components(components) + stacker = DataSetStacker() + transformed_data = stacker.fit_transform(data, sample_dims, feature_dims) - # check output type and dimensions - assert isinstance(inverse_transformed, xr.Dataset) - assert set(inverse_transformed.dims) == set(dim_feature + ("mode",)) + is_dask_before = data_is_dask(data) + is_dask_after = data_is_dask(transformed_data) - assert set(mock_dataset.data_vars) == set( - inverse_transformed.data_vars - ), "Dataset variables are not the same." 
+ assert isinstance(transformed_data, DataArray) + assert transformed_data.ndim == 2 + assert transformed_data.dims == ("sample", "feature") + assert is_dask_before == is_dask_after @pytest.mark.parametrize( - "dim_sample, dim_feature", - [ - (("time",), ("lat", "lon")), - (("time",), ("lon", "lat")), - (("lat", "lon"), ("time",)), - (("lon", "lat"), ("time",)), - ], + "synthetic_dataset", + VALID_TEST_DATA, + indirect=["synthetic_dataset"], ) -def test_inverse_transform_scores(mock_dataset, dim_sample, dim_feature): - stacker = SingleDatasetStacker() - stacked = stacker.fit_transform(mock_dataset, dim_sample, dim_feature) +def test_invserse_transform_data(synthetic_dataset): + data = synthetic_dataset + all_dims, sample_dims, feature_dims = get_dims_from_data(data) - # dummy scores - scores = xr.DataArray( - np.random.rand(len(stacker.coords_out_["sample"]), 10), - dims=("sample", "mode"), - coords={"sample": stacker.coords_out_["sample"]}, - ) - inverse_transformed = stacker.inverse_transform_scores(scores) + stacker = DataSetStacker() + stacker.fit(data, sample_dims, feature_dims) + stacked_data = stacker.transform(data) + unstacked_data = stacker.inverse_transform_data(stacked_data) - assert isinstance(inverse_transformed, xr.DataArray) - assert set(inverse_transformed.dims) == set(dim_sample + ("mode",)) + is_dask_before = data_is_dask(data) + is_dask_after = data_is_dask(unstacked_data) - # check that sample coordinates are preserved - for dim, coords in mock_dataset.coords.items(): - if dim in dim_sample: - assert ( - inverse_transformed.coords[dim].size == coords.size - ), "Dimension {} has different size.".format(dim) + # Unstacked data has dimensions of original data + assert_expected_dims(data, unstacked_data, policy="all") + # Unstacked data has coordinates of original data + assert_expected_coords(data, unstacked_data, policy="all") + # inverse transform should not change dask-ness + assert is_dask_before == is_dask_after @pytest.mark.parametrize( - "dim_sample, dim_feature", - [ - (("time",), ("lat", "lon")), - (("time",), ("lon", "lat")), - (("lat", "lon"), ("time",)), - (("lon", "lat"), ("time",)), - ], + "synthetic_dataset", + VALID_TEST_DATA, + indirect=["synthetic_dataset"], ) -def test_fit_transform_raises_on_invalid_dims(mock_dataset, dim_sample, dim_feature): - stacker = SingleDatasetStacker() - with pytest.raises(ValueError): - stacker.fit_transform(mock_dataset, ("invalid_dim",), dim_feature) +def test_invserse_transform_components(synthetic_dataset): + data = synthetic_dataset + all_dims, sample_dims, feature_dims = get_dims_from_data(data) + stacker = DataSetStacker() + stacker.fit(data, sample_dims, feature_dims) -def test_fit_transform_raises_on_isolated_nans( - mock_data_array_isolated_nans, -): - stacker = SingleDatasetStacker() - invalid_dataset = xr.Dataset({"var": mock_data_array_isolated_nans}) - with pytest.raises(ValueError): - stacker.fit_transform(invalid_dataset, ("time",), ("lat", "lon")) + stacked_data = stacker.transform(data) + components = stacked_data.rename({"sample": "mode"}) + components.coords.update({"mode": range(components.mode.size)}) + unstacked_data = stacker.inverse_transform_components(components) + + is_dask_before = data_is_dask(data) + is_dask_after = data_is_dask(unstacked_data) + + # Unstacked components has correct feature dimensions + assert_expected_dims(data, unstacked_data, policy="feature") + # Unstacked data has coordinates of original data + assert_expected_coords(data, unstacked_data, policy="feature") + # inverse 
transform should not change dask-ness + assert is_dask_before == is_dask_after @pytest.mark.parametrize( - "dim_sample, dim_feature", - [ - (("time",), ("lat", "lon")), - (("time",), ("lon", "lat")), - (("lat", "lon"), ("time",)), - (("lon", "lat"), ("time",)), - ], + "synthetic_dataset", + VALID_TEST_DATA, + indirect=["synthetic_dataset"], ) -def test_fit_transform_passes_on_full_dimensional_nans( - mock_data_array_full_dimensional_nans, dim_sample, dim_feature -): - stacker = SingleDatasetStacker() - valid_dataset = xr.Dataset({"var": mock_data_array_full_dimensional_nans}) - try: - stacker.fit_transform(valid_dataset, dim_sample, dim_feature) - except ValueError: - pytest.fail("fit_transform() raised ValueError unexpectedly!") +def test_invserse_transform_scores(synthetic_dataset): + data = synthetic_dataset + all_dims, sample_dims, feature_dims = get_dims_from_data(data) + + stacker = DataSetStacker() + stacker.fit(data, sample_dims, feature_dims) + + stacked_data = stacker.transform(data) + scores = stacked_data.rename({"feature": "mode"}) + scores.coords.update({"mode": range(scores.mode.size)}) + unstacked_data = stacker.inverse_transform_scores(scores) + + is_dask_before = data_is_dask(data) + is_dask_after = data_is_dask(unstacked_data) + + # Unstacked scores has correct feature dimensions + assert_expected_dims(data, unstacked_data, policy="sample") + # Unstacked data has coordinates of original data + assert_expected_coords(data, unstacked_data, policy="sample") + # inverse transform should not change dask-ness + assert is_dask_before == is_dask_after diff --git a/tests/preprocessing/test_dataset_stacker_stack.py b/tests/preprocessing/test_dataset_stacker_stack.py deleted file mode 100644 index a267b4e..0000000 --- a/tests/preprocessing/test_dataset_stacker_stack.py +++ /dev/null @@ -1,171 +0,0 @@ -import pytest -import xarray as xr -import numpy as np - -from xeofs.preprocessing.stacker import SingleDatasetStacker - - -def create_ds(dim_sample, dim_feature, seed=None): - n_dims = len(dim_sample) + len(dim_feature) - size = n_dims * [3] - rng = np.random.default_rng(seed) - dims = dim_sample + dim_feature - coords = {d: np.arange(i, i + 3) for i, d in enumerate(dims)} - da1 = xr.DataArray(rng.normal(0, 1, size=size), dims=dims, coords=coords) - da2 = da1.sel(**{dim_feature[0]: slice(0, 2)}).squeeze().copy() - ds = xr.Dataset({"da1": da1, "da2": da2}) - return ds - - -# Valid input -# ============================================================================= -valid_input_dims = [ - # This SHOULD work but currently doesn't, potentially due to a bug in xarray (https://github.com/pydata/xarray/discussions/8063) - # (("year", "month"), ("lon", "lat")), - (("year",), ("lat", "lon")), - (("year", "month"), ("lon",)), - (("year",), ("lon",)), -] - -valid_input = [] -for dim_sample, dim_feature in valid_input_dims: - da = create_ds(dim_sample, dim_feature) - valid_input.append((da, dim_sample, dim_feature)) - - -# Invalid input -# ============================================================================= -invalid_input_dims = [ - (("sample",), ("feature", "lat")), - (("sample",), ("month", "feature")), - (("sample", "month"), ("lon", "lat")), - (("sample",), ("lon", "lat")), - (("year",), ("month", "sample")), - (("year",), ("sample",)), - (("sample",), ("lon",)), - (("year", "month"), ("lon", "feature")), - (("year", "month"), ("feature",)), - (("year",), ("feature",)), - (("feature",), ("lon", "lat")), - (("feature",), ("lon",)), - (("feature",), ("sample",)), - (("sample",), 
("feature",)), -] -invalid_input = [] -for dim_sample, dim_feature in invalid_input_dims: - da = create_ds(dim_sample, dim_feature) - invalid_input.append((da, dim_sample, dim_feature)) - - -# Test stacking -# ============================================================================= -@pytest.mark.parametrize("da, dim_sample, dim_feature", valid_input) -def test_fit_transform(da, dim_sample, dim_feature): - """Test fit_transform with valid input.""" - stacker = SingleDatasetStacker() - da_stacked = stacker.fit_transform(da, dim_sample, dim_feature) - - # Stacked data has dimensions (sample, feature) - err_msg = f"In: {da.dims}; Out: {da_stacked.dims}" - assert set(da_stacked.dims) == { - "sample", - "feature", - }, err_msg - - -@pytest.mark.parametrize("da, dim_sample, dim_feature", invalid_input) -def test_fit_transform_invalid_input(da, dim_sample, dim_feature): - """Test fit_transform with invalid input.""" - stacker = SingleDatasetStacker() - with pytest.raises(ValueError): - da_stacked = stacker.fit_transform(da, dim_sample, dim_feature) - - -@pytest.mark.parametrize("da, dim_sample, dim_feature", valid_input) -def test_inverse_transform_data(da, dim_sample, dim_feature): - """Test inverse transform with valid input.""" - stacker = SingleDatasetStacker() - da_stacked = stacker.fit_transform(da, dim_sample, dim_feature) - da_unstacked = stacker.inverse_transform_data(da_stacked) - - # Unstacked data has dimensions of original data - err_msg = f"Original: {da.dims}; Recovered: {da_unstacked.dims}" - assert set(da_unstacked.dims) == set(da.dims), err_msg - # Unstacked data has variables of original data - err_msg = f"Original: {set(da.data_vars)}; Recovered: {set(da_unstacked.data_vars)}" - assert set(da_unstacked.data_vars) == set(da.data_vars), err_msg - # Unstacked data has coordinates of original data - for var in da.data_vars: - # Check if the dimensions are correct - err_msg = f"Original: {da[var].dims}; Recovered: {da_unstacked[var].dims}" - assert set(da_unstacked[var].dims) == set(da[var].dims), err_msg - for coord in da[var].coords: - err_msg = f"Original: {da[var].coords[coord]}; Recovered: {da_unstacked[var].coords[coord]}" - coords_are_equal = da[var].coords[coord] == da_unstacked[var].coords[coord] - assert np.all(coords_are_equal), err_msg - - -@pytest.mark.parametrize("da, dim_sample, dim_feature", valid_input) -def test_inverse_transform_components(da, dim_sample, dim_feature): - """Test inverse transform components with valid input.""" - stacker = SingleDatasetStacker() - da_stacked = stacker.fit_transform(da, dim_sample, dim_feature) - # Mock components by dropping sampling dim from data - comps_stacked = da_stacked.drop_vars("sample").rename({"sample": "mode"}) - comps_stacked.coords.update({"mode": range(comps_stacked.mode.size)}) - - comps_unstacked = stacker.inverse_transform_components(comps_stacked) - - # Unstacked components are a Dataset - assert isinstance(comps_unstacked, xr.Dataset) - - # Unstacked data has variables of original data - err_msg = ( - f"Original: {set(da.data_vars)}; Recovered: {set(comps_unstacked.data_vars)}" - ) - assert set(comps_unstacked.data_vars) == set(da.data_vars), err_msg - - # Unstacked components has correct feature dimensions - expected_dims = dim_feature + ("mode",) - err_msg = f"Expected: {expected_dims}; Recovered: {comps_unstacked.dims}" - assert set(comps_unstacked.dims) == set(expected_dims), err_msg - - # Unstacked components has coordinates of original data - for var in da.data_vars: - # Feature dimensions in original 
and recovered comps are same for each variable - expected_dims_in_var = tuple(d for d in da[var].dims if d in dim_feature) - expected_dims_in_var += ("mode",) - err_msg = ( - f"Original: {expected_dims_in_var}; Recovered: {comps_unstacked[var].dims}" - ) - assert set(expected_dims_in_var) == set(comps_unstacked[var].dims), err_msg - # Coordinates in original and recovered comps are same for each variable - for dim in expected_dims_in_var: - if dim != "mode": - err_msg = f"Original: {da[var].coords[dim]}; Recovered: {comps_unstacked[var].coords[dim]}" - coords_are_equal = ( - da[var].coords[dim] == comps_unstacked[var].coords[dim] - ) - assert np.all(coords_are_equal), err_msg - - -@pytest.mark.parametrize("da, dim_sample, dim_feature", valid_input) -def test_inverse_transform_scores(da, dim_sample, dim_feature): - """Test inverse transform scores with valid input.""" - stacker = SingleDatasetStacker() - da_stacked = stacker.fit_transform(da, dim_sample, dim_feature) - # Mock scores by dropping feature dim from data - scores_stacked = da_stacked.drop_vars("feature").rename({"feature": "mode"}) - scores_stacked.coords.update({"mode": range(scores_stacked.mode.size)}) - - scores_unstacked = stacker.inverse_transform_scores(scores_stacked) - - # Unstacked scores are a DataArray - assert isinstance(scores_unstacked, xr.DataArray) - # Unstacked components has correct feature dimensions - expected_dims = dim_sample + ("mode",) - err_msg = f"Expected: {expected_dims}; Recovered: {scores_unstacked.dims}" - assert set(scores_unstacked.dims) == set(expected_dims), err_msg - # Unstacked data has coordinates of original data - for d in dim_sample: - assert np.all(scores_unstacked.coords[d].values == da.coords[d].values) diff --git a/xeofs/preprocessing/stacker.py b/xeofs/preprocessing/stacker.py index 665b3a1..7a8d415 100644 --- a/xeofs/preprocessing/stacker.py +++ b/xeofs/preprocessing/stacker.py @@ -1,28 +1,62 @@ -from typing import List, Sequence, Hashable, Tuple +from typing import List, Self import numpy as np import pandas as pd import xarray as xr +from sklearn.base import BaseEstimator, TransformerMixin -from xeofs.utils.data_types import DataArray +from xeofs.utils.data_types import DataArray, DataSet, DataList -from ._base_stacker import _BaseStacker from ..utils.data_types import ( + Dims, + DimsList, DataArray, - DataArrayList, Dataset, - SingleDataObject, - AnyDataObject, ) -from ..utils.sanity_checks import ensure_tuple +from ..utils.sanity_checks import convert_to_dim_type -class SingleDataStacker(_BaseStacker): - def _validate_matching_dimensions(self, data: SingleDataObject): +class DataArrayStacker(BaseEstimator, TransformerMixin): + """Converts a DataArray of any dimensionality into a 2D structure. + + Attributes + ---------- + sample_name : str + The name of the sample dimension. + feature_name : str + The name of the feature dimension. + dims_in : Tuple[str] + The dimensions of the input data. + dims_out : Tuple[str] + The dimensions of the output data. + dims_mapping : Dict[str, Tuple[str]] + The mapping between the input and output dimensions. + coords_in : Dict[str, xr.Coordinates] + The coordinates of the input data. + coords_out : Dict[str, xr.Coordinates] + The coordinates of the output data. 
+ """ + + def __init__( + self, + sample_name: str = "sample", + feature_name: str = "feature", + ): + self.sample_name = sample_name + self.feature_name = feature_name + + self.dims_in = tuple() + self.dims_out = tuple((sample_name, feature_name)) + self.dims_mapping = {d: tuple() for d in self.dims_out} + + self.coords_in = {} + self.coords_out = {} + + def _validate_matching_dimensions(self, data: DataArray): """Verify that the dimensions of the data are consistent with the dimensions used to fit the stacker.""" # Test whether sample and feature dimensions are present in data array - expected_sample_dims = set(self.dims_out_[self.sample_name]) - expected_feature_dims = set(self.dims_out_[self.feature_name]) + expected_sample_dims = set(self.dims_mapping[self.sample_name]) + expected_feature_dims = set(self.dims_mapping[self.feature_name]) expected_dims = expected_sample_dims | expected_feature_dims given_dims = set(data.dims) if not (expected_dims == given_dims): @@ -30,256 +64,41 @@ def _validate_matching_dimensions(self, data: SingleDataObject): f"One or more dimensions in {expected_dims} are not present in data." ) - def _validate_matching_feature_coords(self, data: SingleDataObject): + def _validate_matching_feature_coords(self, data: DataArray): """Verify that the feature coordinates of the data are consistent with the feature coordinates used to fit the stacker.""" + feature_dims = self.dims_mapping[self.feature_name] coords_are_equal = [ - data.coords[dim].equals(self.coords_in_[dim]) - for dim in self.dims_out_["feature"] + data.coords[dim].equals(self.coords_in[dim]) for dim in feature_dims ] if not all(coords_are_equal): raise ValueError( "Data to be transformed has different coordinates than the data used to fit." ) - def _reorder_dims(self, data): - """Reorder dimensions to original order; catch ('mode') dimensions via ellipsis""" - order_input_dims = [ - valid_dim for valid_dim in self.dims_in_ if valid_dim in data.dims - ] - return data.transpose(..., *order_input_dims) - - def _stack(self, data: SingleDataObject, sample_dims, feature_dims) -> DataArray: - """Reshape a SingleDataObject to 2D DataArray.""" - raise NotImplementedError - - def _unstack(self, data: SingleDataObject) -> SingleDataObject: - """Unstack `sample` and `feature` dimension of an DataArray to its original dimensions. - - Parameters - ---------- - data : DataArray - The data to be unstacked. - - Returns - ------- - data_unstacked : DataArray - The unstacked data. - """ - raise NotImplementedError() - - def _reindex_dim( - self, data: SingleDataObject, stacked_dim: str - ) -> SingleDataObject: - """Reindex data to original coordinates in case that some features at the boundaries were dropped - - Parameters - ---------- - data : DataArray - The data to be reindex. - stacked_dim : str ['sample', 'feature'] - The dimension to be reindexed. - - Returns - ------- - DataArray - The reindexed data. - - """ - # check if coordinates in self.coords have different length from data.coords - # if so, reindex data.coords to self.coords - # input_dim : dimensions of input data - # stacked_dim : dimensions of model data i.e. 
sample or feature - dims_in = self.dims_out_[stacked_dim] - for dim in dims_in: - if self.coords_in_[dim].size != data.coords[dim].size: - data = data.reindex({dim: self.coords_in_[dim]}, copy=False) - - return data - - def fit_transform( - self, - data: SingleDataObject, - sample_dims: Hashable | Sequence[Hashable], - feature_dims: Hashable | Sequence[Hashable], + def _validate_dimension_names(self, sample_dims, feature_dims): + if len(sample_dims) > 1: + if self.sample_name in sample_dims: + raise ValueError( + f"Name of sample dimension ({self.sample_name}) is already present in data. Please use another name." + ) + if len(feature_dims) > 1: + if self.feature_name in feature_dims: + raise ValueError( + f"Name of feature dimension ({self.feature_name}) is already present in data. Please use another name." + ) + + def _validate_indices(self, data: DataArray): + """Check that the indices of the data are no MultiIndex""" + if any([isinstance(index, pd.MultiIndex) for index in data.indexes.values()]): + raise ValueError(f"Cannot stack data containing a MultiIndex.") + + def _sanity_check(self, data: DataArray, sample_dims, feature_dims): + self._validate_dimension_names(sample_dims, feature_dims) + self._validate_indices(data) + + def _stack( + self, data: DataArray, sample_dims: Dims, feature_dims: Dims ) -> DataArray: - """Fit the stacker and transform data to 2D. - - Parameters - ---------- - data : DataArray - The data to be reshaped. - sample_dims : Hashable or Sequence[Hashable] - The dimensions of the data that will be stacked along the `sample` dimension. - feature_dims : Hashable or Sequence[Hashable] - The dimensions of the data that will be stacked along the `feature` dimension. - - Returns - ------- - DataArray - The reshaped data. - - Raises - ------ - ValueError - If any of the dimensions in `sample_dims` or `feature_dims` are not present in the data. - ValueError - If data to be transformed has individual NaNs. - ValueError - If data is empty - - """ - - sample_dims = ensure_tuple(sample_dims) - feature_dims = ensure_tuple(feature_dims) - - # The two sets `sample_dims` and `feature_dims` are disjoint/mutually exclusive - if not (set(sample_dims + feature_dims) == set(data.dims)): - raise ValueError( - f"One or more dimensions in {sample_dims + feature_dims} are not present in data dimensions: {data.dims}" - ) - - # Set in/out dimensions - sample_name = self.sample_name - feature_name = self.feature_name - self.dims_in_ = data.dims - self.dims_out_ = {sample_name: sample_dims, feature_name: feature_dims} - - # Set in/out coordinates - self.coords_in_ = {dim: data.coords[dim] for dim in data.dims} - - # Stack data - da: DataArray = self._stack( - data, self.dims_out_[sample_name], self.dims_out_[feature_name] - ) - # Remove NaN samples/features - da = da.dropna(feature_name, how="all") - da = da.dropna(sample_name, how="all") - - self.coords_out_ = { - sample_name: da.coords[sample_name], - feature_name: da.coords[feature_name], - } - - # Ensure that no NaNs are present in the data - if da.isnull().any(): - raise ValueError( - "Isolated NaNs are present in the data. Please remove them before fitting the model." - ) - - # Ensure that data is not empty - if da.size == 0: - raise ValueError("Data is empty.") - - return da - - def transform(self, data: SingleDataObject) -> DataArray: - """Transform new "unseen" data to 2D version. - - Parameters - ---------- - data : DataArray - The data to be reshaped. - - Returns - ------- - DataArray - The reshaped data. 
- - Raises - ------ - ValueError - If the data to be transformed has different dimensions than the data used to fit the stacker. - ValueError - If the data to be transformed has different coordinates than the data used to fit the stacker. - ValueError - If the data to be transformed has individual NaNs. - ValueError - If data is empty - - """ - # Test whether sample and feature dimensions are present in data array - self._validate_matching_dimensions(data) - - # Check if data to be transformed has the same feature coordinates as the data used to fit the stacker - self._validate_matching_feature_coords(data) - - # Stack data and remove NaN features - da: DataArray = self._stack( - data, self.dims_out_[self.sample_name], self.dims_out_[self.feature_name] - ) - da = da.dropna(self.feature_name, how="all") - da = da.dropna(self.sample_name, how="all") - - # Ensure that no NaNs are present in the data - if da.isnull().any(): - raise ValueError( - "Isolated NaNs are present in the data. Please remove them before fitting the model." - ) - - # Ensure that data is not empty - if da.size == 0: - raise ValueError("Data is empty.") - - return da - - -class SingleDataArrayStacker(SingleDataStacker): - """Converts a DataArray of any dimensionality into a 2D structure. - - This operation generates a reshaped DataArray with two distinct dimensions: 'sample' and 'feature'. - - The handling of NaNs is specific: if they are found to populate an entire dimension (be it 'sample' or 'feature'), - they are temporarily removed during transformations and subsequently reinstated. - However, the presence of isolated NaNs will trigger an error. - - """ - - def _validate_dimensions(self, sample_dims: Tuple[str], feature_dims: Tuple[str]): - """Verify the dimensions are correctly specified. - For example, valid input dimensions (sample, feature) are: - - (("year", "month"), ("lon", "lat")), - (("year",), ("lat", "lon")), - (("year", "month"), ("lon",)), - (("year",), ("lon",)), - (("sample",), ("feature",)), <-- special case only valid for DataArrays - - """ - sample_name = self.sample_name - feature_name = self.feature_name - - # Check for `sample` and `feature` special cases - if sample_dims == (sample_name,) and feature_dims != (feature_name,): - err_msg = """Due to the internal logic of this package, - when using the 'sample' dimension in sample_dims, it should only be - paired with the 'feature' dimension in feature_dims. 
Please rename or remove
-            other dimensions."""
-            raise ValueError(err_msg)
-
-        if feature_dims == (feature_name,) and sample_dims != (sample_name,):
-            err_msg = """Invalid combination: 'feature' dimension in feature_dims should only
-            be paired with 'sample' dimension in sample_dims."""
-            raise ValueError(err_msg)
-
-        if sample_name in sample_dims and len(sample_dims) > 1:
-            err_msg = """Invalid combination: 'sample' dimension should not be combined with other
-            dimensions in sample_dims."""
-            raise ValueError(err_msg)
-
-        if feature_name in feature_dims and len(feature_dims) > 1:
-            err_msg = """Invalid combination: 'feature' dimension should not be combined with other
-            dimensions in feature_dims."""
-            raise ValueError(err_msg)
-
-        if sample_name in feature_dims:
-            err_msg = """Invalid combination: 'sample' dimension should not appear in feature_dims."""
-            raise ValueError(err_msg)
-
-        if feature_name in sample_dims:
-            err_msg = """Invalid combination: 'feature' dimension should not appear in sample_dims."""
-            raise ValueError(err_msg)
-
-    def _stack(self, data: DataArray, sample_dims, feature_dims) -> DataArray:
        """Reshape a DataArray to 2D.

        Parameters
@@ -299,40 +118,43 @@ def _stack(self, data: DataArray, sample_dims, feature_dims) -> DataArray:
        sample_name = self.sample_name
        feature_name = self.feature_name

-        self._validate_dimensions(sample_dims, feature_dims)
        # 3 cases:
        # 1. uni-dimensional with correct feature/sample name ==> do nothing
        # 2. uni-dimensional with name different from feature/sample ==> rename
        # 3. multi-dimensional with names different from feature/sample ==> stack

-        # - FEATURE -
-        if len(feature_dims) == 1:
+        # - SAMPLE -
+        if len(sample_dims) == 1:
            # Case 1
-            if feature_dims[0] == feature_name:
+            if sample_dims[0] == sample_name:
                pass
            # Case 2
            else:
-                data = data.rename({feature_dims[0]: feature_name})
+                data = data.rename({sample_dims[0]: sample_name})
        # Case 3
        else:
-            data = data.stack({feature_name: feature_dims})
+            data = data.stack({sample_name: sample_dims})

-        # - SAMPLE -
-        if len(sample_dims) == 1:
+        # - FEATURE -
+        if len(feature_dims) == 1:
            # Case 1
-            if sample_dims[0] == sample_name:
+            if feature_dims[0] == feature_name:
                pass
            # Case 2
            else:
-                data = data.rename({sample_dims[0]: sample_name})
+                data = data.rename({feature_dims[0]: feature_name})
        # Case 3
        else:
-            data = data.stack({sample_name: sample_dims})
+            data = data.stack({feature_name: feature_dims})

-        return data.transpose(sample_name, feature_name)
+        # Reorder dimensions to be always (sample, feature)
+        if data.dims == (feature_name, sample_name):
+            data = data.transpose(sample_name, feature_name)
+
+        return data

    def _unstack(self, data: DataArray) -> DataArray:
-        """Unstack `sample` and `feature` dimension of an DataArray to its original dimensions.
+        """Unstack 2D DataArray to its original dimensions.
Parameters
        ----------
@@ -346,109 +168,195 @@ def _unstack(self, data: DataArray) -> DataArray:
        """
        sample_name = self.sample_name
        feature_name = self.feature_name
+
        # pass if feature/sample dimensions do not exist in data
        if feature_name in data.dims:
            # If the feature dimension is one-dimensional, renaming is sufficient; otherwise unstack
-            if len(self.dims_out_[feature_name]) == 1:
-                if self.dims_out_[feature_name][0] != feature_name:
-                    data = data.rename({feature_name: self.dims_out_[feature_name][0]})
+            if len(self.dims_mapping[feature_name]) == 1:
+                if self.dims_mapping[feature_name][0] != feature_name:
+                    data = data.rename(
+                        {feature_name: self.dims_mapping[feature_name][0]}
+                    )
            else:
                data = data.unstack(feature_name)

        if sample_name in data.dims:
            # If the sample dimension is one-dimensional, renaming is sufficient; otherwise unstack
-            if len(self.dims_out_[sample_name]) == 1:
-                if self.dims_out_[sample_name][0] != sample_name:
-                    data = data.rename({sample_name: self.dims_out_[sample_name][0]})
+            if len(self.dims_mapping[sample_name]) == 1:
+                if self.dims_mapping[sample_name][0] != sample_name:
+                    data = data.rename({sample_name: self.dims_mapping[sample_name][0]})
            else:
                data = data.unstack(sample_name)

-        # Reorder dimensions to original order
-        data = self._reorder_dims(data)
+        else:
+            pass

        return data

-    def _reindex_dim(self, data: DataArray, stacked_dim: str) -> DataArray:
-        return super()._reindex_dim(data, stacked_dim)
+    def _reorder_dims(self, data):
+        """Reorder dimensions to original order; catch extra dimensions (e.g. 'mode') via ellipsis"""
+        order_input_dims = [
+            valid_dim for valid_dim in self.dims_in if valid_dim in data.dims
+        ]
+        if tuple(order_input_dims) != data.dims:
+            data = data.transpose(..., *order_input_dims)
+        return data

-    def fit_transform(
+    def fit(
        self,
        data: DataArray,
-        sample_dims: Hashable | Sequence[Hashable],
-        feature_dims: Hashable | Sequence[Hashable],
-    ) -> DataArray:
-        return super().fit_transform(data, sample_dims, feature_dims)
+        sample_dims: Dims,
+        feature_dims: Dims,
+        y=None,
+    ) -> Self:
+        """Fit the stacker.

-    def transform(self, data: DataArray) -> DataArray:
-        return super().transform(data)
+        Parameters
+        ----------
+        data : DataArray
+            The data to be reshaped.

-    def inverse_transform_data(self, data: DataArray) -> DataArray:
-        """Reshape the 2D data (sample x feature) back into its original shape."""
+        Returns
+        -------
+        self : DataArrayStacker
+            The fitted stacker.

-        data = self._unstack(data)
+        """
+        self._sanity_check(data, sample_dims, feature_dims)

-        # Reindex data to original coordinates in case that some features at the boundaries were dropped
-        data = self._reindex_dim(data, self.feature_name)
-        data = self._reindex_dim(data, self.sample_name)
+        # Set in/out dimensions
+        self.dims_in = data.dims
+        self.dims_mapping = {
+            self.sample_name: sample_dims,
+            self.feature_name: feature_dims,
+        }

-        return data
+        # Set in coordinates
+        self.coords_in = {dim: data.coords[dim] for dim in data.dims}

-    def inverse_transform_components(self, data: DataArray) -> DataArray:
-        """Reshape the 2D data (mode x feature) back into its original shape."""
+        return self

-        data = self._unstack(data)
+    def transform(self, data: DataArray) -> DataArray:
+        """Reshape DataArray to 2D.

-        # Reindex data to original coordinates in case that some features at the boundaries were dropped
-        data = self._reindex_dim(data, self.feature_name)
+        Parameters
+        ----------
+        data : DataArray
+            The data to be reshaped.

-        return data
+        Returns
+        -------
+        DataArray
+            The reshaped data.
- 
-    def inverse_transform_scores(self, data: DataArray) -> DataArray:
-        """Reshape the 2D data (sample x mode) back into its original shape."""
+        Raises
+        ------
+        ValueError
+            If the data to be transformed has different dimensions than the data used to fit the stacker.
+        ValueError
+            If the data to be transformed has different coordinates than the data used to fit the stacker.

-        data = self._unstack(data)
+        """
+        # Test whether sample and feature dimensions are present in data array
+        self._validate_matching_dimensions(data)

-        # Scores are not to be reindexed since they new data typically has different sample coordinates
-        # than the original data used for fitting the model
+        # Check if data to be transformed has the same feature coordinates as the data used to fit the stacker
+        self._validate_matching_feature_coords(data)

-        return data
+        # Stack data
+        sample_dims = self.dims_mapping[self.sample_name]
+        feature_dims = self.dims_mapping[self.feature_name]
+        da: DataArray = self._stack(
+            data, sample_dims=sample_dims, feature_dims=feature_dims
+        )

+        # Set out coordinates
+        self.coords_out.update(
+            {
+                self.sample_name: da.coords[self.sample_name],
+                self.feature_name: da.coords[self.feature_name],
+            }
+        )
+        return da

-class SingleDatasetStacker(SingleDataStacker):
-    """Converts a Dataset of any dimensionality into a 2D structure.
+    def fit_transform(
+        self,
+        data: DataArray,
+        sample_dims: Dims,
+        feature_dims: Dims,
+        y=None,
+    ) -> DataArray:
+        return self.fit(data, sample_dims, feature_dims, y).transform(data)

-    This operation generates a reshaped Dataset with two distinct dimensions: 'sample' and 'feature'.
+    def inverse_transform_data(self, data: DataArray) -> DataArray:
+        """Reshape the 2D data (sample x feature) back into its original dimensions.

-    The handling of NaNs is specific: if they are found to populate an entire dimension (be it 'sample' or 'feature'),
-    they are temporarily removed during transformations and subsequently reinstated.
-    However, the presence of isolated NaNs will trigger an error.
+        Parameters
+        ----------
+        data : DataArray
+            The data to be reshaped.

-    """
+        Returns
+        -------
+        DataArray
+            The reshaped data.

-    def _validate_dimensions(self, sample_dims: Tuple[str], feature_dims: Tuple[str]):
-        """Verify the dimensions are correctly specified.
+        """
+        data = self._unstack(data)
+        data = self._reorder_dims(data)
+        return data

-        For example, valid input dimensions (sample, feature) are:
+    def inverse_transform_components(self, data: DataArray) -> DataArray:
+        """Reshape the 2D components (mode x feature) back into its original dimensions.

-        (("year", "month"), ("lon", "lat")),
-        (("year",), ("lat", "lon")),
-        (("year", "month"), ("lon",)),
-        (("year",), ("lon",)),
+        Parameters
+        ----------
+        data : DataArray
+            The data to be reshaped.
+
+        Returns
+        -------
+        DataArray
+            The reshaped data.
+
+        """
+        data = self._unstack(data)
+        data = self._reorder_dims(data)
+        return data
+
+    def inverse_transform_scores(self, data: DataArray) -> DataArray:
+        """Reshape the 2D scores (sample x mode) back into its original dimensions.
+
+        Parameters
+        ----------
+        data : DataArray
+            The data to be reshaped.

-        Invalid examples are:
-            any combination that contains 'sample' and/or 'feature' dimension
+        Returns
+        -------
+        DataArray
+            The reshaped data.
""" - sample_name = self.sample_name - feature_name = self.feature_name + data = self._unstack(data) + data = self._reorder_dims(data) + return data - if sample_name in sample_dims or sample_name in feature_dims: - err_msg = f"The dimension {sample_name} is reserved for internal used. Please rename." - raise ValueError(err_msg) - if feature_name in sample_dims or feature_name in feature_dims: - err_msg = f"The dimension {feature_name} is reserved for internal used. Please rename." - raise ValueError(err_msg) + +class DataSetStacker(DataArrayStacker): + """Converts a Dataset of any dimensionality into a 2D structure.""" + + def _validate_dimension_names(self, sample_dims, feature_dims): + if len(sample_dims) > 1: + if self.sample_name in sample_dims: + raise ValueError( + f"Name of sample dimension ({self.sample_name}) is already present in data. Please use another name." + ) + if len(feature_dims) >= 1: + if self.feature_name in feature_dims: + raise ValueError( + f"Name of feature dimension ({self.feature_name}) is already present in data. Please use another name." + ) def _stack(self, data: Dataset, sample_dims, feature_dims) -> DataArray: """Reshape a Dataset to 2D. @@ -464,108 +372,130 @@ def _stack(self, data: Dataset, sample_dims, feature_dims) -> DataArray: Returns ------- - data_stacked : DataArray | Dataset + data_stacked : DataArray The reshaped 2d-data. """ sample_name = self.sample_name feature_name = self.feature_name - self._validate_dimensions(sample_dims, feature_dims) - # 2 cases: - # 1. uni-dimensional with name different from feature/sample ==> rename - # 2. multi-dimensinoal with names different from feature/sample ==> stack + # 3 cases: + # 1. uni-dimensional with correct feature/sample name ==> do nothing + # 2. uni-dimensional with name different from feature/sample ==> rename + # 3. multi-dimensinoal with names different from feature/sample ==> stack + + # - SAMPLE - + if len(sample_dims) == 1: + # Case 1 + if sample_dims[0] == sample_name: + pass + # Case 2 + else: + data = data.rename({sample_dims[0]: sample_name}) + # Case 3 + else: + data = data.stack({sample_name: sample_dims}) # - FEATURE - # Convert Dataset -> DataArray, stacking all non-sample dimensions to feature dimension, including data variables - # Case 1 & 2 - da = data.to_stacked_array(new_dim=feature_name, sample_dims=sample_dims) - - # Rename if sample dimensions is one dimensional, otherwise stack - # Case 1 - if len(sample_dims) == 1: - da = da.rename({sample_dims[0]: sample_name}) - # Case 2 + err_msg = f"Feature dimension {feature_dims[0]} already exists in data. Please choose another feature dimension name." 
+        # Case 2 & 3
+        if (len(feature_dims) == 1) and (feature_dims[0] == feature_name):
+            raise ValueError(err_msg)
        else:
-            da = da.stack({sample_name: sample_dims})
+            try:
+                da = data.to_stacked_array(
+                    new_dim=feature_name, sample_dims=(self.sample_name,)
+                )
+            except ValueError:
+                raise ValueError(err_msg)
+
+        # Reorder dimensions to be always (sample, feature)
+        if da.dims == (feature_name, sample_name):
+            da = da.transpose(sample_name, feature_name)

-        return da.transpose(sample_name, feature_name)
+        return da

-    def _unstack_data(self, data: DataArray) -> Dataset:
+    def _unstack_data(self, data: DataArray) -> DataSet:
        """Unstack `sample` and `feature` dimensions of a DataArray to its original dimensions."""
        sample_name = self.sample_name
        feature_name = self.feature_name

-        if len(self.dims_out_[sample_name]) == 1:
-            data = data.rename({sample_name: self.dims_out_[sample_name][0]})
-        ds: Dataset = data.to_unstacked_dataset(feature_name, "variable").unstack()
+        has_only_one_sample_dim = len(self.dims_mapping[sample_name]) == 1
+
+        if has_only_one_sample_dim:
+            data = data.rename({sample_name: self.dims_mapping[sample_name][0]})
+
+        ds: DataSet = data.to_unstacked_dataset(feature_name, "variable").unstack()
        ds = self._reorder_dims(ds)
        return ds

-    def _unstack_components(self, data: DataArray) -> Dataset:
+    def _unstack_components(self, data: DataArray) -> DataSet:
        feature_name = self.feature_name
-        ds: Dataset = data.to_unstacked_dataset(feature_name, "variable").unstack()
+        ds: DataSet = data.to_unstacked_dataset(feature_name, "variable").unstack()
        ds = self._reorder_dims(ds)
        return ds

    def _unstack_scores(self, data: DataArray) -> DataArray:
        sample_name = self.sample_name
-        if len(self.dims_out_[sample_name]) == 1:
-            data = data.rename({sample_name: self.dims_out_[sample_name][0]})
+        has_only_one_sample_dim = len(self.dims_mapping[sample_name]) == 1
+
+        if has_only_one_sample_dim:
+            data = data.rename({sample_name: self.dims_mapping[sample_name][0]})
+
        data = data.unstack()
        data = self._reorder_dims(data)
        return data

-    def _reindex_dim(self, data: Dataset, model_dim: str) -> Dataset:
-        return super()._reindex_dim(data, model_dim)
-
-    def fit_transform(
+    def fit(
        self,
-        data: Dataset,
-        sample_dims: Hashable | Sequence[Hashable],
-        feature_dims: Hashable | Sequence[Hashable] | List[Sequence[Hashable]],
-    ) -> xr.DataArray:
-        return super().fit_transform(data, sample_dims, feature_dims)
+        data: DataSet,
+        sample_dims: Dims,
+        feature_dims: Dims,
+        y=None,
+    ) -> Self:
+        """Fit the stacker.

-    def transform(self, data: Dataset) -> DataArray:
-        return super().transform(data)
+        Parameters
+        ----------
+        data : DataSet
+            The data to be reshaped.

-    def inverse_transform_data(self, data: DataArray) -> Dataset:
-        """Reshape the 2D data (sample x feature) back into its original shape."""
-        data_ds: Dataset = self._unstack_data(data)
+        Returns
+        -------
+        self : DataSetStacker
+            The fitted stacker.
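+
+        Examples
+        --------
+        A minimal sketch (``ds`` stands for any Dataset whose variables share a
+        ``time`` dimension; the names are illustrative, not part of the API):
+
+        >>> stacker = DataSetStacker()
+        >>> stacker.fit(ds, sample_dims=("time",), feature_dims=("lat", "lon"))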
- 
-        # Reindex data to original coordinates in case that some features at the boundaries were dropped
-        data_ds = self._reindex_dim(data_ds, self.feature_name)
-        data_ds = self._reindex_dim(data_ds, self.sample_name)
+        """
+        return super().fit(data, sample_dims, feature_dims, y)  # type: ignore

-        return data_ds
+    def transform(self, data: DataSet) -> DataArray:
+        return super().transform(data)  # type: ignore

-    def inverse_transform_components(self, data: DataArray) -> Dataset:
-        """Reshape the 2D data (mode x feature) back into its original shape."""
-        data_ds: Dataset = self._unstack_components(data)
+    def fit_transform(
+        self, data: DataSet, sample_dims: Dims, feature_dims: Dims, y=None
+    ) -> DataArray:
+        return super().fit_transform(data, sample_dims, feature_dims, y)  # type: ignore

-        # Reindex data to original coordinates in case that some features at the boundaries were dropped
-        data_ds = self._reindex_dim(data_ds, self.feature_name)
+    def inverse_transform_data(self, data: DataArray) -> DataSet:
+        """Reshape the 2D data (sample x feature) back into its original shape."""
+        data_ds: DataSet = self._unstack_data(data)
+        return data_ds

+    def inverse_transform_components(self, data: DataArray) -> DataSet:
+        """Reshape the 2D components (mode x feature) back into its original shape."""
+        data_ds: DataSet = self._unstack_components(data)
        return data_ds

    def inverse_transform_scores(self, data: DataArray) -> DataArray:
-        """Reshape the 2D data (sample x mode) back into its original shape."""
+        """Reshape the 2D scores (sample x mode) back into its original shape."""
        data = self._unstack_scores(data)
-
-        # Scores are not to be reindexed since they new data typically has different sample coordinates
-        # than the original data used for fitting the model
-
        return data


-class ListDataArrayStacker(_BaseStacker):
+class DataListStacker(DataArrayStacker):
    """Converts a list of DataArrays of any dimensionality into a 2D structure.

    This operation generates a reshaped DataArray with two distinct dimensions: 'sample' and 'feature'.

-    The handling of NaNs is specific: if they are found to populate an entire dimension (be it 'sample' or 'feature'),
-    they are temporarily removed during transformations and subsequently reinstated.
-    However, the presence of isolated NaNs will trigger an error.
-
    At a minimum, the `sample` dimension must be present in all DataArrays. The `feature` dimension can be different
    for each DataArray and must be specified as a list of dimensions.

@@ -575,26 +505,26 @@ def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.stackers = []

-    def fit_transform(
+    def fit(
        self,
-        data: DataArrayList,
-        sample_dims: Hashable | Sequence[Hashable],
-        feature_dims: Hashable | Sequence[Hashable] | List[Sequence[Hashable]],
-    ) -> DataArray:
-        """Fit the stacker to the data.
+        X: DataList,
+        sample_dims: Dims,
+        feature_dims: DimsList,
+        y=None,
+    ):
+        """Fit the stacker.

        Parameters
        ----------
-        data : DataArray
+        X : DataList
            The data to be reshaped.
-        sample_dims : Hashable or Sequence[Hashable]
-            The dimensions of the data that will be stacked along the `sample` dimension.
-        feature_dims : Hashable or Sequence[Hashable]
-            The dimensions of the data that will be stacked along the `feature` dimension.
+
+        Returns
+        -------
+        self : DataListStacker
+            The fitted stacker.
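+
+        Examples
+        --------
+        A minimal sketch (``da1`` and ``da2`` stand for any DataArrays that
+        share a ``time`` dimension; the names are illustrative):
+
+        >>> stacker = DataListStacker()
+        >>> stacker.fit([da1, da2], ("time",), [("lat", "lon"), ("station",)])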
""" - sample_name = self.sample_name - feature_name = self.feature_name # Check input if not isinstance(feature_dims, list): @@ -602,43 +532,75 @@ def fit_transform( "feature dims must be a list of the feature dimensions of each DataArray" ) - sample_dims = ensure_tuple(sample_dims) - feature_dims = [ensure_tuple(fdims) for fdims in feature_dims] + sample_dims = convert_to_dim_type(sample_dims) + feature_dims = [convert_to_dim_type(fdims) for fdims in feature_dims] - if len(data) != len(feature_dims): + if len(X) != len(feature_dims): err_message = ( "Number of data arrays and feature dimensions must be the same. " ) - err_message += f"Got {len(data)} data arrays and {len(feature_dims)} feature dimensions" + err_message += ( + f"Got {len(X)} data arrays and {len(feature_dims)} feature dimensions" + ) raise ValueError(err_message) # Set in/out dimensions - self.dims_in_ = [da.dims for da in data] - self.dims_out_ = { - sample_name: sample_dims, - feature_name: feature_dims, + self.dims_in = [data.dims for data in X] + self.dims_out = tuple((self.sample_name, self.feature_name)) + self.dims_mapping = { + self.sample_name: sample_dims, + self.feature_name: feature_dims, } # Set in/out coordinates - self.coords_in_ = [ - {dim: coords for dim, coords in da.coords.items()} for da in data - ] + self.coords_in = [{dim: data.coords[dim] for dim in data.dims} for data in X] - for da, fdims in zip(data, feature_dims): - stacker = SingleDataArrayStacker() - da_stacked = stacker.fit_transform(da, sample_dims, fdims) + # Fit stacker for each DataArray + for data, fdims in zip(X, feature_dims): + stacker = DataArrayStacker( + sample_name=self.sample_name, feature_name=self.feature_name + ) + stacker.fit(data, sample_dims=sample_dims, feature_dims=fdims) self.stackers.append(stacker) - stacked_data_list = [] + return self + + def transform(self, X: DataList) -> DataArray: + """Reshape DataArray to 2D. + + Parameters + ---------- + X : DataList + The data to be reshaped. + + Returns + ------- + DataArray + The reshaped data. + + Raises + ------ + ValueError + If the data to be transformed has different dimensions than the data used to fit the stacker. + ValueError + If the data to be transformed has different coordinates than the data used to fit the stacker. + + """ + # Test whether the input list has same length as the number of stackers + if len(X) != len(self.stackers): + raise ValueError( + f"Invalid input. Number of DataArrays ({len(X)}) does not match the number of fitted DataArrays ({len(self.stackers)})." 
+            )
+
+        stacked_data_list: List[DataArray] = []
        idx_coords_size = []
        dummy_feature_coords = []

        # Stack individual DataArrays
-        for da, fdims in zip(data, feature_dims):
-            stacker = SingleDataArrayStacker()
-            da_stacked = stacker.fit_transform(da, sample_dims, fdims)
-            idx_coords_size.append(da_stacked.coords[feature_name].size)
-            stacked_data_list.append(da_stacked)
+        for stacker, data in zip(self.stackers, X):
+            data_stacked = stacker.transform(data)
+            idx_coords_size.append(data_stacked.coords[self.feature_name].size)
+            stacked_data_list.append(data_stacked)

        # Create dummy feature coordinates for each DataArray
        idx_range = np.cumsum([0] + idx_coords_size)
@@ -647,93 +609,78 @@ def fit_transform(

        # Replace original feature coordinates with dummy coordinates
        for i, data in enumerate(stacked_data_list):
-            data = data.drop("feature")  # type: ignore
-            stacked_data_list[i] = data.assign_coords(feature=dummy_feature_coords[i])  # type: ignore
+            data = data.drop_vars(self.feature_name)
+            stacked_data_list[i] = data.assign_coords(
+                {self.feature_name: dummy_feature_coords[i]}
+            )

        self._dummy_feature_coords = dummy_feature_coords

-        stacked_data_list = xr.concat(stacked_data_list, dim=feature_name)
+        stacked_data: DataArray = xr.concat(stacked_data_list, dim=self.feature_name)

-        self.coords_out_ = {
-            sample_name: stacked_data_list.coords[sample_name],
-            feature_name: stacked_data_list.coords[feature_name],
+        self.coords_out = {
+            self.sample_name: stacked_data.coords[self.sample_name],
+            self.feature_name: stacked_data.coords[self.feature_name],
        }

-        return stacked_data_list
+        return stacked_data

-    def transform(self, data: DataArrayList) -> DataArray:
-        """Reshape the data into a 2D version.
-
-        Parameters
-        ----------
-        data: list of DataArrays
-            The data to be reshaped.
-
-        Returns
-        -------
-        DataArray
-            The reshaped 2D data.
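+    # For intuition: the dummy feature coordinates assigned in `transform` above
+    # are disjoint integer ranges, so the individually stacked DataArrays can be
+    # concatenated along one shared feature axis and split apart again later,
+    # e.g. with feature sizes 4 and 6 (illustrative values):
+    #   idx_range = np.cumsum([0, 4, 6])    # array([ 0,  4, 10])
+    #   dummy = [range(0, 4), range(4, 10)]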
+ def fit_transform( + self, + X: DataList, + sample_dims: Dims, + feature_dims: DimsList, + y=None, + ) -> DataArray: + return self.fit(X, sample_dims, feature_dims, y).transform(X) - """ + def _split_dataarray_into_list(self, data: DataArray) -> DataList: feature_name = self.feature_name + data_list: DataList = [] - stacked_data_list = [] - - # Stack individual DataArrays - for i, (stacker, da) in enumerate(zip(self.stackers, data)): - stacked_data = stacker.transform(da) - stacked_data = stacked_data.drop(feature_name) - # Replace original feature coordiantes with dummy coordinates - stacked_data.coords.update({feature_name: self._dummy_feature_coords[i]}) - stacked_data_list.append(stacked_data) - - return xr.concat(stacked_data_list, dim=feature_name) - - def inverse_transform_data(self, data: DataArray) -> DataArrayList: - """Reshape the 2D data (sample x feature) back into its original shape.""" - feature_name = self.feature_name - dalist = [] for stacker, features in zip(self.stackers, self._dummy_feature_coords): # Select the features corresponding to the current DataArray - subda = data.sel(feature=features) + sub_selection = data.sel({feature_name: features}) # Replace dummy feature coordinates with original feature coordinates - subda = subda.assign_coords(feature=stacker.coords_out_[feature_name]) + sub_selection = sub_selection.assign_coords( + {feature_name: stacker.coords_out[feature_name]} + ) # In case of MultiIndex we have to set the index to the feature dimension again - if isinstance(subda.indexes[feature_name], pd.MultiIndex): - subda = subda.set_index(feature=stacker.dims_out_[feature_name]) + if isinstance(sub_selection.indexes[feature_name], pd.MultiIndex): + sub_selection = sub_selection.set_index( + {feature_name: stacker.dims_mapping[feature_name]} + ) else: # NOTE: This is a workaround for the case where the feature dimension is a tuple of length 1 # the problem is described here: https://github.com/pydata/xarray/discussions/7958 - subda = subda.rename(feature=stacker.dims_out_[feature_name][0]) + sub_selection = sub_selection.rename( + {feature_name: stacker.dims_mapping[feature_name][0]} + ) + data_list.append(sub_selection) + + return data_list + def inverse_transform_data(self, data: DataArray) -> DataList: + """Reshape the 2D data (sample x feature) back into its original shape.""" + data_split: DataList = self._split_dataarray_into_list(data) + data_transformed = [] + for stacker, data in zip(self.stackers, data_split): # Inverse transform the data using the corresponding stacker - subda = stacker.inverse_transform_data(subda) - dalist.append(subda) - return dalist + data_transformed.append(stacker.inverse_transform_data(data)) - def inverse_transform_components(self, data: DataArray) -> DataArrayList: - """Reshape the 2D data (mode x feature) back into its original shape.""" - feature_name = self.feature_name - dalist = [] - for stacker, features in zip(self.stackers, self._dummy_feature_coords): - # Select the features corresponding to the current DataArray - subda = data.sel(feature=features) - # Replace dummy feature coordinates with original feature coordinates - subda = subda.assign_coords(feature=stacker.coords_out_[feature_name]) + return data_transformed - # In case of MultiIndex we have to set the index to the feature dimension again - if isinstance(subda.indexes[feature_name], pd.MultiIndex): - subda = subda.set_index(feature=stacker.dims_out_[feature_name]) - else: - # NOTE: This is a workaround for the case where the feature dimension is a 
tuple of length 1 - # the problem is described here: https://github.com/pydata/xarray/discussions/7958 - subda = subda.rename(feature=stacker.dims_out_[feature_name][0]) + def inverse_transform_components(self, data: DataArray) -> DataList: + """Reshape the 2D components (sample x feature) back into its original shape.""" + data_split: DataList = self._split_dataarray_into_list(data) + data_transformed = [] + for stacker, data in zip(self.stackers, data_split): # Inverse transform the data using the corresponding stacker - subda = stacker.inverse_transform_components(subda) - dalist.append(subda) - return dalist + data_transformed.append(stacker.inverse_transform_components(data)) + + return data_transformed def inverse_transform_scores(self, data: DataArray) -> DataArray: - """Reshape the 2D data (sample x mode) back into its original shape.""" + """Reshape the 2D scores (sample x mode) back into its original shape.""" return self.stackers[0].inverse_transform_scores(data) From 7b5068b9215351aaca855d716cc1b7b60069251c Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Sun, 24 Sep 2023 15:54:32 +0200 Subject: [PATCH 12/43] refactor: Sanitizer removes NaNs --- xeofs/preprocessing/sanitizer.py | 88 ++++++++++++++++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 xeofs/preprocessing/sanitizer.py diff --git a/xeofs/preprocessing/sanitizer.py b/xeofs/preprocessing/sanitizer.py new file mode 100644 index 0000000..64c27b7 --- /dev/null +++ b/xeofs/preprocessing/sanitizer.py @@ -0,0 +1,88 @@ +from typing import Self + +import xarray as xr +from sklearn.base import BaseEstimator, TransformerMixin + +from ..utils.data_types import DataArray + + +class DataArraySanitizer(BaseEstimator, TransformerMixin): + """ + Removes NaNs from the feature dimension of a 2D DataArray. + + """ + + def __init__(self, sample_name="sample", feature_name="feature"): + self.sample_name = sample_name + self.feature_name = feature_name + + def _check_input_type(self, data) -> None: + if not isinstance(data, xr.DataArray): + raise ValueError("Input must be an xarray DataArray") + + def _check_input_dims(self, data: DataArray) -> None: + if set(data.dims) != set([self.sample_name, self.feature_name]): + raise ValueError( + "Input must have dimensions ({:}, {:})".format( + self.sample_name, self.feature_name + ) + ) + + def _check_input_coords(self, data: DataArray) -> None: + if not data.coords[self.feature_name].identical(self.feature_coords): + raise ValueError( + "Cannot transform data. Feature coordinates are different." 
+ ) + + def fit(self, data: DataArray, y=None) -> Self: + # Check if input is a DataArray + self._check_input_type(data) + + # Check if input has the correct dimensions + self._check_input_dims(data) + + self.feature_coords = data.coords[self.feature_name] + + # Identify NaN locations + self.is_valid_feature = data.notnull().all(self.sample_name).compute() + + return self + + def transform(self, data: DataArray) -> DataArray: + # Check if input is a DataArray + self._check_input_type(data) + + # Check if input has the correct dimensions + self._check_input_dims(data) + + # Check if input has the correct coordinates + self._check_input_coords(data) + + # Remove NaN entries + data = data.isel({self.feature_name: self.is_valid_feature}) + + return data + + def fit_transform(self, data: DataArray, y=None) -> DataArray: + return self.fit(data, y).transform(data) + + def inverse_transform_data(self, data: DataArray) -> DataArray: + # Reindex only if feature coordinates are different + is_same_coords = data.coords[self.feature_name].identical(self.feature_coords) + + if is_same_coords: + return data + else: + return data.reindex({self.feature_name: self.feature_coords.values}) + + def inverse_transform_components(self, data: DataArray) -> DataArray: + # Reindex only if feature coordinates are different + is_same_coords = data.coords[self.feature_name].identical(self.feature_coords) + + if is_same_coords: + return data + else: + return data.reindex({self.feature_name: self.feature_coords.values}) + + def inverse_transform_scores(self, data: DataArray) -> DataArray: + return data From a62fc5a2a617c4c5b7915c182944df6583e6b3ac Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Sun, 24 Sep 2023 15:55:36 +0200 Subject: [PATCH 13/43] refactor: streamline Scaler --- ...ray_scaler.py => test_dataarray_scaler.py} | 65 +++-- ...rray_scaler.py => test_datalist_scaler.py} | 81 ++---- ...taset_scaler.py => test_dataset_scaler.py} | 59 ++-- xeofs/preprocessing/scaler.py | 255 +++++++++--------- 4 files changed, 209 insertions(+), 251 deletions(-) rename tests/preprocessing/{test_single_dataarray_scaler.py => test_dataarray_scaler.py} (79%) rename tests/preprocessing/{test_list_dataarray_scaler.py => test_datalist_scaler.py} (77%) rename tests/preprocessing/{test_single_dataset_scaler.py => test_dataset_scaler.py} (83%) diff --git a/tests/preprocessing/test_single_dataarray_scaler.py b/tests/preprocessing/test_dataarray_scaler.py similarity index 79% rename from tests/preprocessing/test_single_dataarray_scaler.py rename to tests/preprocessing/test_dataarray_scaler.py index 042c106..f38e466 100644 --- a/tests/preprocessing/test_single_dataarray_scaler.py +++ b/tests/preprocessing/test_dataarray_scaler.py @@ -2,7 +2,7 @@ import xarray as xr import numpy as np -from xeofs.preprocessing.scaler import SingleDataArrayScaler +from xeofs.preprocessing.scaler import DataArrayScaler @pytest.mark.parametrize( @@ -27,13 +27,12 @@ ], ) def test_init_params(with_std, with_coslat, with_weights): - s = SingleDataArrayScaler( + s = DataArrayScaler( with_std=with_std, with_coslat=with_coslat, with_weights=with_weights ) - assert hasattr(s, "_params") - assert s._params["with_std"] == with_std - assert s._params["with_coslat"] == with_coslat - assert s._params["with_weights"] == with_weights + assert s.get_params()["with_std"] == with_std + assert s.get_params()["with_coslat"] == with_coslat + assert s.get_params()["with_weights"] == with_weights @pytest.mark.parametrize( @@ -58,7 +57,7 @@ def test_init_params(with_std, 
with_coslat, with_weights): ], ) def test_fit_params(with_std, with_coslat, with_weights, mock_data_array): - s = SingleDataArrayScaler( + s = DataArrayScaler( with_std=with_std, with_coslat=with_coslat, with_weights=with_weights ) sample_dims = ["time"] @@ -66,20 +65,20 @@ def test_fit_params(with_std, with_coslat, with_weights, mock_data_array): size_lats = mock_data_array.lat.size weights = xr.DataArray(np.random.rand(size_lats), dims=["lat"]) s.fit(mock_data_array, sample_dims, feature_dims, weights) - assert hasattr(s, "mean"), "Scaler has no mean attribute." + assert hasattr(s, "mean_"), "Scaler has no mean attribute." if with_std: - assert hasattr(s, "std"), "Scaler has no std attribute." + assert hasattr(s, "std_"), "Scaler has no std attribute." if with_coslat: - assert hasattr(s, "coslat_weights"), "Scaler has no coslat_weights attribute." + assert hasattr(s, "coslat_weights_"), "Scaler has no coslat_weights attribute." if with_weights: - assert hasattr(s, "weights"), "Scaler has no weights attribute." - assert s.mean is not None, "Scaler mean is None." + assert hasattr(s, "weights_"), "Scaler has no weights attribute." + assert s.mean_ is not None, "Scaler mean is None." if with_std: - assert s.std is not None, "Scaler std is None." + assert s.std_ is not None, "Scaler std is None." if with_coslat: - assert s.coslat_weights is not None, "Scaler coslat_weights is None." + assert s.coslat_weights_ is not None, "Scaler coslat_weights is None." if with_weights: - assert s.weights is not None, "Scaler weights is None." + assert s.weights_ is not None, "Scaler weights is None." @pytest.mark.parametrize( @@ -104,7 +103,7 @@ def test_fit_params(with_std, with_coslat, with_weights, mock_data_array): ], ) def test_transform_params(with_std, with_coslat, with_weights, mock_data_array): - s = SingleDataArrayScaler( + s = DataArrayScaler( with_std=with_std, with_coslat=with_coslat, with_weights=with_weights ) sample_dims = ["time"] @@ -132,13 +131,13 @@ def test_transform_params(with_std, with_coslat, with_weights, mock_data_array): ), "Standard deviation of the transformed data is not one." if with_coslat: - assert s.coslat_weights is not None, "Scaler coslat_weights is None." + assert s.coslat_weights_ is not None, "Scaler coslat_weights is None." assert not np.array_equal( transformed, mock_data_array ), "Data has not been transformed." if with_weights: - assert s.weights is not None, "Scaler weights is None." + assert s.weights_ is not None, "Scaler weights is None." assert not np.array_equal( transformed, mock_data_array ), "Data has not been transformed." 
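For orientation, the behaviour asserted above reduces to a few plain xarray
operations. A minimal, self-contained sketch (array sizes, dimension names, and
coordinates are illustrative, not taken from the test fixtures):

    import numpy as np
    import xarray as xr

    da = xr.DataArray(
        np.random.rand(5, 3, 4),
        dims=("time", "lat", "lon"),
        coords={"lat": [10.0, 20.0, 30.0]},
    )
    mean = da.mean("time")
    std = da.std("time")
    coslat = np.sqrt(np.cos(np.deg2rad(da.lat))).clip(0, 1)
    scaled = (da - mean) / std * coslat
    # Without the coslat factor, `scaled` has time-mean ~0 and time-std ~1.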
@@ -169,7 +168,7 @@ def test_transform_params(with_std, with_coslat, with_weights, mock_data_array): ], ) def test_inverse_transform_params(with_std, with_coslat, with_weights, mock_data_array): - s = SingleDataArrayScaler( + s = DataArrayScaler( with_std=with_std, with_coslat=with_coslat, with_weights=with_weights ) sample_dims = ["time"] @@ -180,7 +179,7 @@ def test_inverse_transform_params(with_std, with_coslat, with_weights, mock_data ) s.fit(mock_data_array, sample_dims, feature_dims, weights) transformed = s.transform(mock_data_array) - inverted = s.inverse_transform(transformed) + inverted = s.inverse_transform_data(transformed) xr.testing.assert_allclose(inverted, mock_data_array) @@ -194,17 +193,17 @@ def test_inverse_transform_params(with_std, with_coslat, with_weights, mock_data ], ) def test_fit_dims(dim_sample, dim_feature, mock_data_array): - s = SingleDataArrayScaler() + s = DataArrayScaler(with_std=True) s.fit(mock_data_array, dim_sample, dim_feature) - assert hasattr(s, "mean"), "Scaler has no mean attribute." - assert s.mean is not None, "Scaler mean is None." - assert hasattr(s, "std"), "Scaler has no std attribute." - assert s.std is not None, "Scaler std is None." + assert hasattr(s, "mean_"), "Scaler has no mean attribute." + assert s.mean_ is not None, "Scaler mean is None." + assert hasattr(s, "std_"), "Scaler has no std attribute." + assert s.std_ is not None, "Scaler std is None." # check that all dimensions are present except the sample dimensions - assert set(s.mean.dims) == set(mock_data_array.dims) - set( + assert set(s.mean_.dims) == set(mock_data_array.dims) - set( dim_sample ), "Mean has wrong dimensions." - assert set(s.std.dims) == set(mock_data_array.dims) - set( + assert set(s.std_.dims) == set(mock_data_array.dims) - set( dim_sample ), "Standard deviation has wrong dimensions." 
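Taken together, the fit/transform/inverse tests above amount to the following
roundtrip, sketched here with the renamed API from this patch (`da` stands for
any DataArray with a `time` dimension):

    import xarray as xr
    from xeofs.preprocessing.scaler import DataArrayScaler

    s = DataArrayScaler(with_std=True)
    s.fit(da, ["time"], ["lat", "lon"])
    recovered = s.inverse_transform_data(s.transform(da))
    xr.testing.assert_allclose(recovered, da)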
@@ -219,7 +218,7 @@ def test_fit_dims(dim_sample, dim_feature, mock_data_array): ], ) def test_fit_transform_dims(dim_sample, dim_feature, mock_data_array): - s = SingleDataArrayScaler() + s = DataArrayScaler() transformed = s.fit_transform(mock_data_array, dim_sample, dim_feature) # check that all dimensions are present assert set(transformed.dims) == set( @@ -232,14 +231,14 @@ def test_fit_transform_dims(dim_sample, dim_feature, mock_data_array): # Test input types def test_fit_input_type(mock_data_array, mock_dataset, mock_data_array_list): - s = SingleDataArrayScaler() - with pytest.raises(TypeError): + s = DataArrayScaler() + with pytest.raises(ValueError): s.fit(mock_dataset, ["time"], ["lon", "lat"]) - with pytest.raises(TypeError): + with pytest.raises(ValueError): s.fit(mock_data_array_list, ["time"], ["lon", "lat"]) s.fit(mock_data_array, ["time"], ["lon", "lat"]) - with pytest.raises(TypeError): + with pytest.raises(ValueError): s.transform(mock_dataset) - with pytest.raises(TypeError): + with pytest.raises(ValueError): s.transform(mock_data_array_list) diff --git a/tests/preprocessing/test_list_dataarray_scaler.py b/tests/preprocessing/test_datalist_scaler.py similarity index 77% rename from tests/preprocessing/test_list_dataarray_scaler.py rename to tests/preprocessing/test_datalist_scaler.py index d0a466f..883c08f 100644 --- a/tests/preprocessing/test_list_dataarray_scaler.py +++ b/tests/preprocessing/test_datalist_scaler.py @@ -2,38 +2,8 @@ import xarray as xr import numpy as np -from xeofs.preprocessing.scaler import ListDataArrayScaler - - -@pytest.mark.parametrize( - "with_std, with_coslat, with_weights", - [ - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), - ], -) -def test_init_params(with_std, with_coslat, with_weights): - s = ListDataArrayScaler( - with_std=with_std, with_coslat=with_coslat, with_weights=with_weights - ) - assert hasattr(s, "_params") - assert s._params["with_std"] == with_std - assert s._params["with_coslat"] == with_coslat - assert s._params["with_weights"] == with_weights +from xeofs.preprocessing.scaler import DataListScaler +from xeofs.utils.data_types import DimsList @pytest.mark.parametrize( @@ -58,12 +28,12 @@ def test_init_params(with_std, with_coslat, with_weights): ], ) def test_fit_params(with_std, with_coslat, with_weights, mock_data_array_list): - listscalers = ListDataArrayScaler( + listscalers = DataListScaler( with_std=with_std, with_coslat=with_coslat, with_weights=with_weights ) data = mock_data_array_list.copy() sample_dims = ["time"] - feature_dims = [["lat", "lon"]] * 3 + feature_dims: DimsList = [["lat", "lon"]] * 3 size_lats_list = [da.lat.size for da in data] weights = [ xr.DataArray(np.random.rand(size), dims=["lat"]) for size in size_lats_list @@ -71,22 +41,22 @@ def test_fit_params(with_std, with_coslat, with_weights, mock_data_array_list): listscalers.fit(mock_data_array_list, sample_dims, feature_dims, weights) for s in listscalers.scalers: - assert hasattr(s, "mean"), "Scaler has no mean attribute." + assert hasattr(s, "mean_"), "Scaler has no mean attribute." if with_std: - assert hasattr(s, "std"), "Scaler has no std attribute." + assert hasattr(s, "std_"), "Scaler has no std attribute." 
if with_coslat: assert hasattr( - s, "coslat_weights" + s, "coslat_weights_" ), "Scaler has no coslat_weights attribute." if with_weights: - assert hasattr(s, "weights"), "Scaler has no weights attribute." - assert s.mean is not None, "Scaler mean is None." + assert hasattr(s, "weights_"), "Scaler has no weights attribute." + assert s.mean_ is not None, "Scaler mean is None." if with_std: - assert s.std is not None, "Scaler std is None." + assert s.std_ is not None, "Scaler std is None." if with_coslat: - assert s.coslat_weights is not None, "Scaler coslat_weights is None." + assert s.coslat_weights_ is not None, "Scaler coslat_weights is None." if with_weights: - assert s.weights is not None, "Scaler weights is None." + assert s.weights_ is not None, "Scaler weights is None." @pytest.mark.parametrize( @@ -111,17 +81,22 @@ def test_fit_params(with_std, with_coslat, with_weights, mock_data_array_list): ], ) def test_transform_params(with_std, with_coslat, with_weights, mock_data_array_list): - listscalers = ListDataArrayScaler( + listscalers = DataListScaler( with_std=with_std, with_coslat=with_coslat, with_weights=with_weights ) data = mock_data_array_list.copy() sample_dims = ["time"] - feature_dims = [["lat", "lon"]] * 3 + feature_dims: DimsList = [("lat", "lon")] * 3 size_lats_list = [da.lat.size for da in data] weights = [ xr.DataArray(np.random.rand(size), dims=["lat"]) for size in size_lats_list ] - listscalers.fit(mock_data_array_list, sample_dims, feature_dims, weights) + listscalers.fit( + mock_data_array_list, + sample_dims, + feature_dims, + weights, + ) transformed = listscalers.transform(mock_data_array_list) transformed2 = listscalers.fit_transform( @@ -146,13 +121,13 @@ def test_transform_params(with_std, with_coslat, with_weights, mock_data_array_l ), "Standard deviation of the transformed data is not one." if with_coslat: - assert s.coslat_weights is not None, "Scaler coslat_weights is None." + assert s.coslat_weights_ is not None, "Scaler coslat_weights is None." assert not np.array_equal( t, mock_data_array_list ), "Data has not been transformed." if with_weights: - assert s.weights is not None, "Scaler weights is None." + assert s.weights_ is not None, "Scaler weights is None." assert not np.array_equal(t, ref), "Data has not been transformed." 
xr.testing.assert_allclose(t, t2) @@ -182,19 +157,19 @@ def test_transform_params(with_std, with_coslat, with_weights, mock_data_array_l def test_inverse_transform_params( with_std, with_coslat, with_weights, mock_data_array_list ): - listscalers = ListDataArrayScaler( + listscalers = DataListScaler( with_std=with_std, with_coslat=with_coslat, with_weights=with_weights ) data = mock_data_array_list.copy() sample_dims = ["time"] - feature_dims = [["lat", "lon"]] * 3 + feature_dims: DimsList = [["lat", "lon"]] * 3 size_lats_list = [da.lat.size for da in data] weights = [ xr.DataArray(np.random.rand(size), dims=["lat"]) for size in size_lats_list ] listscalers.fit(mock_data_array_list, sample_dims, feature_dims, weights) transformed = listscalers.transform(mock_data_array_list) - inverted = listscalers.inverse_transform(transformed) + inverted = listscalers.inverse_transform_data(transformed) # check that inverse transform is the same as the original data for inv, ref in zip(inverted, mock_data_array_list): @@ -211,7 +186,7 @@ def test_inverse_transform_params( ], ) def test_fit_dims(dim_sample, dim_feature, mock_data_array_list): - listscalers = ListDataArrayScaler(with_std=True) + listscalers = DataListScaler(with_std=True) data = mock_data_array_list.copy() dim_feature = [dim_feature] * 3 @@ -239,7 +214,7 @@ def test_fit_dims(dim_sample, dim_feature, mock_data_array_list): ], ) def test_fit_transform_dims(dim_sample, dim_feature, mock_data_array_list): - listscalers = ListDataArrayScaler(with_std=True) + listscalers = DataListScaler(with_std=True) data = mock_data_array_list.copy() dim_feature = [dim_feature] * 3 transformed = listscalers.fit_transform( @@ -267,7 +242,7 @@ def test_fit_transform_dims(dim_sample, dim_feature, mock_data_array_list): def test_fit_input_type( dim_sample, dim_feature, mock_data_array, mock_dataset, mock_data_array_list ): - s = ListDataArrayScaler() + s = DataListScaler() dim_feature = [dim_feature] * 3 with pytest.raises(TypeError): s.fit(mock_dataset, dim_sample, dim_feature) diff --git a/tests/preprocessing/test_single_dataset_scaler.py b/tests/preprocessing/test_dataset_scaler.py similarity index 83% rename from tests/preprocessing/test_single_dataset_scaler.py rename to tests/preprocessing/test_dataset_scaler.py index ce5ff62..90ce905 100644 --- a/tests/preprocessing/test_single_dataset_scaler.py +++ b/tests/preprocessing/test_dataset_scaler.py @@ -2,7 +2,7 @@ import xarray as xr import numpy as np -from xeofs.preprocessing.scaler import SingleDatasetScaler +from xeofs.preprocessing.scaler import DataSetScaler @pytest.mark.parametrize( @@ -27,13 +27,12 @@ ], ) def test_init_params(with_std, with_coslat, with_weights): - s = SingleDatasetScaler( + s = DataSetScaler( with_std=with_std, with_coslat=with_coslat, with_weights=with_weights ) - assert hasattr(s, "_params") - assert s._params["with_std"] == with_std - assert s._params["with_coslat"] == with_coslat - assert s._params["with_weights"] == with_weights + assert s.get_params()["with_std"] == with_std + assert s.get_params()["with_coslat"] == with_coslat + assert s.get_params()["with_weights"] == with_weights @pytest.mark.parametrize( @@ -58,7 +57,7 @@ def test_init_params(with_std, with_coslat, with_weights): ], ) def test_fit_params(with_std, with_coslat, with_weights, mock_dataset): - s = SingleDatasetScaler( + s = DataSetScaler( with_std=with_std, with_coslat=with_coslat, with_weights=with_weights ) sample_dims = ["time"] @@ -68,20 +67,20 @@ def test_fit_params(with_std, with_coslat, with_weights, 
mock_dataset): np.random.rand(size_lats), dims=["lat"], name="weights" ).to_dataset() s.fit(mock_dataset, sample_dims, feature_dims, weights) - assert hasattr(s, "mean"), "Scaler has no mean attribute." + assert hasattr(s, "mean_"), "Scaler has no mean attribute." if with_std: - assert hasattr(s, "std"), "Scaler has no std attribute." + assert hasattr(s, "std_"), "Scaler has no std attribute." if with_coslat: - assert hasattr(s, "coslat_weights"), "Scaler has no coslat_weights attribute." + assert hasattr(s, "coslat_weights_"), "Scaler has no coslat_weights attribute." if with_weights: - assert hasattr(s, "weights"), "Scaler has no weights attribute." - assert s.mean is not None, "Scaler mean is None." + assert hasattr(s, "weights_"), "Scaler has no weights attribute." + assert s.mean_ is not None, "Scaler mean is None." if with_std: - assert s.std is not None, "Scaler std is None." + assert s.std_ is not None, "Scaler std is None." if with_coslat: - assert s.coslat_weights is not None, "Scaler coslat_weights is None." + assert s.coslat_weights_ is not None, "Scaler coslat_weights is None." if with_weights: - assert s.weights is not None, "Scaler weights is None." + assert s.weights_ is not None, "Scaler weights is None." @pytest.mark.parametrize( @@ -106,7 +105,7 @@ def test_fit_params(with_std, with_coslat, with_weights, mock_dataset): ], ) def test_transform_params(with_std, with_coslat, with_weights, mock_dataset): - s = SingleDatasetScaler( + s = DataSetScaler( with_std=with_std, with_coslat=with_coslat, with_weights=with_weights ) sample_dims = ["time"] @@ -136,13 +135,13 @@ def test_transform_params(with_std, with_coslat, with_weights, mock_dataset): ), "Standard deviation of the transformed data is not one." if with_coslat: - assert s.coslat_weights is not None, "Scaler coslat_weights is None." + assert s.coslat_weights_ is not None, "Scaler coslat_weights is None." assert not np.array_equal( transformed, mock_dataset ), "Data has not been transformed." if with_weights: - assert s.weights is not None, "Scaler weights is None." + assert s.weights_ is not None, "Scaler weights is None." assert not np.array_equal( transformed, mock_dataset ), "Data has not been transformed." @@ -173,7 +172,7 @@ def test_transform_params(with_std, with_coslat, with_weights, mock_dataset): ], ) def test_inverse_transform_params(with_std, with_coslat, with_weights, mock_dataset): - s = SingleDatasetScaler( + s = DataSetScaler( with_std=with_std, with_coslat=with_coslat, with_weights=with_weights ) sample_dims = ["time"] @@ -184,7 +183,7 @@ def test_inverse_transform_params(with_std, with_coslat, with_weights, mock_data weights = xr.merge([weights1, weights2]) s.fit(mock_dataset, sample_dims, feature_dims, weights) transformed = s.transform(mock_dataset) - inverted = s.inverse_transform(transformed) + inverted = s.inverse_transform_data(transformed) xr.testing.assert_allclose(inverted, mock_dataset) @@ -198,17 +197,17 @@ def test_inverse_transform_params(with_std, with_coslat, with_weights, mock_data ], ) def test_fit_dims(dim_sample, dim_feature, mock_dataset): - s = SingleDatasetScaler() + s = DataSetScaler(with_std=True) s.fit(mock_dataset, dim_sample, dim_feature) - assert hasattr(s, "mean"), "Scaler has no mean attribute." - assert s.mean is not None, "Scaler mean is None." - assert hasattr(s, "std"), "Scaler has no std attribute." - assert s.std is not None, "Scaler std is None." + assert hasattr(s, "mean_"), "Scaler has no mean attribute." + assert s.mean_ is not None, "Scaler mean is None." 
+ assert hasattr(s, "std_"), "Scaler has no std attribute." + assert s.std_ is not None, "Scaler std is None." # check that all dimensions are present except the sample dimensions - assert set(s.mean.dims) == set(mock_dataset.dims) - set( + assert set(s.mean_.dims) == set(mock_dataset.dims) - set( dim_sample ), "Mean has wrong dimensions." - assert set(s.std.dims) == set(mock_dataset.dims) - set( + assert set(s.std_.dims) == set(mock_dataset.dims) - set( dim_sample ), "Standard deviation has wrong dimensions." @@ -223,7 +222,7 @@ def test_fit_dims(dim_sample, dim_feature, mock_dataset): ], ) def test_fit_transform_dims(dim_sample, dim_feature, mock_dataset): - s = SingleDatasetScaler() + s = DataSetScaler() transformed = s.fit_transform(mock_dataset, dim_sample, dim_feature) # check that all dimensions are present assert set(transformed.dims) == set( @@ -236,7 +235,7 @@ def test_fit_transform_dims(dim_sample, dim_feature, mock_dataset): # Test input types def test_fit_input_type(mock_dataset, mock_data_array, mock_data_array_list): - s = SingleDatasetScaler() + s = DataSetScaler() # Cannot fit DataArray with pytest.raises(TypeError): s.fit(mock_data_array, ["time"], ["lon", "lat"]) @@ -254,7 +253,7 @@ def test_fit_input_type(mock_dataset, mock_data_array, mock_data_array_list): # def test_fit_weights_input_type(mock_dataset): -# s = SingleDatasetScaler() +# s = DataSetScaler() # # Fitting with weights requires that the weights have the same variables as the dataset # # used for fitting; otherwise raise an error # size_lats = mock_dataset.lat.size diff --git a/xeofs/preprocessing/scaler.py b/xeofs/preprocessing/scaler.py index 4eec3cc..c79b90a 100644 --- a/xeofs/preprocessing/scaler.py +++ b/xeofs/preprocessing/scaler.py @@ -1,28 +1,28 @@ -from typing import List, Union, Tuple, Dict, Optional, TypeVar, Any, Sequence, Hashable +from typing import List, Optional, Sequence, Hashable import numpy as np import xarray as xr +from sklearn.base import BaseEstimator, TransformerMixin +from xeofs.utils.data_types import DataArray -from ._base_scaler import _BaseScaler from ..utils.constants import VALID_LATITUDE_NAMES from ..utils.sanity_checks import ( - assert_single_dataarray, assert_single_dataset, assert_list_dataarrays, - ensure_tuple, + convert_to_dim_type, ) from ..utils.data_types import ( + Dims, + DimsList, DataArray, - Dataset, - DataArrayList, - ModelDims, - SingleDataObject, + DataSet, + DataList, ) from ..utils.xarray_utils import compute_sqrt_cos_lat_weights -class _SingleDataScaler(_BaseScaler): +class DataArrayScaler(BaseEstimator, TransformerMixin): """Scale the data along sample dimensions. Scaling includes (i) removing the mean and, optionally, (ii) dividing by the standard deviation, @@ -40,12 +40,16 @@ class _SingleDataScaler(_BaseScaler): """ - def _verify_input(self, data: SingleDataObject, name: str): - raise NotImplementedError + def __init__(self, with_std=False, with_coslat=False, with_weights=False): + self.with_std = with_std + self.with_coslat = with_coslat + self.with_weights = with_weights - def _compute_sqrt_cos_lat_weights( - self, data: SingleDataObject, dim - ) -> SingleDataObject: + def _verify_input(self, data: DataArray, name: str): + if not isinstance(data, xr.DataArray): + raise ValueError(f"{name} must be an xarray DataArray") + + def _compute_sqrt_cos_lat_weights(self, data, dim): """Compute the square root of cosine of latitude weights. 
Parameters @@ -70,22 +74,22 @@ def _compute_sqrt_cos_lat_weights( def fit( self, - data: SingleDataObject, - sample_dims: Hashable | Sequence[Hashable], - feature_dims: Hashable | Sequence[Hashable], - weights: Optional[SingleDataObject] = None, + data: DataArray, + sample_dims: Dims, + feature_dims: Dims, + weights: Optional[DataArray] = None, ): """Fit the scaler to the data. Parameters ---------- - data : SingleDataObject + data : DataArray Data to be scaled. sample_dims : sequence of hashable Dimensions along which the data is considered to be a sample. feature_dims : sequence of hashable Dimensions along which the data is considered to be a feature. - weights : SingleDataObject, optional + weights : DataArray, optional Weights to be applied to the data. Must have the same dimensions as the data. If None, no weights are applied. @@ -95,85 +99,89 @@ def fit( if weights is not None: self._verify_input(weights, "weights") - sample_dims = ensure_tuple(sample_dims) - feature_dims = ensure_tuple(feature_dims) + sample_dims = convert_to_dim_type(sample_dims) + feature_dims = convert_to_dim_type(feature_dims) # Store sample and feature dimensions for later use - self.dims: ModelDims = {"sample": sample_dims, "feature": feature_dims} + self.dims_ = {"sample": sample_dims, "feature": feature_dims} # Scaling parameters are computed along sample dimensions - self.mean: SingleDataObject = data.mean(sample_dims).compute() + self.mean_: DataArray = data.mean(sample_dims).compute() - if self._params["with_std"]: - self.std: SingleDataObject = data.std(sample_dims).compute() + params = self.get_params() + if params["with_std"]: + self.std_: DataArray = data.std(sample_dims).compute() - if self._params["with_coslat"]: - self.coslat_weights: SingleDataObject = self._compute_sqrt_cos_lat_weights( + if params["with_coslat"]: + self.coslat_weights_: DataArray = self._compute_sqrt_cos_lat_weights( data, feature_dims ).compute() - if self._params["with_weights"]: + if params["with_weights"]: if weights is None: raise ValueError("Weights must be provided when with_weights is True") - self.weights: SingleDataObject = weights.compute() + self.weights_: DataArray = weights.compute() - def transform(self, data: SingleDataObject) -> SingleDataObject: + return self + + def transform(self, data: DataArray) -> DataArray: """Scale the data. Parameters ---------- - data : SingleDataObject + data : DataArray Data to be scaled. Returns ------- - SingleDataObject + DataArray Scaled data. """ self._verify_input(data, "data") - data = data - self.mean + data = data - self.mean_ - if self._params["with_std"]: - data = data / self.std - if self._params["with_coslat"]: - data = data * self.coslat_weights - if self._params["with_weights"]: - data = data * self.weights + params = self.get_params() + if params["with_std"]: + data = data / self.std_ + if params["with_coslat"]: + data = data * self.coslat_weights_ + if params["with_weights"]: + data = data * self.weights_ return data def fit_transform( self, - data: SingleDataObject, - sample_dims: Hashable | Sequence[Hashable], - feature_dims: Hashable | Sequence[Hashable], - weights: Optional[SingleDataObject] = None, - ) -> SingleDataObject: + data: DataArray, + sample_dims: Dims, + feature_dims: Dims, + weights: Optional[DataArray] = None, + ) -> DataArray: """Fit the scaler to the data and scale it. Parameters ---------- - data : SingleDataObject + data : DataArray Data to be scaled. 
sample_dims : sequence of hashable Dimensions along which the data is considered to be a sample. feature_dims : sequence of hashable Dimensions along which the data is considered to be a feature. - weights : SingleDataObject, optional + weights : DataArray, optional Weights to be applied to the data. Must have the same dimensions as the data. If None, no weights are applied. Returns ------- - SingleDataObject + DataArray Scaled data. """ - self.fit(data, sample_dims, feature_dims, weights) - return self.transform(data) - def inverse_transform(self, data: SingleDataObject) -> SingleDataObject: + return self.fit(data, sample_dims, feature_dims, weights).transform(data) + + def inverse_transform_data(self, data: DataArray) -> DataArray: """Unscale the data. Parameters @@ -189,60 +197,27 @@ def inverse_transform(self, data: SingleDataObject) -> SingleDataObject: """ self._verify_input(data, "data") - if self._params["with_weights"]: - data = data / self.weights - if self._params["with_coslat"]: - data = data / self.coslat_weights - if self._params["with_std"]: - data = data * self.std + params = self.get_params() + if params["with_weights"]: + data = data / self.weights_ + if params["with_coslat"]: + data = data / self.coslat_weights_ + if params["with_std"]: + data = data * self.std_ - data = data + self.mean + data = data + self.mean_ return data + def inverse_transform_components(self, data: DataArray) -> DataArray: + return data -class SingleDataArrayScaler(_SingleDataScaler): - def _verify_input(self, data: DataArray, name: str): - """Verify that the input data is a DataArray. - - Parameters - ---------- - data : xarray.Dataset - Data to be checked. - - """ - assert_single_dataarray(data, name) - - def _compute_sqrt_cos_lat_weights(self, data: DataArray, dim) -> DataArray: - return super()._compute_sqrt_cos_lat_weights(data, dim) - - def fit( - self, - data: DataArray, - sample_dims: Hashable | Sequence[Hashable], - feature_dims: Hashable | Sequence[Hashable], - weights: Optional[DataArray] = None, - ): - super().fit(data, sample_dims, feature_dims, weights) - - def transform(self, data: DataArray) -> DataArray: - return super().transform(data) - - def fit_transform( - self, - data: DataArray, - sample_dims: Hashable | Sequence[Hashable], - feature_dims: Hashable | Sequence[Hashable], - weights: Optional[DataArray] = None, - ) -> DataArray: - return super().fit_transform(data, sample_dims, feature_dims, weights) - - def inverse_transform(self, data: DataArray) -> DataArray: - return super().inverse_transform(data) + def inverse_transform_scores(self, data: DataArray) -> DataArray: + return data -class SingleDatasetScaler(_SingleDataScaler): - def _verify_input(self, data: Dataset, name: str): +class DataSetScaler(DataArrayScaler): + def _verify_input(self, data: DataSet, name: str): """Verify that the input data is a Dataset. 
Parameters @@ -253,35 +228,38 @@ def _verify_input(self, data: Dataset, name: str): """ assert_single_dataset(data, name) - def _compute_sqrt_cos_lat_weights(self, data: Dataset, dim) -> Dataset: + def _compute_sqrt_cos_lat_weights(self, data: DataSet, dim) -> DataArray: return super()._compute_sqrt_cos_lat_weights(data, dim) def fit( self, - data: Dataset, + data: DataSet, sample_dims: Hashable | Sequence[Hashable], feature_dims: Hashable | Sequence[Hashable], - weights: Optional[Dataset] = None, + weights: Optional[DataSet] = None, ): - super().fit(data, sample_dims, feature_dims, weights) + return super().fit(data, sample_dims, feature_dims, weights) # type: ignore - def transform(self, data: Dataset) -> Dataset: - return super().transform(data) + def transform(self, data: DataSet) -> DataSet: + return super().transform(data) # type: ignore def fit_transform( self, - data: Dataset, + data: DataSet, sample_dims: Hashable | Sequence[Hashable], feature_dims: Hashable | Sequence[Hashable], - weights: Optional[Dataset] = None, - ) -> Dataset: - return super().fit_transform(data, sample_dims, feature_dims, weights) + weights: Optional[DataSet] = None, + ) -> DataSet: + return super().fit_transform(data, sample_dims, feature_dims, weights) # type: ignore - def inverse_transform(self, data: Dataset) -> Dataset: - return super().inverse_transform(data) + def inverse_transform_data(self, data: DataSet) -> DataSet: + return super().inverse_transform_data(data) # type: ignore + def inverse_transform_components(self, data: DataSet) -> DataSet: + return super().inverse_transform_components(data) # type: ignore -class ListDataArrayScaler(_BaseScaler): + +class DataListScaler(DataArrayScaler): """Scale a list of xr.DataArray along sample dimensions. Scaling includes (i) removing the mean and, optionally, (ii) dividing by the standard deviation, @@ -299,11 +277,13 @@ class ListDataArrayScaler(_BaseScaler): """ - def __init__(self, **kwargs): - super().__init__(**kwargs) + def __init__(self, with_std=False, with_coslat=False, with_weights=False): + super().__init__( + with_std=with_std, with_coslat=with_coslat, with_weights=with_weights + ) self.scalers = [] - def _verify_input(self, data: DataArrayList, name: str): + def _verify_input(self, data: DataList, name: str): """Verify that the input data is a list of DataArrays. Parameters @@ -316,10 +296,10 @@ def _verify_input(self, data: DataArrayList, name: str): def fit( self, - data: DataArrayList, - sample_dims: Hashable | Sequence[Hashable], - feature_dims_list: List[Hashable | Sequence[Hashable]], - weights=None, + data: DataList, + sample_dims: Dims, + feature_dims_list: DimsList, + weights: Optional[DataList] = None, ): """Fit the scaler to the data. @@ -343,12 +323,12 @@ def fit( err_message += 'e.g. 
[("lon", "lat"), ("lon")]' raise TypeError(err_message) - sample_dims = ensure_tuple(sample_dims) - feature_dims = [ensure_tuple(fdims) for fdims in feature_dims_list] + sample_dims = convert_to_dim_type(sample_dims) + feature_dims = [convert_to_dim_type(fdims) for fdims in feature_dims_list] # Sample dimensions are the same for all data arrays # Feature dimensions may be different for each data array - self.dims: ModelDims = {"sample": sample_dims, "feature": feature_dims} + self.dims = {"sample": sample_dims, "feature": feature_dims} # However, for each DataArray a list of feature dimensions must be provided if len(data) != len(feature_dims): @@ -358,12 +338,15 @@ def fit( err_message += f"Got {len(data)} data arrays and {len(feature_dims)} feature dimensions" raise ValueError(err_message) - self.weights = weights # If no weights are provided, create a list of None - if self.weights is None: + if weights is None: self.weights = [None] * len(data) + else: + self.weights = weights + # Check that number of weights is the same as number of data arrays - if self._params["with_weights"]: + params = self.get_params() + if params["with_weights"]: if len(data) != len(self.weights): err_message = "Number of data arrays and weights must be the same. " err_message += ( @@ -372,13 +355,12 @@ def fit( raise ValueError(err_message) for da, wghts, fdims in zip(data, self.weights, feature_dims): - # Create SingleDataArrayScaler object for each data array - params = self.get_params() - scaler = SingleDataArrayScaler(**params) + # Create DataArrayScaler object for each data array + scaler = DataArrayScaler(**params) scaler.fit(da, sample_dims=sample_dims, feature_dims=fdims, weights=wghts) self.scalers.append(scaler) - def transform(self, da_list: DataArrayList) -> DataArrayList: + def transform(self, da_list: DataList) -> DataList: """Scale the data. Parameters @@ -401,11 +383,11 @@ def transform(self, da_list: DataArrayList) -> DataArrayList: def fit_transform( self, - data: DataArrayList, - sample_dims: Hashable | Sequence[Hashable], - feature_dims_list: List[Hashable | Sequence[Hashable]], - weights=None, - ) -> DataArrayList: + data: DataList, + sample_dims: Dims, + feature_dims_list: DimsList, + weights: Optional[DataList] = None, + ) -> DataList: """Fit the scaler to the data and scale it. Parameters @@ -428,7 +410,7 @@ def fit_transform( self.fit(data, sample_dims, feature_dims_list, weights) return self.transform(data) - def inverse_transform(self, da_list: DataArrayList) -> DataArrayList: + def inverse_transform_data(self, da_list: DataList) -> DataList: """Unscale the data. 
Parameters @@ -446,5 +428,8 @@ def inverse_transform(self, da_list: DataArrayList) -> DataArrayList: da_list_transformed = [] for scaler, da in zip(self.scalers, da_list): - da_list_transformed.append(scaler.inverse_transform(da)) + da_list_transformed.append(scaler.inverse_transform_data(da)) return da_list_transformed + + def inverse_transform_components(self, da_list: DataList) -> DataList: + return da_list From 4a43ce98ba37eaf2330ee1956686d3d73a8f2d1b Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Sun, 24 Sep 2023 15:57:46 +0200 Subject: [PATCH 14/43] refactor: adapt Preprocessor to refactoring --- tests/preprocessing/test_preprocessor.py | 109 ---------- .../test_preprocessor_dataarray.py | 186 +++++++++++++++++ .../test_preprocessor_datalist.py | 195 ++++++++++++++++++ .../test_preprocessor_dataset.py | 188 +++++++++++++++++ xeofs/preprocessing/preprocessor.py | 120 +++++------ 5 files changed, 625 insertions(+), 173 deletions(-) delete mode 100644 tests/preprocessing/test_preprocessor.py create mode 100644 tests/preprocessing/test_preprocessor_dataarray.py create mode 100644 tests/preprocessing/test_preprocessor_datalist.py create mode 100644 tests/preprocessing/test_preprocessor_dataset.py diff --git a/tests/preprocessing/test_preprocessor.py b/tests/preprocessing/test_preprocessor.py deleted file mode 100644 index 6a28167..0000000 --- a/tests/preprocessing/test_preprocessor.py +++ /dev/null @@ -1,109 +0,0 @@ -import pytest -import numpy as np -import xarray as xr - -from xeofs.preprocessing.preprocessor import Preprocessor - - -@pytest.mark.parametrize( - "with_std, with_coslat, with_weights", - [ - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), - ], -) -def test_init_params(with_std, with_coslat, with_weights): - prep = Preprocessor( - with_std=with_std, with_coslat=with_coslat, with_weights=with_weights - ) - - assert hasattr(prep, "_params") - assert prep._params["with_std"] == with_std - assert prep._params["with_coslat"] == with_coslat - assert prep._params["with_weights"] == with_weights - - -@pytest.mark.parametrize( - "with_std, with_coslat, with_weights", - [ - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), - ], -) -def test_fit(with_std, with_coslat, with_weights, mock_data_array): - """fit method should not be implemented.""" - prep = Preprocessor( - with_std=with_std, with_coslat=with_coslat, with_weights=with_weights - ) - - with pytest.raises(NotImplementedError): - prep.fit(mock_data_array, dim="time") - - -@pytest.mark.parametrize( - "with_std, with_coslat, with_weights", - [ - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), 
- (False, False, False),
-    ],
-)
-def test_fit_transform(with_std, with_coslat, with_weights, mock_data_array):
-    """fit method should not be implemented."""
-    prep = Preprocessor(
-        with_std=with_std, with_coslat=with_coslat, with_weights=with_weights
-    )
-
-    weights = None
-    if with_weights:
-        weights = mock_data_array.mean("time").copy()
-        weights[:] = 1.0
-
-    data_trans = prep.fit_transform(mock_data_array, weights=weights, dim="time")
-
-    assert hasattr(prep, "scaler")
-    assert hasattr(prep, "stacker")
-
-    # Transformed data is centered
-    assert np.isclose(data_trans.mean("sample"), 0).all()
diff --git a/tests/preprocessing/test_preprocessor_dataarray.py b/tests/preprocessing/test_preprocessor_dataarray.py
new file mode 100644
index 0000000..1388104
--- /dev/null
+++ b/tests/preprocessing/test_preprocessor_dataarray.py
@@ -0,0 +1,186 @@
+import pytest
+import numpy as np
+import xarray as xr
+
+from xeofs.preprocessing.preprocessor import Preprocessor
+from ..conftest import generate_synthetic_dataarray
+from ..utilities import (
+    get_dims_from_data,
+    data_is_dask,
+    data_has_multiindex,
+    assert_expected_dims,
+    assert_expected_coords,
+)
+
+# =============================================================================
+# GENERALLY VALID TEST CASES
+# =============================================================================
+N_SAMPLE_DIMS = [1, 2]
+N_FEATURE_DIMS = [1, 2]
+INDEX_POLICY = ["index", "multiindex"]
+NAN_POLICY = ["no_nan", "isolated", "fulldim"]
+DASK_POLICY = ["no_dask", "dask"]
+SEED = [0]
+
+TEST_DATA_PARAMS = [
+    (ns, nf, index, nan, dask)
+    for ns in N_SAMPLE_DIMS
+    for nf in N_FEATURE_DIMS
+    for index in INDEX_POLICY
+    for nan in NAN_POLICY
+    for dask in DASK_POLICY
+]
+
+SAMPLE_DIM_NAMES = ["sample", "sample_alternative"]
+FEATURE_DIM_NAMES = ["feature", "feature_alternative"]
+
+VALID_TEST_CASES = [
+    (sample_name, feature_name, data_params)
+    for sample_name in SAMPLE_DIM_NAMES
+    for feature_name in FEATURE_DIM_NAMES
+    for data_params in TEST_DATA_PARAMS
+]
+
+
+# TESTS
+# =============================================================================
+@pytest.mark.parametrize(
+    "with_std, with_coslat, with_weights",
+    [
+        (True, True, True),
+        (True, True, False),
+        (True, False, True),
+        (True, False, False),
+        (False, True, True),
+        (False, True, False),
+        (False, False, True),
+        (False, False, False),
+        (True, True, True),
+        (True, True, False),
+        (True, False, True),
+        (True, False, False),
+        (False, True, True),
+        (False, True, False),
+        (False, False, True),
+        (False, False, False),
+    ],
+)
+def test_fit_transform_scalings(with_std, with_coslat, with_weights, mock_data_array):
+    """fit_transform should center the data and apply the requested scalings."""
+    prep = Preprocessor(
+        with_std=with_std, with_coslat=with_coslat, with_weights=with_weights
+    )
+
+    weights = None
+    if with_weights:
+        weights = mock_data_array.mean("time").copy()
+        weights[:] = 0.5
+
+    data_trans = prep.fit_transform(mock_data_array, weights=weights, dim="time")
+
+    assert hasattr(prep, "scaler")
+    assert hasattr(prep, "preconverter")
+    assert hasattr(prep, "stacker")
+    assert hasattr(prep, "postconverter")
+    assert hasattr(prep, "sanitizer")
+
+    # Transformed data is centered
+    assert np.isclose(data_trans.mean("sample"), 0).all()
+    # Transformed data is standardized
+    if with_std and not with_coslat:
+        if with_weights:
+            assert np.isclose(data_trans.std("sample"), 0.5).all()
+        else:
+            assert np.isclose(data_trans.std("sample"), 1).all()
+
+
+@pytest.mark.parametrize(
+    "index_policy, nan_policy, 
dask_policy", + [ + ("index", "no_nan", "no_dask"), + ("multiindex", "isolated", "dask"), + ("multiindex", "fulldim", "dask"), + ], +) +def test_fit_transform_same_dim_names(index_policy, nan_policy, dask_policy): + data = generate_synthetic_dataarray(1, 1, index_policy, nan_policy, dask_policy) + + prep = Preprocessor(sample_name="sample0", feature_name="feature0") + transformed = prep.fit_transform(data, dim=("sample0",)) + reconstructed = prep.inverse_transform_data(transformed) + + data_is_dask_before = data_is_dask(data) + data_is_dask_interm = data_is_dask(transformed) + data_is_dask_after = data_is_dask(reconstructed) + + assert set(transformed.dims) == set(("sample0", "feature0")) + assert set(reconstructed.dims) == set(("sample0", "feature0")) + assert not data_has_multiindex(transformed) + assert transformed.notnull().all() + assert data_is_dask_before == data_is_dask_interm + assert data_is_dask_before == data_is_dask_after + + +@pytest.mark.parametrize( + "sample_name, feature_name, data_params", + VALID_TEST_CASES, +) +def test_fit_transform(sample_name, feature_name, data_params): + data = generate_synthetic_dataarray(*data_params) + all_dims, sample_dims, feature_dims = get_dims_from_data(data) + + prep = Preprocessor(sample_name=sample_name, feature_name=feature_name) + transformed = prep.fit_transform(data, dim=sample_dims) + + data_is_dask_before = data_is_dask(data) + data_is_dask_after = data_is_dask(transformed) + + assert transformed.dims == (sample_name, feature_name) + assert not data_has_multiindex(transformed) + assert transformed.notnull().all() + assert data_is_dask_before == data_is_dask_after + + +@pytest.mark.parametrize( + "sample_name, feature_name, data_params", + VALID_TEST_CASES, +) +def test_inverse_transform(sample_name, feature_name, data_params): + data = generate_synthetic_dataarray(*data_params) + all_dims, sample_dims, feature_dims = get_dims_from_data(data) + + prep = Preprocessor(sample_name=sample_name, feature_name=feature_name) + transformed = prep.fit_transform(data, dim=sample_dims) + components = transformed.rename({sample_name: "mode"}) + scores = transformed.rename({feature_name: "mode"}) + + reconstructed = prep.inverse_transform_data(transformed) + components = prep.inverse_transform_components(components) + scores = prep.inverse_transform_scores(scores) + + # Reconstructed data has the same dimensions as the original data + assert_expected_dims(data, reconstructed, policy="all") + assert_expected_dims(data, components, policy="feature") + assert_expected_dims(data, scores, policy="sample") + + # Reconstructed data has the same coordinates as the original data + assert_expected_coords(data, reconstructed, policy="all") + assert_expected_coords(data, components, policy="feature") + assert_expected_coords(data, scores, policy="sample") + + # Reconstructed data and original data have NaNs in the same FEATURES + # Note: NaNs in the same place is not guaranteed, since isolated NaNs will be propagated + # to all samples in the same feature + features_with_nans_before = data.isnull().any(sample_dims) + features_with_nans_after = reconstructed.isnull().any(sample_dims) + assert features_with_nans_before.equals(features_with_nans_after) + + # Reconstructed data has MultiIndex if and only if original data has MultiIndex + data_has_multiindex_before = data_has_multiindex(data) + data_has_multiindex_after = data_has_multiindex(reconstructed) + assert data_has_multiindex_before == data_has_multiindex_after + + # Reconstructed data is dask if and 
only if original data is dask
+    data_is_dask_before = data_is_dask(data)
+    data_is_dask_after = data_is_dask(reconstructed)
+    assert data_is_dask_before == data_is_dask_after
diff --git a/tests/preprocessing/test_preprocessor_datalist.py b/tests/preprocessing/test_preprocessor_datalist.py
new file mode 100644
index 0000000..0d17627
--- /dev/null
+++ b/tests/preprocessing/test_preprocessor_datalist.py
@@ -0,0 +1,195 @@
+import pytest
+import numpy as np
+import xarray as xr
+
+from xeofs.preprocessing.preprocessor import Preprocessor
+from ..conftest import generate_list_of_synthetic_dataarrays
+from ..utilities import (
+    get_dims_from_data_list,
+    data_is_dask,
+    data_has_multiindex,
+    assert_expected_dims,
+    assert_expected_coords,
+)
+
+# =============================================================================
+# GENERALLY VALID TEST CASES
+# =============================================================================
+N_ARRAYS = [1, 2]
+N_SAMPLE_DIMS = [1, 2]
+N_FEATURE_DIMS = [1, 2]
+INDEX_POLICY = ["index", "multiindex"]
+NAN_POLICY = ["no_nan", "fulldim"]
+DASK_POLICY = ["no_dask", "dask"]
+SEED = [0]
+
+TEST_DATA_PARAMS = [
+    (na, ns, nf, index, nan, dask)
+    for na in N_ARRAYS
+    for ns in N_SAMPLE_DIMS
+    for nf in N_FEATURE_DIMS
+    for index in INDEX_POLICY
+    for nan in NAN_POLICY
+    for dask in DASK_POLICY
+]
+
+SAMPLE_DIM_NAMES = ["sample"]
+FEATURE_DIM_NAMES = ["feature", "feature_alternative"]
+
+VALID_TEST_CASES = [
+    (sample_name, feature_name, data_params)
+    for sample_name in SAMPLE_DIM_NAMES
+    for feature_name in FEATURE_DIM_NAMES
+    for data_params in TEST_DATA_PARAMS
+]
+
+
+# TESTS
+# =============================================================================
+@pytest.mark.parametrize(
+    "with_std, with_coslat, with_weights",
+    [
+        (True, True, True),
+        (True, True, False),
+        (True, False, True),
+        (True, False, False),
+        (False, True, True),
+        (False, True, False),
+        (False, False, True),
+        (False, False, False),
+        (True, True, True),
+        (True, True, False),
+        (True, False, True),
+        (True, False, False),
+        (False, True, True),
+        (False, True, False),
+        (False, False, True),
+        (False, False, False),
+    ],
+)
+def test_fit_transform_scalings(
+    with_std, with_coslat, with_weights, mock_data_array_list
+):
+    """fit_transform should center the data and apply the requested scalings."""
+    prep = Preprocessor(
+        with_std=with_std, with_coslat=with_coslat, with_weights=with_weights
+    )
+
+    weights = None
+    if with_weights:
+        weights = [da.mean("time").copy() for da in mock_data_array_list]
+        weights = [xr.ones_like(w) * 0.5 for w in weights]
+
+    data_trans = prep.fit_transform(mock_data_array_list, weights=weights, dim="time")
+
+    assert hasattr(prep, "scaler")
+    assert hasattr(prep, "preconverter")
+    assert hasattr(prep, "stacker")
+    assert hasattr(prep, "postconverter")
+    assert hasattr(prep, "sanitizer")
+
+    # Transformed data is centered
+    assert np.isclose(data_trans.mean("sample"), 0).all()
+    # Transformed data is standardized
+    if with_std and not with_coslat:
+        if with_weights:
+            assert np.isclose(data_trans.std("sample"), 0.5).all()
+        else:
+            assert np.isclose(data_trans.std("sample"), 1).all()
+
+
+@pytest.mark.parametrize(
+    "index_policy, nan_policy, dask_policy",
+    [
+        ("index", "no_nan", "no_dask"),
+        ("multiindex", "isolated", "dask"),
+        ("multiindex", "fulldim", "dask"),
+    ],
+)
+def test_fit_transform_same_dim_names(index_policy, nan_policy, dask_policy):
+    data = generate_list_of_synthetic_dataarrays(
+        1, 1, 1, index_policy, nan_policy, dask_policy
+    )
+
+    prep = 
Preprocessor(sample_name="sample0", feature_name="feature") + transformed = prep.fit_transform(data, dim=("sample0",)) + reconstructed = prep.inverse_transform_data(transformed) + + data_is_dask_before = data_is_dask(data) + data_is_dask_interm = data_is_dask(transformed) + data_is_dask_after = data_is_dask(reconstructed) + + assert set(transformed.dims) == set(("sample0", "feature")) + assert all(set(rec.dims) == set(("sample0", "feature0")) for rec in reconstructed) + assert not data_has_multiindex(transformed) + assert transformed.notnull().all() + assert data_is_dask_before == data_is_dask_interm + assert data_is_dask_before == data_is_dask_after + + +@pytest.mark.parametrize( + "sample_name, feature_name, data_params", + VALID_TEST_CASES, +) +def test_fit_transform(sample_name, feature_name, data_params): + data = generate_list_of_synthetic_dataarrays(*data_params) + all_dims, sample_dims, feature_dims = get_dims_from_data_list(data) + + prep = Preprocessor(sample_name=sample_name, feature_name=feature_name) + transformed = prep.fit_transform(data, dim=sample_dims[0]) + + data_is_dask_before = data_is_dask(data) + data_is_dask_after = data_is_dask(transformed) + + assert transformed.dims == (sample_name, feature_name) + assert not data_has_multiindex(transformed) + assert transformed.notnull().all() + assert data_is_dask_before == data_is_dask_after + + +@pytest.mark.parametrize( + "sample_name, feature_name, data_params", + VALID_TEST_CASES, +) +def test_inverse_transform(sample_name, feature_name, data_params): + data = generate_list_of_synthetic_dataarrays(*data_params) + all_dims, sample_dims, feature_dims = get_dims_from_data_list(data) + + prep = Preprocessor(sample_name=sample_name, feature_name=feature_name) + transformed = prep.fit_transform(data, dim=sample_dims[0]) + components = transformed.rename({sample_name: "mode"}) + scores = transformed.rename({feature_name: "mode"}) + + reconstructed = prep.inverse_transform_data(transformed) + components = prep.inverse_transform_components(components) + scores = prep.inverse_transform_scores(scores) + + # Reconstructed data has the same dimensions as the original data + assert_expected_dims(data, reconstructed, policy="all") + assert_expected_dims(data, components, policy="feature") + assert_expected_dims(data[0], scores, policy="sample") + + # Reconstructed data has the same coordinates as the original data + assert_expected_coords(data, reconstructed, policy="all") + assert_expected_coords(data, components, policy="feature") + assert_expected_coords(data[0], scores, policy="sample") + + # Reconstructed data and original data have NaNs in the same FEATURES + # Note: NaNs in the same place is not guaranteed, since isolated NaNs will be propagated + # to all samples in the same feature + nan_features_before = [da.isnull().any(sample_dims[0]) for da in data] + nan_features_after = [rec.isnull().any(sample_dims[0]) for rec in reconstructed] + assert all( + before.equals(after) + for before, after in zip(nan_features_before, nan_features_after) + ) + + # Reconstructed data has MultiIndex if and only if original data has MultiIndex + data_has_multiindex_before = data_has_multiindex(data) + data_has_multiindex_after = data_has_multiindex(reconstructed) + assert data_has_multiindex_before == data_has_multiindex_after + + # Reconstructed data is dask if and only if original data is dask + data_is_dask_before = data_is_dask(data) + data_is_dask_after = data_is_dask(reconstructed) + assert data_is_dask_before == data_is_dask_after diff --git 
a/tests/preprocessing/test_preprocessor_dataset.py b/tests/preprocessing/test_preprocessor_dataset.py
new file mode 100644
index 0000000..6ffb859
--- /dev/null
+++ b/tests/preprocessing/test_preprocessor_dataset.py
@@ -0,0 +1,188 @@
+import pytest
+import numpy as np
+import xarray as xr
+
+from xeofs.preprocessing.preprocessor import Preprocessor
+from ..conftest import generate_synthetic_dataset
+from ..utilities import (
+    get_dims_from_data,
+    data_is_dask,
+    data_has_multiindex,
+    assert_expected_dims,
+    assert_expected_coords,
+)
+
+# =============================================================================
+# GENERALLY VALID TEST CASES
+# =============================================================================
+N_VARIABLES = [1, 2]
+N_SAMPLE_DIMS = [1, 2]
+N_FEATURE_DIMS = [1, 2]
+INDEX_POLICY = ["index", "multiindex"]
+NAN_POLICY = ["no_nan", "fulldim"]
+DASK_POLICY = ["no_dask", "dask"]
+SEED = [0]
+
+TEST_DATA_PARAMS = [
+    (nv, ns, nf, index, nan, dask)
+    for nv in N_VARIABLES
+    for ns in N_SAMPLE_DIMS
+    for nf in N_FEATURE_DIMS
+    for index in INDEX_POLICY
+    for nan in NAN_POLICY
+    for dask in DASK_POLICY
+]
+
+SAMPLE_DIM_NAMES = ["sample"]
+FEATURE_DIM_NAMES = ["feature", "feature_alternative"]
+
+VALID_TEST_CASES = [
+    (sample_name, feature_name, data_params)
+    for sample_name in SAMPLE_DIM_NAMES
+    for feature_name in FEATURE_DIM_NAMES
+    for data_params in TEST_DATA_PARAMS
+]
+
+
+# TESTS
+# =============================================================================
+@pytest.mark.parametrize(
+    "with_std, with_coslat, with_weights",
+    [
+        (True, True, True),
+        (True, True, False),
+        (True, False, True),
+        (True, False, False),
+        (False, True, True),
+        (False, True, False),
+        (False, False, True),
+        (False, False, False),
+        (True, True, True),
+        (True, True, False),
+        (True, False, True),
+        (True, False, False),
+        (False, True, True),
+        (False, True, False),
+        (False, False, True),
+        (False, False, False),
+    ],
+)
+def test_fit_transform_scalings(with_std, with_coslat, with_weights, mock_data_array):
+    """fit_transform should center the data and apply the requested scalings."""
+    prep = Preprocessor(
+        with_std=with_std, with_coslat=with_coslat, with_weights=with_weights
+    )
+
+    weights = None
+    if with_weights:
+        weights = mock_data_array.mean("time").copy()
+        weights[:] = 0.5
+
+    data_trans = prep.fit_transform(mock_data_array, weights=weights, dim="time")
+
+    assert hasattr(prep, "scaler")
+    assert hasattr(prep, "preconverter")
+    assert hasattr(prep, "stacker")
+    assert hasattr(prep, "postconverter")
+    assert hasattr(prep, "sanitizer")
+
+    # Transformed data is centered
+    assert np.isclose(data_trans.mean("sample"), 0).all()
+    # Transformed data is standardized
+    if with_std and not with_coslat:
+        if with_weights:
+            assert np.isclose(data_trans.std("sample"), 0.5).all()
+        else:
+            assert np.isclose(data_trans.std("sample"), 1).all()
+
+
+@pytest.mark.parametrize(
+    "index_policy, nan_policy, dask_policy",
+    [
+        ("index", "no_nan", "no_dask"),
+        ("multiindex", "isolated", "dask"),
+        ("multiindex", "fulldim", "dask"),
+    ],
+)
+def test_fit_transform_same_dim_names(index_policy, nan_policy, dask_policy):
+    data = generate_synthetic_dataset(1, 1, 1, index_policy, nan_policy, dask_policy)
+
+    prep = Preprocessor(sample_name="sample0", feature_name="feature")
+    transformed = prep.fit_transform(data, dim=("sample0",))
+    reconstructed = prep.inverse_transform_data(transformed)
+
+    data_is_dask_before = data_is_dask(data)
+    data_is_dask_interm = data_is_dask(transformed)
+    data_is_dask_after = 
data_is_dask(reconstructed) + + assert set(transformed.dims) == set(("sample0", "feature")) + assert set(reconstructed.dims) == set(("sample0", "feature0")) + assert not data_has_multiindex(transformed) + assert transformed.notnull().all() + assert data_is_dask_before == data_is_dask_interm + assert data_is_dask_before == data_is_dask_after + + +@pytest.mark.parametrize( + "sample_name, feature_name, data_params", + VALID_TEST_CASES, +) +def test_fit_transform(sample_name, feature_name, data_params): + data = generate_synthetic_dataset(*data_params) + all_dims, sample_dims, feature_dims = get_dims_from_data(data) + + prep = Preprocessor(sample_name=sample_name, feature_name=feature_name) + transformed = prep.fit_transform(data, dim=sample_dims) + + data_is_dask_before = data_is_dask(data) + data_is_dask_after = data_is_dask(transformed) + + assert transformed.dims == (sample_name, feature_name) + assert not data_has_multiindex(transformed) + assert transformed.notnull().all() + assert data_is_dask_before == data_is_dask_after + + +@pytest.mark.parametrize( + "sample_name, feature_name, data_params", + VALID_TEST_CASES, +) +def test_inverse_transform(sample_name, feature_name, data_params): + data = generate_synthetic_dataset(*data_params) + all_dims, sample_dims, feature_dims = get_dims_from_data(data) + + prep = Preprocessor(sample_name=sample_name, feature_name=feature_name) + transformed = prep.fit_transform(data, dim=sample_dims) + components = transformed.rename({sample_name: "mode"}) + scores = transformed.rename({feature_name: "mode"}) + + reconstructed = prep.inverse_transform_data(transformed) + components = prep.inverse_transform_components(components) + scores = prep.inverse_transform_scores(scores) + + # Reconstructed data has the same dimensions as the original data + assert_expected_dims(data, reconstructed, policy="all") + assert_expected_dims(data, components, policy="feature") + assert_expected_dims(data, scores, policy="sample") + + # Reconstructed data has the same coordinates as the original data + assert_expected_coords(data, reconstructed, policy="all") + assert_expected_coords(data, components, policy="feature") + assert_expected_coords(data, scores, policy="sample") + + # Reconstructed data and original data have NaNs in the same FEATURES + # Note: NaNs in the same place is not guaranteed, since isolated NaNs will be propagated + # to all samples in the same feature + features_with_nans_before = data.isnull().any(sample_dims) + features_with_nans_after = reconstructed.isnull().any(sample_dims) + assert features_with_nans_before.equals(features_with_nans_after) + + # Reconstructed data has MultiIndex if and only if original data has MultiIndex + data_has_multiindex_before = data_has_multiindex(data) + data_has_multiindex_after = data_has_multiindex(reconstructed) + assert data_has_multiindex_before == data_has_multiindex_after + + # Reconstructed data is dask if and only if original data is dask + data_is_dask_before = data_is_dask(data) + data_is_dask_after = data_is_dask(reconstructed) + assert data_is_dask_before == data_is_dask_after diff --git a/xeofs/preprocessing/preprocessor.py b/xeofs/preprocessing/preprocessor.py index 46209ad..63f18dd 100644 --- a/xeofs/preprocessing/preprocessor.py +++ b/xeofs/preprocessing/preprocessor.py @@ -1,8 +1,9 @@ from typing import Optional, Sequence, Hashable, List from .factory import StackerFactory, ScalerFactory, MultiIndexConverterFactory +from .sanitizer import DataArraySanitizer from ..utils.xarray_utils import get_dims 
-from ..utils.data_types import AnyDataObject, DataArray +from ..utils.data_types import DataObject, DataArray class Preprocessor: @@ -35,85 +36,68 @@ def __init__( ): self.sample_name = sample_name self.feature_name = feature_name - - # Define model parameters - self._params = { - "with_std": with_std, - "with_coslat": with_coslat, - "with_weights": with_weights, - } + self.with_std = with_std + self.with_coslat = with_coslat + self.with_weights = with_weights def fit( self, - data: AnyDataObject, + data: DataObject, dim: Hashable | Sequence[Hashable] | List[Sequence[Hashable]], - weights: Optional[AnyDataObject] = None, + weights: Optional[DataObject] = None, ): - """Just for consistency with the other classes.""" - raise NotImplementedError( - "Preprocessor does not implement fit method. Use fit_transform instead." - ) - - def fit_transform( - self, - data: AnyDataObject, - dim: Hashable | Sequence[Hashable] | List[Sequence[Hashable]], - weights: Optional[AnyDataObject] = None, - ) -> DataArray: - """Preprocess the data. - - This will scale and stack the data. - - Parameters: - ------------- - data: xr.DataArray or list of xarray.DataArray - Input data. - dim: tuple - Tuple specifying the sample dimensions. The remaining dimensions - will be treated as feature dimensions. - weights: xr.DataArray or xr.Dataset or None, default=None - If specified, the input data will be weighted by this array. - - """ # Set sample and feature dimensions sample_dims, feature_dims = get_dims(data, sample_dims=dim) self.dims = {self.sample_name: sample_dims, self.feature_name: feature_dims} - # Scale the data - self.scaler = ScalerFactory.create_scaler(data, **self._params) + # Create Scaler + scaler_params = { + "with_std": self.with_std, + "with_coslat": self.with_coslat, + "with_weights": self.with_weights, + } + self.scaler = ScalerFactory.create_scaler(data, **scaler_params) data = self.scaler.fit_transform(data, sample_dims, feature_dims, weights) - # Convert MultiIndex to single index - self.converter = MultiIndexConverterFactory.create_converter(data) - data = self.converter.fit_transform(data) # type: ignore + # Create MultiIndexConverter (Pre) + self.preconverter = MultiIndexConverterFactory.create_converter(data) + data = self.preconverter.fit_transform(data) - # Stack the data + # Create Stacker stacker_kwargs = { "sample_name": self.sample_name, "feature_name": self.feature_name, } self.stacker = StackerFactory.create_stacker(data, **stacker_kwargs) - return self.stacker.fit_transform(data, sample_dims, feature_dims) + data: DataArray = self.stacker.fit_transform(data, sample_dims, feature_dims) - def transform(self, data: AnyDataObject) -> DataArray: - """Project new unseen data onto the components (EOFs/eigenvectors). - - Parameters: - ------------- - data: xr.DataArray or list of xarray.DataArray - Input data. + # Create MultiIndexConverter (Post) + self.postconverter = MultiIndexConverterFactory.create_converter(data) + data = self.postconverter.fit_transform(data) - Returns: - ---------- - projections: DataArray | Dataset | List[DataArray] - Projections of the new data onto the components. 
+ # Create Sanitizer + self.sanitizer = DataArraySanitizer( + sample_name=self.sample_name, feature_name=self.feature_name + ) + self.sanitizer.fit(data) + return self - """ + def transform(self, data: DataObject) -> DataArray: data = self.scaler.transform(data) - data = self.converter.transform(data) # type: ignore - return self.stacker.transform(data) + data = self.preconverter.transform(data) + data = self.stacker.transform(data) + data = self.postconverter.transform(data) + return self.sanitizer.transform(data) + + def fit_transform( + self, + data: DataObject, + dim: Hashable | Sequence[Hashable] | List[Sequence[Hashable]], + weights: Optional[DataObject] = None, + ) -> DataArray: + return self.fit(data, dim, weights).transform(data) - def inverse_transform_data(self, data: DataArray) -> AnyDataObject: + def inverse_transform_data(self, data: DataArray) -> DataObject: """Inverse transform the data. Parameters: @@ -127,11 +111,13 @@ def inverse_transform_data(self, data: DataArray) -> AnyDataObject: The inverse transformed data. """ + data = self.sanitizer.inverse_transform_data(data) + data = self.postconverter.inverse_transform_data(data) data = self.stacker.inverse_transform_data(data) - data = self.converter.inverse_transform(data) # type: ignore - return self.scaler.inverse_transform(data) + data = self.preconverter.inverse_transform_data(data) + return self.scaler.inverse_transform_data(data) - def inverse_transform_components(self, data: DataArray) -> AnyDataObject: + def inverse_transform_components(self, data: DataArray) -> DataObject: """Inverse transform the components. Parameters: @@ -145,10 +131,13 @@ def inverse_transform_components(self, data: DataArray) -> AnyDataObject: The inverse transformed components. """ + data = self.sanitizer.inverse_transform_components(data) + data = self.postconverter.inverse_transform_components(data) data = self.stacker.inverse_transform_components(data) - return self.converter.inverse_transform(data) # type: ignore + data = self.preconverter.inverse_transform_components(data) + return self.scaler.inverse_transform_components(data) - def inverse_transform_scores(self, data: DataArray) -> AnyDataObject: + def inverse_transform_scores(self, data: DataArray) -> DataArray: """Inverse transform the scores. Parameters: @@ -162,5 +151,8 @@ def inverse_transform_scores(self, data: DataArray) -> AnyDataObject: The inverse transformed scores. 
""" + data = self.sanitizer.inverse_transform_scores(data) + data = self.postconverter.inverse_transform_scores(data) data = self.stacker.inverse_transform_scores(data) - return self.converter.inverse_transform(data) # type: ignore + data = self.preconverter.inverse_transform_scores(data) + return self.scaler.inverse_transform_scores(data) From 0ce02f015c0981811fa5ce1aee02ad4affb97e86 Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Sun, 24 Sep 2023 15:58:09 +0200 Subject: [PATCH 15/43] refactor: reflect refactoring in Factory --- xeofs/preprocessing/factory.py | 37 +++++++++++++++++----------------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/xeofs/preprocessing/factory.py b/xeofs/preprocessing/factory.py index 34f1f10..6231e5d 100644 --- a/xeofs/preprocessing/factory.py +++ b/xeofs/preprocessing/factory.py @@ -1,27 +1,26 @@ import xarray as xr -from ._base_scaler import _BaseScaler -from ._base_stacker import _BaseStacker -from .scaler import SingleDataArrayScaler, SingleDatasetScaler, ListDataArrayScaler -from .stacker import SingleDataArrayStacker, SingleDatasetStacker, ListDataArrayStacker +from .scaler import DataArrayScaler, DataSetScaler, DataListScaler +from .stacker import DataArrayStacker, DataSetStacker, DataListStacker from .multi_index_converter import ( - MultiIndexConverter, - ListMultiIndexConverter, + DataArrayMultiIndexConverter, + DataSetMultiIndexConverter, + DataListMultiIndexConverter, ) from ..utils.data_types import AnyDataObject class ScalerFactory: @staticmethod - def create_scaler(data: AnyDataObject, **kwargs) -> _BaseScaler: + def create_scaler(data: AnyDataObject, **kwargs): if isinstance(data, xr.DataArray): - return SingleDataArrayScaler(**kwargs) + return DataArrayScaler(**kwargs) elif isinstance(data, xr.Dataset): - return SingleDatasetScaler(**kwargs) + return DataSetScaler(**kwargs) elif isinstance(data, list) and all( isinstance(da, xr.DataArray) for da in data ): - return ListDataArrayScaler(**kwargs) + return DataListScaler(**kwargs) else: raise ValueError("Invalid data type") @@ -30,27 +29,29 @@ class MultiIndexConverterFactory: @staticmethod def create_converter( data: AnyDataObject, **kwargs - ) -> MultiIndexConverter | ListMultiIndexConverter: - if isinstance(data, (xr.DataArray, xr.Dataset)): - return MultiIndexConverter(**kwargs) + ) -> DataArrayMultiIndexConverter | DataListMultiIndexConverter: + if isinstance(data, xr.DataArray): + return DataArrayMultiIndexConverter(**kwargs) + elif isinstance(data, xr.Dataset): + return DataSetMultiIndexConverter(**kwargs) elif isinstance(data, list) and all( isinstance(da, xr.DataArray) for da in data ): - return ListMultiIndexConverter(**kwargs) + return DataListMultiIndexConverter(**kwargs) else: raise ValueError("Invalid data type") class StackerFactory: @staticmethod - def create_stacker(data: AnyDataObject, **kwargs) -> _BaseStacker: + def create_stacker(data: AnyDataObject, **kwargs): if isinstance(data, xr.DataArray): - return SingleDataArrayStacker(**kwargs) + return DataArrayStacker(**kwargs) elif isinstance(data, xr.Dataset): - return SingleDatasetStacker(**kwargs) + return DataSetStacker(**kwargs) elif isinstance(data, list) and all( isinstance(da, xr.DataArray) for da in data ): - return ListDataArrayStacker(**kwargs) + return DataListStacker(**kwargs) else: raise ValueError("Invalid data type") From fe9bd46cd0e288fbf8bd217acd675bb3d952f0cb Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Sun, 24 Sep 2023 15:59:11 +0200 Subject: [PATCH 16/43] refactor: generalize 
preprocessing in MCA --- xeofs/models/_base_cross_model.py | 117 ++++++++++++++++++++---------- 1 file changed, 78 insertions(+), 39 deletions(-) diff --git a/xeofs/models/_base_cross_model.py b/xeofs/models/_base_cross_model.py index db349ee..69b5c66 100644 --- a/xeofs/models/_base_cross_model.py +++ b/xeofs/models/_base_cross_model.py @@ -1,4 +1,4 @@ -from typing import Tuple, Hashable, Sequence, Dict, Any, Optional +from typing import Tuple, Hashable, Sequence, Dict, Self, Optional, List from abc import ABC, abstractmethod from datetime import datetime @@ -7,7 +7,7 @@ from .eof import EOF from ..preprocessing.preprocessor import Preprocessor from ..data_container import _BaseCrossModelDataContainer -from ..utils.data_types import AnyDataObject, DataArray +from ..utils.data_types import DataObject, DataArray from .._version import __version__ @@ -63,9 +63,13 @@ def __init__( "solver": solver, } self._solver_kwargs = solver_kwargs - self._preprocessor_kwargs = dict( - sample_name=sample_name, feature_name=feature_name - ) + self._preprocessor_kwargs = { + "sample_name": sample_name, + "feature_name": feature_name, + "with_std": standardize, + "with_coslat": use_coslat, + "with_weights": use_weights, + } # Define analysis-relevant meta data self.attrs = {"model": "BaseCrossModel"} @@ -79,18 +83,9 @@ def __init__( ) # Initialize preprocessors to scale and stack left (1) and right (2) data - self.preprocessor1 = Preprocessor( - with_std=standardize, - with_coslat=use_coslat, - with_weights=use_weights, - **self._preprocessor_kwargs, - ) - self.preprocessor2 = Preprocessor( - with_std=standardize, - with_coslat=use_coslat, - with_weights=use_weights, - **self._preprocessor_kwargs, - ) + self.preprocessor1 = Preprocessor(**self._preprocessor_kwargs) + self.preprocessor2 = Preprocessor(**self._preprocessor_kwargs) + # Initialize the data container only to avoid type errors # The actual data container will be initialized in respective subclasses self.data: _BaseCrossModelDataContainer = _BaseCrossModelDataContainer() @@ -99,58 +94,102 @@ def __init__( self.pca1 = EOF(n_modes=n_pca_modes) if n_pca_modes else None self.pca2 = EOF(n_modes=n_pca_modes) if n_pca_modes else None - @abstractmethod def fit( self, - data1: AnyDataObject, - data2: AnyDataObject, + data1: DataObject, + data2: DataObject, dim: Hashable | Sequence[Hashable], - weights1: Optional[AnyDataObject] = None, - weights2: Optional[AnyDataObject] = None, - ): + weights1: Optional[DataObject] = None, + weights2: Optional[DataObject] = None, + ) -> Self: """ - Abstract method to fit the model. + Fit the model to the data. Parameters ---------- - data1: DataArray | Dataset | list of DataArray + data1: DataArray | Dataset | List[DataArray] Left input data. - data2: DataArray | Dataset | list of DataArray + data2: DataArray | Dataset | List[DataArray] Right input data. dim: Hashable | Sequence[Hashable] Define the sample dimensions. The remaining dimensions will be treated as feature dimensions. - weights1: Optional[AnyDataObject] + weights1: Optional[DataObject] Weights to be applied to the left input data. - weights2: Optional[AnyDataObject]=None + weights2: Optional[DataObject] Weights to be applied to the right input data. 
""" - # Here follows the implementation to fit the model - # Typically you want to start by calling - # self.preprocessor1.fit_transform(data1, dim, weights) - # self.preprocessor2.fit_transform(data2, dim, weights) + # Preprocess data1 + data1 = self.preprocessor1.fit_transform(data1, dim, weights1) + # Preprocess data2 + data2 = self.preprocessor2.fit_transform(data2, dim, weights2) + + return self._fit_algorithm(data1, data2) + + def transform( + self, data1: Optional[DataObject] = None, data2: Optional[DataObject] = None + ) -> Sequence[DataArray]: + """ + Abstract method to transform the data. + + + """ + if data1 is None and data2 is None: + raise ValueError("Either data1 or data2 must be provided.") + + if data1 is not None: + # Preprocess data1 + data1 = self.preprocessor1.transform(data1) + if data2 is not None: + # Preprocess data2 + data2 = self.preprocessor2.transform(data2) + + return self._transform_algorithm(data1, data2) + + @abstractmethod + def _fit_algorithm(self, data1: DataArray, data2: DataArray) -> Self: + """ + Fit the model to the preprocessed data. This method needs to be implemented in the respective + subclass. + + Parameters + ---------- + data1, data2: DataArray + Preprocessed input data of two dimensions: (`sample_name`, `feature_name`) + + """ raise NotImplementedError @abstractmethod - def transform( - self, data1: Optional[AnyDataObject], data2: Optional[AnyDataObject] - ) -> Tuple[DataArray, DataArray]: + def _transform_algorithm( + self, data1: Optional[DataArray] = None, data2: Optional[DataArray] = None + ) -> Sequence[DataArray]: + """ + Transform the preprocessed data. This method needs to be implemented in the respective + subclass. + + Parameters + ---------- + data1, data2: DataArray + Preprocessed input data of two dimensions: (`sample_name`, `feature_name`) + + """ raise NotImplementedError @abstractmethod - def inverse_transform(self, mode) -> Tuple[AnyDataObject, AnyDataObject]: + def inverse_transform(self, mode) -> Tuple[DataObject, DataObject]: raise NotImplementedError - def components(self) -> Tuple[AnyDataObject, AnyDataObject]: + def components(self) -> Tuple[DataObject, DataObject]: """Get the components.""" comps1 = self.data.components1 comps2 = self.data.components2 - components1: AnyDataObject = self.preprocessor1.inverse_transform_components( + components1: DataObject = self.preprocessor1.inverse_transform_components( comps1 ) - components2: AnyDataObject = self.preprocessor2.inverse_transform_components( + components2: DataObject = self.preprocessor2.inverse_transform_components( comps2 ) return components1, components2 From 2bbcb6929ab92d051623c1b15f7ae0eb2f5eebac Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Sun, 24 Sep 2023 16:01:11 +0200 Subject: [PATCH 17/43] fix: PCA preprocessing before Hilbert transform In Complex MCA, PCA preprocessing must happen before Hilbert transform (fix #85) --- xeofs/models/mca.py | 224 +++++++++++++++++--------------------------- 1 file changed, 88 insertions(+), 136 deletions(-) diff --git a/xeofs/models/mca.py b/xeofs/models/mca.py index cf628c7..8f71e6c 100644 --- a/xeofs/models/mca.py +++ b/xeofs/models/mca.py @@ -1,4 +1,4 @@ -from typing import Tuple, Optional +from typing import Tuple, Optional, Sequence, Self import numpy as np import xarray as xr @@ -6,7 +6,7 @@ from ._base_cross_model import _BaseCrossModel from .decomposer import Decomposer -from ..utils.data_types import AnyDataObject, DataArray +from ..utils.data_types import DataObject, DataArray from 
..data_container.mca_data_container import ( MCADataContainer, ComplexMCADataContainer, @@ -83,25 +83,14 @@ def _compute_cross_covariance_matrix(self, X1, X2): return xr.dot(X1.conj(), X2, dims=sample_name) / (n_samples - 1) - def fit( + def _fit_algorithm( self, - data1: AnyDataObject, - data2: AnyDataObject, - dim, - weights1: Optional[AnyDataObject] = None, - weights2: Optional[AnyDataObject] = None, - ): + data1: DataArray, + data2: DataArray, + ) -> Self: sample_name = self.sample_name feature_name = self.feature_name - # Preprocess the data - data1_processed: DataArray = self.preprocessor1.fit_transform( - data1, dim, weights1 - ) - data2_processed: DataArray = self.preprocessor2.fit_transform( - data2, dim, weights2 - ) - # Initialize the SVD decomposer decomposer = Decomposer( n_modes=self._params["n_modes"], @@ -112,8 +101,8 @@ def fit( # Perform SVD on PCA-reduced data if (self.pca1 is not None) and (self.pca2 is not None): # Fit the PCA models - self.pca1.fit(data1_processed, dim=sample_name) - self.pca2.fit(data2_processed, dim=sample_name) + self.pca1.fit(data1, dim=sample_name) + self.pca2.fit(data2, dim=sample_name) # Get the PCA scores pca_scores1 = self.pca1.data.scores * self.pca1.data.singular_values pca_scores2 = self.pca2.data.scores * self.pca2.data.singular_values @@ -141,12 +130,10 @@ def fit( # Rename feature and associated dimensions of data objects to avoid index conflicts dim_renamer1 = DimensionRenamer(feature_name, "1") dim_renamer2 = DimensionRenamer(feature_name, "2") - data1_processed_temp = dim_renamer1.fit_transform(data1_processed) - data2_processed_temp = dim_renamer2.fit_transform(data2_processed) + data1_temp = dim_renamer1.fit_transform(data1) + data2_temp = dim_renamer2.fit_transform(data2) # Compute the cross-covariance matrix - cov_matrix = self._compute_cross_covariance_matrix( - data1_processed_temp, data2_processed_temp - ) + cov_matrix = self._compute_cross_covariance_matrix(data1_temp, data2_temp) # Perform the SVD decomposer.fit(cov_matrix, dims=("feature1", "feature2")) @@ -172,12 +159,12 @@ def fit( idx_sorted_modes.coords.update(squared_covariance.coords) # Project the data onto the singular vectors - scores1 = xr.dot(data1_processed, singular_vectors1, dims=feature_name) / norm1 - scores2 = xr.dot(data2_processed, singular_vectors2, dims=feature_name) / norm2 + scores1 = xr.dot(data1, singular_vectors1, dims=feature_name) / norm1 + scores2 = xr.dot(data2, singular_vectors2, dims=feature_name) / norm2 self.data.set_data( - input_data1=data1_processed, - input_data2=data2_processed, + input_data1=data1, + input_data2=data2, components1=singular_vectors1, components2=singular_vectors2, scores1=scores1, @@ -190,15 +177,20 @@ def fit( ) # Assign analysis-relevant meta data self.data.set_attrs(self.attrs) + return self - def transform(self, **kwargs): - """Project new unseen data onto the singular vectors. + def transform( + self, data1: Optional[DataObject] = None, data2: Optional[DataObject] = None + ) -> Sequence[DataArray]: + """Get the expansion coefficients of "unseen" data. + + The expansion coefficients are obtained by projecting data onto the singular vectors. Parameters ---------- - data1: xr.DataArray or list of xarray.DataArray + data1: DataArray | Dataset | List[DataArray] Left input data. Must be provided if `data2` is not provided. - data2: xr.DataArray or list of xarray.DataArray + data2: DataArray | Dataset | List[DataArray] Right input data. Must be provided if `data1` is not provided. 
Returns @@ -209,11 +201,13 @@ def transform(self, **kwargs): Right scores. """ + return super().transform(data1, data2) + + def _transform_algorithm( + self, data1: Optional[DataArray] = None, data2: Optional[DataArray] = None + ) -> Sequence[DataArray]: results = [] - if "data1" in kwargs.keys(): - # Preprocess input data - data1 = kwargs["data1"] - data1 = self.preprocessor1.transform(data1) + if data1 is not None: # Project data onto singular vectors comps1 = self.data.components1 norm1 = self.data.norm1 @@ -222,10 +216,7 @@ def transform(self, **kwargs): scores1 = self.preprocessor1.inverse_transform_scores(scores1) results.append(scores1) - if "data2" in kwargs.keys(): - # Preprocess input data - data2 = kwargs["data2"] - data2 = self.preprocessor2.transform(data2) + if data2 is not None: # Project data onto singular vectors comps2 = self.data.components2 norm2 = self.data.norm2 @@ -434,18 +425,18 @@ def homogeneous_patterns(self, correction=None, alpha=0.05): input_data2, scores2, correction=correction, alpha=alpha ) - hom_pat1 = self.preprocessor1.inverse_transform_components(hom_pat1) - hom_pat2 = self.preprocessor2.inverse_transform_components(hom_pat2) - - pvals1 = self.preprocessor1.inverse_transform_components(pvals1) - pvals2 = self.preprocessor2.inverse_transform_components(pvals2) - hom_pat1.name = "left_homogeneous_patterns" hom_pat2.name = "right_homogeneous_patterns" pvals1.name = "pvalues_of_left_homogeneous_patterns" pvals2.name = "pvalues_of_right_homogeneous_patterns" + hom_pat1 = self.preprocessor1.inverse_transform_components(hom_pat1) + hom_pat2 = self.preprocessor2.inverse_transform_components(hom_pat2) + + pvals1 = self.preprocessor1.inverse_transform_components(pvals1) + pvals2 = self.preprocessor2.inverse_transform_components(pvals2) + return (hom_pat1, hom_pat2), (pvals1, pvals2) def heterogeneous_patterns(self, correction=None, alpha=0.05): @@ -496,18 +487,18 @@ def heterogeneous_patterns(self, correction=None, alpha=0.05): input_data2, scores1, correction=correction, alpha=alpha ) - patterns1 = self.preprocessor1.inverse_transform_components(patterns1) - patterns2 = self.preprocessor2.inverse_transform_components(patterns2) - - pvals1 = self.preprocessor1.inverse_transform_components(pvals1) - pvals2 = self.preprocessor2.inverse_transform_components(pvals2) - patterns1.name = "left_heterogeneous_patterns" patterns2.name = "right_heterogeneous_patterns" pvals1.name = "pvalues_of_left_heterogeneous_patterns" pvals2.name = "pvalues_of_right_heterogeneous_patterns" + patterns1 = self.preprocessor1.inverse_transform_components(patterns1) + patterns2 = self.preprocessor2.inverse_transform_components(patterns2) + + pvals1 = self.preprocessor1.inverse_transform_components(pvals1) + pvals2 = self.preprocessor2.inverse_transform_components(pvals2) + return (patterns1, patterns2), (pvals1, pvals2) @@ -576,56 +567,15 @@ def __init__(self, padding="exp", decay_factor=0.2, **kwargs): # Initialize the DataContainer to store the results self.data: ComplexMCADataContainer = ComplexMCADataContainer() - def fit( - self, - data1: AnyDataObject, - data2: AnyDataObject, - dim, - weights1: Optional[AnyDataObject] = None, - weights2: Optional[AnyDataObject] = None, - ): - """Fit the model. - - Parameters - ---------- - data1: xr.DataArray or list of xarray.DataArray - Left input data. - data2: xr.DataArray or list of xarray.DataArray - Right input data. - dim: tuple - Tuple specifying the sample dimensions. The remaining dimensions - will be treated as feature dimensions. 
- weights1: xr.DataArray or xr.Dataset or None, default=None - If specified, the left input data will be weighted by this array. - weights2: xr.DataArray or xr.Dataset or None, default=None - If specified, the right input data will be weighted by this array. - - """ + def _fit_algorithm(self, data1: DataArray, data2: DataArray) -> Self: sample_name = self.sample_name feature_name = self.feature_name - data1_processed: DataArray = self.preprocessor1.fit_transform( - data1, dim, weights1 - ) - data2_processed: DataArray = self.preprocessor2.fit_transform( - data2, dim, weights2 - ) - - # Apply Hilbert transform: - padding = self._params["padding"] - decay_factor = self._params["decay_factor"] - data1_processed = hilbert_transform( - data1_processed, - dims=(sample_name, feature_name), - padding=padding, - decay_factor=decay_factor, - ) - data2_processed = hilbert_transform( - data2_processed, - dims=(sample_name, feature_name), - padding=padding, - decay_factor=decay_factor, - ) + # Settings for Hilbert transform + hilbert_kwargs = { + "padding": self._params["padding"], + "decay_factor": self._params["decay_factor"], + } # Initialize the SVD decomposer decomposer = Decomposer( @@ -637,54 +587,55 @@ def fit( # Perform SVD on PCA-reduced data if (self.pca1 is not None) and (self.pca2 is not None): # Fit the PCA models - self.pca1.fit(data1_processed, sample_name) - self.pca2.fit(data2_processed, sample_name) + self.pca1.fit(data1, sample_name) + self.pca2.fit(data2, sample_name) # Get the PCA scores pca_scores1 = self.pca1.data.scores * self.pca1.data.singular_values pca_scores2 = self.pca2.data.scores * self.pca2.data.singular_values # Apply hilbert transform pca_scores1 = hilbert_transform( - pca_scores1, - dims=(sample_name, feature_name), - padding=padding, - decay_factor=decay_factor, + pca_scores1, dims=(sample_name, "mode"), **hilbert_kwargs ) pca_scores2 = hilbert_transform( - pca_scores2, - dims=(sample_name, feature_name), - padding=padding, - decay_factor=decay_factor, + pca_scores2, dims=(sample_name, "mode"), **hilbert_kwargs ) # Compute the cross-covariance matrix of the PCA scores - pca_scores1 = pca_scores1.rename({"mode": feature_name}) - pca_scores2 = pca_scores2.rename({"mode": feature_name}) + pca_scores1 = pca_scores1.rename({"mode": "feature_temp1"}) + pca_scores2 = pca_scores2.rename({"mode": "feature_temp2"}) cov_matrix = self._compute_cross_covariance_matrix(pca_scores1, pca_scores2) # Perform the SVD - decomposer.fit(cov_matrix, dims=("feature1", "feature2")) - V1 = decomposer.U_ # left singular vectors (feature1 x mode) - V2 = decomposer.V_ # right singular vectors (feature2 x mode) + decomposer.fit(cov_matrix, dims=("feature_temp1", "feature_temp2")) + V1 = decomposer.U_ # left singular vectors (feature_temp1 x mode) + V2 = decomposer.V_ # right singular vectors (feature_temp2 x mode) - V1pre = self.pca1.data.components # left PCA eigenvectors (feature x mode) - V2pre = self.pca2.data.components # right PCA eigenvectors (feature x mode) + # left PCA eigenvectors (feature_name x mode) + V1pre = self.pca1.data.components + # right PCA eigenvectors (feature_name x mode) + V2pre = self.pca2.data.components # Compute the singular vectors - V1pre = V1pre.rename({"mode": "feature1"}) - V2pre = V2pre.rename({"mode": "feature2"}) - singular_vectors1 = xr.dot(V1pre, V1, dims="feature1") - singular_vectors2 = xr.dot(V2pre, V2, dims="feature2") + V1pre = V1pre.rename({"mode": "feature_temp1"}) + V2pre = V2pre.rename({"mode": "feature_temp2"}) + singular_vectors1 = 
xr.dot(V1pre, V1, dims="feature_temp1") + singular_vectors2 = xr.dot(V2pre, V2, dims="feature_temp2") # Perform SVD directly on data else: + # Perform Hilbert transform + data1 = hilbert_transform( + data1, dims=(sample_name, feature_name), **hilbert_kwargs + ) + data2 = hilbert_transform( + data2, dims=(sample_name, feature_name), **hilbert_kwargs + ) # Rename feature and associated dimensions of data objects to avoid index conflicts dim_renamer1 = DimensionRenamer(feature_name, "1") dim_renamer2 = DimensionRenamer(feature_name, "2") - data1_processed_temp = dim_renamer1.fit_transform(data1_processed) - data2_processed_temp = dim_renamer2.fit_transform(data2_processed) + data1_temp = dim_renamer1.fit_transform(data1) + data2_temp = dim_renamer2.fit_transform(data2) # Compute the cross-covariance matrix - cov_matrix = self._compute_cross_covariance_matrix( - data1_processed_temp, data2_processed_temp - ) + cov_matrix = self._compute_cross_covariance_matrix(data1_temp, data2_temp) # Perform the SVD decomposer.fit(cov_matrix, dims=("feature1", "feature2")) @@ -710,12 +661,12 @@ def fit( idx_sorted_modes.coords.update(squared_covariance.coords) # Project the data onto the singular vectors - scores1 = xr.dot(data1_processed, singular_vectors1) / norm1 - scores2 = xr.dot(data2_processed, singular_vectors2) / norm2 + scores1 = xr.dot(data1, singular_vectors1) / norm1 + scores2 = xr.dot(data2, singular_vectors2) / norm2 self.data.set_data( - input_data1=data1_processed, - input_data2=data2_processed, + input_data1=data1, + input_data2=data2, components1=singular_vectors1, components2=singular_vectors2, scores1=scores1, @@ -728,8 +679,9 @@ def fit( ) # Assign analysis relevant meta data self.data.set_attrs(self.attrs) + return self - def components_amplitude(self) -> Tuple[AnyDataObject, AnyDataObject]: + def components_amplitude(self) -> Tuple[DataObject, DataObject]: """Compute the amplitude of the components. The amplitude of the components are defined as @@ -742,9 +694,9 @@ def components_amplitude(self) -> Tuple[AnyDataObject, AnyDataObject]: Returns ------- - AnyDataObject + DataObject Amplitude of the left components. - AnyDataObject + DataObject Amplitude of the left components. """ @@ -756,7 +708,7 @@ def components_amplitude(self) -> Tuple[AnyDataObject, AnyDataObject]: return (comps1, comps2) - def components_phase(self) -> Tuple[AnyDataObject, AnyDataObject]: + def components_phase(self) -> Tuple[DataObject, DataObject]: """Compute the phase of the components. The phase of the components are defined as @@ -769,9 +721,9 @@ def components_phase(self) -> Tuple[AnyDataObject, AnyDataObject]: Returns ------- - AnyDataObject + DataObject Phase of the left components. - AnyDataObject + DataObject Phase of the right components. 
""" @@ -836,7 +788,7 @@ def scores_phase(self) -> Tuple[DataArray, DataArray]: return (scores1, scores2) - def transform(self, data1: AnyDataObject, data2: AnyDataObject): + def transform(self, data1: DataObject, data2: DataObject): raise NotImplementedError("Complex MCA does not support transform method.") def homogeneous_patterns(self, correction=None, alpha=0.05): From 79e82ccb0802079123d3254c0ef9a617a962fb90 Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Tue, 26 Sep 2023 03:32:13 +0200 Subject: [PATCH 18/43] feat: add CCA support --- xeofs/models/__init__.py | 16 + xeofs/models/cca.py | 1420 ++++++++++++++++++-------------------- 2 files changed, 693 insertions(+), 743 deletions(-) diff --git a/xeofs/models/__init__.py b/xeofs/models/__init__.py index d1d954e..c1a39ab 100644 --- a/xeofs/models/__init__.py +++ b/xeofs/models/__init__.py @@ -4,3 +4,19 @@ from .rotator_factory import RotatorFactory from .eof_rotator import EOFRotator, ComplexEOFRotator from .mca_rotator import MCARotator, ComplexMCARotator +from .cca import CCA + + +__all__ = [ + "EOF", + "ComplexEOF", + "MCA", + "ComplexMCA", + "OPA", + "RotatorFactory", + "EOFRotator", + "ComplexEOFRotator", + "MCARotator", + "ComplexMCARotator", + "CCA", +] diff --git a/xeofs/models/cca.py b/xeofs/models/cca.py index 1955887..bf32e5c 100644 --- a/xeofs/models/cca.py +++ b/xeofs/models/cca.py @@ -1,743 +1,677 @@ -# from typing import Tuple - -# import numpy as np -# import xarray as xr - -# from ._base_cross_model import _BaseCrossModel -# from .decomposer import CrossDecomposer -# from ..utils.data_types import AnyDataObject, DataArray -# from ..data_container.mca_data_container import ( -# MCADataContainer, -# ComplexMCADataContainer, -# ) -# from ..utils.statistics import pearson_correlation -# from ..utils.xarray_utils import hilbert_transform - - -# class MCA(_BaseCrossModel): -# """Maximum Covariance Analyis (MCA). - -# MCA is a statistical method that finds patterns of maximum covariance between two datasets. - -# Parameters -# ---------- -# n_modes: int, default=10 -# Number of modes to calculate. -# standardize: bool, default=False -# Whether to standardize the input data. -# use_coslat: bool, default=False -# Whether to use cosine of latitude for scaling. -# use_weights: bool, default=False -# Whether to use additional weights. -# solver_kwargs: dict, default={} -# Additional keyword arguments passed to the SVD solver. -# n_pca_modes: int, default=None -# The number of principal components to retain during the PCA preprocessing -# step applied to both data sets prior to executing MCA. -# If set to None, PCA preprocessing will be bypassed, and the MCA will be performed on the original datasets. -# Specifying an integer value greater than 0 for `n_pca_modes` will trigger the PCA preprocessing, retaining -# only the specified number of principal components. This reduction in dimensionality can be especially beneficial -# when dealing with high-dimensional data, where computing the cross-covariance matrix can become computationally -# intensive or in scenarios where multicollinearity is a concern. - -# Notes -# ----- -# MCA is similar to Principal Component Analysis (PCA) and Canonical Correlation Analysis (CCA), -# but while PCA finds modes of maximum variance and CCA finds modes of maximum correlation, -# MCA finds modes of maximum covariance. See [1]_ [2]_ for more details. - -# References -# ---------- -# .. [1] Bretherton, C., Smith, C., Wallace, J., 1992. 
An intercomparison of methods for finding coupled patterns in climate data. Journal of climate 5, 541–560. -# .. [2] Cherry, S., 1996. Singular value decomposition analysis and canonical correlation analysis. Journal of Climate 9, 2003–2009. - -# Examples -# -------- -# >>> model = MCA(n_modes=5, standardize=True) -# >>> model.fit(data1, data2) - -# """ - -# def __init__(self, solver_kwargs={}, **kwargs): -# super().__init__(solver_kwargs=solver_kwargs, **kwargs) -# self.attrs.update({"model": "MCA"}) - -# # Initialize the DataContainer to store the results -# self.data: MCADataContainer = MCADataContainer() - -# def fit( -# self, -# data1: AnyDataObject, -# data2: AnyDataObject, -# dim, -# weights1=None, -# weights2=None, -# ): -# # Preprocess the data -# data1_processed: DataArray = self.preprocessor1.fit_transform( -# data1, dim, weights1 -# ) -# data2_processed: DataArray = self.preprocessor2.fit_transform( -# data2, dim, weights2 -# ) - -# # Perform the decomposition of the cross-covariance matrix -# decomposer = CrossDecomposer( -# n_modes=self._params["n_modes"], **self._solver_kwargs -# ) - -# # Perform SVD on PCA-reduced data -# if (self.pca1 is not None) and (self.pca2 is not None): -# # Fit the PCA models -# self.pca1.fit(data1_processed, "sample") -# self.pca2.fit(data2_processed, "sample") -# # Get the PCA scores -# pca_scores1 = self.pca1.data.scores * self.pca1.data.singular_values -# pca_scores2 = self.pca2.data.scores * self.pca2.data.singular_values -# # Rename the dimensions to adhere to the CrossDecomposer API -# pca_scores1 = pca_scores1.rename({"mode": "feature"}) -# pca_scores2 = pca_scores2.rename({"mode": "feature"}) - -# # Perform the SVD -# decomposer.fit(pca_scores1, pca_scores2) -# V1 = decomposer.singular_vectors1_.rename({"feature": "core"}) -# V2 = decomposer.singular_vectors2_.rename({"feature": "core"}) - -# V1pre = self.pca1.data.components.rename({"mode": "core"}) -# V2pre = self.pca2.data.components.rename({"mode": "core"}) - -# # Compute the singular vectors based on the PCA eigenvectors -# singular_vectors1 = xr.dot(V1pre, V1, dims="core") -# singular_vectors2 = xr.dot(V2pre, V2, dims="core") - -# # Perform SVD directly on data -# else: -# decomposer.fit(data1_processed, data2_processed) -# singular_vectors1 = decomposer.singular_vectors1_ -# singular_vectors2 = decomposer.singular_vectors2_ - -# # Store the results -# singular_values = decomposer.singular_values_ - -# squared_covariance = singular_values**2 -# total_squared_covariance = decomposer.total_squared_covariance_ - -# norm1 = np.sqrt(singular_values) -# norm2 = np.sqrt(singular_values) - -# # Index of the sorted squared covariance -# idx_sorted_modes = squared_covariance.compute().argsort()[::-1] -# idx_sorted_modes.coords.update(squared_covariance.coords) - -# # Project the data onto the singular vectors -# scores1 = xr.dot(data1_processed, singular_vectors1, dims="feature") / norm1 -# scores2 = xr.dot(data2_processed, singular_vectors2, dims="feature") / norm2 - -# self.data.set_data( -# input_data1=data1_processed, -# input_data2=data2_processed, -# components1=singular_vectors1, -# components2=singular_vectors2, -# scores1=scores1, -# scores2=scores2, -# squared_covariance=squared_covariance, -# total_squared_covariance=total_squared_covariance, -# idx_modes_sorted=idx_sorted_modes, -# norm1=norm1, -# norm2=norm2, -# ) -# # Assign analysis-relevant meta data -# self.data.set_attrs(self.attrs) - -# def transform(self, **kwargs): -# """Project new unseen data onto the singular vectors. 
- -# Parameters -# ---------- -# data1: xr.DataArray or list of xarray.DataArray -# Left input data. Must be provided if `data2` is not provided. -# data2: xr.DataArray or list of xarray.DataArray -# Right input data. Must be provided if `data1` is not provided. - -# Returns -# ------- -# scores1: DataArray | Dataset | List[DataArray] -# Left scores. -# scores2: DataArray | Dataset | List[DataArray] -# Right scores. - -# """ -# results = [] -# if "data1" in kwargs.keys(): -# # Preprocess input data -# data1 = kwargs["data1"] -# data1 = self.preprocessor1.transform(data1) -# # Project data onto singular vectors -# comps1 = self.data.components1 -# norm1 = self.data.norm1 -# scores1 = xr.dot(data1, comps1) / norm1 -# # Inverse transform scores -# scores1 = self.preprocessor1.inverse_transform_scores(scores1) -# results.append(scores1) - -# if "data2" in kwargs.keys(): -# # Preprocess input data -# data2 = kwargs["data2"] -# data2 = self.preprocessor2.transform(data2) -# # Project data onto singular vectors -# comps2 = self.data.components2 -# norm2 = self.data.norm2 -# scores2 = xr.dot(data2, comps2) / norm2 -# # Inverse transform scores -# scores2 = self.preprocessor2.inverse_transform_scores(scores2) -# results.append(scores2) - -# return results - -# def inverse_transform(self, mode): -# """Reconstruct the original data from transformed data. - -# Parameters -# ---------- -# mode: scalars, slices or array of tick labels. -# The mode(s) used to reconstruct the data. If a scalar is given, -# the data will be reconstructed using the given mode. If a slice -# is given, the data will be reconstructed using the modes in the -# given slice. If a array is given, the data will be reconstructed -# using the modes in the given array. - -# Returns -# ------- -# Xrec1: DataArray | Dataset | List[DataArray] -# Reconstructed data of left field. -# Xrec2: DataArray | Dataset | List[DataArray] -# Reconstructed data of right field. - -# """ -# # Singular vectors -# comps1 = self.data.components1.sel(mode=mode) -# comps2 = self.data.components2.sel(mode=mode) - -# # Scores = projections -# scores1 = self.data.scores1.sel(mode=mode) -# scores2 = self.data.scores2.sel(mode=mode) - -# # Norms -# norm1 = self.data.norm1.sel(mode=mode) -# norm2 = self.data.norm2.sel(mode=mode) - -# # Reconstruct the data -# data1 = xr.dot(scores1, comps1.conj() * norm1, dims="mode") -# data2 = xr.dot(scores2, comps2.conj() * norm2, dims="mode") - -# # Enforce real output -# data1 = data1.real -# data2 = data2.real - -# # Unstack and rescale the data -# data1 = self.preprocessor1.inverse_transform_data(data1) -# data2 = self.preprocessor2.inverse_transform_data(data2) - -# return data1, data2 - -# def squared_covariance(self): -# """Get the squared covariance. - -# The squared covariance corresponds to the explained variance in PCA and is given by the -# squared singular values of the covariance matrix. - -# """ -# return self.data.squared_covariance - -# def squared_covariance_fraction(self): -# """Calculate the squared covariance fraction (SCF). - -# The SCF is a measure of the proportion of the total squared covariance that is explained by each mode `i`. It is computed -# as follows: - -# .. math:: -# SCF_i = \\frac{\\sigma_i^2}{\\sum_{i=1}^{m} \\sigma_i^2} - -# where `m` is the total number of modes and :math:`\\sigma_i` is the `ith` singular value of the covariance matrix. 
- -# """ -# return self.data.squared_covariance_fraction - -# def singular_values(self): -# """Get the singular values of the cross-covariance matrix.""" -# return self.data.singular_values - -# def covariance_fraction(self): -# """Get the covariance fraction (CF). - -# Cheng and Dunkerton (1995) define the CF as follows: - -# .. math:: -# CF_i = \\frac{\\sigma_i}{\\sum_{i=1}^{m} \\sigma_i} - -# where `m` is the total number of modes and :math:`\\sigma_i` is the `ith` singular value of the covariance matrix. - -# In this implementation the sum of singular values is estimated from the first `n` modes, therefore one should aim to -# retain as many modes as possible to get a good estimate of the covariance fraction. - -# Note -# ---- -# It is important to differentiate the CF from the squared covariance fraction (SCF). While the SCF is an invariant quantity in MCA, the CF is not. -# Therefore, the SCF is used to assess the relative importance of each mode. Cheng and Dunkerton (1995) [1]_ introduced the CF in the context of -# Varimax-rotated MCA to compare the relative importance of each mode before and after rotation. In the special case of both data fields in MCA being identical, -# the CF is equivalent to the explained variance ratio in EOF analysis. - -# References -# ---------- -# .. [1] Cheng, X., Dunkerton, T.J., 1995. Orthogonal Rotation of Spatial Patterns Derived from Singular Value Decomposition Analysis. J. Climate 8, 2631–2643. https://doi.org/10.1175/1520-0442(1995)008<2631:OROSPD>2.0.CO;2 - - -# """ -# # Check how sensitive the CF is to the number of modes -# svals = self.data.singular_values -# cf = svals[0] / svals.cumsum() -# change_per_mode = cf.shift({"mode": 1}) - cf -# change_in_cf_in_last_mode = change_per_mode.isel(mode=-1) -# if change_in_cf_in_last_mode > 0.001: -# print( -# f"Warning: CF is sensitive to the number of modes retained. Please increase `n_modes` for a better estimate." -# ) -# return self.data.covariance_fraction - -# def components(self): -# """Return the singular vectors of the left and right field. - -# Returns -# ------- -# components1: DataArray | Dataset | List[DataArray] -# Left components of the fitted model. -# components2: DataArray | Dataset | List[DataArray] -# Right components of the fitted model. - -# """ -# return super().components() - -# def scores(self): -# """Return the scores of the left and right field. - -# The scores in MCA are the projection of the left and right field onto the -# left and right singular vector of the cross-covariance matrix. - -# Returns -# ------- -# scores1: DataArray -# Left scores. -# scores2: DataArray -# Right scores. - -# """ -# return super().scores() - -# def homogeneous_patterns(self, correction=None, alpha=0.05): -# """Return the homogeneous patterns of the left and right field. - -# The homogeneous patterns are the correlation coefficients between the -# input data and the scores. - -# More precisely, the homogeneous patterns `r_{hom}` are defined as - -# .. math:: -# r_{hom, x} = corr \\left(X, A_x \\right) -# .. math:: -# r_{hom, y} = corr \\left(Y, A_y \\right) - -# where :math:`X` and :math:`Y` are the input data, :math:`A_x` and :math:`A_y` -# are the scores of the left and right field, respectively. - -# Parameters -# ---------- -# correction: str, default=None -# Method to apply a multiple testing correction. If None, no correction -# is applied. 
Available methods are: -# - bonferroni : one-step correction -# - sidak : one-step correction -# - holm-sidak : step down method using Sidak adjustments -# - holm : step-down method using Bonferroni adjustments -# - simes-hochberg : step-up method (independent) -# - hommel : closed method based on Simes tests (non-negative) -# - fdr_bh : Benjamini/Hochberg (non-negative) (default) -# - fdr_by : Benjamini/Yekutieli (negative) -# - fdr_tsbh : two stage fdr correction (non-negative) -# - fdr_tsbky : two stage fdr correction (non-negative) -# alpha: float, default=0.05 -# The desired family-wise error rate. Not used if `correction` is None. - -# Returns -# ------- -# patterns1: DataArray | Dataset | List[DataArray] -# Left homogenous patterns. -# patterns2: DataArray | Dataset | List[DataArray] -# Right homogenous patterns. -# pvals1: DataArray | Dataset | List[DataArray] -# Left p-values. -# pvals2: DataArray | Dataset | List[DataArray] -# Right p-values. - -# """ -# input_data1 = self.data.input_data1 -# input_data2 = self.data.input_data2 - -# scores1 = self.data.scores1 -# scores2 = self.data.scores2 - -# hom_pat1, pvals1 = pearson_correlation( -# input_data1, scores1, correction=correction, alpha=alpha -# ) -# hom_pat2, pvals2 = pearson_correlation( -# input_data2, scores2, correction=correction, alpha=alpha -# ) - -# hom_pat1 = self.preprocessor1.inverse_transform_components(hom_pat1) -# hom_pat2 = self.preprocessor2.inverse_transform_components(hom_pat2) - -# pvals1 = self.preprocessor1.inverse_transform_components(pvals1) -# pvals2 = self.preprocessor2.inverse_transform_components(pvals2) - -# hom_pat1.name = "left_homogeneous_patterns" -# hom_pat2.name = "right_homogeneous_patterns" - -# pvals1.name = "pvalues_of_left_homogeneous_patterns" -# pvals2.name = "pvalues_of_right_homogeneous_patterns" - -# return (hom_pat1, hom_pat2), (pvals1, pvals2) - -# def heterogeneous_patterns(self, correction=None, alpha=0.05): -# """Return the heterogeneous patterns of the left and right field. - -# The heterogeneous patterns are the correlation coefficients between the -# input data and the scores of the other field. - -# More precisely, the heterogeneous patterns `r_{het}` are defined as - -# .. math:: -# r_{het, x} = corr \\left(X, A_y \\right) -# .. math:: -# r_{het, y} = corr \\left(Y, A_x \\right) - -# where :math:`X` and :math:`Y` are the input data, :math:`A_x` and :math:`A_y` -# are the scores of the left and right field, respectively. - -# Parameters -# ---------- -# correction: str, default=None -# Method to apply a multiple testing correction. If None, no correction -# is applied. Available methods are: -# - bonferroni : one-step correction -# - sidak : one-step correction -# - holm-sidak : step down method using Sidak adjustments -# - holm : step-down method using Bonferroni adjustments -# - simes-hochberg : step-up method (independent) -# - hommel : closed method based on Simes tests (non-negative) -# - fdr_bh : Benjamini/Hochberg (non-negative) (default) -# - fdr_by : Benjamini/Yekutieli (negative) -# - fdr_tsbh : two stage fdr correction (non-negative) -# - fdr_tsbky : two stage fdr correction (non-negative) -# alpha: float, default=0.05 -# The desired family-wise error rate. Not used if `correction` is None. 
- -# """ -# input_data1 = self.data.input_data1 -# input_data2 = self.data.input_data2 - -# scores1 = self.data.scores1 -# scores2 = self.data.scores2 - -# patterns1, pvals1 = pearson_correlation( -# input_data1, scores2, correction=correction, alpha=alpha -# ) -# patterns2, pvals2 = pearson_correlation( -# input_data2, scores1, correction=correction, alpha=alpha -# ) - -# patterns1 = self.preprocessor1.inverse_transform_components(patterns1) -# patterns2 = self.preprocessor2.inverse_transform_components(patterns2) - -# pvals1 = self.preprocessor1.inverse_transform_components(pvals1) -# pvals2 = self.preprocessor2.inverse_transform_components(pvals2) - -# patterns1.name = "left_heterogeneous_patterns" -# patterns2.name = "right_heterogeneous_patterns" - -# pvals1.name = "pvalues_of_left_heterogeneous_patterns" -# pvals2.name = "pvalues_of_right_heterogeneous_patterns" - -# return (patterns1, patterns2), (pvals1, pvals2) - - -# class ComplexMCA(MCA): -# """Complex Maximum Covariance Analysis (MCA). - -# Complex MCA, also referred to as Analytical SVD (ASVD) by Shane et al. (2017)[1]_, -# enhances traditional MCA by accommodating both amplitude and phase information. -# It achieves this by utilizing the Hilbert transform to preprocess the data, -# thus allowing for a more comprehensive analysis in the subsequent MCA computation. - -# An optional padding with exponentially decaying values can be applied prior to -# the Hilbert transform in order to mitigate the impact of spectral leakage. - -# Parameters -# ---------- -# n_modes: int, default=10 -# Number of modes to calculate. -# standardize: bool, default=False -# Whether to standardize the input data. -# use_coslat: bool, default=False -# Whether to use cosine of latitude for scaling. -# use_weights: bool, default=False -# Whether to use additional weights. -# padding : str, optional -# Specifies the method used for padding the data prior to applying the Hilbert -# transform. This can help to mitigate the effect of spectral leakage. -# Currently, only 'exp' for exponential padding is supported. Default is 'exp'. -# decay_factor : float, optional -# Specifies the decay factor used in the exponential padding. This parameter -# is only used if padding='exp'. The recommended value typically ranges between 0.05 to 0.2 -# but ultimately depends on the variability in the data. -# A smaller value (e.g. 0.05) is recommended for -# data with high variability, while a larger value (e.g. 0.2) is recommended -# for data with low variability. Default is 0.2. -# solver_kwargs: dict, default={} -# Additional keyword arguments passed to the SVD solver. - -# Notes -# ----- -# Complex MCA extends MCA to complex-valued data that contain both magnitude and phase information. -# The Hilbert transform is used to transform real-valued data to complex-valued data, from which both -# amplitude and phase can be extracted. - -# Similar to MCA, Complex MCA is used in climate science to identify coupled patterns of variability -# between two different climate variables. But unlike MCA, Complex MCA can identify coupled patterns -# that involve phase shifts. - -# References -# ---------- -# [1]_: Elipot, S., Frajka-Williams, E., Hughes, C.W., Olhede, S., Lankhorst, M., 2017. Observed Basin-Scale Response of the North Atlantic Meridional Overturning Circulation to Wind Stress Forcing. Journal of Climate 30, 2029–2054. 
https://doi.org/10.1175/JCLI-D-16-0664.1 - - -# Examples -# -------- -# >>> model = ComplexMCA(n_modes=5, standardize=True) -# >>> model.fit(data1, data2) - -# """ - -# def __init__(self, padding="exp", decay_factor=0.2, **kwargs): -# super().__init__(**kwargs) -# self.attrs.update({"model": "Complex MCA"}) -# self._params.update({"padding": padding, "decay_factor": decay_factor}) - -# # Initialize the DataContainer to store the results -# self.data: ComplexMCADataContainer = ComplexMCADataContainer() - -# def fit( -# self, -# data1: AnyDataObject, -# data2: AnyDataObject, -# dim, -# weights1=None, -# weights2=None, -# ): -# """Fit the model. - -# Parameters -# ---------- -# data1: xr.DataArray or list of xarray.DataArray -# Left input data. -# data2: xr.DataArray or list of xarray.DataArray -# Right input data. -# dim: tuple -# Tuple specifying the sample dimensions. The remaining dimensions -# will be treated as feature dimensions. -# weights1: xr.DataArray or xr.Dataset or None, default=None -# If specified, the left input data will be weighted by this array. -# weights2: xr.DataArray or xr.Dataset or None, default=None -# If specified, the right input data will be weighted by this array. - -# """ - -# data1_processed: DataArray = self.preprocessor1.fit_transform( -# data1, dim, weights2 -# ) -# data2_processed: DataArray = self.preprocessor2.fit_transform( -# data2, dim, weights2 -# ) - -# # apply hilbert transform: -# padding = self._params["padding"] -# decay_factor = self._params["decay_factor"] -# data1_processed = hilbert_transform( -# data1_processed, dim="sample", padding=padding, decay_factor=decay_factor -# ) -# data2_processed = hilbert_transform( -# data2_processed, dim="sample", padding=padding, decay_factor=decay_factor -# ) - -# decomposer = CrossDecomposer( -# n_modes=self._params["n_modes"], **self._solver_kwargs -# ) -# decomposer.fit(data1_processed, data2_processed) - -# # Note: -# # - explained variance is given by the singular values of the SVD; -# # - We use the term singular_values_pca as used in the context of PCA: -# # Considering data X1 = X2, MCA is the same as PCA. In this case, -# # singular_values_pca is equivalent to the singular values obtained -# # when performing PCA of X1 or X2. -# singular_values = decomposer.singular_values_ -# singular_vectors1 = decomposer.singular_vectors1_ -# singular_vectors2 = decomposer.singular_vectors2_ - -# squared_covariance = singular_values**2 -# total_squared_covariance = decomposer.total_squared_covariance_ - -# norm1 = np.sqrt(singular_values) -# norm2 = np.sqrt(singular_values) - -# # Index of the sorted squared covariance -# idx_sorted_modes = squared_covariance.compute().argsort()[::-1] -# idx_sorted_modes.coords.update(squared_covariance.coords) - -# # Project the data onto the singular vectors -# scores1 = xr.dot(data1_processed, singular_vectors1) / norm1 -# scores2 = xr.dot(data2_processed, singular_vectors2) / norm2 - -# self.data.set_data( -# input_data1=data1_processed, -# input_data2=data2_processed, -# components1=singular_vectors1, -# components2=singular_vectors2, -# scores1=scores1, -# scores2=scores2, -# squared_covariance=squared_covariance, -# total_squared_covariance=total_squared_covariance, -# idx_modes_sorted=idx_sorted_modes, -# norm1=norm1, -# norm2=norm2, -# ) -# # Assign analysis relevant meta data -# self.data.set_attrs(self.attrs) - -# def components_amplitude(self) -> Tuple[AnyDataObject, AnyDataObject]: -# """Compute the amplitude of the components. 
- -# The amplitude of the components are defined as - -# .. math:: -# A_ij = |C_ij| - -# where :math:`C_{ij}` is the :math:`i`-th entry of the :math:`j`-th component and -# :math:`|\\cdot|` denotes the absolute value. - -# Returns -# ------- -# AnyDataObject -# Amplitude of the left components. -# AnyDataObject -# Amplitude of the left components. - -# """ -# comps1 = self.data.components_amplitude1 -# comps2 = self.data.components_amplitude2 - -# comps1 = self.preprocessor1.inverse_transform_components(comps1) -# comps2 = self.preprocessor2.inverse_transform_components(comps2) - -# return (comps1, comps2) - -# def components_phase(self) -> Tuple[AnyDataObject, AnyDataObject]: -# """Compute the phase of the components. - -# The phase of the components are defined as - -# .. math:: -# \\phi_{ij} = \\arg(C_{ij}) - -# where :math:`C_{ij}` is the :math:`i`-th entry of the :math:`j`-th component and -# :math:`\\arg(\\cdot)` denotes the argument of a complex number. - -# Returns -# ------- -# AnyDataObject -# Phase of the left components. -# AnyDataObject -# Phase of the right components. - -# """ -# comps1 = self.data.components_phase1 -# comps2 = self.data.components_phase2 - -# comps1 = self.preprocessor1.inverse_transform_components(comps1) -# comps2 = self.preprocessor2.inverse_transform_components(comps2) - -# return (comps1, comps2) - -# def scores_amplitude(self) -> Tuple[DataArray, DataArray]: -# """Compute the amplitude of the scores. - -# The amplitude of the scores are defined as - -# .. math:: -# A_ij = |S_ij| - -# where :math:`S_{ij}` is the :math:`i`-th entry of the :math:`j`-th score and -# :math:`|\\cdot|` denotes the absolute value. - -# Returns -# ------- -# DataArray -# Amplitude of the left scores. -# DataArray -# Amplitude of the right scores. - -# """ -# scores1 = self.data.scores_amplitude1 -# scores2 = self.data.scores_amplitude2 - -# scores1 = self.preprocessor1.inverse_transform_scores(scores1) -# scores2 = self.preprocessor2.inverse_transform_scores(scores2) -# return (scores1, scores2) - -# def scores_phase(self) -> Tuple[DataArray, DataArray]: -# """Compute the phase of the scores. - -# The phase of the scores are defined as - -# .. math:: -# \\phi_{ij} = \\arg(S_{ij}) - -# where :math:`S_{ij}` is the :math:`i`-th entry of the :math:`j`-th score and -# :math:`\\arg(\\cdot)` denotes the argument of a complex number. - -# Returns -# ------- -# DataArray -# Phase of the left scores. -# DataArray -# Phase of the right scores. - -# """ -# scores1 = self.data.scores_phase1 -# scores2 = self.data.scores_phase2 - -# scores1 = self.preprocessor1.inverse_transform_scores(scores1) -# scores2 = self.preprocessor2.inverse_transform_scores(scores2) - -# return (scores1, scores2) - -# def transform(self, data1: AnyDataObject, data2: AnyDataObject): -# raise NotImplementedError("Complex MCA does not support transform method.") - -# def homogeneous_patterns(self, correction=None, alpha=0.05): -# raise NotImplementedError( -# "Complex MCA does not support homogeneous_patterns method." -# ) - -# def heterogeneous_patterns(self, correction=None, alpha=0.05): -# raise NotImplementedError( -# "Complex MCA does not support heterogeneous_patterns method." -# ) +""" +This code is based on the work of James Chapman from cca-zoo. +Source: https://github.com/jameschapman19/cca_zoo + +The original code is licensed under the MIT License. 
+ +Copyright (c) 2020 James Chapman +""" + +from abc import abstractmethod +from datetime import datetime +from typing import Sequence, Self, List, Hashable + +import dask.array as da +import numpy as np +import xarray as xr +from scipy.linalg import eigh +from sklearn.base import BaseEstimator +from sklearn.utils.validation import FLOAT_DTYPES +from xeofs.models import EOF + +from .._version import __version__ +from ..preprocessing.preprocessor import Preprocessor +from ..utils.data_types import DataObject, DataArray, DataList + + +def _check_parameter_number(parameter_name: str, parameter, n_views: int): + if len(parameter) != n_views: + raise ValueError( + f"number of views passed should match number of parameter {parameter_name}" + f"len(views)={n_views} and " + f"len({parameter_name})={len(parameter)}" + ) + + +def _process_parameter(parameter_name: str, parameter, default, n_views: int): + if parameter is None: + parameter = [default] * n_views + elif not isinstance(parameter, (list, tuple)): + parameter = [parameter] * n_views + _check_parameter_number(parameter_name, parameter, n_views) + return parameter + + +class CCABaseModel(BaseEstimator): + def __init__( + self, + n_modes: int = 10, + use_coslat: bool = False, + pca: bool = False, + variance_fraction: float = 0.99, + init_pca_modes: int | float = 0.75, + sample_name: str = "sample", + feature_name: str = "feature", + ): + self.sample_name = sample_name + self.feature_name = feature_name + self.n_modes = n_modes + self.use_coslat = use_coslat + self.pca = pca + self.variance_fraction = variance_fraction + self.init_pca_modes = init_pca_modes + + self.dtypes = FLOAT_DTYPES + + self._preprocessor_kwargs = { + "sample_name": sample_name, + "feature_name": feature_name, + "with_std": False, + "with_weights": False, + } + + # Define analysis-relevant meta data + self.attrs = {"model": "BaseCrossModel"} + self.attrs.update( + { + "software": "xeofs", + "version": __version__, + "date": datetime.now().strftime("%Y-%m-%d %H:%M:%S"), + } + ) + + # Initialize the data container only to avoid type errors + # The actual data container will be initialized in respective subclasses + # self.data: _BaseCrossModelDataContainer = _BaseCrossModelDataContainer() + self.data = {} + + def _validate_data(self, views: Sequence[DataArray]): + if not all( + data[self.sample_name].size == views[0][self.sample_name].size + for data in views + ): + raise ValueError("All views must have the same number of samples") + if not all(data.ndim == 2 for data in views): + raise ValueError("All views must have 2 dimensions") + if not all(data.dtype in self.dtypes for data in views): + raise ValueError("All views must have dtype of {}.".format(self.dtypes)) + if not all(data[self.feature_name].size >= self.n_modes for data in views): + raise ValueError( + "All views must have at least {} features.".format(self.n_modes) + ) + + def _process_init_pca_modes(self, n_modes): + err_msg = "init_pca_modes must be either a float <= 1.0 or an integer > 1" + n_modes_list = [] + n_modes_max = [ + min(self.n_samples_, n_features) for n_features in self.n_features_ + ] + for n, n_max in zip(n_modes, n_modes_max): + if isinstance(n, float): + if n > 1.0: + raise ValueError(err_msg) + n = int(n * n_max) + n_modes_list.append(n) + elif isinstance(n, int): + if n <= 1: + raise ValueError(err_msg) + n_modes_list.append(n) + else: + raise ValueError(err_msg) + return n_modes_list + + def fit( + self, + views: Sequence[DataObject], + dim: Hashable | Sequence[Hashable], + ) -> Self: + 
self.n_views_ = len(views) + self.use_coslat = _process_parameter( + "use_coslat", self.use_coslat, False, self.n_views_ + ) + self.init_pca_modes = _process_parameter( + "init_pca_modes", self.init_pca_modes, 0.75, self.n_views_ + ) + + # Preprocess the input data + self.preprocessors = [ + Preprocessor(with_coslat=self.use_coslat[i], **self._preprocessor_kwargs) + for i in range(self.n_views_) + ] + views2D: List[DataArray] = [ + preprocessor.fit_transform(data, dim) + for preprocessor, data in zip(self.preprocessors, views) + ] + self._validate_data(views2D) + self.n_features_ = [data.coords[self.feature_name].size for data in views2D] + self.n_samples_ = views2D[0][self.sample_name].size + + self.data["input_data"] = views2D + views2D = self._process_data(views2D) + self.data["pca_data"] = views2D + + self._fit_algorithm(views2D) + + return self + + def _process_data(self, views: DataList) -> DataList: + if self.pca: + views = self._apply_pca(views) + return views + + def _apply_pca(self, views: DataList): + self.pca_models = [] + + n_pca_modes = self._process_init_pca_modes(self.init_pca_modes) + + view_transformed = [] + + for i, view in enumerate(views): + pca = EOF(n_modes=n_pca_modes[i]) + pca.fit(view, dim=self.sample_name) + self.pca_models.append(pca) + + # TODO: method to get cumulative explained variance + cum_exp_var_ratio = pca.explained_variance_ratio().cumsum() + if cum_exp_var_ratio.isel(mode=-1) < self.variance_fraction: + print( + "Warning: variance fraction {:.4f} is not reached. ".format( + self.variance_fraction + ) + + "Only {:.4f} of variance is explained.".format( + cum_exp_var_ratio.isel(mode=-1).item() + ) + ) + n_modes_keep = cum_exp_var_ratio.where( + cum_exp_var_ratio < self.variance_fraction, drop=True + ).size + # TODO: it's more convinient to work the common scaling of sklearn; provide additional parameter + # provide this parameter to transform method as well + scores = pca.scores().isel(mode=slice(0, n_modes_keep)) + svals = pca.singular_values().isel(mode=slice(0, n_modes_keep)) + scores = ( + (scores * svals) + .rename({"mode": self.feature_name}) + .transpose(self.sample_name, self.feature_name) + ) + view_transformed.append(scores) + return view_transformed + + @abstractmethod + def _fit_algorithm(self, views: List[DataArray]) -> Self: + raise NotImplementedError + + +class CCA(CCABaseModel): + r""" + Regularised CCA (canonical ridge) model. + + This model adds a regularization term to the CCA objective function to avoid overfitting and improve stability. It uses PCA to perform the optimization efficiently for high dimensional data. + + The objective function of regularised CCA is: + + .. math:: + + w_{opt}=\underset{w}{\mathrm{argmax}}\{ w_1^TX_1^TX_2w_2 \}\\ + + \text{subject to:} + + (1-c_1)w_1^TX_1^TX_1w_1+c_1w_1^Tw_1=n + + (1-c_2)w_2^TX_2^TX_2w_2+c_2w_2^Tw_2=n + + where :math:`c_i` are the regularization parameters for each view. + + Parameters + ---------- + n_modes : int, optional + Number of latent dimensions to use, by default 10 + use_coslat : bool, optional + Whether to use the square root of the cosine of the latitude as weights, by default False + pca : bool, optional + Whether to perform PCA on the input data, by default True + variance_fraction : float, optional + Fraction of variance to keep when performing PCA, by default 0.99 + init_pca_modes : int | float, optional + Number of PCA modes to compute. If float, the number of modes is given by the fraction of maximum number of modes for the given data. 
+        A value of 1.0 will perform a full SVD of the data. Choosing a smaller value can increase computation speed. Default 0.75
+    c : Sequence[float] | float, optional
+        Regularisation parameter, by default 0 (no regularisation)
+
+
+    Notes
+    -----
+    This implementation is largely based on the MCCA class from the cca_zoo repository [3]_.
+
+
+    References
+    ----------
+    .. [1] Vinod, Hrishikesh D. "Canonical ridge and econometrics of joint production." Journal of econometrics 4.2 (1976): 147-166.
+    .. [2] Hotelling, Harold. "Relations between two sets of variates." Breakthroughs in statistics. Springer, New York, NY, 1992. 162-190.
+    .. [3] Chapman et al., (2021). CCA-Zoo: A collection of Regularized, Deep Learning based, Kernel, and Probabilistic CCA methods in a scikit-learn style framework. Journal of Open Source Software, 6(68), 3823
+
+    Examples
+    --------
+    >>> from xe.models import CCA
+    >>> model = CCA(n_modes=5)
+    >>> model.fit([data1, data2], dim="time")
+    >>> can_loadings = model.canonical_loadings()
+
+    """
+
+    def __init__(
+        self,
+        n_modes,
+        use_coslat=False,
+        c=0,
+        pca=True,
+        variance_fraction=0.99,
+        init_pca_modes=0.75,
+        eps=1e-6,
+    ):
+        super().__init__(
+            n_modes=n_modes,
+            use_coslat=use_coslat,
+            pca=pca,
+            variance_fraction=variance_fraction,
+            init_pca_modes=init_pca_modes,
+        )
+        self.attrs.update({"model": "CCA"})
+        self.c = c
+        self.eps = eps
+
+    def _fit_algorithm(self, views: List[DataArray]) -> Self:
+        self.c = _process_parameter("c", self.c, 0, self.n_views_)
+        eigvals, eigvecs = self._solve_gevp(views)
+        self.eigvals = eigvals
+        self.eigvecs = eigvecs
+        # Compute the weights for each view
+        self._weights(eigvals, eigvecs, views)
+        # Compute loadings (= normalized weights)
+        self.data["loadings"] = [
+            wght / self._apply_norm(wght, [self.feature_name])
+            for wght in self.data["weights"]
+        ]
+        canonical_variates = self._transform(self.data["input_data"])
+        self.data["variates"] = canonical_variates
+
+        self.data["canonical_loadings"] = [
+            xr.dot(data, vari, dims=self.sample_name, optimize=True)
+            for data, vari in zip(self.data["input_data"], canonical_variates)
+        ]
+
+        # Compute explained variance
+        # Transform the views using the loadings
+        transformed_views = [
+            xr.dot(view, loading, dims=self.feature_name)
+            for view, loading in zip(views, self.data["loadings"])
+        ]
+        # Calculate the variance of each latent dimension in the transformed views
+        self.data["explained_variance"] = [
+            transformed.var(self.sample_name) for transformed in transformed_views
+        ]
+
+        # Explained variance ratio
+        self.data["total_variance"] = [
+            view.var(self.sample_name).sum() for view in views
+        ]
+
+        # Calculate the explained variance ratio for each latent dimension for each view
+        self.data["explained_variance_ratio"] = [
+            exp_var / total_var
+            for exp_var, total_var in zip(
+                self.data["explained_variance"], self.data["total_variance"]
+            )
+        ]
+
+        # Explained covariance
+        k = self.n_modes
+        explained_covariance = []
+
+        # Take the i-th mode of each transformed view and compute its covariance
+        for i in range(k):
+            transformed_views_k = [
+                view.isel(mode=slice(i, i + 1)) for view in transformed_views
+            ]
+            cov_ = self._apply_compute_covariance(
+                transformed_views_k, dims_in=["sample", "mode"]
+            )
+            svals = self._compute_singular_values(cov_, dims_in=["mode1", "mode2"])
+            explained_covariance.append(svals.isel(mode=0).item())
+        self.data["explained_covariance"] = xr.DataArray(
+            explained_covariance, dims=["mode"], coords={"mode": range(1, k + 1)}
+        )
+
+        minimum_dimension = min([view[self.feature_name].size for view in views])
+
+        cov = self._apply_compute_covariance(views, dims_in=["sample", "feature"])
+        S = self._compute_singular_values(cov, dims_in=["feature1", "feature2"])
+        # Select every other element starting from the first until the minimum dimension
+        self.data["total_explained_covariance"] = (
+            S.isel(mode=slice(0, None, 2)).isel(mode=slice(0, minimum_dimension)).sum()
+        )
+        self.data["explained_covariance_ratio"] = (
+            self.data["explained_covariance"] / self.data["total_explained_covariance"]
+        )
+
+        return self
+
+    def _compute_singular_values(
+        self, x, dims_in=["feature1", "feature2"], dims_out=["mode"]
+    ):
+        svals = xr.apply_ufunc(
+            np.linalg.svd,
+            x,
+            input_core_dims=[dims_in],
+            output_core_dims=[dims_out],
+            kwargs={"compute_uv": False},
+            vectorize=False,
+            dask="allowed",
+        )
+        svals = svals.assign_coords({"mode": range(1, svals.mode.size + 1)})
+        return svals
+
+    def _apply_norm(self, x, dims):
+        return xr.apply_ufunc(
+            np.linalg.norm,
+            x,
+            input_core_dims=[dims],
+            output_core_dims=[[]],
+            kwargs={"axis": -1},
+            vectorize=True,
+            dask="allowed",
+        )
+
+    def _solve_gevp(self, views: Sequence[DataArray], y=None, **kwargs):
+        # Set up the eigenvalue problem
+        C = self._C(views, dims_in=[self.sample_name, self.feature_name])
+        D = self._D(views, **kwargs)
+        self.splits = np.cumsum([view.shape[1] for view in views])
+        # Solve the eigenvalue problem
+        # Get the dimension of _C
+        p = C.shape[0]
+        subset_by_index = [p - self.n_modes, p - 1]
+        # Solve the generalized eigenvalue problem Cx=lambda Dx using a subset of eigenvalues and eigenvectors
+        [eigvals, eigvecs] = self._apply_eigh(C, D, subset_by_index=subset_by_index)
+        # Sort the eigenvalues and eigenvectors in descending order
+        idx_sorted_modes = eigvals.compute().argsort()[::-1]
+        idx_sorted_modes = idx_sorted_modes.assign_coords(
+            {"mode": range(idx_sorted_modes.mode.size)}
+        )
+        eigvals = eigvals.isel(mode=idx_sorted_modes)
+        eigvecs = eigvecs.isel(mode=idx_sorted_modes).real
+        # Set coordinates
+        coords_mode = range(1, eigvals.mode.size + 1)
+        coords_feature = C.coords[self.feature_name + "1"].values
+        eigvals = eigvals.assign_coords({"mode": coords_mode})
+        eigvecs = eigvecs.assign_coords(
+            {
+                "mode": coords_mode,
+                self.feature_name: coords_feature,
+            }
+        )
+        return eigvals, eigvecs
+
+    def _weights(self, eigvals, eigvecs, views, **kwargs):
+        # split eigvecs into weights for each view
+        # add 0 before the np ndarray splits
+        idx = np.concatenate([[0], self.splits])
+        self.data["weights"] = [
+            eigvecs.isel({self.feature_name: slice(idx[i], idx[i + 1])})
+            for i in range(len(idx) - 1)
+        ]
+        if self.pca:
+            # go from weights in PCA space to weights in original space
+            n_modes = [data.feature.size for data in self.data["pca_data"]]
+            self.data["weights"] = [
+                xr.dot(
+                    pca.components()
+                    .isel(mode=slice(0, n_modes[i]))
+                    .rename({"mode": "temp_dim"}),
+                    self.data["weights"][i].rename({"feature": "temp_dim"}),
+                    dims="temp_dim",
+                    optimize=True,
+                )
+                for i, pca in enumerate(self.pca_models)
+            ]
+
+    def _apply_eigh(self, a, b, subset_by_index):
+        return xr.apply_ufunc(
+            eigh,
+            a,
+            b,
+            input_core_dims=[
+                [self.feature_name + "1", self.feature_name + "2"],
+                [self.feature_name + "1", self.feature_name + "2"],
+            ],
+            output_core_dims=[["mode"], ["feature", "mode"]],
+            kwargs={"subset_by_index": subset_by_index},
+            vectorize=False,
+            dask="allowed",
+        )
+
+    def _C(self, views, dims_in):
+        C = self._apply_compute_covariance(views, dims_in=dims_in)
+        return C / len(views)
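# Editor's illustration (not part of the patch): _solve_gevp above casts
# regularised CCA as a symmetric generalized eigenvalue problem C x = lambda D x
# and asks scipy.linalg.eigh for only the top n_modes eigenpairs via
# subset_by_index. A minimal, self-contained sketch of the same call on small
# dense NumPy matrices; all names below are hypothetical and independent of xeofs:

import numpy as np
from scipy.linalg import eigh

rng = np.random.default_rng(0)
X = rng.standard_normal((20, 6))
C_toy = np.cov(X, rowvar=False)  # stand-in for the joint covariance built by _C
D_toy = np.eye(6)                # stand-in for the block-diagonal matrix from _D

n_modes, p = 2, C_toy.shape[0]
# eigh returns eigenvalues in ascending order, so the index window
# [p - n_modes, p - 1] selects the n_modes largest generalized eigenpairs
eigvals, eigvecs = eigh(C_toy, D_toy, subset_by_index=[p - n_modes, p - 1])
eigvals, eigvecs = eigvals[::-1], eigvecs[:, ::-1]  # reorder to descending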
+
+    def _apply_compute_covariance(
+        self, views: Sequence[DataArray], dims_in, dims_out=None
+    ) -> DataArray:
+        if dims_out is None:
+            dims_out = [dims_in[1] + "1", dims_in[1] + "2"]
+        all_views = xr.concat(views, dim=dims_in[1])
+        C = self._apply_cov(all_views, dims_in=dims_in, dims_out=dims_out)
+        Ci = [
+            self._apply_cov(view, dims_in=dims_in, dims_out=dims_out) for view in views
+        ]
+        return C - self._block_diag_dask(Ci, dims_in=dims_out)
+
+    def _apply_cov(
+        self, x, dims_in=["sample", "feature"], dims_out=["feature1", "feature2"]
+    ):
+        if x[dims_in[1]].size == 1:
+            return xr.apply_ufunc(
+                np.cov,
+                x,
+                input_core_dims=[dims_in],
+                output_core_dims=[[]],
+                kwargs={"rowvar": False},
+                vectorize=False,
+                dask="allowed",
+            )
+        else:
+            C = xr.apply_ufunc(
+                np.cov,
+                x,
+                input_core_dims=[dims_in],
+                output_core_dims=[dims_out],
+                kwargs={"rowvar": False},
+                vectorize=False,
+                dask="allowed",
+            )
+            feature_coords = x.coords[dims_in[1]].values
+            C = C.assign_coords(
+                {dims_out[0]: feature_coords, dims_out[1]: feature_coords}
+            )
+            return C
+
+    def _block_diag_dask(self, views, dims_in=["feature1", "feature2"], dims_out=None):
+        if dims_out is None:
+            dims_out = dims_in
+        if all(view.size == 1 for view in views):
+            result = da.diag(np.array([view.item() for view in views]))
+        else:
+            # Extract underlying Dask arrays
+            arrays = [da.asarray(view.data) for view in views]
+
+            # Construct a block-diagonal dask array
+            blocks = [
+                [
+                    darr2 if j == i else da.zeros((darr2.shape[0], darr1.shape[0]))
+                    for j, darr1 in enumerate(views)
+                ]
+                for i, darr2 in enumerate(arrays)
+            ]
+
+            # Use Dask's block to stack the arrays
+            blocked_array = da.block(blocks)
+
+            # Convert the result back to a DataArray
+            feature_coords = xr.concat(views, dim=dims_in[0])[dims_in[0]].values
+            result = xr.DataArray(
+                blocked_array,
+                dims=dims_out,
+                coords={dims_out[0]: feature_coords, dims_out[1]: feature_coords},
+            )
+        if any(isinstance(view.data, da.Array) for view in views):
+            return result
+        else:
+            return result.compute()
+
+    def _D(self, views):
+        if self.pca:
+            blocks = []
+            for i, view in enumerate(views):
+                pc = self.pca_models[i]
+                feature_coords = view.coords[self.feature_name]
+                n_features = feature_coords.size
+                expvar = pc.explained_variance().isel(mode=slice(0, n_features))
+                block = xr.DataArray(
+                    da.diag((1 - self.c[i]) * expvar.data + self.c[i]),
+                    dims=[self.feature_name + "1", self.feature_name + "2"],
+                    coords={
+                        self.feature_name + "1": feature_coords.values,
+                        self.feature_name + "2": feature_coords.values,
+                    },
+                )
+                block = block.compute()
+                blocks.append(block)
+
+        else:
+            blocks = [self._apply_E(view, c) for view, c in zip(views, self.c)]
+
+        D = self._block_diag_dask(blocks, dims_in=["feature1", "feature2"])
+
+        D_smallest_eig = self._apply_smallest_eigval(D, dims=["feature1", "feature2"])
+        D_smallest_eig = D_smallest_eig - self.eps
+        identity_matrix = xr.DataArray(np.eye(D.shape[0]), dims=D.dims, coords=D.coords)
+        D = D - D_smallest_eig * identity_matrix
+        return D / len(views)
+
+    def _apply_E(self, view, c):
+        E = xr.apply_ufunc(
+            self._E,
+            view,
+            input_core_dims=[[self.sample_name, self.feature_name]],
+            output_core_dims=[[self.feature_name + "1", self.feature_name + "2"]],
+            kwargs={"c": c},
+            vectorize=False,
+            dask="allowed",
+        )
+        feature_coords = view.coords[self.feature_name].values
+        E = E.assign_coords(
+            {
+                self.feature_name + "1": feature_coords,
+                self.feature_name + "2": feature_coords,
+            }
+        )
+        return E
+
+    def _E(self, view, c):
+        return (1 - c) * np.cov(view, rowvar=False) + c * np.eye(view.shape[1])
+
+    def _apply_smallest_eigval(self, D, dims):
+        return xr.apply_ufunc(
+            self._smallest_eigval,
+            D,
+            input_core_dims=[dims],
+            output_core_dims=[[]],
+            vectorize=True,
+            dask="allowed",
+        )
+
+    def _smallest_eigval(self, D):
+        return min(0, np.linalg.eigvalsh(D).min())
+
+    def weights(self) -> List[DataObject]:
+        weights = [
+            prep.inverse_transform_components(wghts)
+            for prep, wghts in zip(self.preprocessors, self.data["weights"])
+        ]
+        return weights
+
+    def _transform(self, views: Sequence[DataArray]) -> List[DataArray]:
+        transformed_views = []
+        for i, view in enumerate(views):
+            transformed_view = xr.dot(view, self.data["weights"][i], dims="feature")
+            transformed_views.append(transformed_view)
+        return transformed_views
+
+    def transform(self, views: Sequence[DataObject]) -> List[DataArray]:
+        view_preprocessed = []
+        for i, view in enumerate(views):
+            # Collect the preprocessed views; assigning here would discard
+            # all but the last view
+            view_preprocessed.append(self.preprocessors[i].transform(view))
+
+        transformed_views = self._transform(view_preprocessed)
+
+        unstacked_transformed_views = []
+        for i, view in enumerate(transformed_views):
+            unstacked_view = self.preprocessors[i].inverse_transform_scores(view)
+            unstacked_transformed_views.append(unstacked_view)
+        return unstacked_transformed_views
+
+    def canonical_loadings(self, normalize: bool = True) -> List[DataObject]:
+        can_loads = self.data["canonical_loadings"]
+        input_data = self.data["input_data"]
+        variates = self.data["variates"]
+
+        if normalize:
+            # Compute correlations
+            loadings = [
+                (
+                    loads
+                    / data[self.sample_name].size
+                    / data.std(self.sample_name)
+                    / vari.std(self.sample_name)
+                ).clip(-1, 1)
+                for loads, data, vari in zip(can_loads, input_data, variates)
+            ]
+        else:
+            loadings = can_loads
+
+        loadings = [
+            prep.inverse_transform_components(load)
+            for prep, load in zip(self.preprocessors, loadings)
+        ]
+        return loadings
+
+    def canonical_variates(self) -> List[DataArray]:
+        variates = []
+        for i, view in enumerate(self.data["variates"]):
+            vari = self.preprocessors[i].inverse_transform_scores(view)
+            variates.append(vari)
+        return variates
+
+    def explained_variance(self) -> List[DataArray]:
+        return self.data["explained_variance"]
+
+    def explained_variance_ratio(self) -> List[DataArray]:
+        return self.data["explained_variance_ratio"]
+
+    def explained_covariance(self) -> DataArray:
+        """
+        Return the covariance explained by the transformed components of each view.
+
+        Returns
+        -------
+        explained_covariance : DataArray
+            Covariance explained by each mode, computed from the transformed
+            components of the views.
+ """ + return self.data["explained_covariance"] + + def explained_covariance_ratio(self) -> DataArray: + return self.data["explained_covariance_ratio"] From 12cfc975c6203942670f3d8f3bb38f796d02701a Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Mon, 9 Oct 2023 13:00:14 +0200 Subject: [PATCH 19/43] refactor(BaseModel): create algorithm methods Move actual implementation of method into algorithm methods, while fit, transform, inverse_transform take care of pre and post-processing --- xeofs/models/_base_model.py | 135 ++++++++++++++++++++++++++++++++---- xeofs/models/eof.py | 74 +++++++------------- xeofs/models/eof_rotator.py | 46 +++++++----- xeofs/models/opa.py | 24 +++---- 4 files changed, 187 insertions(+), 92 deletions(-) diff --git a/xeofs/models/_base_model.py b/xeofs/models/_base_model.py index b9adcc5..df6caf4 100644 --- a/xeofs/models/_base_model.py +++ b/xeofs/models/_base_model.py @@ -1,12 +1,12 @@ import warnings -from typing import Optional, Sequence, Hashable, Dict, Any +from typing import Optional, Sequence, Hashable, Dict, Any, Self, List from abc import ABC, abstractmethod from datetime import datetime from dask.diagnostics.progress import ProgressBar from ..preprocessing.preprocessor import Preprocessor from ..data_container import _BaseModelDataContainer -from ..utils.data_types import AnyDataObject, DataArray +from ..utils.data_types import DataObject, DataArray, Dims from .._version import __version__ # Ignore warnings from numpy casting with additional coordinates @@ -86,13 +86,12 @@ def __init__( # The actual data container will be initialized in respective subclasses self.data: _BaseModelDataContainer = _BaseModelDataContainer() - @abstractmethod def fit( self, - data: AnyDataObject, + data: DataObject, dim: Sequence[Hashable] | Hashable, - weights: Optional[AnyDataObject] = None, - ): + weights: Optional[DataObject] = None, + ) -> Self: """ Fit the model to the input data. @@ -107,20 +106,132 @@ def fit( Weighting factors for the input data. """ - # Here follows the implementation to fit the model - # Typically you want to start by calling the Preprocessor first: - # self.preprocessor.fit_transform(data, dim, weights) + # Preprocess the data + data2D: DataArray = self.preprocessor.fit_transform(data, dim, weights) + + return self._fit_algorithm(data2D) + + @abstractmethod + def _fit_algorithm(self, data: DataArray) -> Self: + """Fit the model to the input data assuming a 2D DataArray. + + Parameters + ---------- + data: DataArray + Input data with dimensions (sample_name, feature_name) + + Returns + ------- + self: Self + The fitted model. + + """ raise NotImplementedError + def transform(self, data: DataObject) -> DataArray: + """Project data onto the components. + + Parameters + ---------- + data: DataObject + Data to be transformed. + + Returns + ------- + projections: DataArray + Projections of the data onto the components. + + """ + data2D = self.preprocessor.transform(data) + data2D = self._transform_algorithm(data2D) + return self.preprocessor.inverse_transform_scores(data2D) + @abstractmethod - def transform(self): + def _transform_algorithm(self, data: DataArray) -> DataArray: + """Project data onto the components. + + Parameters + ---------- + data: DataArray + Input data with dimensions (sample_name, feature_name) + + Returns + ------- + projections: DataArray + Projections of the data onto the components. 
+ + """ raise NotImplementedError + def fit_transform( + self, + data: DataObject, + dim: Sequence[Hashable] | Hashable, + weights: Optional[DataObject] = None, + ) -> DataArray: + """Fit the model to the input data and project the data onto the components. + + Parameters + ---------- + data: DataObject + Input data. + dim: Sequence[Hashable] | Hashable + Specify the sample dimensions. The remaining dimensions + will be treated as feature dimensions. + weights: Optional[DataObject] + Weighting factors for the input data. + + Returns + ------- + projections: DataArray + Projections of the data onto the components. + + """ + return self.fit(data, dim, weights).transform(data) + + def inverse_transform(self, mode) -> DataObject: + """Reconstruct the original data from transformed data. + + Parameters + ---------- + mode: integer, a list of integers, or a slice object. + The mode(s) used to reconstruct the data. If a scalar is given, + the data will be reconstructed using the given mode. If a slice + is given, the data will be reconstructed using the modes in the + given slice. If a list of integers is given, the data will be reconstructed + using the modes in the given list. + + Returns + ------- + data: DataArray | Dataset | List[DataArray] + Reconstructed data. + + """ + data_reconstructed = self._inverse_transform_algorithm(mode) + return self.preprocessor.inverse_transform_data(data_reconstructed) + @abstractmethod - def inverse_transform(self, mode): + def _inverse_transform_algorithm(self, mode) -> DataArray: + """Reconstruct the original data from transformed data. + + Parameters + ---------- + mode: integer, a list of integers, or a slice object. + The mode(s) used to reconstruct the data. If a scalar is given, + the data will be reconstructed using the given mode. If a slice + is given, the data will be reconstructed using the modes in the + given slice. If a list of integers is given, the data will be reconstructed + using the modes in the given list. 
+ + Returns + ------- + data: DataArray + Reconstructed 2D data with dimensions (sample_name, feature_name) + + """ raise NotImplementedError - def components(self) -> AnyDataObject: + def components(self) -> DataObject: """Get the components.""" components = self.data.components return self.preprocessor.inverse_transform_components(components) diff --git a/xeofs/models/eof.py b/xeofs/models/eof.py index 43658ee..e1d19f9 100644 --- a/xeofs/models/eof.py +++ b/xeofs/models/eof.py @@ -1,8 +1,9 @@ +from typing import Self import xarray as xr from ._base_model import _BaseModel from .decomposer import Decomposer -from ..utils.data_types import AnyDataObject, DataArray +from ..utils.data_types import DataObject, DataArray, Dims from ..data_container import EOFDataContainer, ComplexEOFDataContainer from ..utils.xarray_utils import hilbert_transform from ..utils.xarray_utils import total_variance as compute_total_variance @@ -60,15 +61,12 @@ def __init__( # Initialize the DataContainer to store the results self.data: EOFDataContainer = EOFDataContainer() - def fit(self, data: AnyDataObject, dim, weights=None): + def _fit_algorithm(self, data: DataArray) -> Self: sample_name = self.sample_name feature_name = self.feature_name - # Preprocess the data - input_data: DataArray = self.preprocessor.fit_transform(data, dim, weights) - # Compute the total variance - total_variance = compute_total_variance(input_data, dim=sample_name) + total_variance = compute_total_variance(data, dim=sample_name) # Decompose the data n_modes = self._params["n_modes"] @@ -76,14 +74,14 @@ def fit(self, data: AnyDataObject, dim, weights=None): decomposer = Decomposer( n_modes=n_modes, solver=self._params["solver"], **self._solver_kwargs ) - decomposer.fit(input_data, dims=(sample_name, feature_name)) + decomposer.fit(data, dims=(sample_name, feature_name)) singular_values = decomposer.s_ components = decomposer.V_ scores = decomposer.U_ # Compute the explained variance - n_samples = input_data.coords[sample_name].size + n_samples = data.coords[sample_name].size explained_variance = singular_values**2 / (n_samples - 1) # Index of the sorted explained variance @@ -94,7 +92,7 @@ def fit(self, data: AnyDataObject, dim, weights=None): # Assign the results to the data container self.data.set_data( - input_data=input_data, + input_data=data, components=components, scores=scores, explained_variance=explained_variance, @@ -102,39 +100,21 @@ def fit(self, data: AnyDataObject, dim, weights=None): idx_modes_sorted=idx_modes_sorted, ) self.data.set_attrs(self.attrs) + return self - def transform(self, data: AnyDataObject) -> DataArray: - """Project new unseen data onto the components (EOFs/eigenvectors). - - Parameters - ---------- - data: AnyDataObject - Data to be transformed. - - Returns - ------- - projections: DataArray - Projections of the new data onto the components. 
- - """ + def _transform_algorithm(self, data: DataObject) -> DataArray: feature_name = self.preprocessor.feature_name - # Preprocess the data - data_stacked: DataArray = self.preprocessor.transform(data) components = self.data.components singular_values = self.data.singular_values # Project the data - projections = ( - xr.dot(data_stacked, components, dims=feature_name) / singular_values - ) + projections = xr.dot(data, components, dims=feature_name) / singular_values projections.name = "scores" - # Unstack the projections - projections = self.preprocessor.inverse_transform_scores(projections) return projections - def inverse_transform(self, mode) -> AnyDataObject: + def _inverse_transform_algorithm(self, mode) -> DataArray: """Reconstruct the original data from transformed data. Parameters @@ -162,13 +142,9 @@ def inverse_transform(self, mode) -> AnyDataObject: # Enforce real output reconstructed_data = reconstructed_data.real - # Unstack and unscale the data - reconstructed_data = self.preprocessor.inverse_transform_data( - reconstructed_data - ) return reconstructed_data - def components(self) -> AnyDataObject: + def components(self) -> DataObject: """Return the (EOF) components. The components in EOF anaylsis are the eigenvectors of the covariance/correlation matrix. @@ -302,25 +278,22 @@ def __init__(self, padding="exp", decay_factor=0.2, **kwargs): # Initialize the DataContainer to store the results self.data: ComplexEOFDataContainer = ComplexEOFDataContainer() - def fit(self, data: AnyDataObject, dim, weights=None): + def _fit_algorithm(self, data: DataArray) -> Self: sample_name = self.sample_name feature_name = self.feature_name - # Preprocess the data - input_data: DataArray = self.preprocessor.fit_transform(data, dim, weights) - # Apply hilbert transform: padding = self._params["padding"] decay_factor = self._params["decay_factor"] - input_data = hilbert_transform( - input_data, + data = hilbert_transform( + data, dims=(sample_name, feature_name), padding=padding, decay_factor=decay_factor, ) # Compute the total variance - total_variance = compute_total_variance(input_data, dim=sample_name) + total_variance = compute_total_variance(data, dim=sample_name) # Decompose the complex data n_modes = self._params["n_modes"] @@ -328,14 +301,14 @@ def fit(self, data: AnyDataObject, dim, weights=None): decomposer = Decomposer( n_modes=n_modes, solver=self._params["solver"], **self._solver_kwargs ) - decomposer.fit(input_data) + decomposer.fit(data) singular_values = decomposer.s_ components = decomposer.V_ scores = decomposer.U_ # Compute the explained variance - n_samples = input_data.coords[sample_name].size + n_samples = data.coords[sample_name].size explained_variance = singular_values**2 / (n_samples - 1) # Index of the sorted explained variance @@ -345,7 +318,7 @@ def fit(self, data: AnyDataObject, dim, weights=None): idx_modes_sorted.coords.update(explained_variance.coords) self.data.set_data( - input_data=input_data, + input_data=data, components=components, scores=scores, explained_variance=explained_variance, @@ -354,11 +327,12 @@ def fit(self, data: AnyDataObject, dim, weights=None): ) # Assign analysis-relevant meta data to the results self.data.set_attrs(self.attrs) + return self - def transform(self, data: AnyDataObject) -> DataArray: - raise NotImplementedError("ComplexEOF does not support transform method.") + def _transform_algorithm(self, data: DataArray) -> DataArray: + raise NotImplementedError("Complex EOF does not support transform method.") - def 
-    def components_amplitude(self) -> AnyDataObject:
+    def components_amplitude(self) -> DataObject:
         """Return the amplitude of the (EOF) components.

         The amplitude of the components is defined as
@@ -378,7 +352,7 @@ def components_amplitude(self) -> AnyDataObject:
         amplitudes = self.data.components_amplitude
         return self.preprocessor.inverse_transform_components(amplitudes)

-    def components_phase(self) -> AnyDataObject:
+    def components_phase(self) -> DataObject:
         """Return the phase of the (EOF) components.

         The phase of the components is defined as
diff --git a/xeofs/models/eof_rotator.py b/xeofs/models/eof_rotator.py
index 45419cc..b065b8f 100644
--- a/xeofs/models/eof_rotator.py
+++ b/xeofs/models/eof_rotator.py
@@ -1,8 +1,7 @@
 from datetime import datetime
 import numpy as np
 import xarray as xr
-from dask.diagnostics.progress import ProgressBar
-from typing import List
+from typing import Self

 from .eof import EOF, ComplexEOF
 from ..data_container.eof_rotator_data_container import (
@@ -11,7 +10,7 @@
 )
 from ..utils.rotation import promax
-from ..utils.data_types import DataArray, AnyDataObject
+from ..utils.data_types import DataArray
 from typing import TypeVar
 from .._version import __version__

@@ -85,7 +84,18 @@ def __init__(
         # Initialize the DataContainer to store the results
         self.data: EOFRotatorDataContainer = EOFRotatorDataContainer()

-    def fit(self, model):
+    def fit(self, model) -> Self:
+        """Rotate the solution obtained from ``xe.models.EOF``.
+
+        Parameters
+        ----------
+        model : ``xe.models.EOF``
+            The EOF model to be rotated.
+
+        """
+        return self._fit_algorithm(model)
+
+    def _fit_algorithm(self, model) -> Self:
         self.model = model
         self.preprocessor = model.preprocessor
         sample_name = model.sample_name
@@ -179,8 +189,9 @@ def fit(self, model):
         )
         # Assign analysis-relevant metadata
         self.data.set_attrs(self.attrs)
+        return self

-    def transform(self, data: AnyDataObject) -> DataArray:
+    def _transform_algorithm(self, data: DataArray) -> DataArray:
         n_modes = self._params["n_modes"]

         svals = self.model.data.singular_values.sel(
@@ -189,11 +200,8 @@ def transform(self, data: AnyDataObject) -> DataArray:
         # Select the (non-rotated) singular vectors of the first dataset
         components = self.model.data.components.sel(mode=slice(1, n_modes))

-        # Preprocess the data
-        da: DataArray = self.preprocessor.transform(data)
-
-        # Compute non-rotated scores by projecting the data onto non-rotated components
-        projections = xr.dot(da, components) / svals
+        projections = xr.dot(data, components) / svals
         projections.name = "scores"

         # Rotate the scores
@@ -211,10 +219,13 @@ def transform(self, data: AnyDataObject) -> DataArray:
         # Adapt the sign of the scores
         projections = projections * self.data.modes_sign

-        # Unstack the projections
-        projections = self.preprocessor.inverse_transform_scores(projections)
         return projections

+    def fit_transform(self, model) -> DataArray:
+        raise NotImplementedError(
+            "The fit_transform method is not implemented for the EOFRotator class."
+        )
+
     def _compute_rot_mat_inv_trans(self, rotation_matrix, input_dims) -> DataArray:
         """Compute the inverse transpose of the rotation matrix.

@@ -288,9 +299,10 @@ def __init__(self, **kwargs):
         # Initialize the DataContainer to store the results
         self.data: ComplexEOFRotatorDataContainer = ComplexEOFRotatorDataContainer()

-    def transform(self, data: AnyDataObject):
-        # Here we make use of the Method Resolution Order (MRO) to call the
-        # transform method of the first class in the MRO after `EOFRotator`
-        # that has a transform method.
In this case it will be `ComplexEOF`, - # which will raise an error because it does not have a transform method. - super(EOFRotator, self).transform(data) + def _transform_algorithm(self, data: DataArray) -> DataArray: + # Here we leverage the Method Resolution Order (MRO) to invoke the + # transform method of the first class in the MRO after EOFRotator that + # has a transform method. In this case, it will be ComplexEOF. However, + # please note that `transform` is not implemented for ComplexEOF, so this + # line of code will actually raise an error. + return super(EOFRotator, self)._transform_algorithm(data) diff --git a/xeofs/models/opa.py b/xeofs/models/opa.py index e1e5a5f..49c711d 100644 --- a/xeofs/models/opa.py +++ b/xeofs/models/opa.py @@ -1,4 +1,4 @@ -from typing import Optional +from typing import Optional, Self import xarray as xr import numpy as np @@ -7,7 +7,7 @@ from .eof import EOF from .decomposer import Decomposer from ..data_container.opa_data_container import OPADataContainer -from ..utils.data_types import AnyDataObject, DataArray +from ..utils.data_types import DataObject, DataArray class OPA(_BaseModel): @@ -84,16 +84,13 @@ def _compute_matrix_inverse(X, dims): dask="allowed", ) - def fit(self, data: AnyDataObject, dim, weights: Optional[AnyDataObject] = None): + def _fit_algorithm(self, data: DataArray) -> Self: sample_name = self.sample_name feature_name = self.feature_name - # Preprocess the data - input_data: DataArray = self.preprocessor.fit_transform(data, dim, weights) - # Perform PCA as a pre-processing step pca = EOF(n_modes=self._params["n_pca_modes"], use_coslat=False) - pca.fit(input_data, dim=sample_name) + pca.fit(data, dim=sample_name) svals = pca.data.singular_values expvar = pca.data.explained_variance comps = pca.data.components * svals / np.sqrt(expvar) @@ -200,14 +197,15 @@ def fit(self, data: AnyDataObject, dim, weights: Optional[AnyDataObject] = None) self.data.set_attrs(self.attrs) self._U = U # store U for testing purposes of orthogonality self._C0 = C0 # store C0 for testing purposes of orthogonality + return self - def transform(self, data: AnyDataObject): - raise NotImplementedError() + def _transform_algorithm(self, data: DataArray) -> DataArray: + raise NotImplementedError("OPA does not (yet) support transform()") - def inverse_transform(self, mode): - raise NotImplementedError() + def _inverse_transform_algorithm(self, mode) -> DataObject: + raise NotImplementedError("OPA does not (yet) support inverse_transform()") - def components(self) -> AnyDataObject: + def components(self) -> DataObject: """Return the optimal persistence pattern (OPP).""" return super().components() @@ -222,7 +220,7 @@ def decorrelation_time(self) -> DataArray: """Return the decorrelation time of the optimal persistence pattern (OPP).""" return self.data.decorrelation_time - def filter_patterns(self) -> DataArray: + def filter_patterns(self) -> DataObject: """Return the filter patterns.""" fps = self.data.filter_patterns return self.preprocessor.inverse_transform_components(fps) From 324c06bba0fdc8afdecc4898c2a40173b01ac9e5 Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Mon, 9 Oct 2023 13:16:05 +0200 Subject: [PATCH 20/43] style: update typings --- tests/models/test_complex_mca.py | 2 +- xeofs/models/mca_rotator.py | 4 ++-- xeofs/models/rotator_factory.py | 2 -- xeofs/preprocessing/factory.py | 8 ++++---- xeofs/preprocessing/scaler.py | 21 ++++++++++----------- xeofs/preprocessing/stacker.py | 13 +++---------- xeofs/utils/data_types.py | 10 ---------- 
xeofs/utils/sanity_checks.py | 2 +- xeofs/utils/xarray_utils.py | 11 ++++++----- 9 files changed, 27 insertions(+), 46 deletions(-) diff --git a/tests/models/test_complex_mca.py b/tests/models/test_complex_mca.py index 3d0046f..bfe348b 100644 --- a/tests/models/test_complex_mca.py +++ b/tests/models/test_complex_mca.py @@ -269,7 +269,7 @@ def test_fit_with_dataset(mca_model, mock_dataset, dim): (("lon", "lat")), ], ) -def test_fit_with_dataarraylist(mca_model, mock_data_array_list, dim): +def test_fit_with_datalist(mca_model, mock_data_array_list, dim): mca_model.fit(mock_data_array_list, mock_data_array_list, dim) assert hasattr(mca_model, "preprocessor1") assert hasattr(mca_model, "preprocessor2") diff --git a/xeofs/models/mca_rotator.py b/xeofs/models/mca_rotator.py index 2cd3154..22962cb 100644 --- a/xeofs/models/mca_rotator.py +++ b/xeofs/models/mca_rotator.py @@ -310,9 +310,9 @@ def transform(self, **kwargs) -> DataArray | List[DataArray]: Parameters ---------- - data1 : DataArray | Dataset | DataArraylist + data1 : DataArray | Dataset | List[DataArray] Data to be projected onto the rotated singular vectors of the first dataset. - data2 : DataArray | Dataset | DataArraylist + data2 : DataArray | Dataset | List[DataArray] Data to be projected onto the rotated singular vectors of the second dataset. Returns diff --git a/xeofs/models/rotator_factory.py b/xeofs/models/rotator_factory.py index e6d6139..56f8eb3 100644 --- a/xeofs/models/rotator_factory.py +++ b/xeofs/models/rotator_factory.py @@ -6,8 +6,6 @@ from .mca import MCA, ComplexMCA from .eof_rotator import EOFRotator, ComplexEOFRotator from .mca_rotator import MCARotator, ComplexMCARotator -from ..utils.rotation import promax -from ..utils.data_types import DataArrayList, Dataset, DataArray class RotatorFactory: diff --git a/xeofs/preprocessing/factory.py b/xeofs/preprocessing/factory.py index 6231e5d..4a6632f 100644 --- a/xeofs/preprocessing/factory.py +++ b/xeofs/preprocessing/factory.py @@ -7,12 +7,12 @@ DataSetMultiIndexConverter, DataListMultiIndexConverter, ) -from ..utils.data_types import AnyDataObject +from ..utils.data_types import DataObject class ScalerFactory: @staticmethod - def create_scaler(data: AnyDataObject, **kwargs): + def create_scaler(data: DataObject, **kwargs): if isinstance(data, xr.DataArray): return DataArrayScaler(**kwargs) elif isinstance(data, xr.Dataset): @@ -28,7 +28,7 @@ def create_scaler(data: AnyDataObject, **kwargs): class MultiIndexConverterFactory: @staticmethod def create_converter( - data: AnyDataObject, **kwargs + data: DataObject, **kwargs ) -> DataArrayMultiIndexConverter | DataListMultiIndexConverter: if isinstance(data, xr.DataArray): return DataArrayMultiIndexConverter(**kwargs) @@ -44,7 +44,7 @@ def create_converter( class StackerFactory: @staticmethod - def create_stacker(data: AnyDataObject, **kwargs): + def create_stacker(data: DataObject, **kwargs): if isinstance(data, xr.DataArray): return DataArrayStacker(**kwargs) elif isinstance(data, xr.Dataset): diff --git a/xeofs/preprocessing/scaler.py b/xeofs/preprocessing/scaler.py index c79b90a..381d039 100644 --- a/xeofs/preprocessing/scaler.py +++ b/xeofs/preprocessing/scaler.py @@ -1,12 +1,9 @@ -from typing import List, Optional, Sequence, Hashable +from typing import List, Optional, Sequence, Hashable, Self import numpy as np import xarray as xr from sklearn.base import BaseEstimator, TransformerMixin -from xeofs.utils.data_types import DataArray - -from ..utils.constants import VALID_LATITUDE_NAMES from ..utils.sanity_checks 
import ( assert_single_dataset, assert_list_dataarrays, @@ -54,14 +51,14 @@ def _compute_sqrt_cos_lat_weights(self, data, dim): Parameters ---------- - data : SingleDataObject + data : DataArray | DataSet Data to be scaled. dim : sequence of hashable Dimensions along which the data is considered to be a feature. Returns ------- - SingleDataObject + DataArray | DataSet Square root of cosine of latitude weights. """ @@ -78,7 +75,7 @@ def fit( sample_dims: Dims, feature_dims: Dims, weights: Optional[DataArray] = None, - ): + ) -> Self: """Fit the scaler to the data. Parameters @@ -186,12 +183,12 @@ def inverse_transform_data(self, data: DataArray) -> DataArray: Parameters ---------- - data : SingleDataObject + data : DataArray | DataSet Data to be unscaled. Returns ------- - SingleDataObject + DataArray | DataSet Unscaled data. """ @@ -237,7 +234,7 @@ def fit( sample_dims: Hashable | Sequence[Hashable], feature_dims: Hashable | Sequence[Hashable], weights: Optional[DataSet] = None, - ): + ) -> Self: return super().fit(data, sample_dims, feature_dims, weights) # type: ignore def transform(self, data: DataSet) -> DataSet: @@ -300,7 +297,7 @@ def fit( sample_dims: Dims, feature_dims_list: DimsList, weights: Optional[DataList] = None, - ): + ) -> Self: """Fit the scaler to the data. Parameters @@ -360,6 +357,8 @@ def fit( scaler.fit(da, sample_dims=sample_dims, feature_dims=fdims, weights=wghts) self.scalers.append(scaler) + return self + def transform(self, da_list: DataList) -> DataList: """Scale the data. diff --git a/xeofs/preprocessing/stacker.py b/xeofs/preprocessing/stacker.py index 7a8d415..574e170 100644 --- a/xeofs/preprocessing/stacker.py +++ b/xeofs/preprocessing/stacker.py @@ -5,14 +5,7 @@ import xarray as xr from sklearn.base import BaseEstimator, TransformerMixin -from xeofs.utils.data_types import DataArray, DataSet, DataList - -from ..utils.data_types import ( - Dims, - DimsList, - DataArray, - Dataset, -) +from ..utils.data_types import Dims, DimsList, DataArray, DataSet, DataList from ..utils.sanity_checks import convert_to_dim_type @@ -358,7 +351,7 @@ def _validate_dimension_names(self, sample_dims, feature_dims): f"Name of feature dimension ({self.feature_name}) is already present in data. Please use another name." ) - def _stack(self, data: Dataset, sample_dims, feature_dims) -> DataArray: + def _stack(self, data: DataSet, sample_dims, feature_dims) -> DataArray: """Reshape a Dataset to 2D. Parameters @@ -511,7 +504,7 @@ def fit( sample_dims: Dims, feature_dims: DimsList, y=None, - ): + ) -> Self: """Fit the stacker. Parameters diff --git a/xeofs/utils/data_types.py b/xeofs/utils/data_types.py index 0e8ad59..84b3583 100644 --- a/xeofs/utils/data_types.py +++ b/xeofs/utils/data_types.py @@ -2,8 +2,6 @@ List, TypeAlias, Sequence, - TypedDict, - Optional, Tuple, TypeVar, Hashable, @@ -25,11 +23,3 @@ DimsTuple: TypeAlias = Tuple[Dims, ...] DimsList: TypeAlias = List[Dims] DimsListTuple: TypeAlias = Tuple[DimsList, ...] 
-
-
-# Replace this with the above
-Dataset: TypeAlias = xr.Dataset
-DataArrayList: TypeAlias = List[DataArray]
-SingleDataObject = TypeVar("SingleDataObject", DataArray, Dataset)
-XArrayData = TypeVar("XArrayData", DataArray, Dataset)
-AnyDataObject = TypeVar("AnyDataObject", DataArray, Dataset, DataArrayList)
diff --git a/xeofs/utils/sanity_checks.py b/xeofs/utils/sanity_checks.py
index 6104056..280d14f 100644
--- a/xeofs/utils/sanity_checks.py
+++ b/xeofs/utils/sanity_checks.py
@@ -2,7 +2,7 @@

 import xarray as xr

-from xeofs.utils.data_types import DataArray, Dataset, DataArrayList, Dims
+from xeofs.utils.data_types import Dims


 def assert_single_dataarray(da, name):
diff --git a/xeofs/utils/xarray_utils.py b/xeofs/utils/xarray_utils.py
index 9d3dab1..22808ff 100644
--- a/xeofs/utils/xarray_utils.py
+++ b/xeofs/utils/xarray_utils.py
@@ -7,6 +7,7 @@
 from .sanity_checks import convert_to_dim_type
 from .data_types import (
     Dims,
+    DimsList,
     DataArray,
     DataSet,
     DataList,
@@ -72,7 +73,7 @@ def compute_sqrt_cos_lat_weights(
 def get_dims(
     data: DataArray | DataSet | DataList,
     sample_dims: Hashable | Sequence[Hashable],
-) -> Tuple[Hashable, Hashable]:
+) -> Tuple[Dims, Dims | DimsList]:
     """Extracts the dimensions of a DataArray or Dataset that are not included in the sample dimensions.

     Parameters:
@@ -93,18 +94,18 @@
     # Check for invalid types
     if isinstance(data, (xr.DataArray, xr.Dataset)):
         sample_dims = convert_to_dim_type(sample_dims)
-        feature_dims = _get_feature_dims(data, sample_dims)
+        feature_dims: Dims = _get_feature_dims(data, sample_dims)
+        return sample_dims, feature_dims
     elif isinstance(data, list):
         sample_dims = convert_to_dim_type(sample_dims)
-        feature_dims = [_get_feature_dims(da, sample_dims) for da in data]
+        feature_dims: DimsList = [_get_feature_dims(da, sample_dims) for da in data]
+        return sample_dims, feature_dims
     else:
         err_message = f"Invalid input type: {type(data).__name__}. Expected one of "
         err_message += f"the following: DataArray, Dataset or a list of DataArrays."
         raise TypeError(err_message)

-    return sample_dims, feature_dims  # type: ignore
-

 def _get_feature_dims(data: DataArray | DataSet, sample_dims: Dims) -> Dims:
     """Extracts the dimensions of a DataArray that are not included in the sample dimensions.
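
A minimal usage sketch of the reworked get_dims above; the DataArray, its dimension names, and the commented results are illustrative assumptions, not part of the changeset:

import numpy as np
import xarray as xr

from xeofs.utils.xarray_utils import get_dims

# Hypothetical 3D field: "time" is the sample dimension, the rest are features.
da = xr.DataArray(np.random.rand(10, 5, 4), dims=("time", "lat", "lon"))

sample_dims, feature_dims = get_dims(da, sample_dims="time")
# sample_dims  -> ("time",)
# feature_dims -> ("lat", "lon")

# For a list of DataArrays, feature dims are collected per array (a DimsList):
sample_dims, feature_dims_list = get_dims([da, da], sample_dims="time")
# feature_dims_list -> [("lat", "lon"), ("lat", "lon")]

For a single DataArray or Dataset the function now returns early with one Dims tuple, while the list branch returns one Dims tuple per array, matching the Tuple[Dims, Dims | DimsList] annotation introduced above.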
From 166e9578e5f08dc383de7a64688d73833f1ccaaa Mon Sep 17 00:00:00 2001
From: Niclas Rieger
Date: Mon, 9 Oct 2023 14:42:39 +0200
Subject: [PATCH 21/43] refactor: create hilbert_transform.py

Move Hilbert-transform-related methods into their own file (resolve
refactor: complex utilities #54)
---
 xeofs/models/eof.py              |   2 +-
 xeofs/models/eof_rotator.py      |  18 +--
 xeofs/models/mca.py              |   2 +-
 xeofs/models/mca_rotator.py      |  16 +--
 xeofs/utils/hilbert_transform.py | 114 +++++++++++++++
 xeofs/utils/rotation.py          | 184 ++++++++++++++++----------
 xeofs/utils/xarray_utils.py      | 111 -------------------
 7 files changed, 220 insertions(+), 227 deletions(-)
 create mode 100644 xeofs/utils/hilbert_transform.py

diff --git a/xeofs/models/eof.py b/xeofs/models/eof.py
index e1d19f9..ae36fdf 100644
--- a/xeofs/models/eof.py
+++ b/xeofs/models/eof.py
@@ -5,7 +5,7 @@
 from .decomposer import Decomposer
 from ..utils.data_types import DataObject, DataArray, Dims
 from ..data_container import EOFDataContainer, ComplexEOFDataContainer
-from ..utils.xarray_utils import hilbert_transform
+from ..utils.hilbert_transform import hilbert_transform
 from ..utils.xarray_utils import total_variance as compute_total_variance

diff --git a/xeofs/models/eof_rotator.py b/xeofs/models/eof_rotator.py
index b065b8f..59f5d41 100644
--- a/xeofs/models/eof_rotator.py
+++ b/xeofs/models/eof_rotator.py
@@ -15,8 +15,6 @@
 from typing import TypeVar
 from .._version import __version__

-Model = TypeVar("Model", EOF, ComplexEOF)
-

 class EOFRotator(EOF):
     """Rotate a solution obtained from ``xe.models.EOF``.
@@ -112,19 +110,11 @@ def _fit_algorithm(self, model) -> Self:
         # Rotate loadings
         loadings = components * np.sqrt(expvar)
-        rot_loadings, rot_matrix, phi_matrix = xr.apply_ufunc(
-            promax,
-            loadings,
-            power,
-            input_core_dims=[[feature_name, "mode"], []],
-            output_core_dims=[
-                [feature_name, "mode"],
-                ["mode_m", "mode_n"],
-                ["mode_m", "mode_n"],
-            ],
-            kwargs={"max_iter": max_iter, "rtol": rtol},
-            dask="allowed",
+        promax_kwargs = {"power": power, "max_iter": max_iter, "rtol": rtol}
+        rot_loadings, rot_matrix, phi_matrix = promax(
+            loadings, feature_dim=feature_name, **promax_kwargs
         )
+
         # Assign coordinates to the rotation/correlation matrices
         rot_matrix = rot_matrix.assign_coords(
             mode_m=np.arange(1, rot_matrix.mode_m.size + 1),
diff --git a/xeofs/models/mca.py b/xeofs/models/mca.py
index 8f71e6c..c8cac2b 100644
--- a/xeofs/models/mca.py
+++ b/xeofs/models/mca.py
@@ -12,7 +12,7 @@
     ComplexMCADataContainer,
 )
 from ..utils.statistics import pearson_correlation
-from ..utils.xarray_utils import hilbert_transform
+from ..utils.hilbert_transform import hilbert_transform
 from ..utils.dimension_renamer import DimensionRenamer

diff --git a/xeofs/models/mca_rotator.py b/xeofs/models/mca_rotator.py
index 22962cb..476633a 100644
--- a/xeofs/models/mca_rotator.py
+++ b/xeofs/models/mca_rotator.py
@@ -159,19 +159,11 @@ def fit(self, model: MCA | ComplexMCA):
         loadings = xr.concat([comps1, comps2], dim=feature_name) * scaling

         # Rotate loadings
-        rot_loadings, rot_matrix, phi_matrix = xr.apply_ufunc(
-            promax,
-            loadings,
-            power,
-            input_core_dims=[[feature_name, "mode"], []],
-            output_core_dims=[
-                [feature_name, "mode"],
-                ["mode_m", "mode_n"],
-                ["mode_m", "mode_n"],
-            ],
-            kwargs={"max_iter": max_iter, "rtol": rtol},
-            dask="allowed",
+        promax_kwargs = {"power": power, "max_iter": max_iter, "rtol": rtol}
+        rot_loadings, rot_matrix, phi_matrix = promax(
+            loadings=loadings, feature_dim=feature_name, **promax_kwargs
+        )
+
+        # Assign coordinates to the rotation/correlation matrices
         rot_matrix = rot_matrix.assign_coords(
             mode_m=np.arange(1, rot_matrix.mode_m.size + 1),
diff --git a/xeofs/utils/hilbert_transform.py b/xeofs/utils/hilbert_transform.py
new file mode 100644
index 0000000..7f0d5f2
--- /dev/null
+++ b/xeofs/utils/hilbert_transform.py
@@ -0,0 +1,114 @@
+import numpy as np
+import xarray as xr
+from scipy.signal import hilbert  # type: ignore
+from .data_types import DataArray
+
+
+def hilbert_transform(
+    data: DataArray, dims, padding: str = "exp", decay_factor: float = 0.2
+) -> DataArray:
+    """Hilbert transform with optional padding to mitigate spectral leakage.
+
+    Parameters:
+    ------------
+    data: DataArray
+        Input data.
+    dims: sequence of str
+        Dimensions along which to apply the Hilbert transform.
+    padding: str
+        Padding type. Can be 'exp' or None.
+    decay_factor: float
+        Decay factor of the exponential function.
+
+    Returns:
+    ---------
+    data: DataArray
+        Hilbert transform of the input data.
+
+    """
+    return xr.apply_ufunc(
+        _hilbert_transform_with_padding,
+        data,
+        input_core_dims=[dims],
+        output_core_dims=[dims],
+        kwargs={"padding": padding, "decay_factor": decay_factor},
+        dask="parallelized",
+        dask_gufunc_kwargs={"allow_rechunk": True},
+    )
+
+
+def _hilbert_transform_with_padding(y, padding: str = "exp", decay_factor: float = 0.2):
+    """Hilbert transform with optional padding to mitigate spectral leakage.
+
+    Parameters:
+    ------------
+    y: np.ndarray
+        Input array.
+    padding: str
+        Padding type. Can be 'exp' or None.
+    decay_factor: float
+        Decay factor of the exponential function.
+
+    Returns:
+    ---------
+    y: np.ndarray
+        Hilbert transform of the input array.
+
+    """
+    n_samples = y.shape[0]
+
+    if padding == "exp":
+        y = _pad_exp(y, decay_factor=decay_factor)
+
+    y = hilbert(y, axis=0)
+
+    if padding == "exp":
+        y = y[n_samples : 2 * n_samples]
+
+    # Padding can introduce a shift in the mean of the imaginary part
+    # of the Hilbert transform. Correct for this shift.
+    y = y - y.mean(axis=0)  # type: ignore
+
+    return y
+
+
+def _pad_exp(y, decay_factor: float = 0.2):
+    """Pad the input array with an exponential decay function.
+
+    The start and end of the input array are padded with an exponential decay
+    function falling to a reference line given by a linear fit of the data array.
+
+    Parameters:
+    ------------
+    y: np.ndarray
+        Input array.
+    decay_factor: float
+        Decay factor of the exponential function.
+
+    Returns:
+    ---------
+    y_ext: np.ndarray
+        Padded array.
+
+    """
+    x = np.arange(y.shape[0])
+    x_ext = np.arange(-x.size, 2 * x.size)
+
+    coefs = np.polynomial.polynomial.polyfit(x, y, deg=1)
+    yfit = np.polynomial.polynomial.polyval(x, coefs).T
+    yfit_ext = np.polynomial.polynomial.polyval(x_ext, coefs).T
+
+    y_ano = y - yfit
+
+    amp_pre = np.take(y_ano, 0, axis=0)[:, None]
+    amp_pos = np.take(y_ano, -1, axis=0)[:, None]
+
+    exp_ext = np.exp(-x / x.size / decay_factor)
+    exp_ext_reverse = exp_ext[::-1]
+
+    pad_pre = amp_pre * exp_ext_reverse
+    pad_pos = amp_pos * exp_ext
+
+    y_ext = np.concatenate([pad_pre.T, y_ano, pad_pos.T], axis=0)
+    y_ext += yfit_ext
+    return y_ext
diff --git a/xeofs/utils/rotation.py b/xeofs/utils/rotation.py
index 462975b..33577b8 100644
--- a/xeofs/utils/rotation.py
+++ b/xeofs/utils/rotation.py
@@ -1,96 +1,26 @@
-""" Implementation of VARIMAX and PROMAX rotation.
""" - -# ============================================================================= -# Imports -# ============================================================================= import numpy as np +import xarray as xr +from .data_types import DataArray -# ============================================================================= -# VARIMAX -# ============================================================================= -def varimax(X: np.ndarray, gamma: float = 1, max_iter: int = 1000, rtol: float = 1e-8): - """ - Perform (orthogonal) Varimax rotation. - - This implementation also works for complex numbers. - - Parameters - ---------- - X : np.ndarray - 2D matrix to be rotated containing features as rows and modes as - columns. - gamma : float - Parameter which determines the type of rotation performed: varimax (1), - quartimax (0). Other values are possible. The default is 1. - max_iter : int - Number of iterations performed. The default is 1000. - rtol : float - Relative tolerance at which iteration process terminates. - The default is 1e-8. - - Returns - ------- - Xrot : np.ndarray - Rotated matrix with same dimensions as X. - R : array-like - Rotation matrix of shape ``(n_rot x n_rot)`` - - """ - X = X.copy() - n_samples, n_modes = X.shape - - if n_modes < 2: - err_msg = "Cannot rotate {:} modes (columns), but must be 2 or more." - err_msg = err_msg.format(n_modes) - raise ValueError(err_msg) - - # Initialize rotation matrix - R = np.eye(n_modes) - - # Normalize the matrix using square root of the sum of squares (Kaiser) - h = np.sqrt(np.sum(X * X.conj(), axis=1)) - # A = np.diag(1./h) @ A - - # Add a stabilizer to avoid zero communalities - eps = 1e-9 - X = (1.0 / (h + eps))[:, np.newaxis] * X - - # Seek for rotation matrix based on varimax criteria - delta = 0.0 - converged = False - for i in range(max_iter): - delta_old = delta - basis = X @ R - - basis2 = basis * basis.conj() - basis3 = basis2 * basis - W = np.diag(np.sum(basis2, axis=0)) - alpha = gamma / n_samples - - transformed = X.conj().T @ (basis3 - (alpha * basis @ W)) - U, svals, VT = np.linalg.svd(transformed) - R = U @ VT - delta = np.sum(svals) - if (abs(delta - delta_old) / delta) < rtol: - converged = True - break - - if not converged: - raise RuntimeError("Rotation process did not converge.") - # De-normalize - X = h[:, np.newaxis] * X +def promax(loadings: DataArray, feature_dim, **kwargs): + rotated, rot_mat, phi_mat = xr.apply_ufunc( + _promax, + loadings, + input_core_dims=[[feature_dim, "mode"]], + output_core_dims=[ + [feature_dim, "mode"], + ["mode_m", "mode_n"], + ["mode_m", "mode_n"], + ], + kwargs=kwargs, + dask="allowed", + ) + return rotated, rot_mat, phi_mat - # Rotate - Xrot = X @ R - return Xrot, R - -# ============================================================================= -# PROMAX -# ============================================================================= -def promax(X: np.ndarray, power: int = 1, max_iter: int = 1000, rtol: float = 1e-8): +def _promax(X: np.ndarray, power: int = 1, max_iter: int = 1000, rtol: float = 1e-8): """ Perform (oblique) Promax rotation. 
@@ -127,7 +57,7 @@ def promax(X: np.ndarray, power: int = 1, max_iter: int = 1000, rtol: float = 1e
     X = X.copy()

     # Perform varimax rotation
-    X, rot_mat = varimax(X=X, max_iter=max_iter, rtol=rtol)
+    X, rot_mat = _varimax(X=X, max_iter=max_iter, rtol=rtol)

     # Pre-normalization by communalities (sum of squared rows)
     h = np.sqrt(np.sum(X * X.conj(), axis=1))
@@ -165,3 +95,81 @@ def promax(X: np.ndarray, power: int = 1, max_iter: int = 1000, rtol: float = 1e
     phi = L_inv @ L_inv.conj().T

     return Xrot, rot_mat, phi
+
+
+def _varimax(X: np.ndarray, gamma: float = 1, max_iter: int = 1000, rtol: float = 1e-8):
+    """
+    Perform (orthogonal) Varimax rotation.
+
+    This implementation also works for complex numbers.
+
+    Parameters
+    ----------
+    X : np.ndarray
+        2D matrix to be rotated containing features as rows and modes as
+        columns.
+    gamma : float
+        Parameter which determines the type of rotation performed: varimax (1),
+        quartimax (0). Other values are possible. The default is 1.
+    max_iter : int
+        Maximum number of iterations performed. The default is 1000.
+    rtol : float
+        Relative tolerance at which the iteration process terminates.
+        The default is 1e-8.
+
+    Returns
+    -------
+    Xrot : np.ndarray
+        Rotated matrix with the same dimensions as X.
+    R : array-like
+        Rotation matrix of shape ``(n_rot x n_rot)``
+
+    """
+    X = X.copy()
+    n_samples, n_modes = X.shape
+
+    if n_modes < 2:
+        err_msg = "Cannot rotate {:} modes (columns); at least 2 are required."
+        err_msg = err_msg.format(n_modes)
+        raise ValueError(err_msg)
+
+    # Initialize rotation matrix
+    R = np.eye(n_modes)
+
+    # Normalize the matrix using square root of the sum of squares (Kaiser)
+    h = np.sqrt(np.sum(X * X.conj(), axis=1))
+    # A = np.diag(1./h) @ A
+
+    # Add a stabilizer to avoid zero communalities
+    eps = 1e-9
+    X = (1.0 / (h + eps))[:, np.newaxis] * X
+
+    # Search for a rotation matrix based on the varimax criterion
+    delta = 0.0
+    converged = False
+    for i in range(max_iter):
+        delta_old = delta
+        basis = X @ R
+
+        basis2 = basis * basis.conj()
+        basis3 = basis2 * basis
+        W = np.diag(np.sum(basis2, axis=0))
+        alpha = gamma / n_samples
+
+        transformed = X.conj().T @ (basis3 - (alpha * basis @ W))
+        U, svals, VT = np.linalg.svd(transformed)
+        R = U @ VT
+        delta = np.sum(svals)
+        if (abs(delta - delta_old) / delta) < rtol:
+            converged = True
+            break
+
+    if not converged:
+        raise RuntimeError("Rotation process did not converge.")
+
+    # De-normalize
+    X = h[:, np.newaxis] * X
+
+    # Rotate
+    Xrot = X @ R
+    return Xrot, R
diff --git a/xeofs/utils/xarray_utils.py b/xeofs/utils/xarray_utils.py
index 22808ff..212c9d9 100644
--- a/xeofs/utils/xarray_utils.py
+++ b/xeofs/utils/xarray_utils.py
@@ -2,7 +2,6 @@
 import numpy as np
 import xarray as xr
-from scipy.signal import hilbert  # type: ignore
- - """ - return xr.apply_ufunc( - _hilbert_transform_with_padding, - data, - input_core_dims=[dims], - output_core_dims=[dims], - kwargs={"padding": padding, "decay_factor": decay_factor}, - dask="parallelized", - dask_gufunc_kwargs={"allow_rechunk": True}, - ) - - def _np_sqrt_cos_lat_weights(data): """Compute the square root of the cosine of the latitude. @@ -217,80 +183,3 @@ def _np_sqrt_cos_lat_weights(data): """ return np.sqrt(np.cos(np.deg2rad(data)).clip(0, 1)) - - -def _hilbert_transform_with_padding(y, padding="exp", decay_factor=0.2): - """Hilbert transform with optional padding to mitigate spectral leakage. - - Parameters: - ------------ - y: np.ndarray - Input array. - padding: str - Padding type. Can be 'exp' or None. - decay_factor: float - Decay factor of the exponential function. - - Returns: - --------- - y: np.ndarray - Hilbert transform of the input array. - - """ - n_samples = y.shape[0] - - if padding == "exp": - y = _pad_exp(y, decay_factor=decay_factor) - - y = hilbert(y, axis=0) - - if padding == "exp": - y = y[n_samples : 2 * n_samples] - - # Padding can introduce a shift in the mean of the imaginary part - # of the Hilbert transform. Correct for this shift. - y = y - y.mean(axis=0) - - return y - - -def _pad_exp(y, decay_factor=0.2): - """Pad the input array with an exponential decay function. - - The start and end of the input array are padded with an exponential decay - function falling to a reference line given by a linear fit of the data array. - - Parameters: - ------------ - y: np.ndarray - Input array. - decay_factor: float - Decay factor of the exponential function. - - Returns: - --------- - y_ext: np.ndarray - Padded array. - - """ - x = np.arange(y.shape[0]) - x_ext = np.arange(-x.size, 2 * x.size) - - coefs = np.polynomial.polynomial.polyfit(x, y, deg=1) - yfit = np.polynomial.polynomial.polyval(x, coefs).T - yfit_ext = np.polynomial.polynomial.polyval(x_ext, coefs).T - - y_ano = y - yfit - - amp_pre = np.take(y_ano, 0, axis=0)[:, None] - amp_pos = np.take(y_ano, -1, axis=0)[:, None] - - exp_ext = np.exp(-x / x.size / decay_factor) - exp_ext_reverse = exp_ext[::-1] - - pad_pre = amp_pre * exp_ext_reverse - pad_pos = amp_pos * exp_ext - - y_ext = np.concatenate([pad_pre.T, y_ano, pad_pos.T], axis=0) - y_ext += yfit_ext - return y_ext From 09415a3591dc926838c2436c2a94a94adaa37e22 Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Thu, 12 Oct 2023 09:43:19 +0200 Subject: [PATCH 22/43] refactor: simplify model data structure Instead of individual classes that store different model results use a single DataContainer class that structures results in a dictionary (resolve #88). 
--- .../test_base_cross_model_data_container.py | 76 ------ .../test_base_model_data_container.py | 46 ---- .../test_complex_eof_data_container.py | 131 ---------- ...test_complex_eof_rotator_data_container.py | 51 ---- ...test_complex_mca_rotator_data_container.py | 116 --------- .../data_container/test_eof_data_container.py | 125 ---------- .../test_eof_rotator_data_container.py | 76 ------ .../data_container/test_mca_data_container.py | 136 ----------- .../test_mca_rotator_data_container.py | 129 ---------- tests/models/test_complex_eof_rotator.py | 6 +- tests/models/test_eof_rotator.py | 28 +-- tests/models/test_mca.py | 20 +- tests/models/test_mca_rotator.py | 46 ++-- tests/models/test_opa.py | 2 +- tests/models/test_orthogonality.py | 56 ++--- tests/validation/test_eof_bootstrapper.py | 30 +-- xeofs/data_container/__init__.py | 13 +- .../_base_cross_model_data_container.py | 121 ---------- .../_base_model_data_container.py | 79 ------- xeofs/data_container/data_container.py | 39 +++ .../eof_bootstrapper_data_container.py | 25 -- xeofs/data_container/eof_data_container.py | 133 ----------- .../eof_rotator_data_container.py | 117 --------- xeofs/data_container/mca_data_container.py | 223 ------------------ .../mca_rotator_data_container.py | 145 ------------ xeofs/data_container/opa_data_container.py | 76 ------ xeofs/models/_base_cross_model.py | 32 +-- xeofs/models/_base_model.py | 18 +- xeofs/models/eof.py | 113 +++++---- xeofs/models/eof_rotator.py | 66 +++--- xeofs/models/mca.py | 179 ++++++++------ xeofs/models/mca_rotator.py | 77 +++--- xeofs/models/opa.py | 43 ++-- xeofs/models/rotator_factory.py | 4 - xeofs/validation/bootstrapper.py | 39 ++- 35 files changed, 409 insertions(+), 2207 deletions(-) delete mode 100644 tests/data_container/test_base_cross_model_data_container.py delete mode 100644 tests/data_container/test_base_model_data_container.py delete mode 100644 tests/data_container/test_complex_eof_data_container.py delete mode 100644 tests/data_container/test_complex_eof_rotator_data_container.py delete mode 100644 tests/data_container/test_complex_mca_rotator_data_container.py delete mode 100644 tests/data_container/test_eof_data_container.py delete mode 100644 tests/data_container/test_eof_rotator_data_container.py delete mode 100644 tests/data_container/test_mca_data_container.py delete mode 100644 tests/data_container/test_mca_rotator_data_container.py delete mode 100644 xeofs/data_container/_base_cross_model_data_container.py delete mode 100644 xeofs/data_container/_base_model_data_container.py create mode 100644 xeofs/data_container/data_container.py delete mode 100644 xeofs/data_container/eof_bootstrapper_data_container.py delete mode 100644 xeofs/data_container/eof_data_container.py delete mode 100644 xeofs/data_container/eof_rotator_data_container.py delete mode 100644 xeofs/data_container/mca_data_container.py delete mode 100644 xeofs/data_container/mca_rotator_data_container.py delete mode 100644 xeofs/data_container/opa_data_container.py diff --git a/tests/data_container/test_base_cross_model_data_container.py b/tests/data_container/test_base_cross_model_data_container.py deleted file mode 100644 index 2b9068d..0000000 --- a/tests/data_container/test_base_cross_model_data_container.py +++ /dev/null @@ -1,76 +0,0 @@ -import pytest -import xarray as xr -import numpy as np - -from xeofs.data_container._base_cross_model_data_container import ( - _BaseCrossModelDataContainer, -) - - -def test_init(): - """Test the initialization of the 
BaseCrossModelDataContainer.""" - data_container = _BaseCrossModelDataContainer() - assert data_container._input_data1 is None - assert data_container._input_data2 is None - assert data_container._components1 is None - assert data_container._components2 is None - assert data_container._scores1 is None - assert data_container._scores2 is None - - -def test_set_data(sample_input_data, sample_components, sample_scores): - """Test the set_data() method.""" - data_container = _BaseCrossModelDataContainer() - data_container.set_data( - sample_input_data, - sample_input_data, - sample_components, - sample_components, - sample_scores, - sample_scores, - ) - assert data_container._input_data1 is sample_input_data - assert data_container._input_data2 is sample_input_data - assert data_container._components1 is sample_components - assert data_container._components2 is sample_components - assert data_container._scores1 is sample_scores - assert data_container._scores2 is sample_scores - - -def test_no_data(): - """Test the data accessors without data.""" - data_container = _BaseCrossModelDataContainer() - with pytest.raises(ValueError): - data_container.input_data1 - with pytest.raises(ValueError): - data_container.input_data2 - with pytest.raises(ValueError): - data_container.components1 - with pytest.raises(ValueError): - data_container.components2 - with pytest.raises(ValueError): - data_container.scores1 - with pytest.raises(ValueError): - data_container.scores2 - with pytest.raises(ValueError): - data_container.set_attrs({"test": 1}) - with pytest.raises(ValueError): - data_container.compute() - - -def test_set_attrs(sample_input_data, sample_components, sample_scores): - """Test the set_attrs() method.""" - data_container = _BaseCrossModelDataContainer() - data_container.set_data( - sample_input_data, - sample_input_data, - sample_components, - sample_components, - sample_scores, - sample_scores, - ) - data_container.set_attrs({"test": 1}) - assert data_container.components1.attrs["test"] == 1 - assert data_container.components2.attrs["test"] == 1 - assert data_container.scores1.attrs["test"] == 1 - assert data_container.scores2.attrs["test"] == 1 diff --git a/tests/data_container/test_base_model_data_container.py b/tests/data_container/test_base_model_data_container.py deleted file mode 100644 index 50ee347..0000000 --- a/tests/data_container/test_base_model_data_container.py +++ /dev/null @@ -1,46 +0,0 @@ -import pytest -import xarray as xr -import numpy as np - -from xeofs.data_container._base_model_data_container import _BaseModelDataContainer - - -def test_init(): - """Test the initialization of the BaseModelDataContainer.""" - data_container = _BaseModelDataContainer() - assert data_container._input_data is None - assert data_container._components is None - assert data_container._scores is None - - -def test_set_data(sample_input_data, sample_components, sample_scores): - """Test the set_data() method.""" - data_container = _BaseModelDataContainer() - data_container.set_data(sample_input_data, sample_components, sample_scores) - assert data_container._input_data is sample_input_data - assert data_container._components is sample_components - assert data_container._scores is sample_scores - - -def test_no_data(): - """Test the data accessors without data.""" - data_container = _BaseModelDataContainer() - with pytest.raises(ValueError): - data_container.input_data - with pytest.raises(ValueError): - data_container.components - with pytest.raises(ValueError): - data_container.scores - with 
pytest.raises(ValueError): - data_container.set_attrs({"test": 1}) - with pytest.raises(ValueError): - data_container.compute() - - -def test_set_attrs(sample_input_data, sample_components, sample_scores): - """Test the set_attrs() method.""" - data_container = _BaseModelDataContainer() - data_container.set_data(sample_input_data, sample_components, sample_scores) - data_container.set_attrs({"test": 1}) - assert data_container.components.attrs["test"] == 1 - assert data_container.scores.attrs["test"] == 1 diff --git a/tests/data_container/test_complex_eof_data_container.py b/tests/data_container/test_complex_eof_data_container.py deleted file mode 100644 index 612436d..0000000 --- a/tests/data_container/test_complex_eof_data_container.py +++ /dev/null @@ -1,131 +0,0 @@ -import pytest -import numpy as np -import xarray as xr -from dask.array import Array as DaskArray # type: ignore - - -from xeofs.data_container.eof_data_container import ComplexEOFDataContainer - - -def test_init(): - """Test the initialization of the ComplexEOFDataContainer.""" - container = ComplexEOFDataContainer() - assert container._input_data is None - assert container._components is None - assert container._scores is None - assert container._explained_variance is None - assert container._total_variance is None - assert container._idx_modes_sorted is None - - -def test_set_data( - sample_input_data, - sample_components, - sample_scores, - sample_exp_var, - sample_total_variance, - sample_idx_modes_sorted, -): - """Test the set_data() method.""" - - container = ComplexEOFDataContainer() - container.set_data( - sample_input_data, - sample_components, - sample_scores, - sample_exp_var, - sample_total_variance, - sample_idx_modes_sorted, - ) - total_variance = sample_exp_var.sum() - idx_modes_sorted = sample_exp_var.argsort()[::-1] - container.set_data( - input_data=sample_input_data, - components=sample_components, - scores=sample_scores, - explained_variance=sample_exp_var, - total_variance=total_variance, - idx_modes_sorted=idx_modes_sorted, - ) - assert container._input_data is sample_input_data - assert container._components is sample_components - assert container._scores is sample_scores - assert container._explained_variance is sample_exp_var - assert container._total_variance is total_variance - assert container._idx_modes_sorted is idx_modes_sorted - - -def test_no_data(): - """Test the data accessors without data.""" - container = ComplexEOFDataContainer() - with pytest.raises(ValueError): - container.input_data - with pytest.raises(ValueError): - container.components - with pytest.raises(ValueError): - container.scores - with pytest.raises(ValueError): - container.explained_variance - with pytest.raises(ValueError): - container.total_variance - with pytest.raises(ValueError): - container.idx_modes_sorted - with pytest.raises(ValueError): - container.set_attrs({"test": 1}) - with pytest.raises(ValueError): - container.compute() - - -def test_set_attrs(sample_input_data, sample_components, sample_scores, sample_exp_var): - """Test the set_attrs() method.""" - total_variance = sample_exp_var.chunk({"mode": 2}).sum() - idx_modes_sorted = sample_exp_var.argsort()[::-1] - container = ComplexEOFDataContainer() - container.set_data( - sample_input_data, - sample_components, - sample_scores, - sample_exp_var, - total_variance, - idx_modes_sorted, - ) - container.set_attrs({"test": 1}) - assert container.components.attrs["test"] == 1 - assert container.scores.attrs["test"] == 1 - assert 
container.explained_variance.attrs["test"] == 1 - assert container.explained_variance_ratio.attrs["test"] == 1 - assert container.singular_values.attrs["test"] == 1 - assert container.total_variance.attrs["test"] == 1 - assert container.idx_modes_sorted.attrs["test"] == 1 - - -def test_compute(sample_input_data, sample_components, sample_scores, sample_exp_var): - """Check that dask arrays are computed correctly.""" - total_variance = sample_exp_var.chunk({"mode": 2}).sum() - idx_modes_sorted = sample_exp_var.argsort()[::-1] - container = ComplexEOFDataContainer() - container.set_data( - sample_input_data.chunk({"sample": 2}), - sample_components.chunk({"feature": 2}), - sample_scores.chunk({"sample": 2}), - sample_exp_var.chunk({"mode": 2}), - total_variance, - idx_modes_sorted, - ) - # The components and scores are dask arrays - assert isinstance(container.input_data.data, DaskArray) - assert isinstance(container.components.data, DaskArray) - assert isinstance(container.scores.data, DaskArray) - assert isinstance(container.explained_variance.data, DaskArray) - assert isinstance(container.total_variance.data, DaskArray) - - container.compute() - - # The components and scores are computed correctly - assert isinstance( - container.input_data.data, DaskArray - ), "input_data should still be a dask array" - assert isinstance(container.components.data, np.ndarray) - assert isinstance(container.scores.data, np.ndarray) - assert isinstance(container.explained_variance.data, np.ndarray) - assert isinstance(container.total_variance.data, np.ndarray) diff --git a/tests/data_container/test_complex_eof_rotator_data_container.py b/tests/data_container/test_complex_eof_rotator_data_container.py deleted file mode 100644 index a32b91e..0000000 --- a/tests/data_container/test_complex_eof_rotator_data_container.py +++ /dev/null @@ -1,51 +0,0 @@ -import pytest -import numpy as np -import xarray as xr -from dask.array import Array as DaskArray # type: ignore - -from xeofs.data_container.eof_rotator_data_container import ( - ComplexEOFRotatorDataContainer, -) - - -def test_init(): - """Test the initialization of the ComplexEOFRotatorDataContainer.""" - container = ComplexEOFRotatorDataContainer() - assert container._rotation_matrix is None - assert container._phi_matrix is None - assert container._modes_sign is None - - -def test_set_data( - sample_input_data, - sample_components, - sample_scores, - sample_exp_var, - sample_rotation_matrix, - sample_phi_matrix, - sample_modes_sign, -): - """Test the set_data() method of ComplexEOFRotatorDataContainer.""" - total_variance = sample_exp_var.sum() - idx_modes_sorted = sample_exp_var.argsort()[::-1] - container = ComplexEOFRotatorDataContainer() - container.set_data( - sample_input_data, - sample_components, - sample_scores, - sample_exp_var, - total_variance, - idx_modes_sorted, - sample_rotation_matrix, - sample_phi_matrix, - sample_modes_sign, - ) - assert container._input_data is sample_input_data - assert container._components is sample_components - assert container._scores is sample_scores - assert container._explained_variance is sample_exp_var - assert container._total_variance is total_variance - assert container._idx_modes_sorted is idx_modes_sorted - assert container._modes_sign is sample_modes_sign - assert container._rotation_matrix is sample_rotation_matrix - assert container._phi_matrix is sample_phi_matrix diff --git a/tests/data_container/test_complex_mca_rotator_data_container.py b/tests/data_container/test_complex_mca_rotator_data_container.py 
deleted file mode 100644 index 93f25a0..0000000 --- a/tests/data_container/test_complex_mca_rotator_data_container.py +++ /dev/null @@ -1,116 +0,0 @@ -import pytest -import xarray as xr -import numpy as np - -from xeofs.data_container.mca_rotator_data_container import ( - ComplexMCARotatorDataContainer, -) -from .test_mca_rotator_data_container import test_init as test_mca_rotator_init -from .test_mca_rotator_data_container import test_set_data as test_mca_rotator_set_data -from .test_mca_rotator_data_container import test_no_data as test_mca_rotator_no_data -from .test_mca_rotator_data_container import ( - test_set_attrs as test_mca_rotator_set_attrs, -) - - -def test_init(): - """Test the initialization of the ComplexMCARotatorDataContainer.""" - data_container = ComplexMCARotatorDataContainer() - test_mca_rotator_init() # Re-use the test from MCARotatorDataContainer. - - -def test_set_data( - sample_input_data, - sample_components, - sample_scores, - sample_squared_covariance, - sample_total_squared_covariance, - sample_idx_modes_sorted, - sample_norm, - sample_rotation_matrix, - sample_phi_matrix, - sample_modes_sign, -): - """Test the set_data() method of ComplexMCARotatorDataContainer.""" - data_container = ComplexMCARotatorDataContainer() - data_container.set_data( - sample_input_data, - sample_input_data, - sample_components, - sample_components, - sample_scores, - sample_scores, - sample_squared_covariance, - sample_total_squared_covariance, - sample_idx_modes_sorted, - sample_modes_sign, - sample_norm, - sample_norm, - sample_rotation_matrix, - sample_phi_matrix, - ) - - test_mca_rotator_set_data( - sample_input_data, - sample_components, - sample_scores, - sample_squared_covariance, - sample_total_squared_covariance, - sample_idx_modes_sorted, - sample_norm, - sample_rotation_matrix, - sample_phi_matrix, - sample_modes_sign, - ) # Re-use the test from MCARotatorDataContainer. - - -def test_no_data(): - """Test the data accessors without data in ComplexMCARotatorDataContainer.""" - data_container = ComplexMCARotatorDataContainer() - test_mca_rotator_no_data() # Re-use the test from MCARotatorDataContainer. - - -def test_set_attrs( - sample_input_data, - sample_components, - sample_scores, - sample_squared_covariance, - sample_total_squared_covariance, - sample_idx_modes_sorted, - sample_norm, - sample_rotation_matrix, - sample_phi_matrix, - sample_modes_sign, -): - """Test the set_attrs() method of ComplexMCARotatorDataContainer.""" - data_container = ComplexMCARotatorDataContainer() - data_container.set_data( - sample_input_data, - sample_input_data, - sample_components, - sample_components, - sample_scores, - sample_scores, - sample_squared_covariance, - sample_total_squared_covariance, - sample_idx_modes_sorted, - sample_modes_sign, - sample_norm, - sample_norm, - sample_rotation_matrix, - sample_phi_matrix, - ) - data_container.set_attrs({"test": 1}) - - test_mca_rotator_set_attrs( - sample_input_data, - sample_components, - sample_scores, - sample_squared_covariance, - sample_total_squared_covariance, - sample_idx_modes_sorted, - sample_norm, - sample_rotation_matrix, - sample_phi_matrix, - sample_modes_sign, - ) # Re-use the test from MCARotatorDataContainer. 
diff --git a/tests/data_container/test_eof_data_container.py b/tests/data_container/test_eof_data_container.py deleted file mode 100644 index ca8fd46..0000000 --- a/tests/data_container/test_eof_data_container.py +++ /dev/null @@ -1,125 +0,0 @@ -import pytest -import numpy as np -import xarray as xr -from dask.array import Array as DaskArray # type: ignore - - -from xeofs.data_container.eof_data_container import EOFDataContainer - - -def test_init(): - """Test the initialization of the EOFDataContainer.""" - container = EOFDataContainer() - assert container._input_data is None - assert container._components is None - assert container._scores is None - assert container._explained_variance is None - assert container._total_variance is None - assert container._idx_modes_sorted is None - - -def test_set_data(sample_input_data, sample_components, sample_scores, sample_exp_var): - """Test the set_data() method.""" - total_variance = sample_exp_var.sum() - idx_modes_sorted = sample_exp_var.argsort()[::-1] - container = EOFDataContainer() - container.set_data( - sample_input_data, - sample_components, - sample_scores, - sample_exp_var, - total_variance, - idx_modes_sorted, - ) - total_variance = sample_exp_var.sum() - idx_modes_sorted = sample_exp_var.argsort()[::-1] - container.set_data( - input_data=sample_input_data, - components=sample_components, - scores=sample_scores, - explained_variance=sample_exp_var, - total_variance=total_variance, - idx_modes_sorted=idx_modes_sorted, - ) - assert container._input_data is sample_input_data - assert container._components is sample_components - assert container._scores is sample_scores - assert container._explained_variance is sample_exp_var - assert container._total_variance is total_variance - assert container._idx_modes_sorted is idx_modes_sorted - - -def test_no_data(): - """Test the data accessors without data.""" - container = EOFDataContainer() - with pytest.raises(ValueError): - container.input_data - with pytest.raises(ValueError): - container.components - with pytest.raises(ValueError): - container.scores - with pytest.raises(ValueError): - container.explained_variance - with pytest.raises(ValueError): - container.total_variance - with pytest.raises(ValueError): - container.idx_modes_sorted - with pytest.raises(ValueError): - container.set_attrs({"test": 1}) - with pytest.raises(ValueError): - container.compute() - - -def test_set_attrs(sample_input_data, sample_components, sample_scores, sample_exp_var): - """Test the set_attrs() method.""" - total_variance = sample_exp_var.chunk({"mode": 2}).sum() - idx_modes_sorted = sample_exp_var.argsort()[::-1] - container = EOFDataContainer() - container.set_data( - sample_input_data, - sample_components, - sample_scores, - sample_exp_var, - total_variance, - idx_modes_sorted, - ) - container.set_attrs({"test": 1}) - assert container.components.attrs["test"] == 1 - assert container.scores.attrs["test"] == 1 - assert container.explained_variance.attrs["test"] == 1 - assert container.explained_variance_ratio.attrs["test"] == 1 - assert container.singular_values.attrs["test"] == 1 - assert container.total_variance.attrs["test"] == 1 - assert container.idx_modes_sorted.attrs["test"] == 1 - - -def test_compute(sample_input_data, sample_components, sample_scores, sample_exp_var): - """Check that dask arrays are computed correctly.""" - total_variance = sample_exp_var.chunk({"mode": 2}).sum() - idx_modes_sorted = sample_exp_var.argsort()[::-1] - container = EOFDataContainer() - container.set_data( - 
sample_input_data.chunk({"sample": 2}), - sample_components.chunk({"feature": 2}), - sample_scores.chunk({"sample": 2}), - sample_exp_var.chunk({"mode": 2}), - total_variance, - idx_modes_sorted, - ) - # The components and scores are dask arrays - assert isinstance(container.input_data.data, DaskArray) - assert isinstance(container.components.data, DaskArray) - assert isinstance(container.scores.data, DaskArray) - assert isinstance(container.explained_variance.data, DaskArray) - assert isinstance(container.total_variance.data, DaskArray) - - container.compute() - - # The components and scores are computed correctly - assert isinstance( - container.input_data.data, DaskArray - ), "input_data should still be a dask array" - assert isinstance(container.components.data, np.ndarray) - assert isinstance(container.scores.data, np.ndarray) - assert isinstance(container.explained_variance.data, np.ndarray) - assert isinstance(container.total_variance.data, np.ndarray) diff --git a/tests/data_container/test_eof_rotator_data_container.py b/tests/data_container/test_eof_rotator_data_container.py deleted file mode 100644 index 5e55b3a..0000000 --- a/tests/data_container/test_eof_rotator_data_container.py +++ /dev/null @@ -1,76 +0,0 @@ -import pytest -import numpy as np -import xarray as xr -from dask.array import Array as DaskArray # type: ignore - -from xeofs.data_container.eof_rotator_data_container import EOFRotatorDataContainer - - -def test_init(): - """Test the initialization of the EOFRotatorDataContainer.""" - container = EOFRotatorDataContainer() - assert container._rotation_matrix is None - assert container._phi_matrix is None - assert container._modes_sign is None - - -def test_set_data( - sample_input_data, - sample_components, - sample_scores, - sample_exp_var, - sample_rotation_matrix, - sample_phi_matrix, - sample_modes_sign, -): - """Test the set_data() method of EOFRotatorDataContainer.""" - total_variance = sample_exp_var.sum() - idx_modes_sorted = sample_exp_var.argsort()[::-1] - container = EOFRotatorDataContainer() - container.set_data( - sample_input_data, - sample_components, - sample_scores, - sample_exp_var, - total_variance, - idx_modes_sorted, - sample_modes_sign, - sample_rotation_matrix, - sample_phi_matrix, - ) - assert container._input_data is sample_input_data - assert container._components is sample_components - assert container._scores is sample_scores - assert container._explained_variance is sample_exp_var - assert container._total_variance is total_variance - assert container._idx_modes_sorted is idx_modes_sorted - assert container._modes_sign is sample_modes_sign - assert container._rotation_matrix is sample_rotation_matrix - assert container._phi_matrix is sample_phi_matrix - - -def test_no_data(): - """Test the data accessors without data for EOFRotatorDataContainer.""" - container = EOFRotatorDataContainer() - with pytest.raises(ValueError): - container.input_data - with pytest.raises(ValueError): - container.components - with pytest.raises(ValueError): - container.scores - with pytest.raises(ValueError): - container.explained_variance - with pytest.raises(ValueError): - container.total_variance - with pytest.raises(ValueError): - container.idx_modes_sorted - with pytest.raises(ValueError): - container.modes_sign - with pytest.raises(ValueError): - container.rotation_matrix - with pytest.raises(ValueError): - container.phi_matrix - with pytest.raises(ValueError): - container.set_attrs({"test": 1}) - with pytest.raises(ValueError): - container.compute() diff 
--git a/tests/data_container/test_mca_data_container.py b/tests/data_container/test_mca_data_container.py deleted file mode 100644 index a0834b0..0000000 --- a/tests/data_container/test_mca_data_container.py +++ /dev/null @@ -1,136 +0,0 @@ -import pytest -import xarray as xr -import numpy as np - -from xeofs.data_container.mca_data_container import MCADataContainer - - -def test_init(): - """Test the initialization of the MCADataContainer.""" - data_container = MCADataContainer() - assert data_container._input_data1 is None - assert data_container._input_data2 is None - assert data_container._components1 is None - assert data_container._components2 is None - assert data_container._scores1 is None - assert data_container._scores2 is None - assert data_container._squared_covariance is None - assert data_container._total_squared_covariance is None - assert data_container._idx_modes_sorted is None - assert data_container._norm1 is None - assert data_container._norm2 is None - - -def test_set_data( - sample_input_data, - sample_components, - sample_scores, - sample_squared_covariance, - sample_total_squared_covariance, - sample_idx_modes_sorted, - sample_norm, -): - """Test the set_data() method of MCADataContainer.""" - data_container = MCADataContainer() - data_container.set_data( - sample_input_data, - sample_input_data, - sample_components, - sample_components, - sample_scores, - sample_scores, - sample_squared_covariance, - sample_total_squared_covariance, - sample_idx_modes_sorted, - sample_norm, - sample_norm, - ) - - assert data_container._input_data1 is sample_input_data - assert data_container._input_data2 is sample_input_data - assert data_container._components1 is sample_components - assert data_container._components2 is sample_components - assert data_container._scores1 is sample_scores - assert data_container._scores2 is sample_scores - assert data_container._squared_covariance is sample_squared_covariance - assert data_container._total_squared_covariance is sample_total_squared_covariance - assert data_container._idx_modes_sorted is sample_idx_modes_sorted - assert data_container._norm1 is sample_norm - assert data_container._norm2 is sample_norm - - -def test_no_data(): - """Test the data accessors without data in MCADataContainer.""" - data_container = MCADataContainer() - with pytest.raises(ValueError): - data_container.input_data1 - with pytest.raises(ValueError): - data_container.input_data2 - with pytest.raises(ValueError): - data_container.components1 - with pytest.raises(ValueError): - data_container.components2 - with pytest.raises(ValueError): - data_container.scores1 - with pytest.raises(ValueError): - data_container.scores2 - with pytest.raises(ValueError): - data_container.squared_covariance - with pytest.raises(ValueError): - data_container.total_squared_covariance - with pytest.raises(ValueError): - data_container.squared_covariance_fraction - with pytest.raises(ValueError): - data_container.singular_values - with pytest.raises(ValueError): - data_container.covariance_fraction - with pytest.raises(ValueError): - data_container.idx_modes_sorted - with pytest.raises(ValueError): - data_container.norm1 - with pytest.raises(ValueError): - data_container.norm2 - with pytest.raises(ValueError): - data_container.set_attrs({"test": 1}) - with pytest.raises(ValueError): - data_container.compute() - - -def test_set_attrs( - sample_input_data, - sample_components, - sample_scores, - sample_squared_covariance, - sample_total_squared_covariance, - sample_idx_modes_sorted, - 
sample_norm, -): - """Test the set_attrs() method of MCADataContainer.""" - data_container = MCADataContainer() - data_container.set_data( - sample_input_data, - sample_input_data, - sample_components, - sample_components, - sample_scores, - sample_scores, - sample_squared_covariance, - sample_total_squared_covariance, - sample_idx_modes_sorted, - sample_norm, - sample_norm, - ) - data_container.set_attrs({"test": 1}) - - assert data_container.components1.attrs["test"] == 1 - assert data_container.components2.attrs["test"] == 1 - assert data_container.scores1.attrs["test"] == 1 - assert data_container.scores2.attrs["test"] == 1 - assert data_container.squared_covariance.attrs["test"] == 1 - assert data_container.total_squared_covariance.attrs["test"] == 1 - assert data_container.squared_covariance_fraction.attrs["test"] == 1 - assert data_container.singular_values.attrs["test"] == 1 - assert data_container.total_covariance.attrs["test"] == 1 - assert data_container.covariance_fraction.attrs["test"] == 1 - assert data_container.norm1.attrs["test"] == 1 - assert data_container.norm2.attrs["test"] == 1 diff --git a/tests/data_container/test_mca_rotator_data_container.py b/tests/data_container/test_mca_rotator_data_container.py deleted file mode 100644 index f618e83..0000000 --- a/tests/data_container/test_mca_rotator_data_container.py +++ /dev/null @@ -1,129 +0,0 @@ -import pytest -import xarray as xr -import numpy as np - -from xeofs.data_container.mca_rotator_data_container import MCARotatorDataContainer -from .test_mca_data_container import test_init as test_mca_init -from .test_mca_data_container import test_set_data as test_mca_set_data -from .test_mca_data_container import test_no_data as test_mca_no_data -from .test_mca_data_container import test_set_attrs as test_mca_set_attrs - -""" -The idea here is to reuse tests from MCADataContainer in MCARotatorDataContainer -and then tests from MCARotatorDataContainer in ComplexMCARotatorDataContainer, -while also testing the new functionality of each class. This way, we ensure that -inherited behavior still works as expected in subclasses. If some new tests fail, -we'll know it's due to the new functionality and not something inherited. -""" - - -def test_init(): - """Test the initialization of the MCARotatorDataContainer.""" - data_container = MCARotatorDataContainer() - test_mca_init() # Re-use the test from MCADataContainer. - assert data_container._rotation_matrix is None - assert data_container._phi_matrix is None - assert data_container._modes_sign is None - - -def test_set_data( - sample_input_data, - sample_components, - sample_scores, - sample_squared_covariance, - sample_total_squared_covariance, - sample_idx_modes_sorted, - sample_norm, - sample_rotation_matrix, - sample_phi_matrix, - sample_modes_sign, -): - """Test the set_data() method of MCARotatorDataContainer.""" - data_container = MCARotatorDataContainer() - data_container.set_data( - sample_input_data, - sample_input_data, - sample_components, - sample_components, - sample_scores, - sample_scores, - sample_squared_covariance, - sample_total_squared_covariance, - sample_idx_modes_sorted, - sample_modes_sign, - sample_norm, - sample_norm, - sample_rotation_matrix, - sample_phi_matrix, - ) - - test_mca_set_data( - sample_input_data, - sample_components, - sample_scores, - sample_squared_covariance, - sample_total_squared_covariance, - sample_idx_modes_sorted, - sample_norm, - ) # Re-use the test from MCADataContainer. 
- assert data_container._rotation_matrix is sample_rotation_matrix - assert data_container._phi_matrix is sample_phi_matrix - assert data_container._modes_sign is sample_modes_sign - - -def test_no_data(): - """Test the data accessors without data in MCARotatorDataContainer.""" - data_container = MCARotatorDataContainer() - test_mca_no_data() # Re-use the test from MCADataContainer. - with pytest.raises(ValueError): - data_container.rotation_matrix - with pytest.raises(ValueError): - data_container.phi_matrix - with pytest.raises(ValueError): - data_container.modes_sign - - -def test_set_attrs( - sample_input_data, - sample_components, - sample_scores, - sample_squared_covariance, - sample_total_squared_covariance, - sample_idx_modes_sorted, - sample_norm, - sample_rotation_matrix, - sample_phi_matrix, - sample_modes_sign, -): - """Test the set_attrs() method of MCARotatorDataContainer.""" - data_container = MCARotatorDataContainer() - data_container.set_data( - sample_input_data, - sample_input_data, - sample_components, - sample_components, - sample_scores, - sample_scores, - sample_squared_covariance, - sample_total_squared_covariance, - sample_idx_modes_sorted, - sample_modes_sign, - sample_norm, - sample_norm, - sample_rotation_matrix, - sample_phi_matrix, - ) - data_container.set_attrs({"test": 1}) - - test_mca_set_attrs( - sample_input_data, - sample_components, - sample_scores, - sample_squared_covariance, - sample_total_squared_covariance, - sample_idx_modes_sorted, - sample_norm, - ) # Re-use the test from MCADataContainer. - assert data_container.rotation_matrix.attrs["test"] == 1 - assert data_container.phi_matrix.attrs["test"] == 1 - assert data_container.modes_sign.attrs["test"] == 1 diff --git a/tests/models/test_complex_eof_rotator.py b/tests/models/test_complex_eof_rotator.py index b711788..19ac1c5 100644 --- a/tests/models/test_complex_eof_rotator.py +++ b/tests/models/test_complex_eof_rotator.py @@ -4,9 +4,7 @@ from dask.array import Array as DaskArray # type: ignore from xeofs.models import ComplexEOF, ComplexEOFRotator -from xeofs.data_container.eof_rotator_data_container import ( - ComplexEOFRotatorDataContainer, -) +from xeofs.data_container import DataContainer @pytest.fixture @@ -52,7 +50,7 @@ def test_fit(ceof_model): ceof_rotator, "data" ), 'The attribute "data" should be populated after fitting.' assert type(ceof_rotator.model) == ComplexEOF - assert type(ceof_rotator.data) == ComplexEOFRotatorDataContainer + assert type(ceof_rotator.data) == DataContainer @pytest.mark.parametrize( diff --git a/tests/models/test_eof_rotator.py b/tests/models/test_eof_rotator.py index 38374c2..1633ff5 100644 --- a/tests/models/test_eof_rotator.py +++ b/tests/models/test_eof_rotator.py @@ -4,9 +4,7 @@ from dask.array import Array as DaskArray # type: ignore from xeofs.models import EOF, EOFRotator -from xeofs.data_container.eof_rotator_data_container import ( - EOFRotatorDataContainer, -) +from xeofs.data_container import DataContainer @pytest.fixture @@ -52,7 +50,7 @@ def test_fit(eof_model): eof_rotator, "data" ), 'The attribute "data" should be populated after fitting.' 
assert type(eof_rotator.model) == EOF - assert type(eof_rotator.data) == EOFRotatorDataContainer + assert type(eof_rotator.data) == DataContainer @pytest.mark.parametrize( @@ -183,36 +181,30 @@ def test_compute(eof_model_delayed): # before computation, the attributes should be dask arrays assert isinstance( - eof_rotator.data.explained_variance.data, DaskArray + eof_rotator.data["explained_variance"].data, DaskArray ), "The attribute _explained_variance should be a dask array." assert isinstance( - eof_rotator.data.explained_variance_ratio.data, DaskArray - ), "The attribute _explained_variance_ratio should be a dask array." - assert isinstance( - eof_rotator.data.components.data, DaskArray + eof_rotator.data["components"].data, DaskArray ), "The attribute _components should be a dask array." assert isinstance( - eof_rotator.data.rotation_matrix.data, DaskArray + eof_rotator.data["rotation_matrix"].data, DaskArray ), "The attribute _rotation_matrix should be a dask array." assert isinstance( - eof_rotator.data.scores.data, DaskArray + eof_rotator.data["scores"].data, DaskArray ), "The attribute _scores should be a dask array." eof_rotator.compute() # after computation, the attributes should be numpy ndarrays assert isinstance( - eof_rotator.data.explained_variance.data, np.ndarray + eof_rotator.data["explained_variance"].data, np.ndarray ), "The attribute _explained_variance should be a numpy ndarray." assert isinstance( - eof_rotator.data.explained_variance_ratio.data, np.ndarray - ), "The attribute _explained_variance_ratio should be a numpy ndarray." - assert isinstance( - eof_rotator.data.components.data, np.ndarray + eof_rotator.data["components"].data, np.ndarray ), "The attribute _components should be a numpy ndarray." assert isinstance( - eof_rotator.data.rotation_matrix.data, np.ndarray + eof_rotator.data["rotation_matrix"].data, np.ndarray ), "The attribute _rotation_matrix should be a numpy ndarray." assert isinstance( - eof_rotator.data.scores.data, np.ndarray + eof_rotator.data["scores"].data, np.ndarray ), "The attribute _scores should be a numpy ndarray." 
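All of the test updates above encode the same migration: model results move from per-field properties on bespoke container classes to a single dict-like DataContainer keyed by result name. Below is a minimal usage sketch of the new access pattern, assuming a toy xarray input; the input array, its dimension names, and coordinate values are illustrative and not taken from this patch.

import numpy as np
import xarray as xr

from xeofs.models import EOF

# Illustrative toy input (not from the patch): 20 time steps on a 4x5 grid
data = xr.DataArray(
    np.random.rand(20, 4, 5),
    dims=("time", "lat", "lon"),
    coords={"time": np.arange(20), "lat": np.arange(4), "lon": np.arange(5)},
)

model = EOF(n_modes=2)
model.fit(data, dim="time")

# Before this patch: attribute access on a dedicated container class,
# e.g. model.data.components
# After this patch: plain dict access keyed by result name
components = model.data["components"]
scores = model.data["scores"]
expvar = model.data["explained_variance"]

# compute() loads all delayed results into memory; entries registered
# with allow_compute=False (such as "input_data") are left lazy.
model.compute()

Accessing a key before fit() raises a KeyError that points the user to .fit(), which takes over the role of the per-property _sanity_check of the removed container classes.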
diff --git a/tests/models/test_mca.py b/tests/models/test_mca.py index 4bf3ca8..c04b7bd 100644 --- a/tests/models/test_mca.py +++ b/tests/models/test_mca.py @@ -367,16 +367,16 @@ def test_heterogeneous_patterns(mca_model, mock_data_array, dim): def test_compute(mca_model, mock_dask_data_array, dim): mca_model.fit(mock_dask_data_array, mock_dask_data_array, (dim)) - assert isinstance(mca_model.data.squared_covariance.data, DaskArray) - assert isinstance(mca_model.data.components1.data, DaskArray) - assert isinstance(mca_model.data.components2.data, DaskArray) - assert isinstance(mca_model.data.scores1.data, DaskArray) - assert isinstance(mca_model.data.scores2.data, DaskArray) + assert isinstance(mca_model.data["squared_covariance"].data, DaskArray) + assert isinstance(mca_model.data["components1"].data, DaskArray) + assert isinstance(mca_model.data["components2"].data, DaskArray) + assert isinstance(mca_model.data["scores1"].data, DaskArray) + assert isinstance(mca_model.data["scores2"].data, DaskArray) mca_model.compute() - assert isinstance(mca_model.data.squared_covariance.data, np.ndarray) - assert isinstance(mca_model.data.components1.data, np.ndarray) - assert isinstance(mca_model.data.components2.data, np.ndarray) - assert isinstance(mca_model.data.scores1.data, np.ndarray) - assert isinstance(mca_model.data.scores2.data, np.ndarray) + assert isinstance(mca_model.data["squared_covariance"].data, np.ndarray) + assert isinstance(mca_model.data["components1"].data, np.ndarray) + assert isinstance(mca_model.data["components2"].data, np.ndarray) + assert isinstance(mca_model.data["scores1"].data, np.ndarray) + assert isinstance(mca_model.data["scores2"].data, np.ndarray) diff --git a/tests/models/test_mca_rotator.py b/tests/models/test_mca_rotator.py index 1e1c4df..f056c56 100644 --- a/tests/models/test_mca_rotator.py +++ b/tests/models/test_mca_rotator.py @@ -227,64 +227,68 @@ def test_compute(mca_model_delayed): mca_rotator.fit(mca_model_delayed) assert isinstance( - mca_rotator.data.squared_covariance.data, DaskArray + mca_rotator.data["squared_covariance"].data, DaskArray ), "squared_covariance is not a delayed object" assert isinstance( - mca_rotator.data.components1.data, DaskArray + mca_rotator.data["components1"].data, DaskArray ), "components1 is not a delayed object" assert isinstance( - mca_rotator.data.components2.data, DaskArray + mca_rotator.data["components2"].data, DaskArray ), "components2 is not a delayed object" assert isinstance( - mca_rotator.data.scores1.data, DaskArray + mca_rotator.data["scores1"].data, DaskArray ), "scores1 is not a delayed object" assert isinstance( - mca_rotator.data.scores2.data, DaskArray + mca_rotator.data["scores2"].data, DaskArray ), "scores2 is not a delayed object" assert isinstance( - mca_rotator.data.rotation_matrix.data, DaskArray + mca_rotator.data["rotation_matrix"].data, DaskArray ), "rotation_matrix is not a delayed object" assert isinstance( - mca_rotator.data.phi_matrix.data, DaskArray + mca_rotator.data["phi_matrix"].data, DaskArray ), "phi_matrix is not a delayed object" assert isinstance( - mca_rotator.data.norm1.data, DaskArray + mca_rotator.data["norm1"].data, DaskArray ), "norm1 is not a delayed object" assert isinstance( - mca_rotator.data.norm2.data, DaskArray + mca_rotator.data["norm2"].data, DaskArray ), "norm2 is not a delayed object" assert isinstance( - mca_rotator.data.modes_sign.data, DaskArray + mca_rotator.data["modes_sign"].data, DaskArray ), "modes_sign is not a delayed object" mca_rotator.compute() assert 
isinstance( - mca_rotator.data.squared_covariance.data, np.ndarray + mca_rotator.data["squared_covariance"].data, np.ndarray ), "squared_covariance is not computed" assert isinstance( - mca_rotator.data.total_squared_covariance.data, np.ndarray + mca_rotator.data["total_squared_covariance"].data, np.ndarray ), "total_squared_covariance is not computed" assert isinstance( - mca_rotator.data.components1.data, np.ndarray + mca_rotator.data["components1"].data, np.ndarray ), "components1 is not computed" assert isinstance( - mca_rotator.data.components2.data, np.ndarray + mca_rotator.data["components2"].data, np.ndarray ), "components2 is not computed" assert isinstance( - mca_rotator.data.scores1.data, np.ndarray + mca_rotator.data["scores1"].data, np.ndarray ), "scores1 is not computed" assert isinstance( - mca_rotator.data.scores2.data, np.ndarray + mca_rotator.data["scores2"].data, np.ndarray ), "scores2 is not computed" assert isinstance( - mca_rotator.data.rotation_matrix.data, np.ndarray + mca_rotator.data["rotation_matrix"].data, np.ndarray ), "rotation_matrix is not computed" assert isinstance( - mca_rotator.data.phi_matrix.data, np.ndarray + mca_rotator.data["phi_matrix"].data, np.ndarray ), "phi_matrix is not computed" - assert isinstance(mca_rotator.data.norm1.data, np.ndarray), "norm1 is not computed" - assert isinstance(mca_rotator.data.norm2.data, np.ndarray), "norm2 is not computed" assert isinstance( - mca_rotator.data.modes_sign.data, np.ndarray + mca_rotator.data["norm1"].data, np.ndarray + ), "norm1 is not computed" + assert isinstance( + mca_rotator.data["norm2"].data, np.ndarray + ), "norm2 is not computed" + assert isinstance( + mca_rotator.data["modes_sign"].data, np.ndarray ), "modes_sign is not computed" diff --git a/tests/models/test_opa.py b/tests/models/test_opa.py index 8f3d38c..88dea7c 100644 --- a/tests/models/test_opa.py +++ b/tests/models/test_opa.py @@ -385,7 +385,7 @@ def test_scores_uncorrelated(dim, use_coslat, mock_data_array): use_coslat=use_coslat, ) model.fit(mock_data_array, dim=dim) - scores = model.data.scores.values + scores = model.data["scores"].values check = scores.T @ scores / (scores.shape[0] - 1) assert np.allclose( check, np.eye(check.shape[1]), atol=1e-5 diff --git a/tests/models/test_orthogonality.py b/tests/models/test_orthogonality.py index 98425e2..d1e348c 100644 --- a/tests/models/test_orthogonality.py +++ b/tests/models/test_orthogonality.py @@ -23,7 +23,7 @@ def test_eof_components(dim, use_coslat, mock_data_array): """Components are orthogonal""" model = EOF(n_modes=5, standardize=True, use_coslat=use_coslat) model.fit(mock_data_array, dim=dim) - V = model.data.components.values + V = model.data["components"].values assert np.allclose( V.T @ V, np.eye(V.shape[1]), atol=1e-5 ), "Components are not orthogonal" @@ -41,7 +41,7 @@ def test_eof_scores(dim, use_coslat, mock_data_array): """Scores are orthogonal""" model = EOF(n_modes=5, standardize=True, use_coslat=use_coslat) model.fit(mock_data_array, dim=dim) - U = model.data.scores.values + U = model.data["scores"].values assert np.allclose( U.T @ U, np.eye(U.shape[1]), atol=1e-5 ), "Scores are not orthogonal" @@ -60,7 +60,7 @@ def test_ceof_components(dim, use_coslat, mock_data_array): """Components are unitary""" model = ComplexEOF(n_modes=5, standardize=True, use_coslat=use_coslat) model.fit(mock_data_array, dim=dim) - V = model.data.components.values + V = model.data["components"].values assert np.allclose( V.conj().T @ V, np.eye(V.shape[1]), atol=1e-5 ), "Components are not 
unitary" @@ -78,7 +78,7 @@ def test_ceof_scores(dim, use_coslat, mock_data_array): """Scores are unitary""" model = ComplexEOF(n_modes=5, standardize=True, use_coslat=use_coslat) model.fit(mock_data_array, dim=dim) - U = model.data.scores.values + U = model.data["scores"].values assert np.allclose( U.conj().T @ U, np.eye(U.shape[1]), atol=1e-5 ), "Scores are not unitary" @@ -102,7 +102,7 @@ def test_reof_components(dim, use_coslat, power, mock_data_array): model.fit(mock_data_array, dim=dim) rot = EOFRotator(n_modes=5, power=power) rot.fit(model) - V = rot.data.components.values + V = rot.data["components"].values K = V.conj().T @ V assert np.allclose( np.diag(K), np.ones(V.shape[1]), atol=1e-5 @@ -128,7 +128,7 @@ def test_reof_scores(dim, use_coslat, power, mock_data_array): model.fit(mock_data_array, dim=dim) rot = EOFRotator(n_modes=5, power=power) rot.fit(model) - U = rot.data.scores.values + U = rot.data["scores"].values K = U.conj().T @ U if power == 1: # Varimax rotation does guarantee orthogonality @@ -157,7 +157,7 @@ def test_creof_components(dim, use_coslat, power, mock_data_array): model.fit(mock_data_array, dim=dim) rot = ComplexEOFRotator(n_modes=5, power=power) rot.fit(model) - V = rot.data.components.values + V = rot.data["components"].values K = V.conj().T @ V assert np.allclose( np.diag(K), np.ones(V.shape[1]), atol=1e-5 @@ -183,7 +183,7 @@ def test_creof_scores(dim, use_coslat, power, mock_data_array): model.fit(mock_data_array, dim=dim) rot = ComplexEOFRotator(n_modes=5, power=power) rot.fit(model) - U = rot.data.scores.values + U = rot.data["scores"].values K = U.conj().T @ U if power == 1: # Varimax rotation does guarantee unitarity @@ -209,8 +209,8 @@ def test_mca_components(dim, use_coslat, mock_data_array): data2 = data1.copy() ** 2 model = MCA(n_modes=5, standardize=True, use_coslat=use_coslat) model.fit(data1, data2, dim=dim) - V1 = model.data.components1.values - V2 = model.data.components2.values + V1 = model.data["components1"].values + V2 = model.data["components2"].values K1 = V1.T @ V1 K2 = V2.T @ V2 assert np.allclose( @@ -235,10 +235,10 @@ def test_mca_scores(dim, use_coslat, mock_data_array): data2 = data1.copy() ** 2 model = MCA(n_modes=5, standardize=True, use_coslat=use_coslat) model.fit(data1, data2, dim=dim) - U1 = model.data.scores1.values - U2 = model.data.scores2.values + U1 = model.data["scores1"].values + U2 = model.data["scores2"].values K = U1.T @ U2 - target = np.eye(K.shape[0]) * (model.data.input_data1.sample.size - 1) + target = np.eye(K.shape[0]) * (model.data["input_data1"].sample.size - 1) assert np.allclose(K, target, atol=1e-5), "Scores are not orthogonal" @@ -257,8 +257,8 @@ def test_cmca_components(dim, use_coslat, mock_data_array): data2 = data1.copy() ** 2 model = ComplexMCA(n_modes=5, standardize=True, use_coslat=use_coslat) model.fit(data1, data2, dim=dim) - V1 = model.data.components1.values - V2 = model.data.components2.values + V1 = model.data["components1"].values + V2 = model.data["components2"].values K1 = V1.conj().T @ V1 K2 = V2.conj().T @ V2 assert np.allclose( @@ -283,10 +283,10 @@ def test_cmca_scores(dim, use_coslat, mock_data_array): data2 = data1.copy() ** 2 model = ComplexMCA(n_modes=10, standardize=True, use_coslat=use_coslat) model.fit(data1, data2, dim=dim) - U1 = model.data.scores1.values - U2 = model.data.scores2.values + U1 = model.data["scores1"].values + U2 = model.data["scores2"].values K = U1.conj().T @ U2 - target = np.eye(K.shape[0]) * (model.data.input_data1.sample.size - 1) + target = 
np.eye(K.shape[0]) * (model.data["input_data1"].sample.size - 1) assert np.allclose(K, target, atol=1e-5), "Scores are not unitary" @@ -316,8 +316,8 @@ def test_rmca_components(dim, use_coslat, power, squared_loadings, mock_data_arr model.fit(data1, data2, dim=dim) rot = MCARotator(n_modes=5, power=power, squared_loadings=squared_loadings) rot.fit(model) - V1 = rot.data.components1.values - V2 = rot.data.components2.values + V1 = rot.data["components1"].values + V2 = rot.data["components2"].values K1 = V1.conj().T @ V1 K2 = V2.conj().T @ V2 assert np.allclose( @@ -356,10 +356,10 @@ def test_rmca_scores(dim, use_coslat, power, squared_loadings, mock_data_array): model.fit(data1, data2, dim=dim) rot = MCARotator(n_modes=5, power=power, squared_loadings=squared_loadings) rot.fit(model) - U1 = rot.data.scores1.values - U2 = rot.data.scores2.values + U1 = rot.data["scores1"].values + U2 = rot.data["scores2"].values K = U1.conj().T @ U2 - target = np.eye(K.shape[0]) * (model.data.input_data1.sample.size - 1) + target = np.eye(K.shape[0]) * (model.data["input_data1"].sample.size - 1) if power == 1: # Varimax rotation does guarantee orthogonality assert np.allclose(K, target, atol=1e-5), "Components are not orthogonal" @@ -393,8 +393,8 @@ def test_crmca_components(dim, use_coslat, power, squared_loadings, mock_data_ar model.fit(data1, data2, dim=dim) rot = ComplexMCARotator(n_modes=5, power=power, squared_loadings=squared_loadings) rot.fit(model) - V1 = rot.data.components1.values - V2 = rot.data.components2.values + V1 = rot.data["components1"].values + V2 = rot.data["components2"].values K1 = V1.conj().T @ V1 K2 = V2.conj().T @ V2 assert np.allclose( @@ -433,10 +433,10 @@ def test_crmca_scores(dim, use_coslat, power, squared_loadings, mock_data_array) model.fit(data1, data2, dim=dim) rot = ComplexMCARotator(n_modes=5, power=power, squared_loadings=squared_loadings) rot.fit(model) - U1 = rot.data.scores1.values - U2 = rot.data.scores2.values + U1 = rot.data["scores1"].values + U2 = rot.data["scores2"].values K = U1.conj().T @ U2 - target = np.eye(K.shape[0]) * (model.data.input_data1.sample.size - 1) + target = np.eye(K.shape[0]) * (model.data["input_data1"].sample.size - 1) if power == 1: # Varimax rotation does guarantee orthogonality assert np.allclose(K, target, atol=1e-5), "Components are not orthogonal" diff --git a/tests/validation/test_eof_bootstrapper.py b/tests/validation/test_eof_bootstrapper.py index e1a43f4..e78ef12 100644 --- a/tests/validation/test_eof_bootstrapper.py +++ b/tests/validation/test_eof_bootstrapper.py @@ -56,51 +56,51 @@ def test_fit(eof_model): # DataArrays are created assert isinstance( - bootstrapper.data.explained_variance, xr.DataArray + bootstrapper.data["explained_variance"], xr.DataArray ), "explained variance is not a DataArray" assert isinstance( - bootstrapper.data.components, xr.DataArray + bootstrapper.data["components"], xr.DataArray ), "components is not a DataArray" assert isinstance( - bootstrapper.data.scores, xr.DataArray + bootstrapper.data["scores"], xr.DataArray ), "scores is not a DataArray" # DataArrays have expected dims - expected_dims = set(eof_model.data.explained_variance.dims) + expected_dims = set(eof_model.data["explained_variance"].dims) expected_dims.add("n") - true_dims = set(bootstrapper.data.explained_variance.dims) + true_dims = set(bootstrapper.data["explained_variance"].dims) err_message = ( f"explained variance dimensions are {true_dims} instead of {expected_dims}" ) assert true_dims == expected_dims, err_message - 
expected_dims = set(eof_model.data.components.dims) + expected_dims = set(eof_model.data["components"].dims) expected_dims.add("n") - true_dims = set(bootstrapper.data.components.dims) + true_dims = set(bootstrapper.data["components"].dims) err_message = f"components dimensions are {true_dims} instead of {expected_dims}" assert true_dims == expected_dims, err_message - expected_dims = set(eof_model.data.scores.dims) + expected_dims = set(eof_model.data["scores"].dims) expected_dims.add("n") - true_dims = set(bootstrapper.data.scores.dims) + true_dims = set(bootstrapper.data["scores"].dims) err_message = f"scores dimensions are {true_dims} instead of {expected_dims}" assert true_dims == expected_dims, err_message # DataArrays have expected coords - ref_da = eof_model.data.explained_variance - test_da = bootstrapper.data.explained_variance + ref_da = eof_model.data["explained_variance"] + test_da = bootstrapper.data["explained_variance"] for dim, coords in ref_da.coords.items(): assert test_da[dim].equals( coords ), f"explained variance coords for {dim} are not equal" - ref_da = eof_model.data.components - test_da = bootstrapper.data.components + ref_da = eof_model.data["components"] + test_da = bootstrapper.data["components"] for dim, coords in ref_da.coords.items(): assert test_da[dim].equals(coords), f"components coords for {dim} are not equal" - ref_da = eof_model.data.scores - test_da = bootstrapper.data.scores + ref_da = eof_model.data["scores"] + test_da = bootstrapper.data["scores"] for dim, coords in ref_da.coords.items(): assert test_da[dim].equals(coords), f"scores coords for {dim} are not equal" diff --git a/xeofs/data_container/__init__.py b/xeofs/data_container/__init__.py index 4065e8c..3a8fcd5 100644 --- a/xeofs/data_container/__init__.py +++ b/xeofs/data_container/__init__.py @@ -1,12 +1 @@ -from ._base_model_data_container import _BaseModelDataContainer -from ._base_cross_model_data_container import _BaseCrossModelDataContainer -from .eof_data_container import EOFDataContainer, ComplexEOFDataContainer -from .mca_data_container import MCADataContainer, ComplexMCADataContainer -from .eof_rotator_data_container import ( - EOFRotatorDataContainer, - ComplexEOFRotatorDataContainer, -) -from .mca_rotator_data_container import ( - MCARotatorDataContainer, - ComplexMCARotatorDataContainer, -) +from .data_container import DataContainer diff --git a/xeofs/data_container/_base_cross_model_data_container.py b/xeofs/data_container/_base_cross_model_data_container.py deleted file mode 100644 index c81dedc..0000000 --- a/xeofs/data_container/_base_cross_model_data_container.py +++ /dev/null @@ -1,121 +0,0 @@ -from abc import ABC -from typing import Optional - -from dask.diagnostics.progress import ProgressBar - -from ..utils.data_types import DataArray - - -class _BaseCrossModelDataContainer(ABC): - """Abstract base class that holds the cross model data.""" - - def __init__(self): - self._input_data1: Optional[DataArray] = None - self._input_data2: Optional[DataArray] = None - self._components1: Optional[DataArray] = None - self._components2: Optional[DataArray] = None - self._scores1: Optional[DataArray] = None - self._scores2: Optional[DataArray] = None - - @staticmethod - def _verify_dims(da: DataArray, dims_expected: tuple): - """Verify that the dimensions of the data are correct.""" - if not set(da.dims) == set(dims_expected): - raise ValueError(f"The data must have dimensions {dims_expected}.") - - @staticmethod - def _sanity_check(data) -> DataArray: - """Check whether the Data of 
the DataContainer has been set.""" - if data is None: - raise ValueError("There is no data. Have you called .fit()?") - else: - return data - - def set_data( - self, - input_data1: DataArray, - input_data2: DataArray, - components1: DataArray, - components2: DataArray, - scores1: DataArray, - scores2: DataArray, - ): - components1.name = "left_components" - components2.name = "right_components" - scores1.name = "left_scores" - scores2.name = "right_scores" - - self._input_data1 = input_data1 - self._input_data2 = input_data2 - self._components1 = components1 - self._components2 = components2 - self._scores1 = scores1 - self._scores2 = scores2 - - @property - def input_data1(self) -> DataArray: - """Get the left input data.""" - data1 = self._sanity_check(self._input_data1) - return data1 - - @property - def input_data2(self) -> DataArray: - """Get the right input data.""" - data2 = self._sanity_check(self._input_data2) - return data2 - - @property - def components1(self) -> DataArray: - """Get the left components.""" - components1 = self._sanity_check(self._components1) - return components1 - - @property - def components2(self) -> DataArray: - """Get the right components.""" - components2 = self._sanity_check(self._components2) - return components2 - - @property - def scores1(self) -> DataArray: - """Get the left scores.""" - scores1 = self._sanity_check(self._scores1) - return scores1 - - @property - def scores2(self) -> DataArray: - """Get the right scores.""" - scores2 = self._sanity_check(self._scores2) - return scores2 - - def compute(self, verbose=False): - """Compute and load delayed dask DataArrays into memory. - - Parameters - ---------- - verbose : bool - Whether or not to provide additional information about the computing progress. - """ - if verbose: - with ProgressBar(): - self._components1 = self.components1.compute() - self._components2 = self.components2.compute() - self._scores1 = self.scores1.compute() - self._scores2 = self.scores2.compute() - else: - self._components1 = self.components1.compute() - self._components2 = self.components2.compute() - self._scores1 = self.scores1.compute() - self._scores2 = self.scores2.compute() - - def set_attrs(self, attrs: dict): - """Set the attributes of the results.""" - components1 = self._sanity_check(self._components1) - components2 = self._sanity_check(self._components2) - scores1 = self._sanity_check(self._scores1) - scores2 = self._sanity_check(self._scores2) - - components1.attrs.update(attrs) - components2.attrs.update(attrs) - scores1.attrs.update(attrs) - scores2.attrs.update(attrs) diff --git a/xeofs/data_container/_base_model_data_container.py b/xeofs/data_container/_base_model_data_container.py deleted file mode 100644 index 59ffc71..0000000 --- a/xeofs/data_container/_base_model_data_container.py +++ /dev/null @@ -1,79 +0,0 @@ -from abc import ABC -from typing import Optional - -from dask.diagnostics.progress import ProgressBar - -from ..utils.data_types import DataArray - - -class _BaseModelDataContainer(ABC): - """Abstract base class that holds the model data.""" - - def __init__(self): - self._input_data: Optional[DataArray] = None - self._components: Optional[DataArray] = None - self._scores: Optional[DataArray] = None - - @staticmethod - def _verify_dims(da: DataArray, dims_expected: tuple): - """Verify that the dimensions of the data are correct.""" - if not set(da.dims) == set(dims_expected): - raise ValueError(f"The data must have dimensions {dims_expected}.") - - @staticmethod - def _sanity_check(data) -> 
DataArray: - """Check whether the Data of the DataContainer has been set.""" - if data is None: - raise ValueError("There is no data. Have you called .fit()?") - else: - return data - - def set_data(self, input_data: DataArray, components: DataArray, scores: DataArray): - components.name = "components" - scores.name = "scores" - - self._input_data = input_data - self._components = components - self._scores = scores - - @property - def input_data(self) -> DataArray: - """Get the input data.""" - data = self._sanity_check(self._input_data) - return data - - @property - def components(self) -> DataArray: - """Get the components.""" - components = self._sanity_check(self._components) - return components - - @property - def scores(self) -> DataArray: - """Get the scores.""" - scores = self._sanity_check(self._scores) - return scores - - def compute(self, verbose=False): - """Compute and load delayed dask DataArrays into memory. - - Parameters - ---------- - verbose : bool - Whether or not to provide additional information about the computing progress. - """ - if verbose: - with ProgressBar(): - self._components = self.components.compute() - self._scores = self.scores.compute() - else: - self._components = self.components.compute() - self._scores = self.scores.compute() - - def set_attrs(self, attrs: dict): - """Set the attributes of the results.""" - components = self._sanity_check(self._components) - scores = self._sanity_check(self._scores) - - components.attrs.update(attrs) - scores.attrs.update(attrs) diff --git a/xeofs/data_container/data_container.py b/xeofs/data_container/data_container.py new file mode 100644 index 0000000..38a6340 --- /dev/null +++ b/xeofs/data_container/data_container.py @@ -0,0 +1,39 @@ +from dask.diagnostics.progress import ProgressBar + +from ..utils.data_types import DataArray + + +class DataContainer(dict): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._allow_compute = dict({k: True for k in self.keys()}) + + def add(self, data: DataArray, name: str, allow_compute: bool = True) -> None: + data.name = name + super().__setitem__(name, data) + self._allow_compute[name] = True if allow_compute else False + + def __setitem__(self, __key: str, __value: DataArray) -> None: + super().__setitem__(__key, __value) + self._allow_compute[__key] = self._allow_compute.get(__key, True) + + def __getitem__(self, __key: str) -> DataArray: + try: + return super().__getitem__(__key) + except KeyError: + raise KeyError( + f"Cannot find data '{__key}'. Please fit the model first by calling .fit()." 
+ ) + + def compute(self, verbose=False): + for k, v in self.items(): + if self._allow_compute[k]: + if verbose: + with ProgressBar(): + self[k] = v.compute() + else: + self[k] = v.compute() + + def set_attrs(self, attrs: dict): + for key in self.keys(): + self[key].attrs = attrs diff --git a/xeofs/data_container/eof_bootstrapper_data_container.py b/xeofs/data_container/eof_bootstrapper_data_container.py deleted file mode 100644 index 0abe5c7..0000000 --- a/xeofs/data_container/eof_bootstrapper_data_container.py +++ /dev/null @@ -1,25 +0,0 @@ -import numpy as np - -from ..data_container.eof_data_container import EOFDataContainer -from ..utils.data_types import DataArray - - -class EOFBootstrapperDataContainer(EOFDataContainer): - """Container that holds the data related to a Bootstrapper EOF model.""" - - @staticmethod - def _verify_dims(da: DataArray, dims: tuple): - """Verify that the dimensions of the data are correct.""" - # Bootstrapper EOFs have an additional dimension for the bootstrap - expected_dims = dims - given_dims = da.dims - - # In the case of the input data, the dimensions are ('sample', 'feature') - # Otherwise, the data should have an additional dimension for the bootstrap `n` - has_input_data_dims = set(given_dims) == set(("sample", "feature")) - if not has_input_data_dims: - expected_dims = ("n",) + dims - - dims_are_equal = set(given_dims) == set(expected_dims) - if not dims_are_equal: - raise ValueError(f"The data must have dimensions {expected_dims}.") diff --git a/xeofs/data_container/eof_data_container.py b/xeofs/data_container/eof_data_container.py deleted file mode 100644 index 2448966..0000000 --- a/xeofs/data_container/eof_data_container.py +++ /dev/null @@ -1,133 +0,0 @@ -from typing import Optional - -import numpy as np -import xarray as xr -from dask.diagnostics.progress import ProgressBar - -from ._base_model_data_container import _BaseModelDataContainer -from ..utils.data_types import DataArray - - -class EOFDataContainer(_BaseModelDataContainer): - """Container to store the results of an EOF analysis.""" - - def __init__(self): - super().__init__() - self._explained_variance: Optional[DataArray] = None - self._total_variance: Optional[DataArray] = None - self._idx_modes_sorted: Optional[DataArray] = None - - def set_data( - self, - input_data: DataArray, - components: DataArray, - scores: DataArray, - explained_variance: DataArray, - total_variance: DataArray, - idx_modes_sorted: DataArray, - ): - super().set_data(input_data=input_data, components=components, scores=scores) - - self._explained_variance = explained_variance - self._explained_variance.name = "explained_variance" - - self._total_variance = total_variance - self._total_variance.name = "total_variance" - - self._idx_modes_sorted = idx_modes_sorted - self._idx_modes_sorted.name = "idx_modes_sorted" - - @property - def total_variance(self) -> DataArray: - """Get the total variance.""" - total_var = super()._sanity_check(self._total_variance) - return total_var - - @property - def explained_variance(self) -> DataArray: - """Get the explained variance.""" - exp_var = super()._sanity_check(self._explained_variance) - return exp_var - - @property - def explained_variance_ratio(self) -> DataArray: - """Get the explained variance ratio.""" - expvar_ratio = self.explained_variance / self.total_variance - expvar_ratio.name = "explained_variance_ratio" - expvar_ratio.attrs.update(self.explained_variance.attrs) - return expvar_ratio - - @property - def idx_modes_sorted(self) -> DataArray: - """Get the 
index of the sorted explained variance.""" - idx_modes_sorted = super()._sanity_check(self._idx_modes_sorted) - return idx_modes_sorted - - @property - def singular_values(self) -> DataArray: - """Get the explained variance.""" - svals = (self.explained_variance * (self.input_data.shape[0] - 1)) ** 0.5 - svals.attrs.update(self.explained_variance.attrs) - svals.name = "singular_values" - return svals - - def compute(self, verbose=False): - super().compute(verbose) - - if verbose: - with ProgressBar(): - self._explained_variance = self.explained_variance.compute() - self._total_variance = self.total_variance.compute() - self._idx_modes_sorted = self.idx_modes_sorted.compute() - else: - self._explained_variance = self.explained_variance.compute() - self._total_variance = self.total_variance.compute() - self._idx_modes_sorted = self.idx_modes_sorted.compute() - - def set_attrs(self, attrs: dict): - """Set the attributes of the results.""" - super().set_attrs(attrs) - - explained_variance = self._sanity_check(self._explained_variance) - total_variance = self._sanity_check(self._total_variance) - idx_modes_sorted = self._sanity_check(self._idx_modes_sorted) - - explained_variance.attrs.update(attrs) - total_variance.attrs.update(attrs) - idx_modes_sorted.attrs.update(attrs) - - -class ComplexEOFDataContainer(EOFDataContainer): - """Container to store the results of a complex EOF analysis.""" - - @property - def components_amplitude(self) -> DataArray: - """Get the components amplitude.""" - comp_abs = abs(self.components) - comp_abs.name = "components_amplitude" - return comp_abs - - @property - def components_phase(self) -> DataArray: - """Get the components phase.""" - comp_phase = xr.apply_ufunc( - np.angle, self.components, dask="allowed", keep_attrs=True - ) - comp_phase.name = "components_phase" - return comp_phase - - @property - def scores_amplitude(self) -> DataArray: - """Get the scores amplitude.""" - score_abs = abs(self.scores) - score_abs.name = "scores_amplitude" - return score_abs - - @property - def scores_phase(self) -> DataArray: - """Get the scores phase.""" - score_phase = xr.apply_ufunc( - np.angle, self.scores, dask="allowed", keep_attrs=True - ) - score_phase.name = "scores_phase" - return score_phase diff --git a/xeofs/data_container/eof_rotator_data_container.py b/xeofs/data_container/eof_rotator_data_container.py deleted file mode 100644 index cb0d334..0000000 --- a/xeofs/data_container/eof_rotator_data_container.py +++ /dev/null @@ -1,117 +0,0 @@ -from abc import abstractmethod -from typing import TypeVar, Optional - -import numpy as np -import xarray as xr -from dask.diagnostics.progress import ProgressBar - -from .eof_data_container import EOFDataContainer, ComplexEOFDataContainer -from ..utils.data_types import DataArray - - -class EOFRotatorDataContainer(EOFDataContainer): - """Container to store the results of a rotated EOF analysis.""" - - def __init__(self): - super().__init__() - self._rotation_matrix: Optional[DataArray] = None - self._phi_matrix: Optional[DataArray] = None - self._modes_sign: Optional[DataArray] = None - - def set_data( - self, - input_data: DataArray, - components: DataArray, - scores: DataArray, - explained_variance: DataArray, - total_variance: DataArray, - idx_modes_sorted: DataArray, - modes_sign: DataArray, - rotation_matrix: DataArray, - phi_matrix: DataArray, - ): - super().set_data( - input_data=input_data, - components=components, - scores=scores, - explained_variance=explained_variance, - total_variance=total_variance, - 
idx_modes_sorted=idx_modes_sorted, - ) - - self._rotation_matrix = rotation_matrix - self._rotation_matrix.name = "rotation_matrix" - - self._phi_matrix = phi_matrix - self._phi_matrix.name = "phi_matrix" - - self._modes_sign = modes_sign - self._modes_sign.name = "modes_sign" - - @property - def rotation_matrix(self) -> DataArray: - """Get the rotation matrix.""" - rotation_matrix = super()._sanity_check(self._rotation_matrix) - return rotation_matrix - - @property - def phi_matrix(self) -> DataArray: - """Get the phi matrix.""" - phi_matrix = super()._sanity_check(self._phi_matrix) - return phi_matrix - - @property - def modes_sign(self) -> DataArray: - """Get the modes sign.""" - modes_sign = super()._sanity_check(self._modes_sign) - return modes_sign - - def compute(self, verbose: bool = False): - super().compute(verbose) - - if verbose: - with ProgressBar(): - self._rotation_matrix = self.rotation_matrix.compute() - self._phi_matrix = self.phi_matrix.compute() - self._modes_sign = self.modes_sign.compute() - else: - self._rotation_matrix = self.rotation_matrix.compute() - self._phi_matrix = self.phi_matrix.compute() - self._modes_sign = self.modes_sign.compute() - - def set_attrs(self, attrs: dict): - super().set_attrs(attrs) - self.rotation_matrix.attrs.update(attrs) - self.phi_matrix.attrs.update(attrs) - self.modes_sign.attrs.update(attrs) - - -class ComplexEOFRotatorDataContainer(EOFRotatorDataContainer, ComplexEOFDataContainer): - """Container to store the results of a complex rotated EOF analysis.""" - - def __init__(self): - super(ComplexEOFRotatorDataContainer, self).__init__() - - def set_data( - self, - input_data: DataArray, - components: DataArray, - scores: DataArray, - explained_variance: DataArray, - total_variance: DataArray, - idx_modes_sorted: DataArray, - rotation_matrix: DataArray, - phi_matrix: DataArray, - modes_sign: DataArray, - ): - super().set_data( - input_data=input_data, - components=components, - scores=scores, - explained_variance=explained_variance, - total_variance=total_variance, - idx_modes_sorted=idx_modes_sorted, - rotation_matrix=rotation_matrix, - phi_matrix=phi_matrix, - modes_sign=modes_sign, - ) diff --git a/xeofs/data_container/mca_data_container.py b/xeofs/data_container/mca_data_container.py deleted file mode 100644 index c24169c..0000000 --- a/xeofs/data_container/mca_data_container.py +++ /dev/null @@ -1,223 +0,0 @@ -from typing import Optional - -import numpy as np -import xarray as xr -from dask.diagnostics.progress import ProgressBar - -from ._base_cross_model_data_container import _BaseCrossModelDataContainer -from ..utils.data_types import DataArray - - -class MCADataContainer(_BaseCrossModelDataContainer): - """Container to store the results of a MCA.""" - - def __init__(self): - super().__init__() - self._squared_covariance: Optional[DataArray] = None - self._total_squared_covariance: Optional[DataArray] = None - self._idx_modes_sorted: Optional[DataArray] = None - self._norm1: Optional[DataArray] = None - self._norm2: Optional[DataArray] = None - - def set_data( - self, - input_data1: DataArray, - input_data2: DataArray, - components1: DataArray, - components2: DataArray, - scores1: DataArray, - scores2: DataArray, - squared_covariance: DataArray, - total_squared_covariance: DataArray, - idx_modes_sorted: DataArray, - norm1: DataArray, - norm2: DataArray, - ): - super().set_data( - input_data1=input_data1, - input_data2=input_data2, - components1=components1, - components2=components2, - scores1=scores1, - scores2=scores2, - ) - - 
self._squared_covariance = squared_covariance -        self._squared_covariance.name = "squared_covariance" - -        self._total_squared_covariance = total_squared_covariance -        self._total_squared_covariance.name = "total_squared_covariance" - -        self._idx_modes_sorted = idx_modes_sorted -        self._idx_modes_sorted.name = "idx_modes_sorted" - -        self._norm1 = norm1 -        self._norm1.name = "left_norm" - -        self._norm2 = norm2 -        self._norm2.name = "right_norm" - -    @property -    def total_squared_covariance(self) -> DataArray: -        """Get the total squared covariance.""" -        tsc = super()._sanity_check(self._total_squared_covariance) -        return tsc - -    @property -    def squared_covariance(self) -> DataArray: -        """Get the squared covariance.""" -        sc = super()._sanity_check(self._squared_covariance) -        return sc - -    @property -    def squared_covariance_fraction(self) -> DataArray: -        """Get the squared covariance fraction (SCF).""" -        scf = self.squared_covariance / self.total_squared_covariance -        scf.attrs.update(self.squared_covariance.attrs) -        scf.name = "squared_covariance_fraction" -        return scf - -    @property -    def norm1(self) -> DataArray: -        """Get the norm of the left scores.""" -        norm1 = super()._sanity_check(self._norm1) -        return norm1 - -    @property -    def norm2(self) -> DataArray: -        """Get the norm of the right scores.""" -        norm2 = super()._sanity_check(self._norm2) -        return norm2 - -    @property -    def idx_modes_sorted(self) -> DataArray: -        """Get the indices of the modes sorted by the squared covariance.""" -        idx_modes_sorted = super()._sanity_check(self._idx_modes_sorted) -        return idx_modes_sorted - -    @property -    def singular_values(self) -> DataArray: -        """Get the singular values.""" -        singular_values = xr.apply_ufunc( -            np.sqrt, -            self.squared_covariance, -            dask="allowed", -            vectorize=False, -            keep_attrs=True, -        ) -        singular_values.name = "singular_values" -        return singular_values - -    @property -    def total_covariance(self) -> DataArray: -        """Get the total covariance. - -        This measure follows the definition of Cheng and Dunkerton (1995). -        Note that this measure is not an invariant in MCA. - -        """ -        tot_cov = self.singular_values.sum() -        tot_cov.attrs.update(self.singular_values.attrs) -        tot_cov.name = "total_covariance" -        return tot_cov - -    @property -    def covariance_fraction(self) -> DataArray: -        """Get the covariance fraction (CF). - -        This measure follows the definition of Cheng and Dunkerton (1995). -        Note that this measure is not an invariant in MCA.
- - """ - cov_frac = self.singular_values / self.total_covariance - cov_frac.attrs.update(self.singular_values.attrs) - cov_frac.name = "covariance_fraction" - return cov_frac - - def compute(self, verbose=False): - super().compute(verbose) - - if verbose: - with ProgressBar(): - self._total_squared_covariance = self.total_squared_covariance.compute() - self._squared_covariance = self.squared_covariance.compute() - self._norm1 = self.norm1.compute() - self._norm2 = self.norm2.compute() - else: - self._total_squared_covariance = self.total_squared_covariance.compute() - self._squared_covariance = self.squared_covariance.compute() - self._norm1 = self.norm1.compute() - self._norm2 = self.norm2.compute() - - def set_attrs(self, attrs: dict): - super().set_attrs(attrs) - - total_squared_covariance = super()._sanity_check(self._total_squared_covariance) - squared_covariance = super()._sanity_check(self._squared_covariance) - norm1 = super()._sanity_check(self._norm1) - norm2 = super()._sanity_check(self._norm2) - - total_squared_covariance.attrs.update(attrs) - squared_covariance.attrs.update(attrs) - norm1.attrs.update(attrs) - norm2.attrs.update(attrs) - - -class ComplexMCADataContainer(MCADataContainer): - """Container that holds the data related to a Complex MCA model.""" - - @property - def components_amplitude1(self) -> DataArray: - """Get the component amplitudes of the left field.""" - comp_amps1 = abs(self.components1) - comp_amps1.name = "left_components_amplitude" - return comp_amps1 - - @property - def components_amplitude2(self) -> DataArray: - """Get the component amplitudes of the right field.""" - comp_amps2 = abs(self.components2) - comp_amps2.name = "right_components_amplitude" - return comp_amps2 - - @property - def components_phase1(self) -> DataArray: - """Get the component phases of the left field.""" - comp_phs1 = xr.apply_ufunc(np.angle, self.components1, keep_attrs=True) - comp_phs1.name = "left_components_phase" - return comp_phs1 - - @property - def components_phase2(self) -> DataArray: - """Get the component phases of the right field.""" - comp_phs2 = xr.apply_ufunc(np.angle, self._components2, keep_attrs=True) - comp_phs2.name = "right_components_phase" - return comp_phs2 - - @property - def scores_amplitude1(self) -> DataArray: - """Get the scores amplitudes of the left field.""" - scores_amps1 = abs(self.scores1) - scores_amps1.name = "left_scores_amplitude" - return scores_amps1 - - @property - def scores_amplitude2(self) -> DataArray: - """Get the scores amplitudes of the right field.""" - scores_amps2 = abs(self.scores2) - scores_amps2.name = "right_scores_amplitude" - return scores_amps2 - - @property - def scores_phase1(self) -> DataArray: - """Get the scores phases of the left field.""" - scores_phs1 = xr.apply_ufunc(np.angle, self.scores1, keep_attrs=True) - scores_phs1.name = "left_scores_phase" - return scores_phs1 - - @property - def scores_phase2(self) -> DataArray: - """Get the scores phases of the right field.""" - scores_phs2 = xr.apply_ufunc(np.angle, self.scores2, keep_attrs=True) - scores_phs2.name = "right_scores_phase" - return scores_phs2 diff --git a/xeofs/data_container/mca_rotator_data_container.py b/xeofs/data_container/mca_rotator_data_container.py deleted file mode 100644 index 4727ab7..0000000 --- a/xeofs/data_container/mca_rotator_data_container.py +++ /dev/null @@ -1,145 +0,0 @@ -from typing import Optional - -import numpy as np -import xarray as xr -from dask.diagnostics.progress import ProgressBar - -from xeofs.utils.data_types 
import DataArray - -from .mca_data_container import MCADataContainer, ComplexMCADataContainer -from ..utils.data_types import DataArray - - -class MCARotatorDataContainer(MCADataContainer): - """Container that holds the data related to a rotated MCA model.""" - - def __init__(self): - super().__init__() - self._rotation_matrix: Optional[DataArray] = None - self._phi_matrix: Optional[DataArray] = None - self._modes_sign: Optional[DataArray] = None - - def set_data( - self, - input_data1: DataArray, - input_data2: DataArray, - components1: DataArray, - components2: DataArray, - scores1: DataArray, - scores2: DataArray, - squared_covariance: DataArray, - total_squared_covariance: DataArray, - idx_modes_sorted: DataArray, - modes_sign: DataArray, - norm1: DataArray, - norm2: DataArray, - rotation_matrix: DataArray, - phi_matrix: DataArray, - ): - super().set_data( - input_data1=input_data1, - input_data2=input_data2, - components1=components1, - components2=components2, - scores1=scores1, - scores2=scores2, - squared_covariance=squared_covariance, - total_squared_covariance=total_squared_covariance, - idx_modes_sorted=idx_modes_sorted, - norm1=norm1, - norm2=norm2, - ) - - self._rotation_matrix = rotation_matrix - self._rotation_matrix.name = "rotation_matrix" - - self._phi_matrix = phi_matrix - self._phi_matrix.name = "phi_matrix" - - self._modes_sign = modes_sign - self._modes_sign.name = "modes_sign" - - @property - def rotation_matrix(self) -> DataArray: - """Get the rotation matrix.""" - rotation_matrix = super()._sanity_check(self._rotation_matrix) - return rotation_matrix - - @property - def phi_matrix(self) -> DataArray: - """Get the phi matrix.""" - phi_matrix = super()._sanity_check(self._phi_matrix) - return phi_matrix - - @property - def modes_sign(self) -> DataArray: - """Get the mode signs.""" - modes_sign = super()._sanity_check(self._modes_sign) - return modes_sign - - def compute(self, verbose: bool = False): - """Compute the rotated MCA model.""" - super().compute(verbose=verbose) - - if verbose: - with ProgressBar(): - self._rotation_matrix = self.rotation_matrix.compute() - self._phi_matrix = self.phi_matrix.compute() - self._modes_sign = self.modes_sign.compute() - else: - self._rotation_matrix = self.rotation_matrix.compute() - self._phi_matrix = self.phi_matrix.compute() - self._modes_sign = self.modes_sign.compute() - - def set_attrs(self, attrs: dict): - """Set the attributes of the data container.""" - super().set_attrs(attrs) - - rotation_matrix = super()._sanity_check(self._rotation_matrix) - phi_matrix = super()._sanity_check(self._phi_matrix) - modes_sign = super()._sanity_check(self._modes_sign) - - rotation_matrix.attrs.update(attrs) - phi_matrix.attrs.update(attrs) - modes_sign.attrs.update(attrs) - - -class ComplexMCARotatorDataContainer(MCARotatorDataContainer, ComplexMCADataContainer): - """Container that holds the data related to a rotated complex MCA model.""" - - def __init__(self): - super(ComplexMCARotatorDataContainer, self).__init__() - - def set_data( - self, - input_data1: DataArray, - input_data2: DataArray, - components1: DataArray, - components2: DataArray, - scores1: DataArray, - scores2: DataArray, - squared_covariance: DataArray, - total_squared_covariance: DataArray, - idx_modes_sorted: DataArray, - modes_sign: DataArray, - norm1: DataArray, - norm2: DataArray, - rotation_matrix: DataArray, - phi_matrix: DataArray, - ): - super().set_data( - input_data1=input_data1, - input_data2=input_data2, - components1=components1, - 
components2=components2, - scores1=scores1, - scores2=scores2, - squared_covariance=squared_covariance, - total_squared_covariance=total_squared_covariance, - idx_modes_sorted=idx_modes_sorted, - modes_sign=modes_sign, - norm1=norm1, - norm2=norm2, - rotation_matrix=rotation_matrix, - phi_matrix=phi_matrix, - ) diff --git a/xeofs/data_container/opa_data_container.py b/xeofs/data_container/opa_data_container.py deleted file mode 100644 index 035c529..0000000 --- a/xeofs/data_container/opa_data_container.py +++ /dev/null @@ -1,76 +0,0 @@ -from typing import Optional - -import numpy as np -import xarray as xr -from dask.diagnostics.progress import ProgressBar - -from xeofs.utils.data_types import DataArray - -from ._base_model_data_container import _BaseModelDataContainer -from ..utils.data_types import DataArray - - -class OPADataContainer(_BaseModelDataContainer): - """Container to store the results of an Optimal Persistence Analysis (OPA).""" - - def __init__(self): - super().__init__() - self._filter_patterns: Optional[DataArray] = None - self._decorrelation_time: Optional[DataArray] = None - - def set_data( - self, - input_data: DataArray, - components: DataArray, - scores: DataArray, - filter_patterns: DataArray, - decorrelation_time: DataArray, - ): - super().set_data(input_data=input_data, components=components, scores=scores) - - self._decorrelation_time = decorrelation_time - self._decorrelation_time.name = "decorrelation_time" - - self._filter_patterns = filter_patterns - self._filter_patterns.name = "filter_patterns" - - @property - def components(self) -> DataArray: - comps = super().components - comps.name = "optimal_persistence_pattern" - return comps - - @property - def decorrelation_time(self) -> DataArray: - """Get the decorrelation time.""" - decorr = super()._sanity_check(self._decorrelation_time) - decorr.name = "decorrelation_time" - return decorr - - @property - def filter_patterns(self) -> DataArray: - """Get the filter patterns.""" - filter_patterns = super()._sanity_check(self._filter_patterns) - filter_patterns.name = "filter_patterns" - return filter_patterns - - def compute(self, verbose=False): - super().compute(verbose) - - if verbose: - with ProgressBar(): - self._filter_patterns = self.filter_patterns.compute() - self._decorrelation_time = self.decorrelation_time.compute() - else: - self._filter_patterns = self.filter_patterns.compute() - self._decorrelation_time = self.decorrelation_time.compute() - - def set_attrs(self, attrs: dict): - """Set the attributes of the results.""" - super().set_attrs(attrs) - - filter_patterns = self._sanity_check(self._filter_patterns) - decorrelation_time = self._sanity_check(self._decorrelation_time) - - filter_patterns.attrs.update(attrs) - decorrelation_time.attrs.update(attrs) diff --git a/xeofs/models/_base_cross_model.py b/xeofs/models/_base_cross_model.py index 69b5c66..b2ea454 100644 --- a/xeofs/models/_base_cross_model.py +++ b/xeofs/models/_base_cross_model.py @@ -2,11 +2,9 @@ from abc import ABC, abstractmethod from datetime import datetime -from dask.diagnostics.progress import ProgressBar - from .eof import EOF from ..preprocessing.preprocessor import Preprocessor -from ..data_container import _BaseCrossModelDataContainer +from ..data_container import DataContainer from ..utils.data_types import DataObject, DataArray from .._version import __version__ @@ -86,9 +84,8 @@ def __init__( self.preprocessor1 = Preprocessor(**self._preprocessor_kwargs) self.preprocessor2 = Preprocessor(**self._preprocessor_kwargs) - # 
Initialize the data container only to avoid type errors - # The actual data container will be initialized in respective subclasses - self.data: _BaseCrossModelDataContainer = _BaseCrossModelDataContainer() + # Initialize the data container that stores the results + self.data = DataContainer() # Initialize PCA objects self.pca1 = EOF(n_modes=n_pca_modes) if n_pca_modes else None @@ -183,8 +180,8 @@ def inverse_transform(self, mode) -> Tuple[DataObject, DataObject]: def components(self) -> Tuple[DataObject, DataObject]: """Get the components.""" - comps1 = self.data.components1 - comps2 = self.data.components2 + comps1 = self.data["components1"] + comps2 = self.data["components2"] components1: DataObject = self.preprocessor1.inverse_transform_components( comps1 @@ -196,20 +193,23 @@ def components(self) -> Tuple[DataObject, DataObject]: def scores(self) -> Tuple[DataArray, DataArray]: """Get the scores.""" - scores1 = self.data.scores1 - scores2 = self.data.scores2 + scores1 = self.data["scores1"] + scores2 = self.data["scores2"] scores1: DataArray = self.preprocessor1.inverse_transform_scores(scores1) scores2: DataArray = self.preprocessor2.inverse_transform_scores(scores2) return scores1, scores2 def compute(self, verbose: bool = False): - """Compute the results.""" - if verbose: - with ProgressBar(): - self.data.compute() - else: - self.data.compute() + """Compute and load delayed model results. + + Parameters + ---------- + verbose : bool + Whether or not to provide additional information about the computing progress. + + """ + self.data.compute(verbose=verbose) def get_params(self) -> Dict: """Get the model parameters.""" diff --git a/xeofs/models/_base_model.py b/xeofs/models/_base_model.py index df6caf4..d5a7ca2 100644 --- a/xeofs/models/_base_model.py +++ b/xeofs/models/_base_model.py @@ -2,10 +2,9 @@ from typing import Optional, Sequence, Hashable, Dict, Any, Self, List from abc import ABC, abstractmethod from datetime import datetime -from dask.diagnostics.progress import ProgressBar from ..preprocessing.preprocessor import Preprocessor -from ..data_container import _BaseModelDataContainer +from ..data_container import DataContainer from ..utils.data_types import DataObject, DataArray, Dims from .._version import __version__ @@ -82,9 +81,8 @@ def __init__( with_weights=use_weights, **self._preprocessor_kwargs ) - # Initialize the data container only to avoid type errors - # The actual data container will be initialized in respective subclasses - self.data: _BaseModelDataContainer = _BaseModelDataContainer() + # Initialize the data container that stores the results + self.data = DataContainer() def fit( self, @@ -233,12 +231,12 @@ def _inverse_transform_algorithm(self, mode) -> DataArray: def components(self) -> DataObject: """Get the components.""" - components = self.data.components + components = self.data["components"] return self.preprocessor.inverse_transform_components(components) def scores(self) -> DataArray: """Get the scores.""" - scores = self.data.scores + scores = self.data["scores"] return self.preprocessor.inverse_transform_scores(scores) def compute(self, verbose: bool = False): @@ -250,11 +248,7 @@ def compute(self, verbose: bool = False): Whether or not to provide additional information about the computing progress. 
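Examples
--------
A minimal, illustrative call (assuming ``model`` is any fitted xeofs model holding dask-backed, not-yet-computed results):

>>> model.compute(verbose=True)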
""" - if verbose: - with ProgressBar(): - self.data.compute() - else: - self.data.compute() + self.data.compute(verbose=verbose) def get_params(self) -> Dict[str, Any]: """Get the model parameters.""" diff --git a/xeofs/models/eof.py b/xeofs/models/eof.py index ae36fdf..7b8bcc3 100644 --- a/xeofs/models/eof.py +++ b/xeofs/models/eof.py @@ -1,10 +1,10 @@ from typing import Self +import numpy as np import xarray as xr from ._base_model import _BaseModel from .decomposer import Decomposer from ..utils.data_types import DataObject, DataArray, Dims -from ..data_container import EOFDataContainer, ComplexEOFDataContainer from ..utils.hilbert_transform import hilbert_transform from ..utils.xarray_utils import total_variance as compute_total_variance @@ -58,9 +58,6 @@ def __init__( ) self.attrs.update({"model": "EOF analysis"}) - # Initialize the DataContainer to store the results - self.data: EOFDataContainer = EOFDataContainer() - def _fit_algorithm(self, data: DataArray) -> Self: sample_name = self.sample_name feature_name = self.feature_name @@ -80,33 +77,26 @@ def _fit_algorithm(self, data: DataArray) -> Self: components = decomposer.V_ scores = decomposer.U_ - # Compute the explained variance - n_samples = data.coords[sample_name].size - explained_variance = singular_values**2 / (n_samples - 1) - - # Index of the sorted explained variance - # It's already sorted, we just need to assign it to the DataContainer - # for the sake of consistency - idx_modes_sorted = explained_variance.compute().argsort()[::-1] - idx_modes_sorted.coords.update(explained_variance.coords) - - # Assign the results to the data container - self.data.set_data( - input_data=data, - components=components, - scores=scores, - explained_variance=explained_variance, - total_variance=total_variance, - idx_modes_sorted=idx_modes_sorted, - ) + # Compute the explained variance per mode + n_samples = data.coords[self.sample_name].size + exp_var = singular_values**2 / (n_samples - 1) + exp_var.name = "explained_variance" + + # Store the results + self.data.add(data, "input_data", allow_compute=False) + self.data.add(components, "components") + self.data.add(scores, "scores") + self.data.add(exp_var, "explained_variance") + self.data.add(total_variance, "total_variance") + self.data.set_attrs(self.attrs) return self def _transform_algorithm(self, data: DataObject) -> DataArray: feature_name = self.preprocessor.feature_name - components = self.data.components - singular_values = self.data.singular_values + components = self.data["components"] + singular_values = self.singular_values() # Project the data projections = xr.dot(data, components, dims=feature_name) / singular_values @@ -133,9 +123,9 @@ def _inverse_transform_algorithm(self, mode) -> DataArray: """ # Reconstruct the data - svals = self.data.singular_values.sel(mode=mode) - comps = self.data.components.sel(mode=mode) - scores = self.data.scores.sel(mode=mode) * svals + svals = self.singular_values().sel(mode=mode) + comps = self.data["components"].sel(mode=mode) + scores = self.data["scores"].sel(mode=mode) * svals reconstructed_data = xr.dot(comps.conj(), scores) reconstructed_data.name = "reconstructed_data" @@ -156,7 +146,7 @@ def components(self) -> DataObject: Components of the fitted model. """ - components = self.data.components + components = self.data["components"] return self.preprocessor.inverse_transform_components(components) def scores(self) -> DataArray: @@ -172,7 +162,7 @@ def scores(self) -> DataArray: Scores of the fitted model. 
""" - scores = self.data.scores + scores = self.data["scores"] return self.preprocessor.inverse_transform_scores(scores) def singular_values(self) -> DataArray: @@ -184,7 +174,12 @@ def singular_values(self) -> DataArray: Singular values obtained from the SVD. """ - return self.data.singular_values + n_samples = self.data["input_data"].coords[self.sample_name].size + exp_var = self.explained_variance() + svals = (exp_var * (n_samples - 1)) ** 0.5 + svals.attrs.update(exp_var.attrs) + svals.name = "singular_values" + return svals def explained_variance(self) -> DataArray: """Return explained variance. @@ -203,7 +198,7 @@ def explained_variance(self) -> DataArray: explained_variance: DataArray Explained variance. """ - return self.data.explained_variance + return self.data["explained_variance"] def explained_variance_ratio(self) -> DataArray: """Return explained variance ratio. @@ -221,7 +216,10 @@ def explained_variance_ratio(self) -> DataArray: explained_variance_ratio: DataArray Explained variance ratio. """ - return self.data.explained_variance_ratio + exp_var_ratio = self.data["explained_variance"] / self.data["total_variance"] + exp_var_ratio.attrs.update(self.data["explained_variance"].attrs) + exp_var_ratio.name = "explained_variance_ratio" + return exp_var_ratio class ComplexEOF(EOF): @@ -275,9 +273,6 @@ def __init__(self, padding="exp", decay_factor=0.2, **kwargs): self.attrs.update({"model": "Complex EOF analysis"}) self._params.update({"padding": padding, "decay_factor": decay_factor}) - # Initialize the DataContainer to store the results - self.data: ComplexEOFDataContainer = ComplexEOFDataContainer() - def _fit_algorithm(self, data: DataArray) -> Self: sample_name = self.sample_name feature_name = self.feature_name @@ -307,24 +302,18 @@ def _fit_algorithm(self, data: DataArray) -> Self: components = decomposer.V_ scores = decomposer.U_ - # Compute the explained variance - n_samples = data.coords[sample_name].size - explained_variance = singular_values**2 / (n_samples - 1) - - # Index of the sorted explained variance - # It's already sorted, we just need to assign it to the DataContainer - # for the sake of consistency - idx_modes_sorted = explained_variance.compute().argsort()[::-1] - idx_modes_sorted.coords.update(explained_variance.coords) - - self.data.set_data( - input_data=data, - components=components, - scores=scores, - explained_variance=explained_variance, - total_variance=total_variance, - idx_modes_sorted=idx_modes_sorted, - ) + # Compute the explained variance per mode + n_samples = data.coords[self.sample_name].size + exp_var = singular_values**2 / (n_samples - 1) + exp_var.name = "explained_variance" + + # Store the results + self.data.add(data, "input_data", allow_compute=False) + self.data.add(components, "components") + self.data.add(scores, "scores") + self.data.add(exp_var, "explained_variance") + self.data.add(total_variance, "total_variance") + # Assign analysis-relevant meta data to the results self.data.set_attrs(self.attrs) return self @@ -349,7 +338,8 @@ def components_amplitude(self) -> DataObject: Amplitude of the components of the fitted model. """ - amplitudes = self.data.components_amplitude + amplitudes = abs(self.data["components"]) + amplitudes.name = "components_amplitude" return self.preprocessor.inverse_transform_components(amplitudes) def components_phase(self) -> DataObject: @@ -369,8 +359,10 @@ def components_phase(self) -> DataObject: Phase of the components of the fitted model. 
""" - phases = self.data.components_phase - return self.preprocessor.inverse_transform_components(phases) + comps = self.data["components"] + comp_phase = xr.apply_ufunc(np.angle, comps, dask="allowed", keep_attrs=True) + comp_phase.name = "components_phase" + return self.preprocessor.inverse_transform_components(comp_phase) def scores_amplitude(self) -> DataArray: """Return the amplitude of the (PC) scores. @@ -389,7 +381,8 @@ def scores_amplitude(self) -> DataArray: Amplitude of the scores of the fitted model. """ - amplitudes = self.data.scores_amplitude + amplitudes = abs(self.data["scores"]) + amplitudes.name = "scores_amplitude" return self.preprocessor.inverse_transform_scores(amplitudes) def scores_phase(self) -> DataArray: @@ -409,5 +402,7 @@ def scores_phase(self) -> DataArray: Phase of the scores of the fitted model. """ - phases = self.data.scores_phase + scores = self.data["scores"] + phases = xr.apply_ufunc(np.angle, scores, dask="allowed", keep_attrs=True) + phases.name = "scores_phase" return self.preprocessor.inverse_transform_scores(phases) diff --git a/xeofs/models/eof_rotator.py b/xeofs/models/eof_rotator.py index 59f5d41..fbb2d52 100644 --- a/xeofs/models/eof_rotator.py +++ b/xeofs/models/eof_rotator.py @@ -4,15 +4,9 @@ from typing import Self from .eof import EOF, ComplexEOF -from ..data_container.eof_rotator_data_container import ( - EOFRotatorDataContainer, - ComplexEOFRotatorDataContainer, -) - +from ..data_container import DataContainer from ..utils.rotation import promax from ..utils.data_types import DataArray - -from typing import TypeVar from .._version import __version__ @@ -79,8 +73,8 @@ def __init__( } ) - # Initialize the DataContainer to store the results - self.data: EOFRotatorDataContainer = EOFRotatorDataContainer() + # Define data container + self.data = DataContainer() def fit(self, model) -> Self: """Rotate the solution obtained from ``xe.models.EOF``. @@ -96,8 +90,8 @@ def fit(self, model) -> Self: def _fit_algorithm(self, model) -> Self: self.model = model self.preprocessor = model.preprocessor - sample_name = model.sample_name - feature_name = model.feature_name + self.sample_name = model.sample_name + self.feature_name = model.feature_name n_modes = self._params.get("n_modes") power = self._params.get("power") @@ -105,14 +99,14 @@ def _fit_algorithm(self, model) -> Self: rtol = self._params.get("rtol") # Select modes to rotate - components = model.data.components.sel(mode=slice(1, n_modes)) - expvar = model.data.explained_variance.sel(mode=slice(1, n_modes)) + components = model.data["components"].sel(mode=slice(1, n_modes)) + expvar = model.explained_variance().sel(mode=slice(1, n_modes)) # Rotate loadings loadings = components * np.sqrt(expvar) promax_kwargs = {"power": power, "max_iter": max_iter, "rtol": rtol} rot_loadings, rot_matrix, phi_matrix = promax( - loadings, feature_dim=feature_name, **promax_kwargs + loadings, feature_dim=self.feature_name, **promax_kwargs ) # Assign coordinates to the rotation/correlation matrices @@ -126,7 +120,7 @@ def _fit_algorithm(self, model) -> Self: ) # Reorder according to variance - expvar = (abs(rot_loadings) ** 2).sum(feature_name) + expvar = (abs(rot_loadings) ** 2).sum(self.feature_name) # NOTE: For delayed objects, the index must be computed. 
# NOTE: The index must be computed before sorting since argsort is not (yet) implemented in dask idx_sort = expvar.compute().argsort()[::-1] @@ -141,7 +135,7 @@ def _fit_algorithm(self, model) -> Self: rot_components = rot_loadings / np.sqrt(expvar) # Rotate scores - scores = model.data.scores.sel(mode=slice(1, n_modes)) + scores = model.data["scores"].sel(mode=slice(1, n_modes)) RinvT = self._compute_rot_mat_inv_trans( rot_matrix, input_dims=("mode_m", "mode_n") ) @@ -154,7 +148,7 @@ def _fit_algorithm(self, model) -> Self: scores = scores.isel(mode=idx_sort.values).assign_coords(mode=scores.mode) # Ensure consistent signs for deterministic output - idx_max_value = abs(rot_loadings).argmax(feature_name).compute() + idx_max_value = abs(rot_loadings).argmax(self.feature_name).compute() modes_sign = xr.apply_ufunc( np.sign, rot_loadings.isel(feature=idx_max_value), dask="allowed" ) @@ -165,18 +159,17 @@ def _fit_algorithm(self, model) -> Self: rot_components = rot_components * modes_sign scores = scores * modes_sign - # Create the data container - self.data.set_data( - input_data=model.data.input_data, - components=rot_components, - scores=scores, - explained_variance=expvar, - total_variance=model.data.total_variance, - idx_modes_sorted=idx_sort, - rotation_matrix=rot_matrix, - phi_matrix=phi_matrix, - modes_sign=modes_sign, - ) + # Store the results + self.data.add(model.data["input_data"], "input_data", allow_compute=False) + self.data.add(rot_components, "components") + self.data.add(scores, "scores") + self.data.add(expvar, "explained_variance") + self.data.add(model.data["total_variance"], "total_variance") + self.data.add(idx_sort, "idx_modes_sorted") + self.data.add(rot_matrix, "rotation_matrix") + self.data.add(phi_matrix, "phi_matrix") + self.data.add(modes_sign, "modes_sign") + # Assign analysis-relevant meta data self.data.set_attrs(self.attrs) return self @@ -184,18 +177,16 @@ def _fit_algorithm(self, model) -> Self: def _transform_algorithm(self, data: DataArray) -> DataArray: n_modes = self._params["n_modes"] - svals = self.model.data.singular_values.sel( - mode=slice(1, self._params["n_modes"]) - ) + svals = self.model.singular_values().sel(mode=slice(1, self._params["n_modes"])) # Select the (non-rotated) singular vectors of the first dataset - components = self.model.data.components.sel(mode=slice(1, n_modes)) + components = self.model.data["components"].sel(mode=slice(1, n_modes)) # Compute non-rotated scores by projecting the data onto non-rotated components projections = xr.dot(data, components) / svals projections.name = "scores" # Rotate the scores - R = self.data.rotation_matrix + R = self.data["rotation_matrix"] RinvT = self._compute_rot_mat_inv_trans(R, input_dims=("mode_m", "mode_n")) projections = projections.rename({"mode": "mode_m"}) RinvT = RinvT.rename({"mode_n": "mode"}) @@ -203,11 +194,11 @@ def _transform_algorithm(self, data: DataArray) -> DataArray: # Reorder according to variance # this must be done in one line: i) select modes according to their variance, ii) replace coords with modes from 1 ...
n projections = projections.isel( - mode=self.data.idx_modes_sorted.values + mode=self.data["idx_modes_sorted"].values ).assign_coords(mode=projections.mode) # Adapt the sign of the scores - projections = projections * self.data.modes_sign + projections = projections * self.data["modes_sign"] return projections @@ -286,9 +277,6 @@ def __init__(self, **kwargs): super().__init__(**kwargs) self.attrs.update({"model": "Rotated Complex EOF analysis"}) - # Initialize the DataContainer to store the results - self.data: ComplexEOFRotatorDataContainer = ComplexEOFRotatorDataContainer() - def _transform_algorithm(self, data: DataArray) -> DataArray: # Here we leverage the Method Resolution Order (MRO) to invoke the # transform method of the first class in the MRO after EOFRotator that diff --git a/xeofs/models/mca.py b/xeofs/models/mca.py index c8cac2b..0de2da7 100644 --- a/xeofs/models/mca.py +++ b/xeofs/models/mca.py @@ -2,15 +2,10 @@ import numpy as np import xarray as xr -from dask.diagnostics.progress import ProgressBar from ._base_cross_model import _BaseCrossModel from .decomposer import Decomposer from ..utils.data_types import DataObject, DataArray -from ..data_container.mca_data_container import ( - MCADataContainer, - ComplexMCADataContainer, -) from ..utils.statistics import pearson_correlation from ..utils.hilbert_transform import hilbert_transform from ..utils.dimension_renamer import DimensionRenamer @@ -66,9 +61,6 @@ def __init__(self, solver_kwargs={}, **kwargs): super().__init__(solver_kwargs=solver_kwargs, **kwargs) self.attrs.update({"model": "MCA"}) - # Initialize the DataContainer to store the results - self.data: MCADataContainer = MCADataContainer() - def _compute_cross_covariance_matrix(self, X1, X2): """Compute the cross-covariance matrix of two data objects. 
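A sketch of the underlying operation (illustrative only, assuming centered inputs that share a ``sample`` dimension; not the verbatim method body):

>>> C = xr.dot(X1.conj(), X2, dims="sample") / (X1["sample"].size - 1)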
@@ -104,8 +96,8 @@ def _fit_algorithm( self.pca1.fit(data1, dim=sample_name) self.pca2.fit(data2, dim=sample_name) # Get the PCA scores - pca_scores1 = self.pca1.data.scores * self.pca1.data.singular_values - pca_scores2 = self.pca2.data.scores * self.pca2.data.singular_values + pca_scores1 = self.pca1.data["scores"] * self.pca1.singular_values() + pca_scores2 = self.pca2.data["scores"] * self.pca2.singular_values() # Compute the cross-covariance matrix of the PCA scores pca_scores1 = pca_scores1.rename({"mode": "feature1"}) pca_scores2 = pca_scores2.rename({"mode": "feature2"}) @@ -116,8 +108,9 @@ def _fit_algorithm( V1 = decomposer.U_ # left singular vectors (feature1 x mode) V2 = decomposer.V_ # right singular vectors (feature2 x mode) - V1pre = self.pca1.data.components # left PCA eigenvectors (feature x mode) - V2pre = self.pca2.data.components # right PCA eigenvectors (feature x mode) + # left and right PCA eigenvectors (feature x mode) + V1pre = self.pca1.data["components"] + V2pre = self.pca2.data["components"] # Compute the singular vectors V1pre = V1pre.rename({"mode": "feature1"}) @@ -162,19 +155,18 @@ def _fit_algorithm( scores1 = xr.dot(data1, singular_vectors1, dims=feature_name) / norm1 scores2 = xr.dot(data2, singular_vectors2, dims=feature_name) / norm2 - self.data.set_data( - input_data1=data1, - input_data2=data2, - components1=singular_vectors1, - components2=singular_vectors2, - scores1=scores1, - scores2=scores2, - squared_covariance=squared_covariance, - total_squared_covariance=total_squared_covariance, - idx_modes_sorted=idx_sorted_modes, - norm1=norm1, - norm2=norm2, - ) + self.data.add(name="input_data1", data=data1, allow_compute=False) + self.data.add(name="input_data2", data=data2, allow_compute=False) + self.data.add(name="components1", data=singular_vectors1) + self.data.add(name="components2", data=singular_vectors2) + self.data.add(name="scores1", data=scores1) + self.data.add(name="scores2", data=scores2) + self.data.add(name="squared_covariance", data=squared_covariance) + self.data.add(name="total_squared_covariance", data=total_squared_covariance) + self.data.add(name="idx_modes_sorted", data=idx_sorted_modes) + self.data.add(name="norm1", data=norm1) + self.data.add(name="norm2", data=norm2) + # Assign analysis-relevant meta data self.data.set_attrs(self.attrs) return self @@ -209,8 +201,8 @@ def _transform_algorithm( results = [] if data1 is not None: # Project data onto singular vectors - comps1 = self.data.components1 - norm1 = self.data.norm1 + comps1 = self.data["components1"] + norm1 = self.data["norm1"] scores1 = xr.dot(data1, comps1) / norm1 # Inverse transform scores scores1 = self.preprocessor1.inverse_transform_scores(scores1) @@ -218,8 +210,8 @@ def _transform_algorithm( if data2 is not None: # Project data onto singular vectors - comps2 = self.data.components2 - norm2 = self.data.norm2 + comps2 = self.data["components2"] + norm2 = self.data["norm2"] scores2 = xr.dot(data2, comps2) / norm2 # Inverse transform scores scores2 = self.preprocessor2.inverse_transform_scores(scores2) @@ -248,16 +240,16 @@ def inverse_transform(self, mode): """ # Singular vectors - comps1 = self.data.components1.sel(mode=mode) - comps2 = self.data.components2.sel(mode=mode) + comps1 = self.data["components1"].sel(mode=mode) + comps2 = self.data["components2"].sel(mode=mode) # Scores = projections - scores1 = self.data.scores1.sel(mode=mode) - scores2 = self.data.scores2.sel(mode=mode) + scores1 = self.data["scores1"].sel(mode=mode) + scores2 = 
self.data["scores2"].sel(mode=mode) # Norms - norm1 = self.data.norm1.sel(mode=mode) - norm2 = self.data.norm2.sel(mode=mode) + norm1 = self.data["norm1"].sel(mode=mode) + norm2 = self.data["norm2"].sel(mode=mode) # Reconstruct the data data1 = xr.dot(scores1, comps1.conj() * norm1, dims="mode") @@ -280,7 +272,7 @@ def squared_covariance(self): squared singular values of the covariance matrix. """ - return self.data.squared_covariance + return self.data["squared_covariance"] def squared_covariance_fraction(self): """Calculate the squared covariance fraction (SCF). @@ -294,11 +286,31 @@ def squared_covariance_fraction(self): where `m` is the total number of modes and :math:`\\sigma_i` is the `ith` singular value of the covariance matrix. """ - return self.data.squared_covariance_fraction + return self.data["squared_covariance"] / self.data["total_squared_covariance"] def singular_values(self): """Get the singular values of the cross-covariance matrix.""" - return self.data.singular_values + singular_values = xr.apply_ufunc( + np.sqrt, + self.data["squared_covariance"], + dask="allowed", + vectorize=False, + keep_attrs=True, + ) + singular_values.name = "singular_values" + return singular_values + + def total_covariance(self) -> DataArray: + """Get the total covariance. + + This measure follows the defintion of Cheng and Dunkerton (1995). + Note that this measure is not an invariant in MCA. + + """ + tot_cov = self.singular_values().sum() + tot_cov.attrs.update(self.singular_values().attrs) + tot_cov.name = "total_covariance" + return tot_cov def covariance_fraction(self): """Get the covariance fraction (CF). @@ -327,7 +339,8 @@ def covariance_fraction(self): """ # Check how sensitive the CF is to the number of modes - svals = self.data.singular_values + svals = self.singular_values() + tot_var = self.total_covariance() cf = svals[0] / svals.cumsum() change_per_mode = cf.shift({"mode": 1}) - cf change_in_cf_in_last_mode = change_per_mode.isel(mode=-1) @@ -335,7 +348,10 @@ def covariance_fraction(self): print( f"Warning: CF is sensitive to the number of modes retained. Please increase `n_modes` for a better estimate." ) - return self.data.covariance_fraction + cov_frac = svals / tot_var + cov_frac.name = "covariance_fraction" + cov_frac.attrs.update(svals.attrs) + return cov_frac def components(self): """Return the singular vectors of the left and right field. @@ -412,11 +428,11 @@ def homogeneous_patterns(self, correction=None, alpha=0.05): Right p-values. """ - input_data1 = self.data.input_data1 - input_data2 = self.data.input_data2 + input_data1 = self.data["input_data1"] + input_data2 = self.data["input_data2"] - scores1 = self.data.scores1 - scores2 = self.data.scores2 + scores1 = self.data["scores1"] + scores2 = self.data["scores2"] hom_pat1, pvals1 = pearson_correlation( input_data1, scores1, correction=correction, alpha=alpha @@ -474,11 +490,11 @@ def heterogeneous_patterns(self, correction=None, alpha=0.05): The desired family-wise error rate. Not used if `correction` is None. 
""" - input_data1 = self.data.input_data1 - input_data2 = self.data.input_data2 + input_data1 = self.data["input_data1"] + input_data2 = self.data["input_data2"] - scores1 = self.data.scores1 - scores2 = self.data.scores2 + scores1 = self.data["scores1"] + scores2 = self.data["scores2"] patterns1, pvals1 = pearson_correlation( input_data1, scores2, correction=correction, alpha=alpha @@ -564,9 +580,6 @@ def __init__(self, padding="exp", decay_factor=0.2, **kwargs): self.attrs.update({"model": "Complex MCA"}) self._params.update({"padding": padding, "decay_factor": decay_factor}) - # Initialize the DataContainer to store the results - self.data: ComplexMCADataContainer = ComplexMCADataContainer() - def _fit_algorithm(self, data1: DataArray, data2: DataArray) -> Self: sample_name = self.sample_name feature_name = self.feature_name @@ -590,8 +603,8 @@ def _fit_algorithm(self, data1: DataArray, data2: DataArray) -> Self: self.pca1.fit(data1, sample_name) self.pca2.fit(data2, sample_name) # Get the PCA scores - pca_scores1 = self.pca1.data.scores * self.pca1.data.singular_values - pca_scores2 = self.pca2.data.scores * self.pca2.data.singular_values + pca_scores1 = self.pca1.data["scores"] * self.pca1.singular_values() + pca_scores2 = self.pca2.data["scores"] * self.pca2.singular_values() # Apply hilbert transform pca_scores1 = hilbert_transform( pca_scores1, dims=(sample_name, "mode"), **hilbert_kwargs @@ -609,10 +622,9 @@ def _fit_algorithm(self, data1: DataArray, data2: DataArray) -> Self: V1 = decomposer.U_ # left singular vectors (feature_temp1 x mode) V2 = decomposer.V_ # right singular vectors (feature_temp2 x mode) - # left PCA eigenvectors (feature_name x mode) - V1pre = self.pca1.data.components - # right PCA eigenvectors (feature_name x mode) - V2pre = self.pca2.data.components + # left and right PCA eigenvectors (feature_name x mode) + V1pre = self.pca1.data["components"] + V2pre = self.pca2.data["components"] # Compute the singular vectors V1pre = V1pre.rename({"mode": "feature_temp1"}) @@ -664,19 +676,18 @@ def _fit_algorithm(self, data1: DataArray, data2: DataArray) -> Self: scores1 = xr.dot(data1, singular_vectors1) / norm1 scores2 = xr.dot(data2, singular_vectors2) / norm2 - self.data.set_data( - input_data1=data1, - input_data2=data2, - components1=singular_vectors1, - components2=singular_vectors2, - scores1=scores1, - scores2=scores2, - squared_covariance=squared_covariance, - total_squared_covariance=total_squared_covariance, - idx_modes_sorted=idx_sorted_modes, - norm1=norm1, - norm2=norm2, - ) + self.data.add(name="input_data1", data=data1, allow_compute=False) + self.data.add(name="input_data2", data=data2, allow_compute=False) + self.data.add(name="components1", data=singular_vectors1) + self.data.add(name="components2", data=singular_vectors2) + self.data.add(name="scores1", data=scores1) + self.data.add(name="scores2", data=scores2) + self.data.add(name="squared_covariance", data=squared_covariance) + self.data.add(name="total_squared_covariance", data=total_squared_covariance) + self.data.add(name="idx_modes_sorted", data=idx_sorted_modes) + self.data.add(name="norm1", data=norm1) + self.data.add(name="norm2", data=norm2) + # Assign analysis relevant meta data self.data.set_attrs(self.attrs) return self @@ -700,8 +711,11 @@ def components_amplitude(self) -> Tuple[DataObject, DataObject]: Amplitude of the left components. 
""" - comps1 = self.data.components_amplitude1 - comps2 = self.data.components_amplitude2 + comps1 = abs(self.data["components1"]) + comps1.name = "left_components_amplitude" + + comps2 = abs(self.data["components2"]) + comps2.name = "right_components_amplitude" comps1 = self.preprocessor1.inverse_transform_components(comps1) comps2 = self.preprocessor2.inverse_transform_components(comps2) @@ -727,8 +741,11 @@ def components_phase(self) -> Tuple[DataObject, DataObject]: Phase of the right components. """ - comps1 = self.data.components_phase1 - comps2 = self.data.components_phase2 + comps1 = xr.apply_ufunc(np.angle, self.data["components1"], keep_attrs=True) + comps1.name = "left_components_phase" + + comps2 = xr.apply_ufunc(np.angle, self.data["components2"], keep_attrs=True) + comps2.name = "right_components_phase" comps1 = self.preprocessor1.inverse_transform_components(comps1) comps2 = self.preprocessor2.inverse_transform_components(comps2) @@ -754,8 +771,11 @@ def scores_amplitude(self) -> Tuple[DataArray, DataArray]: Amplitude of the right scores. """ - scores1 = self.data.scores_amplitude1 - scores2 = self.data.scores_amplitude2 + scores1 = abs(self.data["scores1"]) + scores2 = abs(self.data["scores2"]) + + scores1.name = "left_scores_amplitude" + scores2.name = "right_scores_amplitude" scores1 = self.preprocessor1.inverse_transform_scores(scores1) scores2 = self.preprocessor2.inverse_transform_scores(scores2) @@ -780,8 +800,11 @@ def scores_phase(self) -> Tuple[DataArray, DataArray]: Phase of the right scores. """ - scores1 = self.data.scores_phase1 - scores2 = self.data.scores_phase2 + scores1 = xr.apply_ufunc(np.angle, self.data["scores1"], keep_attrs=True) + scores2 = xr.apply_ufunc(np.angle, self.data["scores2"], keep_attrs=True) + + scores1.name = "left_scores_phase" + scores2.name = "right_scores_phase" scores1 = self.preprocessor1.inverse_transform_scores(scores1) scores2 = self.preprocessor2.inverse_transform_scores(scores2) diff --git a/xeofs/models/mca_rotator.py b/xeofs/models/mca_rotator.py index 476633a..f2dc14c 100644 --- a/xeofs/models/mca_rotator.py +++ b/xeofs/models/mca_rotator.py @@ -6,10 +6,7 @@ from .mca import MCA, ComplexMCA from ..utils.rotation import promax from ..utils.data_types import DataArray -from ..data_container.mca_rotator_data_container import ( - MCARotatorDataContainer, - ComplexMCARotatorDataContainer, -) +from ..data_container import DataContainer from .._version import __version__ @@ -83,8 +80,8 @@ def __init__( } ) - # Initialize the DataContainer to hold the rotated solution - self.data: MCARotatorDataContainer = MCARotatorDataContainer() + # Define data container to store the rotated solution + self.data = DataContainer() def _compute_rot_mat_inv_trans(self, rotation_matrix, input_dims) -> xr.DataArray: """Compute the inverse transpose of the rotation matrix. @@ -145,8 +142,8 @@ def fit(self, model: MCA | ComplexMCA): # or weighted with the singular values ("squared loadings"), as opposed to the square root of the singular values. # In doing so, the squared covariance remains conserved under rotation, allowing for the estimation of the # modes' importance. 
- norm1 = self.model.data.norm1.sel(mode=slice(1, n_modes)) - norm2 = self.model.data.norm2.sel(mode=slice(1, n_modes)) + norm1 = self.model.data["norm1"].sel(mode=slice(1, n_modes)) + norm2 = self.model.data["norm2"].sel(mode=slice(1, n_modes)) if use_squared_loadings: # Squared loadings approach conserving squared covariance scaling = norm1 * norm2 @@ -154,8 +151,8 @@ def fit(self, model: MCA | ComplexMCA): # Cheng & Dunkerton approach conserving covariance scaling = np.sqrt(norm1 * norm2) - comps1 = self.model.data.components1.sel(mode=slice(1, n_modes)) - comps2 = self.model.data.components2.sel(mode=slice(1, n_modes)) + comps1 = self.model.data["components1"].sel(mode=slice(1, n_modes)) + comps2 = self.model.data["components2"].sel(mode=slice(1, n_modes)) loadings = xr.concat([comps1, comps2], dim=feature_name) * scaling # Rotate loadings @@ -241,8 +238,8 @@ def fit(self, model: MCA | ComplexMCA): ) # Rotate scores using rotation matrix - scores1 = self.model.data.scores1.sel(mode=slice(1, n_modes)) - scores2 = self.model.data.scores2.sel(mode=slice(1, n_modes)) + scores1 = self.model.data["scores1"].sel(mode=slice(1, n_modes)) + scores2 = self.model.data["scores2"].sel(mode=slice(1, n_modes)) RinvT = self._compute_rot_mat_inv_trans( rot_matrix, input_dims=("mode_m", "mode_n") @@ -277,22 +274,27 @@ def fit(self, model: MCA | ComplexMCA): scores2_rot = scores2_rot * modes_sign # Create data container - self.data.set_data( - input_data1=self.model.data.input_data1, - input_data2=self.model.data.input_data2, - components1=comps1_rot, - components2=comps2_rot, - scores1=scores1_rot, - scores2=scores2_rot, - squared_covariance=squared_covariance, - total_squared_covariance=self.model.data.total_squared_covariance, - idx_modes_sorted=idx_modes_sorted, - norm1=norm1_rot, - norm2=norm2_rot, - rotation_matrix=rot_matrix, - phi_matrix=phi_matrix, - modes_sign=modes_sign, + self.data.add( + name="input_data1", data=self.model.data["input_data1"], allow_compute=False ) + self.data.add( + name="input_data2", data=self.model.data["input_data2"], allow_compute=False + ) + self.data.add(name="components1", data=comps1_rot) + self.data.add(name="components2", data=comps2_rot) + self.data.add(name="scores1", data=scores1_rot) + self.data.add(name="scores2", data=scores2_rot) + self.data.add(name="squared_covariance", data=squared_covariance) + self.data.add( + name="total_squared_covariance", + data=self.model.data["total_squared_covariance"], + ) + self.data.add(name="idx_modes_sorted", data=idx_modes_sorted) + self.data.add(name="norm1", data=norm1_rot) + self.data.add(name="norm2", data=norm2_rot) + self.data.add(name="rotation_matrix", data=rot_matrix) + self.data.add(name="phi_matrix", data=phi_matrix) + self.data.add(name="modes_sign", data=modes_sign) # Assign analysis-relevant meta data self.data.set_attrs(self.attrs) @@ -318,7 +320,7 @@ def transform(self, **kwargs) -> DataArray | List[DataArray]: raise ValueError("No data provided. 
Please provide data1 and/or data2.") n_modes = self._params["n_modes"] - rot_matrix = self.data.rotation_matrix + rot_matrix = self.data["rotation_matrix"] RinvT = self._compute_rot_mat_inv_trans( rot_matrix, input_dims=("mode_m", "mode_n") ) @@ -329,8 +331,8 @@ def transform(self, **kwargs) -> DataArray | List[DataArray]: if "data1" in kwargs.keys(): data1 = kwargs["data1"] # Select the (non-rotated) singular vectors of the first dataset - comps1 = self.model.data.components1.sel(mode=slice(1, n_modes)) - norm1 = self.model.data.norm1.sel(mode=slice(1, n_modes)) + comps1 = self.model.data["components1"].sel(mode=slice(1, n_modes)) + norm1 = self.model.data["norm1"].sel(mode=slice(1, n_modes)) # Preprocess the data data1 = self.preprocessor1.transform(data1) @@ -342,10 +344,10 @@ def transform(self, **kwargs) -> DataArray | List[DataArray]: projections1 = xr.dot(projections1, RinvT, dims="mode_m") # Reorder according to variance projections1 = projections1.isel( - mode=self.data.idx_modes_sorted.values + mode=self.data["idx_modes_sorted"].values ).assign_coords(mode=projections1.mode) # Adapt the sign of the scores - projections1 = projections1 * self.data.modes_sign + projections1 = projections1 * self.data["modes_sign"] # Unstack the projections projections1 = self.preprocessor1.inverse_transform_scores(projections1) @@ -355,8 +357,8 @@ def transform(self, **kwargs) -> DataArray | List[DataArray]: if "data2" in kwargs.keys(): data2 = kwargs["data2"] # Select the (non-rotated) singular vectors of the second dataset - comps2 = self.model.data.components2.sel(mode=slice(1, n_modes)) - norm2 = self.model.data.norm2.sel(mode=slice(1, n_modes)) + comps2 = self.model.data["components2"].sel(mode=slice(1, n_modes)) + norm2 = self.model.data["norm2"].sel(mode=slice(1, n_modes)) # Preprocess the data data2 = self.preprocessor2.transform(data2) @@ -368,10 +370,10 @@ def transform(self, **kwargs) -> DataArray | List[DataArray]: projections2 = xr.dot(projections2, RinvT, dims="mode_m") # Reorder according to variance projections2 = projections2.isel( - mode=self.data.idx_modes_sorted.values + mode=self.data["idx_modes_sorted"].values ).assign_coords(mode=projections2.mode) # Determine the sign of the scores - projections2 = projections2 * self.data.modes_sign + projections2 = projections2 * self.data["modes_sign"] # Unstack the projections projections2 = self.preprocessor2.inverse_transform_scores(projections2) @@ -435,9 +437,6 @@ def __init__(self, **kwargs): super().__init__(**kwargs) self.attrs.update({"model": "Complex Rotated MCA"}) - # Initialize the DataContainer to hold the rotated solution - self.data: ComplexMCARotatorDataContainer = ComplexMCARotatorDataContainer() - def transform(self, **kwargs): # Here we make use of the Method Resolution Order (MRO) to call the # transform method of the first class in the MRO after `MCARotator` diff --git a/xeofs/models/opa.py b/xeofs/models/opa.py index 49c711d..382924d 100644 --- a/xeofs/models/opa.py +++ b/xeofs/models/opa.py @@ -6,14 +6,14 @@ from ._base_model import _BaseModel from .eof import EOF from .decomposer import Decomposer -from ..data_container.opa_data_container import OPADataContainer from ..utils.data_types import DataObject, DataArray class OPA(_BaseModel): """Optimal Persistence Analysis (OPA). - OPA identifies the optimal persistence patterns (OPP) with the + OPA identifies the optimal persistence patterns or + optimally persistent patterns (OPP) with the largest decorrelation time in a time-varying field. 
Introduced by DelSole in 2001 [1]_, and further developed in 2006 [2]_, it's a method used to find patterns whose time series show strong persistence over time. @@ -38,12 +38,12 @@ class OPA(_BaseModel): >>> model = OPA(n_modes=10, tau_max=50, n_pca_modes=100) >>> model.fit(data, dim=("time")) - Retrieve the optimal perstitence patterns (OPP) and their time series: + Retrieve the optimally persistent patterns (OPP) and their time series: >>> opp = model.components() >>> opp_ts = model.scores() - Retrieve the decorrelation time of the optimal persistence patterns (OPP): + Retrieve the decorrelation time of the OPPs: >>> decorrelation_time = model.decorrelation_time() """ @@ -57,9 +57,6 @@ def __init__(self, n_modes, tau_max, n_pca_modes, **kwargs): self.attrs.update({"model": "OPA"}) self._params.update({"tau_max": tau_max, "n_pca_modes": n_pca_modes}) - # Initialize the DataContainer to store the results - self.data: OPADataContainer = OPADataContainer() - def _Ctau(self, X, tau: int) -> DataArray: """Compute the time-lag covariance matrix C(tau) of the data X.""" sample_name = self.preprocessor.sample_name @@ -91,17 +88,17 @@ def _fit_algorithm(self, data: DataArray) -> Self: # Perform PCA as a pre-processing step pca = EOF(n_modes=self._params["n_pca_modes"], use_coslat=False) pca.fit(data, dim=sample_name) - svals = pca.data.singular_values - expvar = pca.data.explained_variance - comps = pca.data.components * svals / np.sqrt(expvar) + svals = pca.singular_values() + expvar = pca.data["explained_variance"] + comps = pca.data["components"] * svals / np.sqrt(expvar) # -> comps (feature x mode) - scores = pca.data.scores * np.sqrt(expvar) + scores = pca.data["scores"] * np.sqrt(expvar) # -> scores (sample x mode) # Compute the covariance matrix with zero time lag C0 = self._Ctau(scores, 0) # -> C0 (feature1 x feature2) - C0inv = self._compute_matrix_inverse(C0, dims=("feature1", "feature2")) + # C0inv = self._compute_matrix_inverse(C0, dims=("feature1", "feature2")) # -> C0inv (feature2 x feature1) M = 0.5 * C0 # -> M (feature1 x feature2) @@ -187,13 +184,13 @@ def _fit_algorithm(self, data: DataArray) -> Self: scores = scores.rename({"mode": feature_name}) # -> (sample x feature) # Store the results - self.data.set_data( - input_data=scores, - components=W, - scores=P, - filter_patterns=V, - decorrelation_time=lbda, - ) + # NOTE: not sure if "scores" should be taken as input data here, "data" may be more correct -> to be verified + self.data.add(name="input_data", data=scores, allow_compute=False) + self.data.add(name="components", data=W, allow_compute=True) + self.data.add(name="scores", data=P, allow_compute=True) + self.data.add(name="filter_patterns", data=V, allow_compute=True) + self.data.add(name="decorrelation_time", data=lbda, allow_compute=True) + self.data.set_attrs(self.attrs) self._U = U # store U for testing purposes of orthogonality self._C0 = C0 # store C0 for testing purposes of orthogonality @@ -206,11 +203,11 @@ def _inverse_transform_algorithm(self, mode) -> DataObject: raise NotImplementedError("OPA does not (yet) support inverse_transform()") def components(self) -> DataObject: - """Return the optimal persistence pattern (OPP).""" + """Return the optimally persistent patterns (OPPs).""" return super().components() def scores(self) -> DataArray: - """Return the time series of the optimal persistence pattern (OPP). + """Return the time series of the OPPs. The time series maximize the decorrelation time and are uncorrelated with each other.
""" @@ -218,9 +215,9 @@ def scores(self) -> DataArray: def decorrelation_time(self) -> DataArray: """Return the decorrelation time of the optimal persistence pattern (OPP).""" - return self.data.decorrelation_time + return self.data["decorrelation_time"] def filter_patterns(self) -> DataObject: """Return the filter patterns.""" - fps = self.data.filter_patterns + fps = self.data["filter_patterns"] return self.preprocessor.inverse_transform_components(fps) diff --git a/xeofs/models/rotator_factory.py b/xeofs/models/rotator_factory.py index 56f8eb3..1b685d3 100644 --- a/xeofs/models/rotator_factory.py +++ b/xeofs/models/rotator_factory.py @@ -1,7 +1,3 @@ -import numpy as np -import xarray as xr -from typing import Optional, Union, List, Tuple - from .eof import EOF, ComplexEOF from .mca import MCA, ComplexMCA from .eof_rotator import EOFRotator, ComplexEOFRotator diff --git a/xeofs/validation/bootstrapper.py b/xeofs/validation/bootstrapper.py index 416c3db..6404f49 100644 --- a/xeofs/validation/bootstrapper.py +++ b/xeofs/validation/bootstrapper.py @@ -7,9 +7,7 @@ from tqdm import trange from ..models import EOF -from ..data_container.eof_bootstrapper_data_container import ( - EOFBootstrapperDataContainer, -) +from ..data_container import DataContainer from ..utils.data_types import DataArray from .._version import __version__ @@ -34,6 +32,9 @@ def __init__(self, n_bootstraps=20, seed=None): } ) + # Initialize the DataContainer to store the results + self.data = DataContainer() + @abstractmethod def fit(self, model): """Bootstrap a given model.""" @@ -50,16 +51,13 @@ def __init__(self, n_bootstraps=20, seed=None): super().__init__(n_bootstraps=n_bootstraps, seed=seed) self.attrs.update({"model": "Bootstrapped EOF analysis"}) - # Initialize the DataContainer to store the results - self.data: EOFBootstrapperDataContainer = EOFBootstrapperDataContainer() - def fit(self, model: EOF): """Bootstrap a given model.""" self.model = model self.preprocessor = model.preprocessor - input_data = model.data.input_data + input_data = model.data["input_data"] n_samples = input_data.sample.size model_params = model.get_params() @@ -74,7 +72,6 @@ def fit(self, model: EOF): bst_total_variance = [] # type: ignore bst_components = [] # type: ignore bst_scores = [] # type: ignore - bst_idx_modes_sorted = [] # type: ignore for i in trange(n_bootstraps): # Sample with replacement idx_rnd = rng.choice(n_samples, n_samples, replace=True) @@ -86,21 +83,18 @@ def fit(self, model: EOF): ) bst_model.fit(bst_data, dim="sample") # Save results - expvar = bst_model.data.explained_variance - totvar = bst_model.data.total_variance - idx_modes_sorted = bst_model.data.idx_modes_sorted - components = bst_model.data.components + expvar = bst_model.data["explained_variance"] + totvar = bst_model.data["total_variance"] + components = bst_model.data["components"] scores = bst_model.transform(input_data) bst_expvar.append(expvar) bst_total_variance.append(totvar) - bst_idx_modes_sorted.append(idx_modes_sorted) bst_components.append(components) bst_scores.append(scores) # Concatenate the bootstrap results along a new dimension bst_expvar: DataArray = xr.concat(bst_expvar, dim="n") bst_total_variance: DataArray = xr.concat(bst_total_variance, dim="n") - bst_idx_modes_sorted: DataArray = xr.concat(bst_idx_modes_sorted, dim="n") bst_components: DataArray = xr.concat(bst_components, dim="n") bst_scores: DataArray = xr.concat(bst_scores, dim="n") @@ -108,14 +102,13 @@ def fit(self, model: EOF): coords_n = np.arange(1, n_bootstraps + 1) 
bst_expvar = bst_expvar.assign_coords(n=coords_n) bst_total_variance = bst_total_variance.assign_coords(n=coords_n) - bst_idx_modes_sorted = bst_idx_modes_sorted.assign_coords(n=coords_n) bst_components = bst_components.assign_coords(n=coords_n) bst_scores = bst_scores.assign_coords(n=coords_n) # Fix sign of individual components determined by correlation coefficients # for a given mode with all the individual bootstrap members # NOTE: we use scores as they typically have a lower dimensionality than components - model_scores = model.data.scores + model_scores = model.data["scores"] corr = ( (bst_scores * model_scores).mean("sample") / bst_scores.std("sample") @@ -125,13 +118,13 @@ def fit(self, model: EOF): bst_components = bst_components * signs bst_scores = bst_scores * signs - self.data.set_data( - input_data=self.model.data.input_data, - components=bst_components, - scores=bst_scores, - explained_variance=bst_expvar, - total_variance=bst_total_variance, - idx_modes_sorted=bst_idx_modes_sorted, + self.data.add( + name="input_data", data=model.data["input_data"], allow_compute=False ) + self.data.add(name="components", data=bst_components) + self.data.add(name="scores", data=bst_scores) + self.data.add(name="explained_variance", data=bst_expvar) + self.data.add(name="total_variance", data=bst_total_variance) + # Assign the same attributes as the original model self.data.set_attrs(self.attrs) From ebadaa21fc0d212034ac617f42ccd3b5e579c54b Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Mon, 16 Oct 2023 12:13:25 +0200 Subject: [PATCH 23/43] refactor(Preprocessor): enforce input as list --- tests/models/test_eof.py | 21 +- tests/models/test_opa.py | 27 +- .../test_dataarray_multiindex_converter.py | 6 +- tests/preprocessing/test_dataarray_renamer.py | 86 +++ .../preprocessing/test_dataarray_sanitizer.py | 20 +- tests/preprocessing/test_dataarray_scaler.py | 161 ++--- .../test_datalist_multiindex_converter.py | 186 +++--- tests/preprocessing/test_datalist_scaler.py | 471 +++++++------- tests/preprocessing/test_datalist_stacker.py | 472 +++++++------- .../test_dataset_multiindex_converter.py | 6 +- tests/preprocessing/test_dataset_renamer.py | 90 +++ tests/preprocessing/test_dataset_scaler.py | 133 +--- .../test_preprocessor_dataarray.py | 25 +- .../test_preprocessor_datalist.py | 24 +- .../test_preprocessor_dataset.py | 28 +- xeofs/models/_base_cross_model.py | 10 +- xeofs/models/_base_model.py | 68 +- xeofs/models/eof.py | 4 - xeofs/preprocessing/__init__.py | 12 + xeofs/preprocessing/_base_scaler.py | 32 - xeofs/preprocessing/_base_stacker.py | 153 ----- xeofs/preprocessing/concatenator.py | 117 ++++ xeofs/preprocessing/dimension_renamer.py | 61 ++ xeofs/preprocessing/factory.py | 100 +-- xeofs/preprocessing/list_processor.py | 106 ++++ xeofs/preprocessing/multi_index_converter.py | 108 ++-- xeofs/preprocessing/preprocessor.py | 246 ++++++-- xeofs/preprocessing/sanitizer.py | 74 +-- xeofs/preprocessing/scaler.py | 581 +++++++----------- xeofs/preprocessing/stacker.py | 536 ++++++---------- xeofs/preprocessing/transformer.py | 67 ++ xeofs/utils/data_types.py | 22 +- xeofs/utils/xarray_utils.py | 144 +++-- xeofs/validation/bootstrapper.py | 4 +- 34 files changed, 2138 insertions(+), 2063 deletions(-) create mode 100644 tests/preprocessing/test_dataarray_renamer.py create mode 100644 tests/preprocessing/test_dataset_renamer.py delete mode 100644 xeofs/preprocessing/_base_scaler.py delete mode 100644 xeofs/preprocessing/_base_stacker.py create mode 100644 xeofs/preprocessing/concatenator.py
create mode 100644 xeofs/preprocessing/dimension_renamer.py create mode 100644 xeofs/preprocessing/list_processor.py create mode 100644 xeofs/preprocessing/transformer.py diff --git a/tests/models/test_eof.py b/tests/models/test_eof.py index c822e4e..7d12b04 100644 --- a/tests/models/test_eof.py +++ b/tests/models/test_eof.py @@ -11,16 +11,8 @@ def test_init(): """Tests the initialization of the EOF class""" eof = EOF(n_modes=5, standardize=True, use_coslat=True) - # Assert parameters are correctly stored in the _params attribute - assert eof._params == { - "n_modes": 5, - "standardize": True, - "use_coslat": True, - "use_weights": False, - "solver": "auto", - } - # Assert preprocessor has been initialized + assert hasattr(eof, "_params") assert hasattr(eof, "preprocessor") @@ -254,13 +246,10 @@ def test_get_params(): # Test get_params method params = eof.get_params() assert isinstance(params, dict) - assert params == { - "n_modes": 5, - "standardize": True, - "use_coslat": True, - "use_weights": False, - "solver": "auto", - } + assert params.get("n_modes") == 5 + assert params.get("standardize") is True + assert params.get("use_coslat") is True + assert params.get("solver") == "auto" @pytest.mark.parametrize( diff --git a/tests/models/test_opa.py b/tests/models/test_opa.py index 88dea7c..d587c78 100644 --- a/tests/models/test_opa.py +++ b/tests/models/test_opa.py @@ -16,18 +16,8 @@ def test_init(): """Tests the initialization of the OPA class""" opa = OPA(n_modes=3, tau_max=3, n_pca_modes=19, use_coslat=True) - # Assert parameters are correctly stored in the _params attribute - assert opa._params == { - "n_modes": 3, - "tau_max": 3, - "n_pca_modes": 19, - "standardize": False, - "use_coslat": True, - "use_weights": False, - "solver": "auto", - } - # Assert preprocessor has been initialized + assert hasattr(opa, "_params") assert hasattr(opa, "preprocessor") @@ -229,15 +219,12 @@ def test_get_params(opa_model): # Test get_params method params = opa_model.get_params() assert isinstance(params, dict) - assert params == { - "n_modes": 3, - "tau_max": 3, - "n_pca_modes": 19, - "standardize": False, - "use_coslat": False, - "use_weights": False, - "solver": "auto", - } + assert params.get("n_modes") == 3 + assert params.get("tau_max") == 3 + assert params.get("n_pca_modes") == 19 + assert params.get("standardize") is False + assert params.get("use_coslat") is False + assert params.get("solver") == "auto" @pytest.mark.parametrize( diff --git a/tests/preprocessing/test_dataarray_multiindex_converter.py b/tests/preprocessing/test_dataarray_multiindex_converter.py index d0fa9f6..876ec5f 100644 --- a/tests/preprocessing/test_dataarray_multiindex_converter.py +++ b/tests/preprocessing/test_dataarray_multiindex_converter.py @@ -2,7 +2,7 @@ import pandas as pd from xeofs.preprocessing.multi_index_converter import ( - DataArrayMultiIndexConverter, + MultiIndexConverter, ) from ..conftest import generate_synthetic_dataarray from xeofs.utils.data_types import DataArray @@ -34,7 +34,7 @@ indirect=["synthetic_dataarray"], ) def test_transform(synthetic_dataarray): - converter = DataArrayMultiIndexConverter() + converter = MultiIndexConverter() converter.fit(synthetic_dataarray) transformed_data = converter.transform(synthetic_dataarray) @@ -63,7 +63,7 @@ def test_transform(synthetic_dataarray): indirect=["synthetic_dataarray"], ) def test_inverse_transform_data(synthetic_dataarray): - converter = DataArrayMultiIndexConverter() + converter = MultiIndexConverter() converter.fit(synthetic_dataarray) 
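# Round trip: transforming and then inverting should recover the original indexes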
transformed_data = converter.transform(synthetic_dataarray) inverse_transformed_data = converter.inverse_transform_data(transformed_data) diff --git a/tests/preprocessing/test_dataarray_renamer.py b/tests/preprocessing/test_dataarray_renamer.py new file mode 100644 index 0000000..efb5d25 --- /dev/null +++ b/tests/preprocessing/test_dataarray_renamer.py @@ -0,0 +1,86 @@ +import pytest + +from xeofs.preprocessing.dimension_renamer import DimensionRenamer +from ..utilities import ( + data_is_dask, + get_dims_from_data, +) + +# ============================================================================= +# GENERALLY VALID TEST CASES +# ============================================================================= +N_SAMPLE_DIMS = [1, 2] +N_FEATURE_DIMS = [1, 2] +INDEX_POLICY = ["index", "multiindex"] +NAN_POLICY = ["no_nan"] +DASK_POLICY = ["no_dask", "dask"] +SEED = [0] + +VALID_TEST_DATA = [ + (ns, nf, index, nan, dask) + for ns in N_SAMPLE_DIMS + for nf in N_FEATURE_DIMS + for index in INDEX_POLICY + for nan in NAN_POLICY + for dask in DASK_POLICY +] + + +@pytest.mark.parametrize( + "synthetic_dataarray", + VALID_TEST_DATA, + indirect=["synthetic_dataarray"], +) +def test_transform(synthetic_dataarray): + all_dims, sample_dims, feature_dims = get_dims_from_data(synthetic_dataarray) + + n_dims = len(all_dims) + + base = "new" + start = 10 + expected_dims = set(base + str(i) for i in range(start, start + n_dims)) + + renamer = DimensionRenamer(base=base, start=start) + renamer.fit(synthetic_dataarray, sample_dims, feature_dims) + transformed_data = renamer.transform(synthetic_dataarray) + + is_dask_before = data_is_dask(synthetic_dataarray) + is_dask_after = data_is_dask(transformed_data) + + # Transforming doesn't change the dask-ness of the data + assert is_dask_before == is_dask_after + + # Transforming converts dimension names + given_dims = set(transformed_data.dims) + assert given_dims == expected_dims + + # Result is robust to calling the method multiple times + transformed_data = renamer.transform(synthetic_dataarray) + given_dims = set(transformed_data.dims) + assert given_dims == expected_dims + + +@pytest.mark.parametrize( + "synthetic_dataarray", + VALID_TEST_DATA, + indirect=["synthetic_dataarray"], +) +def test_inverse_transform_data(synthetic_dataarray): + all_dims, sample_dims, feature_dims = get_dims_from_data(synthetic_dataarray) + + base = "new" + start = 10 + + renamer = DimensionRenamer(base=base, start=start) + renamer.fit(synthetic_dataarray, sample_dims, feature_dims) + transformed_data = renamer.transform(synthetic_dataarray) + inverse_transformed_data = renamer.inverse_transform_data(transformed_data) + + is_dask_before = data_is_dask(synthetic_dataarray) + is_dask_after = data_is_dask(transformed_data) + + # Transforming doesn't change the dask-ness of the data + assert is_dask_before == is_dask_after + + assert inverse_transformed_data.identical(synthetic_dataarray) + assert set(inverse_transformed_data.dims) == set(synthetic_dataarray.dims) diff --git a/tests/preprocessing/test_dataarray_sanitizer.py b/tests/preprocessing/test_dataarray_sanitizer.py index 7b46622..c125705 100644 --- a/tests/preprocessing/test_dataarray_sanitizer.py +++ b/tests/preprocessing/test_dataarray_sanitizer.py @@ -2,7 +2,7 @@ import numpy as np import xarray as xr -from xeofs.preprocessing.sanitizer import DataArraySanitizer +from xeofs.preprocessing.sanitizer import Sanitizer from xeofs.utils.data_types import DataArray from ..conftest import generate_synthetic_dataarray from ..utilities 
import ( @@ -63,7 +63,7 @@ def test_fit_valid_dimension_names(sample_name, feature_name, data_params): data = generate_synthetic_dataarray(*data_params) data = data.rename({"sample0": sample_name, "feature0": feature_name}) - sanitizer = DataArraySanitizer(sample_name=sample_name, feature_name=feature_name) + sanitizer = Sanitizer(sample_name=sample_name, feature_name=feature_name) sanitizer.fit(data) data_clean = sanitizer.transform(data) reconstructed_data = sanitizer.inverse_transform_data(data_clean) @@ -84,7 +84,7 @@ def test_fit_valid_dimension_names(sample_name, feature_name, data_params): def test_fit_invalid_dimension_names(sample_name, feature_name, data_params): data = generate_synthetic_dataarray(*data_params) - sanitizer = DataArraySanitizer(sample_name=sample_name, feature_name=feature_name) + sanitizer = Sanitizer(sample_name=sample_name, feature_name=feature_name) with pytest.raises(ValueError): sanitizer.fit(data) @@ -99,7 +99,7 @@ def test_fit(synthetic_dataarray): data = synthetic_dataarray data = data.rename({"sample0": "sample", "feature0": "feature"}) - sanitizer = DataArraySanitizer() + sanitizer = Sanitizer() with pytest.raises(ValueError): sanitizer.fit(data) @@ -113,7 +113,7 @@ def test_transform(synthetic_dataarray): data = synthetic_dataarray data = data.rename({"sample0": "sample", "feature0": "feature"}) - sanitizer = DataArraySanitizer() + sanitizer = Sanitizer() sanitizer.fit(data) transformed_data = sanitizer.transform(data) transformed_data2 = sanitizer.transform(data) @@ -138,7 +138,7 @@ def test_transform_invalid(synthetic_dataarray): data = synthetic_dataarray data = data.rename({"sample0": "sample", "feature0": "feature"}) - sanitizer = DataArraySanitizer() + sanitizer = Sanitizer() sanitizer.fit(data) with pytest.raises(ValueError): sanitizer.transform(data.isel(feature0=slice(0, 2))) @@ -153,7 +153,7 @@ def test_fit_transform(synthetic_dataarray): data = synthetic_dataarray data = data.rename({"sample0": "sample", "feature0": "feature"}) - sanitizer = DataArraySanitizer() + sanitizer = Sanitizer() transformed_data = sanitizer.fit_transform(data) is_dask_before = data_is_dask(data) @@ -175,7 +175,7 @@ def test_invserse_transform_data(synthetic_dataarray): data = synthetic_dataarray data = data.rename({"sample0": "sample", "feature0": "feature"}) - sanitizer = DataArraySanitizer() + sanitizer = Sanitizer() sanitizer.fit(data) cleaned_data = sanitizer.transform(data) uncleaned_data = sanitizer.inverse_transform_data(cleaned_data) @@ -201,7 +201,7 @@ def test_invserse_transform_components(synthetic_dataarray): data: DataArray = synthetic_dataarray data = data.rename({"sample0": "sample", "feature0": "feature"}) - sanitizer = DataArraySanitizer() + sanitizer = Sanitizer() sanitizer.fit(data) stacked_data = sanitizer.transform(data) @@ -228,7 +228,7 @@ def test_invserse_transform_scores(synthetic_dataarray): data: DataArray = synthetic_dataarray data = data.rename({"sample0": "sample", "feature0": "feature"}) - sanitizer = DataArraySanitizer() + sanitizer = Sanitizer() sanitizer.fit(data) stacked_data = sanitizer.transform(data) diff --git a/tests/preprocessing/test_dataarray_scaler.py b/tests/preprocessing/test_dataarray_scaler.py index f38e466..a0c7b6a 100644 --- a/tests/preprocessing/test_dataarray_scaler.py +++ b/tests/preprocessing/test_dataarray_scaler.py @@ -2,64 +2,47 @@ import xarray as xr import numpy as np -from xeofs.preprocessing.scaler import DataArrayScaler +from xeofs.preprocessing.scaler import Scaler @pytest.mark.parametrize( - "with_std, 
with_coslat, with_weights", + "with_std, with_coslat", [ - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), + (True, True), + (True, True), + (True, False), + (True, False), + (False, True), + (False, True), + (False, False), + (False, False), + (True, True), + (True, True), + (True, False), + (True, False), + (False, True), + (False, True), + (False, False), + (False, False), ], ) -def test_init_params(with_std, with_coslat, with_weights): - s = DataArrayScaler( - with_std=with_std, with_coslat=with_coslat, with_weights=with_weights - ) +def test_init_params(with_std, with_coslat): + s = Scaler(with_std=with_std, with_coslat=with_coslat) assert s.get_params()["with_std"] == with_std assert s.get_params()["with_coslat"] == with_coslat - assert s.get_params()["with_weights"] == with_weights @pytest.mark.parametrize( - "with_std, with_coslat, with_weights", + "with_std, with_coslat", [ - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), + (True, True), + (True, False), + (False, True), + (False, False), ], ) -def test_fit_params(with_std, with_coslat, with_weights, mock_data_array): - s = DataArrayScaler( - with_std=with_std, with_coslat=with_coslat, with_weights=with_weights - ) +def test_fit_params(with_std, with_coslat, mock_data_array): + s = Scaler(with_std=with_std, with_coslat=with_coslat) sample_dims = ["time"] feature_dims = ["lat", "lon"] size_lats = mock_data_array.lat.size @@ -70,48 +53,37 @@ def test_fit_params(with_std, with_coslat, with_weights, mock_data_array): assert hasattr(s, "std_"), "Scaler has no std attribute." if with_coslat: assert hasattr(s, "coslat_weights_"), "Scaler has no coslat_weights attribute." - if with_weights: - assert hasattr(s, "weights_"), "Scaler has no weights attribute." assert s.mean_ is not None, "Scaler mean is None." if with_std: assert s.std_ is not None, "Scaler std is None." if with_coslat: assert s.coslat_weights_ is not None, "Scaler coslat_weights is None." - if with_weights: - assert s.weights_ is not None, "Scaler weights is None." 
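
The change above folds DataArrayScaler into a single Scaler class and drops the with_weights constructor flag: weights are now supplied to fit/fit_transform together with the sample and feature dimensions. A minimal usage sketch inferred from the updated tests; the array shape, coordinate values, and sizes below are illustrative only:

import numpy as np
import xarray as xr
from xeofs.preprocessing.scaler import Scaler

# Toy data: 10 time steps on a 5 x 4 lat-lon grid (illustrative shapes)
da = xr.DataArray(
    np.random.rand(10, 5, 4),
    dims=["time", "lat", "lon"],
    coords={"lat": np.linspace(-60.0, 60.0, 5)},
)
# Optional per-latitude weights now go to fit/fit_transform, not the constructor
weights = xr.DataArray(np.random.rand(5), dims=["lat"], coords={"lat": da.lat})

scaler = Scaler(with_std=True, with_coslat=True)
scaled = scaler.fit_transform(da, ["time"], ["lat", "lon"], weights)
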
@pytest.mark.parametrize( "with_std, with_coslat, with_weights", [ (True, True, True), - (True, True, False), (True, False, True), - (True, False, False), (False, True, True), - (False, True, False), (False, False, True), - (False, False, False), - (True, True, True), (True, True, False), - (True, False, True), (True, False, False), - (False, True, True), (False, True, False), - (False, False, True), (False, False, False), ], ) def test_transform_params(with_std, with_coslat, with_weights, mock_data_array): - s = DataArrayScaler( - with_std=with_std, with_coslat=with_coslat, with_weights=with_weights - ) + s = Scaler(with_std=with_std, with_coslat=with_coslat) sample_dims = ["time"] feature_dims = ["lat", "lon"] size_lats = mock_data_array.lat.size - weights = xr.DataArray( - np.random.rand(size_lats), dims=["lat"], coords={"lat": mock_data_array.lat} - ) + if with_weights: + weights = xr.DataArray( + np.random.rand(size_lats), dims=["lat"], coords={"lat": mock_data_array.lat} + ) + else: + weights = None s.fit(mock_data_array, sample_dims, feature_dims, weights) transformed = s.transform(mock_data_array) assert transformed is not None, "Transformed data is None." @@ -119,16 +91,12 @@ def test_transform_params(with_std, with_coslat, with_weights, mock_data_array): transformed_mean = transformed.mean(sample_dims, skipna=False) assert np.allclose(transformed_mean, 0), "Mean of the transformed data is not zero." - if with_std: + if with_std and not (with_coslat or with_weights): transformed_std = transformed.std(sample_dims, skipna=False) - if with_coslat or with_weights: - assert ( - transformed_std <= 1 - ).all(), "Standard deviation of the transformed data is larger one." - else: - assert np.allclose( - transformed_std, 1 - ), "Standard deviation of the transformed data is not one." + + assert np.allclose( + transformed_std, 1 + ), "Standard deviation of the transformed data is not one." if with_coslat: assert s.coslat_weights_ is not None, "Scaler coslat_weights is None." @@ -136,40 +104,23 @@ def test_transform_params(with_std, with_coslat, with_weights, mock_data_array): transformed, mock_data_array ), "Data has not been transformed." - if with_weights: - assert s.weights_ is not None, "Scaler weights is None." - assert not np.array_equal( - transformed, mock_data_array - ), "Data has not been transformed." 
- transformed2 = s.fit_transform(mock_data_array, sample_dims, feature_dims, weights) xr.testing.assert_allclose(transformed, transformed2) @pytest.mark.parametrize( - "with_std, with_coslat, with_weights", + "with_std, with_coslat", [ - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), + (True, True), + (True, False), + (False, True), + (False, False), ], ) -def test_inverse_transform_params(with_std, with_coslat, with_weights, mock_data_array): - s = DataArrayScaler( - with_std=with_std, with_coslat=with_coslat, with_weights=with_weights +def test_inverse_transform_params(with_std, with_coslat, mock_data_array): + s = Scaler( + with_std=with_std, + with_coslat=with_coslat, ) sample_dims = ["time"] feature_dims = ["lat", "lon"] @@ -193,7 +144,7 @@ def test_inverse_transform_params(with_std, with_coslat, with_weights, mock_data ], ) def test_fit_dims(dim_sample, dim_feature, mock_data_array): - s = DataArrayScaler(with_std=True) + s = Scaler(with_std=True) s.fit(mock_data_array, dim_sample, dim_feature) assert hasattr(s, "mean_"), "Scaler has no mean attribute." assert s.mean_ is not None, "Scaler mean is None." @@ -218,7 +169,7 @@ def test_fit_dims(dim_sample, dim_feature, mock_data_array): ], ) def test_fit_transform_dims(dim_sample, dim_feature, mock_data_array): - s = DataArrayScaler() + s = Scaler() transformed = s.fit_transform(mock_data_array, dim_sample, dim_feature) # check that all dimensions are present assert set(transformed.dims) == set( @@ -231,14 +182,12 @@ def test_fit_transform_dims(dim_sample, dim_feature, mock_data_array): # Test input types def test_fit_input_type(mock_data_array, mock_dataset, mock_data_array_list): - s = DataArrayScaler() - with pytest.raises(ValueError): - s.fit(mock_dataset, ["time"], ["lon", "lat"]) - with pytest.raises(ValueError): + s = Scaler() + + with pytest.raises(TypeError): s.fit(mock_data_array_list, ["time"], ["lon", "lat"]) s.fit(mock_data_array, ["time"], ["lon", "lat"]) - with pytest.raises(ValueError): - s.transform(mock_dataset) - with pytest.raises(ValueError): + + with pytest.raises(TypeError): s.transform(mock_data_array_list) diff --git a/tests/preprocessing/test_datalist_multiindex_converter.py b/tests/preprocessing/test_datalist_multiindex_converter.py index 6c90101..1e8003c 100644 --- a/tests/preprocessing/test_datalist_multiindex_converter.py +++ b/tests/preprocessing/test_datalist_multiindex_converter.py @@ -1,93 +1,93 @@ -import pytest -import pandas as pd - -from xeofs.preprocessing.multi_index_converter import ( - DataListMultiIndexConverter, -) -from xeofs.utils.data_types import DataArray -from ..utilities import assert_expected_dims, data_is_dask, data_has_multiindex - -# ============================================================================= -# GENERALLY VALID TEST CASES -# ============================================================================= -N_ARRAYS = [1, 2] -N_SAMPLE_DIMS = [1, 2] -N_FEATURE_DIMS = [1, 2] -INDEX_POLICY = ["index"] -NAN_POLICY = ["no_nan"] -DASK_POLICY = ["no_dask", "dask"] -SEED = [0] - -VALID_TEST_DATA = [ - (na, ns, nf, index, nan, dask) - for na in N_ARRAYS - for ns in N_SAMPLE_DIMS - for nf in N_FEATURE_DIMS - for index in INDEX_POLICY - for nan in NAN_POLICY - 
for dask in DASK_POLICY -] - - -# TESTS -# ============================================================================= -@pytest.mark.parametrize( - "synthetic_datalist", - VALID_TEST_DATA, - indirect=["synthetic_datalist"], -) -def test_transform(synthetic_datalist): - converter = DataListMultiIndexConverter() - converter.fit(synthetic_datalist) - transformed_data = converter.transform(synthetic_datalist) - - is_dask_before = data_is_dask(synthetic_datalist) - is_dask_after = data_is_dask(transformed_data) - - # Transforming does not affect dimensions - assert_expected_dims(transformed_data, synthetic_datalist, policy="all") - - # Transforming doesn't change the dask-ness of the data - assert is_dask_before == is_dask_after - - # Transforming removes MultiIndex - assert data_has_multiindex(transformed_data) is False - - # Result is robust to calling the method multiple times - transformed_data = converter.transform(synthetic_datalist) - assert data_has_multiindex(transformed_data) is False - - # Transforming data twice won't change the data - transformed_data2 = converter.transform(transformed_data) - assert data_has_multiindex(transformed_data2) is False - assert all( - trans.identical(data) - for trans, data in zip(transformed_data, transformed_data2) - ) - - -@pytest.mark.parametrize( - "synthetic_datalist", - VALID_TEST_DATA, - indirect=["synthetic_datalist"], -) -def test_inverse_transform(synthetic_datalist): - converter = DataListMultiIndexConverter() - converter.fit(synthetic_datalist) - transformed_data = converter.transform(synthetic_datalist) - inverse_transformed_data = converter.inverse_transform_data(transformed_data) - - is_dask_before = data_is_dask(synthetic_datalist) - is_dask_after = data_is_dask(transformed_data) - - # Transforming doesn't change the dask-ness of the data - assert is_dask_before == is_dask_after - - has_multiindex_before = data_has_multiindex(synthetic_datalist) - has_multiindex_after = data_has_multiindex(inverse_transformed_data) - - assert all( - trans.identical(data) - for trans, data in zip(inverse_transformed_data, synthetic_datalist) - ) - assert has_multiindex_before == has_multiindex_after +# import pytest +# import pandas as pd + +# from xeofs.preprocessing.multi_index_converter import ( +# DataListMultiIndexConverter, +# ) +# from xeofs.utils.data_types import DataArray +# from ..utilities import assert_expected_dims, data_is_dask, data_has_multiindex + +# # ============================================================================= +# # GENERALLY VALID TEST CASES +# # ============================================================================= +# N_ARRAYS = [1, 2] +# N_SAMPLE_DIMS = [1, 2] +# N_FEATURE_DIMS = [1, 2] +# INDEX_POLICY = ["index"] +# NAN_POLICY = ["no_nan"] +# DASK_POLICY = ["no_dask", "dask"] +# SEED = [0] + +# VALID_TEST_DATA = [ +# (na, ns, nf, index, nan, dask) +# for na in N_ARRAYS +# for ns in N_SAMPLE_DIMS +# for nf in N_FEATURE_DIMS +# for index in INDEX_POLICY +# for nan in NAN_POLICY +# for dask in DASK_POLICY +# ] + + +# # TESTS +# # ============================================================================= +# @pytest.mark.parametrize( +# "synthetic_datalist", +# VALID_TEST_DATA, +# indirect=["synthetic_datalist"], +# ) +# def test_transform(synthetic_datalist): +# converter = DataListMultiIndexConverter() +# converter.fit(synthetic_datalist) +# transformed_data = converter.transform(synthetic_datalist) + +# is_dask_before = data_is_dask(synthetic_datalist) +# is_dask_after = data_is_dask(transformed_data) + 
+# # Transforming does not affect dimensions +# assert_expected_dims(transformed_data, synthetic_datalist, policy="all") + +# # Transforming doesn't change the dask-ness of the data +# assert is_dask_before == is_dask_after + +# # Transforming removes MultiIndex +# assert data_has_multiindex(transformed_data) is False + +# # Result is robust to calling the method multiple times +# transformed_data = converter.transform(synthetic_datalist) +# assert data_has_multiindex(transformed_data) is False + +# # Transforming data twice won't change the data +# transformed_data2 = converter.transform(transformed_data) +# assert data_has_multiindex(transformed_data2) is False +# assert all( +# trans.identical(data) +# for trans, data in zip(transformed_data, transformed_data2) +# ) + + +# @pytest.mark.parametrize( +# "synthetic_datalist", +# VALID_TEST_DATA, +# indirect=["synthetic_datalist"], +# ) +# def test_inverse_transform(synthetic_datalist): +# converter = DataListMultiIndexConverter() +# converter.fit(synthetic_datalist) +# transformed_data = converter.transform(synthetic_datalist) +# inverse_transformed_data = converter.inverse_transform_data(transformed_data) + +# is_dask_before = data_is_dask(synthetic_datalist) +# is_dask_after = data_is_dask(transformed_data) + +# # Transforming doesn't change the dask-ness of the data +# assert is_dask_before == is_dask_after + +# has_multiindex_before = data_has_multiindex(synthetic_datalist) +# has_multiindex_after = data_has_multiindex(inverse_transformed_data) + +# assert all( +# trans.identical(data) +# for trans, data in zip(inverse_transformed_data, synthetic_datalist) +# ) +# assert has_multiindex_before == has_multiindex_after diff --git a/tests/preprocessing/test_datalist_scaler.py b/tests/preprocessing/test_datalist_scaler.py index 883c08f..3aa64f4 100644 --- a/tests/preprocessing/test_datalist_scaler.py +++ b/tests/preprocessing/test_datalist_scaler.py @@ -1,256 +1,215 @@ -import pytest -import xarray as xr -import numpy as np - -from xeofs.preprocessing.scaler import DataListScaler -from xeofs.utils.data_types import DimsList - - -@pytest.mark.parametrize( - "with_std, with_coslat, with_weights", - [ - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), - ], -) -def test_fit_params(with_std, with_coslat, with_weights, mock_data_array_list): - listscalers = DataListScaler( - with_std=with_std, with_coslat=with_coslat, with_weights=with_weights - ) - data = mock_data_array_list.copy() - sample_dims = ["time"] - feature_dims: DimsList = [["lat", "lon"]] * 3 - size_lats_list = [da.lat.size for da in data] - weights = [ - xr.DataArray(np.random.rand(size), dims=["lat"]) for size in size_lats_list - ] - listscalers.fit(mock_data_array_list, sample_dims, feature_dims, weights) - - for s in listscalers.scalers: - assert hasattr(s, "mean_"), "Scaler has no mean attribute." - if with_std: - assert hasattr(s, "std_"), "Scaler has no std attribute." - if with_coslat: - assert hasattr( - s, "coslat_weights_" - ), "Scaler has no coslat_weights attribute." - if with_weights: - assert hasattr(s, "weights_"), "Scaler has no weights attribute." - assert s.mean_ is not None, "Scaler mean is None." 
- if with_std: - assert s.std_ is not None, "Scaler std is None." - if with_coslat: - assert s.coslat_weights_ is not None, "Scaler coslat_weights is None." - if with_weights: - assert s.weights_ is not None, "Scaler weights is None." - - -@pytest.mark.parametrize( - "with_std, with_coslat, with_weights", - [ - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), - ], -) -def test_transform_params(with_std, with_coslat, with_weights, mock_data_array_list): - listscalers = DataListScaler( - with_std=with_std, with_coslat=with_coslat, with_weights=with_weights - ) - data = mock_data_array_list.copy() - sample_dims = ["time"] - feature_dims: DimsList = [("lat", "lon")] * 3 - size_lats_list = [da.lat.size for da in data] - weights = [ - xr.DataArray(np.random.rand(size), dims=["lat"]) for size in size_lats_list - ] - listscalers.fit( - mock_data_array_list, - sample_dims, - feature_dims, - weights, - ) - - transformed = listscalers.transform(mock_data_array_list) - transformed2 = listscalers.fit_transform( - mock_data_array_list, sample_dims, feature_dims, weights - ) - - for t, t2, s, ref in zip(transformed, transformed2, listscalers.scalers, data): - assert t is not None, "Transformed data is None." - - t_mean = t.mean(sample_dims, skipna=False) - assert np.allclose(t_mean, 0), "Mean of the transformed data is not zero." - - if with_std: - t_std = t.std(sample_dims, skipna=False) - if with_coslat or with_weights: - assert ( - t_std <= 1 - ).all(), "Standard deviation of the transformed data is larger one." - else: - assert np.allclose( - t_std, 1 - ), "Standard deviation of the transformed data is not one." - - if with_coslat: - assert s.coslat_weights_ is not None, "Scaler coslat_weights is None." - assert not np.array_equal( - t, mock_data_array_list - ), "Data has not been transformed." - - if with_weights: - assert s.weights_ is not None, "Scaler weights is None." - assert not np.array_equal(t, ref), "Data has not been transformed." 
- - xr.testing.assert_allclose(t, t2) - - -@pytest.mark.parametrize( - "with_std, with_coslat, with_weights", - [ - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), - ], -) -def test_inverse_transform_params( - with_std, with_coslat, with_weights, mock_data_array_list -): - listscalers = DataListScaler( - with_std=with_std, with_coslat=with_coslat, with_weights=with_weights - ) - data = mock_data_array_list.copy() - sample_dims = ["time"] - feature_dims: DimsList = [["lat", "lon"]] * 3 - size_lats_list = [da.lat.size for da in data] - weights = [ - xr.DataArray(np.random.rand(size), dims=["lat"]) for size in size_lats_list - ] - listscalers.fit(mock_data_array_list, sample_dims, feature_dims, weights) - transformed = listscalers.transform(mock_data_array_list) - inverted = listscalers.inverse_transform_data(transformed) - - # check that inverse transform is the same as the original data - for inv, ref in zip(inverted, mock_data_array_list): - xr.testing.assert_allclose(inv, ref) - - -@pytest.mark.parametrize( - "dim_sample, dim_feature", - [ - (("time",), ("lat", "lon")), - (("time",), ("lon", "lat")), - (("lat", "lon"), ("time",)), - (("lon", "lat"), ("time",)), - ], -) -def test_fit_dims(dim_sample, dim_feature, mock_data_array_list): - listscalers = DataListScaler(with_std=True) - data = mock_data_array_list.copy() - dim_feature = [dim_feature] * 3 - - for s in listscalers.scalers: - assert hasattr(s, "mean"), "Scaler has no mean attribute." - assert s.mean is not None, "Scaler mean is None." - assert hasattr(s, "std"), "Scaler has no std attribute." - assert s.std is not None, "Scaler std is None." - # check that all dimensions are present except the sample dimensions - assert set(s.mean.dims) == set(mock_data_array_list.dims) - set( - dim_sample - ), "Mean has wrong dimensions." - assert set(s.std.dims) == set(mock_data_array_list.dims) - set( - dim_sample - ), "Standard deviation has wrong dimensions." - - -@pytest.mark.parametrize( - "dim_sample, dim_feature", - [ - (("time",), ("lat", "lon")), - (("time",), ("lon", "lat")), - (("lat", "lon"), ("time",)), - (("lon", "lat"), ("time",)), - ], -) -def test_fit_transform_dims(dim_sample, dim_feature, mock_data_array_list): - listscalers = DataListScaler(with_std=True) - data = mock_data_array_list.copy() - dim_feature = [dim_feature] * 3 - transformed = listscalers.fit_transform( - mock_data_array_list, dim_sample, dim_feature - ) - - for trns, ref in zip(transformed, mock_data_array_list): - # check that all dimensions are present - assert set(trns.dims) == set(ref.dims), "Transformed data has wrong dimensions." 
- # check that the coordinates are the same - for dim in ref.dims: - xr.testing.assert_allclose(trns[dim], ref[dim]) - - -# Test input types -@pytest.mark.parametrize( - "dim_sample, dim_feature", - [ - (("time",), ("lat", "lon")), - (("time",), ("lon", "lat")), - (("lat", "lon"), ("time",)), - (("lon", "lat"), ("time",)), - ], -) -def test_fit_input_type( - dim_sample, dim_feature, mock_data_array, mock_dataset, mock_data_array_list -): - s = DataListScaler() - dim_feature = [dim_feature] * 3 - with pytest.raises(TypeError): - s.fit(mock_dataset, dim_sample, dim_feature) - with pytest.raises(TypeError): - s.fit(mock_data_array, dim_sample, dim_feature) - - s.fit(mock_data_array_list, dim_sample, dim_feature) - with pytest.raises(TypeError): - s.transform(mock_dataset) - with pytest.raises(TypeError): - s.transform(mock_data_array) +# import pytest +# import xarray as xr +# import numpy as np + +# from xeofs.preprocessing.scaler import DataListScaler +# from xeofs.utils.data_types import DimsList + + +# @pytest.mark.parametrize( +# "with_std, with_coslat", +# [ +# (True, True), +# (True, False), +# (False, True), +# (False, False), +# ], +# ) +# def test_fit_params(with_std, with_coslat, mock_data_array_list): +# listscalers = DataListScaler(with_std=with_std, with_coslat=with_coslat) +# data = mock_data_array_list.copy() +# sample_dims = ["time"] +# feature_dims: DimsList = [["lat", "lon"]] * 3 +# size_lats_list = [da.lat.size for da in data] +# weights = [ +# xr.DataArray(np.random.rand(size), dims=["lat"]) for size in size_lats_list +# ] +# listscalers.fit(mock_data_array_list, sample_dims, feature_dims, weights) + +# for s in listscalers.scalers: +# assert hasattr(s, "mean_"), "Scaler has no mean attribute." +# if with_std: +# assert hasattr(s, "std_"), "Scaler has no std attribute." +# if with_coslat: +# assert hasattr( +# s, "coslat_weights_" +# ), "Scaler has no coslat_weights attribute." + +# assert s.mean_ is not None, "Scaler mean is None." +# if with_std: +# assert s.std_ is not None, "Scaler std is None." +# if with_coslat: +# assert s.coslat_weights_ is not None, "Scaler coslat_weights is None." + + +# @pytest.mark.parametrize( +# "with_std, with_coslat, with_weights", +# [ +# (True, True, True), +# (True, False, True), +# (False, True, True), +# (False, False, True), +# (True, True, False), +# (True, False, False), +# (False, True, False), +# (False, False, False), +# ], +# ) +# def test_transform_params(with_std, with_coslat, with_weights, mock_data_array_list): +# listscalers = DataListScaler(with_std=with_std, with_coslat=with_coslat) +# data = mock_data_array_list.copy() +# sample_dims = ["time"] +# feature_dims: DimsList = [("lat", "lon")] * 3 +# size_lats_list = [da.lat.size for da in data] +# if with_weights: +# weights = [ +# xr.DataArray(np.random.rand(size), dims=["lat"]) for size in size_lats_list +# ] +# else: +# weights = None +# listscalers.fit( +# mock_data_array_list, +# sample_dims, +# feature_dims, +# weights, +# ) + +# transformed = listscalers.transform(mock_data_array_list) +# transformed2 = listscalers.fit_transform( +# mock_data_array_list, sample_dims, feature_dims, weights +# ) + +# for t, t2, s, ref in zip(transformed, transformed2, listscalers.scalers, data): +# assert t is not None, "Transformed data is None." + +# t_mean = t.mean(sample_dims, skipna=False) +# assert np.allclose(t_mean, 0), "Mean of the transformed data is not zero." 
+
+#     if with_std:
+#         t_std = t.std(sample_dims, skipna=False)
+#         if with_coslat or with_weights:
+#             assert (
+#                 t_std <= 1
+#             ).all(), "Standard deviation of the transformed data is larger than one."
+#         else:
+#             assert np.allclose(
+#                 t_std, 1
+#             ), "Standard deviation of the transformed data is not one."
+
+#     if with_coslat:
+#         assert s.coslat_weights_ is not None, "Scaler coslat_weights is None."
+#         assert not np.array_equal(
+#             t, mock_data_array_list
+#         ), "Data has not been transformed."
+
+#     xr.testing.assert_allclose(t, t2)
+
+
+# @pytest.mark.parametrize(
+#     "with_std, with_coslat",
+#     [
+#         (True, True),
+#         (True, False),
+#         (False, True),
+#         (False, False),
+#     ],
+# )
+# def test_inverse_transform_params(with_std, with_coslat, mock_data_array_list):
+#     listscalers = DataListScaler(
+#         with_std=with_std,
+#         with_coslat=with_coslat,
+#     )
+#     data = mock_data_array_list.copy()
+#     sample_dims = ["time"]
+#     feature_dims: DimsList = [["lat", "lon"]] * 3
+#     size_lats_list = [da.lat.size for da in data]
+#     weights = [
+#         xr.DataArray(np.random.rand(size), dims=["lat"]) for size in size_lats_list
+#     ]
+#     listscalers.fit(mock_data_array_list, sample_dims, feature_dims, weights)
+#     transformed = listscalers.transform(mock_data_array_list)
+#     inverted = listscalers.inverse_transform_data(transformed)
+
+#     # check that inverse transform is the same as the original data
+#     for inv, ref in zip(inverted, mock_data_array_list):
+#         xr.testing.assert_allclose(inv, ref)
+
+
+# @pytest.mark.parametrize(
+#     "dim_sample, dim_feature",
+#     [
+#         (("time",), ("lat", "lon")),
+#         (("time",), ("lon", "lat")),
+#         (("lat", "lon"), ("time",)),
+#         (("lon", "lat"), ("time",)),
+#     ],
+# )
+# def test_fit_dims(dim_sample, dim_feature, mock_data_array_list):
+#     listscalers = DataListScaler(with_std=True)
+#     data = mock_data_array_list.copy()
+#     dim_feature = [dim_feature] * 3
+
+#     for s in listscalers.scalers:
+#         assert hasattr(s, "mean"), "Scaler has no mean attribute."
+#         assert s.mean is not None, "Scaler mean is None."
+#         assert hasattr(s, "std"), "Scaler has no std attribute."
+#         assert s.std is not None, "Scaler std is None."
+#         # check that all dimensions are present except the sample dimensions
+#         assert set(s.mean.dims) == set(mock_data_array_list.dims) - set(
+#             dim_sample
+#         ), "Mean has wrong dimensions."
+#         assert set(s.std.dims) == set(mock_data_array_list.dims) - set(
+#             dim_sample
+#         ), "Standard deviation has wrong dimensions."
+
+
+# @pytest.mark.parametrize(
+#     "dim_sample, dim_feature",
+#     [
+#         (("time",), ("lat", "lon")),
+#         (("time",), ("lon", "lat")),
+#         (("lat", "lon"), ("time",)),
+#         (("lon", "lat"), ("time",)),
+#     ],
+# )
+# def test_fit_transform_dims(dim_sample, dim_feature, mock_data_array_list):
+#     listscalers = DataListScaler(with_std=True)
+#     data = mock_data_array_list.copy()
+#     dim_feature = [dim_feature] * 3
+#     transformed = listscalers.fit_transform(
+#         mock_data_array_list, dim_sample, dim_feature
+#     )
+
+#     for trns, ref in zip(transformed, mock_data_array_list):
+#         # check that all dimensions are present
+#         assert set(trns.dims) == set(ref.dims), "Transformed data has wrong dimensions." 
+# # check that the coordinates are the same +# for dim in ref.dims: +# xr.testing.assert_allclose(trns[dim], ref[dim]) + + +# # Test input types +# @pytest.mark.parametrize( +# "dim_sample, dim_feature", +# [ +# (("time",), ("lat", "lon")), +# (("time",), ("lon", "lat")), +# (("lat", "lon"), ("time",)), +# (("lon", "lat"), ("time",)), +# ], +# ) +# def test_fit_input_type( +# dim_sample, dim_feature, mock_data_array, mock_dataset, mock_data_array_list +# ): +# s = DataListScaler() +# dim_feature = [dim_feature] * 3 +# with pytest.raises(TypeError): +# s.fit(mock_dataset, dim_sample, dim_feature) +# with pytest.raises(TypeError): +# s.fit(mock_data_array, dim_sample, dim_feature) + +# s.fit(mock_data_array_list, dim_sample, dim_feature) +# with pytest.raises(TypeError): +# s.transform(mock_dataset) +# with pytest.raises(TypeError): +# s.transform(mock_data_array) diff --git a/tests/preprocessing/test_datalist_stacker.py b/tests/preprocessing/test_datalist_stacker.py index 5ae91f4..8b0b5be 100644 --- a/tests/preprocessing/test_datalist_stacker.py +++ b/tests/preprocessing/test_datalist_stacker.py @@ -1,236 +1,236 @@ -import pytest -import numpy as np -import xarray as xr - -from xeofs.preprocessing.stacker import DataListStacker -from xeofs.utils.data_types import DataArray, DataList -from ..conftest import generate_list_of_synthetic_dataarrays -from ..utilities import ( - get_dims_from_data_list, - data_is_dask, - assert_expected_dims, - assert_expected_coords, -) - -# ============================================================================= -# GENERALLY VALID TEST CASES -# ============================================================================= -N_ARRAYS = [1, 2] -N_SAMPLE_DIMS = [1, 2] -N_FEATURE_DIMS = [1, 2] -INDEX_POLICY = ["index"] -NAN_POLICY = ["no_nan"] -DASK_POLICY = ["no_dask", "dask"] -SEED = [0] - -VALID_TEST_DATA = [ - (na, ns, nf, index, nan, dask) - for na in N_ARRAYS - for ns in N_SAMPLE_DIMS - for nf in N_FEATURE_DIMS - for index in INDEX_POLICY - for nan in NAN_POLICY - for dask in DASK_POLICY -] - - -# TESTS -# ============================================================================= -@pytest.mark.parametrize( - "sample_name, feature_name, data_params", - [ - ("sample", "feature", (2, 1, 1)), - ("sample0", "feature0", (2, 1, 1)), - ("sample0", "feature", (2, 1, 2)), - ("sample", "feature0", (2, 2, 1)), - ("sample", "feature", (2, 2, 2)), - ("another_sample", "another_feature", (2, 1, 1)), - ("another_sample", "another_feature", (2, 2, 2)), - ], -) -def test_fit_valid_dimension_names(sample_name, feature_name, data_params): - data_list = generate_list_of_synthetic_dataarrays(*data_params) - all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) - - stacker = DataListStacker(sample_name=sample_name, feature_name=feature_name) - stacker.fit(data_list, sample_dims[0], feature_dims) - stacked_data = stacker.transform(data_list) - reconstructed_data_list = stacker.inverse_transform_data(stacked_data) - - assert stacked_data.ndim == 2 - assert set(stacked_data.dims) == set((sample_name, feature_name)) - for reconstructed_data, data in zip(reconstructed_data_list, data_list): - assert set(reconstructed_data.dims) == set(data.dims) - - -@pytest.mark.parametrize( - "sample_name, feature_name, data_params", - [ - ("sample1", "feature", (2, 2, 1)), - ("sample", "feature1", (2, 1, 2)), - ("sample1", "feature1", (2, 3, 3)), - ], -) -def test_fit_invalid_dimension_names(sample_name, feature_name, data_params): - data_list = 
generate_list_of_synthetic_dataarrays(*data_params) - all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) - - stacker = DataListStacker(sample_name=sample_name, feature_name=feature_name) - - with pytest.raises(ValueError): - stacker.fit(data_list, sample_dims[0], feature_dims) - - -@pytest.mark.parametrize( - "synthetic_datalist", - VALID_TEST_DATA, - indirect=["synthetic_datalist"], -) -def test_fit(synthetic_datalist): - data_list = synthetic_datalist - all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) - - stacker = DataListStacker() - stacker.fit(data_list, sample_dims[0], feature_dims) - - -@pytest.mark.parametrize( - "synthetic_datalist", - VALID_TEST_DATA, - indirect=["synthetic_datalist"], -) -def test_transform(synthetic_datalist): - data_list = synthetic_datalist - all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) - - stacker = DataListStacker() - stacker.fit(data_list, sample_dims[0], feature_dims) - transformed_data = stacker.transform(data_list) - transformed_data2 = stacker.transform(data_list) - - is_dask_before = data_is_dask(data_list) - is_dask_after = data_is_dask(transformed_data) - - assert isinstance(transformed_data, DataArray) - assert transformed_data.ndim == 2 - assert transformed_data.dims == ("sample", "feature") - assert is_dask_before == is_dask_after - assert transformed_data.identical(transformed_data2) - - -@pytest.mark.parametrize( - "synthetic_datalist", - VALID_TEST_DATA, - indirect=["synthetic_datalist"], -) -def test_transform_invalid(synthetic_datalist): - data_list = synthetic_datalist - all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) - - stacker = DataListStacker() - stacker.fit(data_list, sample_dims[0], feature_dims) - - data_list = [da.isel(feature0=slice(0, 2)) for da in data_list] - with pytest.raises(ValueError): - stacker.transform(data_list) - - -@pytest.mark.parametrize( - "synthetic_datalist", - VALID_TEST_DATA, - indirect=["synthetic_datalist"], -) -def test_fit_transform(synthetic_datalist): - data_list = synthetic_datalist - all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) - - stacker = DataListStacker() - transformed_data = stacker.fit_transform(data_list, sample_dims[0], feature_dims) - - is_dask_before = data_is_dask(data_list) - is_dask_after = data_is_dask(transformed_data) - - assert isinstance(transformed_data, DataArray) - assert transformed_data.ndim == 2 - assert transformed_data.dims == ("sample", "feature") - assert is_dask_before == is_dask_after - - -@pytest.mark.parametrize( - "synthetic_datalist", - VALID_TEST_DATA, - indirect=["synthetic_datalist"], -) -def test_invserse_transform_data(synthetic_datalist): - data_list = synthetic_datalist - all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) - - stacker = DataListStacker() - stacker.fit(data_list, sample_dims[0], feature_dims) - stacked_data = stacker.transform(data_list) - unstacked_data = stacker.inverse_transform_data(stacked_data) - - is_dask_before = data_is_dask(data_list) - is_dask_after = data_is_dask(unstacked_data) - - # Unstacked data has dimensions of original data - assert_expected_dims(data_list, unstacked_data, policy="all") - # Unstacked data has coordinates of original data - assert_expected_coords(data_list, unstacked_data, policy="all") - # inverse transform should not change dask-ness - assert is_dask_before == is_dask_after - - -@pytest.mark.parametrize( - "synthetic_datalist", - VALID_TEST_DATA, - 
indirect=["synthetic_datalist"], -) -def test_invserse_transform_components(synthetic_datalist): - data_list = synthetic_datalist - all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) - - stacker = DataListStacker() - stacker.fit(data_list, sample_dims[0], feature_dims) - - stacked_data = stacker.transform(data_list) - components = stacked_data.rename({"sample": "mode"}) - components.coords.update({"mode": range(components.mode.size)}) - unstacked_data = stacker.inverse_transform_components(components) - - is_dask_before = data_is_dask(data_list) - is_dask_after = data_is_dask(unstacked_data) - - # Unstacked components has correct feature dimensions - assert_expected_dims(data_list, unstacked_data, policy="feature") - # Unstacked data has feature coordinates of original data - assert_expected_coords(data_list, unstacked_data, policy="feature") - # inverse transform should not change dask-ness - assert is_dask_before == is_dask_after - - -@pytest.mark.parametrize( - "synthetic_datalist", - VALID_TEST_DATA, - indirect=["synthetic_datalist"], -) -def test_invserse_transform_scores(synthetic_datalist): - data_list = synthetic_datalist - all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) - - stacker = DataListStacker() - stacker.fit(data_list, sample_dims[0], feature_dims) - - stacked_data = stacker.transform(data_list) - scores = stacked_data.rename({"feature": "mode"}) - unstacked_data = stacker.inverse_transform_scores(scores) - - is_dask_before = data_is_dask(data_list) - is_dask_after = data_is_dask(unstacked_data) - - # Unstacked scores has correct feature dimensions - assert_expected_dims(data_list[0], unstacked_data, policy="sample") - # Unstacked data has coordinates of original data - assert_expected_coords(data_list[0], unstacked_data, policy="sample") - # inverse transform should not change dask-ness - assert is_dask_before == is_dask_after +# import pytest +# import numpy as np +# import xarray as xr + +# from xeofs.preprocessing.stacker import DataListStacker +# from xeofs.utils.data_types import DataArray, DataList +# from ..conftest import generate_list_of_synthetic_dataarrays +# from ..utilities import ( +# get_dims_from_data_list, +# data_is_dask, +# assert_expected_dims, +# assert_expected_coords, +# ) + +# # ============================================================================= +# # GENERALLY VALID TEST CASES +# # ============================================================================= +# N_ARRAYS = [1, 2] +# N_SAMPLE_DIMS = [1, 2] +# N_FEATURE_DIMS = [1, 2] +# INDEX_POLICY = ["index"] +# NAN_POLICY = ["no_nan"] +# DASK_POLICY = ["no_dask", "dask"] +# SEED = [0] + +# VALID_TEST_DATA = [ +# (na, ns, nf, index, nan, dask) +# for na in N_ARRAYS +# for ns in N_SAMPLE_DIMS +# for nf in N_FEATURE_DIMS +# for index in INDEX_POLICY +# for nan in NAN_POLICY +# for dask in DASK_POLICY +# ] + + +# # TESTS +# # ============================================================================= +# @pytest.mark.parametrize( +# "sample_name, feature_name, data_params", +# [ +# ("sample", "feature", (2, 1, 1)), +# ("sample0", "feature0", (2, 1, 1)), +# ("sample0", "feature", (2, 1, 2)), +# ("sample", "feature0", (2, 2, 1)), +# ("sample", "feature", (2, 2, 2)), +# ("another_sample", "another_feature", (2, 1, 1)), +# ("another_sample", "another_feature", (2, 2, 2)), +# ], +# ) +# def test_fit_valid_dimension_names(sample_name, feature_name, data_params): +# data_list = generate_list_of_synthetic_dataarrays(*data_params) +# all_dims, 
sample_dims, feature_dims = get_dims_from_data_list(data_list) + +# stacker = DataListStacker(sample_name=sample_name, feature_name=feature_name) +# stacker.fit(data_list, sample_dims[0], feature_dims) +# stacked_data = stacker.transform(data_list) +# reconstructed_data_list = stacker.inverse_transform_data(stacked_data) + +# assert stacked_data.ndim == 2 +# assert set(stacked_data.dims) == set((sample_name, feature_name)) +# for reconstructed_data, data in zip(reconstructed_data_list, data_list): +# assert set(reconstructed_data.dims) == set(data.dims) + + +# @pytest.mark.parametrize( +# "sample_name, feature_name, data_params", +# [ +# ("sample1", "feature", (2, 2, 1)), +# ("sample", "feature1", (2, 1, 2)), +# ("sample1", "feature1", (2, 3, 3)), +# ], +# ) +# def test_fit_invalid_dimension_names(sample_name, feature_name, data_params): +# data_list = generate_list_of_synthetic_dataarrays(*data_params) +# all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) + +# stacker = DataListStacker(sample_name=sample_name, feature_name=feature_name) + +# with pytest.raises(ValueError): +# stacker.fit(data_list, sample_dims[0], feature_dims) + + +# @pytest.mark.parametrize( +# "synthetic_datalist", +# VALID_TEST_DATA, +# indirect=["synthetic_datalist"], +# ) +# def test_fit(synthetic_datalist): +# data_list = synthetic_datalist +# all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) + +# stacker = DataListStacker() +# stacker.fit(data_list, sample_dims[0], feature_dims) + + +# @pytest.mark.parametrize( +# "synthetic_datalist", +# VALID_TEST_DATA, +# indirect=["synthetic_datalist"], +# ) +# def test_transform(synthetic_datalist): +# data_list = synthetic_datalist +# all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) + +# stacker = DataListStacker() +# stacker.fit(data_list, sample_dims[0], feature_dims) +# transformed_data = stacker.transform(data_list) +# transformed_data2 = stacker.transform(data_list) + +# is_dask_before = data_is_dask(data_list) +# is_dask_after = data_is_dask(transformed_data) + +# assert isinstance(transformed_data, DataArray) +# assert transformed_data.ndim == 2 +# assert transformed_data.dims == ("sample", "feature") +# assert is_dask_before == is_dask_after +# assert transformed_data.identical(transformed_data2) + + +# @pytest.mark.parametrize( +# "synthetic_datalist", +# VALID_TEST_DATA, +# indirect=["synthetic_datalist"], +# ) +# def test_transform_invalid(synthetic_datalist): +# data_list = synthetic_datalist +# all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) + +# stacker = DataListStacker() +# stacker.fit(data_list, sample_dims[0], feature_dims) + +# data_list = [da.isel(feature0=slice(0, 2)) for da in data_list] +# with pytest.raises(ValueError): +# stacker.transform(data_list) + + +# @pytest.mark.parametrize( +# "synthetic_datalist", +# VALID_TEST_DATA, +# indirect=["synthetic_datalist"], +# ) +# def test_fit_transform(synthetic_datalist): +# data_list = synthetic_datalist +# all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) + +# stacker = DataListStacker() +# transformed_data = stacker.fit_transform(data_list, sample_dims[0], feature_dims) + +# is_dask_before = data_is_dask(data_list) +# is_dask_after = data_is_dask(transformed_data) + +# assert isinstance(transformed_data, DataArray) +# assert transformed_data.ndim == 2 +# assert transformed_data.dims == ("sample", "feature") +# assert is_dask_before == is_dask_after + + +# @pytest.mark.parametrize( +# 
"synthetic_datalist", +# VALID_TEST_DATA, +# indirect=["synthetic_datalist"], +# ) +# def test_invserse_transform_data(synthetic_datalist): +# data_list = synthetic_datalist +# all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) + +# stacker = DataListStacker() +# stacker.fit(data_list, sample_dims[0], feature_dims) +# stacked_data = stacker.transform(data_list) +# unstacked_data = stacker.inverse_transform_data(stacked_data) + +# is_dask_before = data_is_dask(data_list) +# is_dask_after = data_is_dask(unstacked_data) + +# # Unstacked data has dimensions of original data +# assert_expected_dims(data_list, unstacked_data, policy="all") +# # Unstacked data has coordinates of original data +# assert_expected_coords(data_list, unstacked_data, policy="all") +# # inverse transform should not change dask-ness +# assert is_dask_before == is_dask_after + + +# @pytest.mark.parametrize( +# "synthetic_datalist", +# VALID_TEST_DATA, +# indirect=["synthetic_datalist"], +# ) +# def test_invserse_transform_components(synthetic_datalist): +# data_list = synthetic_datalist +# all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) + +# stacker = DataListStacker() +# stacker.fit(data_list, sample_dims[0], feature_dims) + +# stacked_data = stacker.transform(data_list) +# components = stacked_data.rename({"sample": "mode"}) +# components.coords.update({"mode": range(components.mode.size)}) +# unstacked_data = stacker.inverse_transform_components(components) + +# is_dask_before = data_is_dask(data_list) +# is_dask_after = data_is_dask(unstacked_data) + +# # Unstacked components has correct feature dimensions +# assert_expected_dims(data_list, unstacked_data, policy="feature") +# # Unstacked data has feature coordinates of original data +# assert_expected_coords(data_list, unstacked_data, policy="feature") +# # inverse transform should not change dask-ness +# assert is_dask_before == is_dask_after + + +# @pytest.mark.parametrize( +# "synthetic_datalist", +# VALID_TEST_DATA, +# indirect=["synthetic_datalist"], +# ) +# def test_invserse_transform_scores(synthetic_datalist): +# data_list = synthetic_datalist +# all_dims, sample_dims, feature_dims = get_dims_from_data_list(data_list) + +# stacker = DataListStacker() +# stacker.fit(data_list, sample_dims[0], feature_dims) + +# stacked_data = stacker.transform(data_list) +# scores = stacked_data.rename({"feature": "mode"}) +# unstacked_data = stacker.inverse_transform_scores(scores) + +# is_dask_before = data_is_dask(data_list) +# is_dask_after = data_is_dask(unstacked_data) + +# # Unstacked scores has correct feature dimensions +# assert_expected_dims(data_list[0], unstacked_data, policy="sample") +# # Unstacked data has coordinates of original data +# assert_expected_coords(data_list[0], unstacked_data, policy="sample") +# # inverse transform should not change dask-ness +# assert is_dask_before == is_dask_after diff --git a/tests/preprocessing/test_dataset_multiindex_converter.py b/tests/preprocessing/test_dataset_multiindex_converter.py index d7d1aa3..b93e75b 100644 --- a/tests/preprocessing/test_dataset_multiindex_converter.py +++ b/tests/preprocessing/test_dataset_multiindex_converter.py @@ -2,7 +2,7 @@ import pandas as pd from xeofs.preprocessing.multi_index_converter import ( - DataSetMultiIndexConverter, + MultiIndexConverter, ) from ..conftest import generate_synthetic_dataset from xeofs.utils.data_types import DataArray @@ -38,7 +38,7 @@ indirect=["synthetic_dataset"], ) def test_transform(synthetic_dataset): - converter 
= DataSetMultiIndexConverter() + converter = MultiIndexConverter() converter.fit(synthetic_dataset) transformed_data = converter.transform(synthetic_dataset) @@ -70,7 +70,7 @@ def test_transform(synthetic_dataset): indirect=["synthetic_dataset"], ) def test_inverse_transform(synthetic_dataset): - converter = DataSetMultiIndexConverter() + converter = MultiIndexConverter() converter.fit(synthetic_dataset) transformed_data = converter.transform(synthetic_dataset) inverse_transformed_data = converter.inverse_transform_data(transformed_data) diff --git a/tests/preprocessing/test_dataset_renamer.py b/tests/preprocessing/test_dataset_renamer.py new file mode 100644 index 0000000..7ba110b --- /dev/null +++ b/tests/preprocessing/test_dataset_renamer.py @@ -0,0 +1,90 @@ +import pytest + +from xeofs.preprocessing.dimension_renamer import DimensionRenamer +from ..utilities import ( + data_is_dask, + get_dims_from_data, +) + +# ============================================================================= +# GENERALLY VALID TEST CASES +# ============================================================================= +N_VARIABLES = [1, 2] +N_SAMPLE_DIMS = [1, 2] +N_FEATURE_DIMS = [1, 2] +INDEX_POLICY = ["index", "multiindex"] +NAN_POLICY = ["no_nan", "fulldim"] +DASK_POLICY = ["no_dask", "dask"] +SEED = [0] + +VALID_TEST_DATA = [ + (nv, ns, nf, index, nan, dask) + for nv in N_VARIABLES + for ns in N_SAMPLE_DIMS + for nf in N_FEATURE_DIMS + for index in INDEX_POLICY + for nan in NAN_POLICY + for dask in DASK_POLICY +] + + +# TESTS +# ============================================================================= +@pytest.mark.parametrize( + "synthetic_dataset", + VALID_TEST_DATA, + indirect=["synthetic_dataset"], +) +def test_transform(synthetic_dataset): + all_dims, sample_dims, feature_dims = get_dims_from_data(synthetic_dataset) + + n_dims = len(all_dims) + + base = "new" + start = 10 + expected_dims = set(base + str(i) for i in range(start, start + n_dims)) + + renamer = DimensionRenamer(base=base, start=start) + renamer.fit(synthetic_dataset, sample_dims, feature_dims) + transformed_data = renamer.transform(synthetic_dataset) + + is_dask_before = data_is_dask(synthetic_dataset) + is_dask_after = data_is_dask(transformed_data) + + # Transforming doesn't change the dask-ness of the data + assert is_dask_before == is_dask_after + + # Transforming converts dimension names + given_dims = set(transformed_data.dims) + assert given_dims == expected_dims + + # Result is robust to calling the method multiple times + transformed_data = renamer.transform(synthetic_dataset) + given_dims = set(transformed_data.dims) + assert given_dims == expected_dims + + +@pytest.mark.parametrize( + "synthetic_dataset", + VALID_TEST_DATA, + indirect=["synthetic_dataset"], +) +def test_inverse_transform_data(synthetic_dataset): + all_dims, sample_dims, feature_dims = get_dims_from_data(synthetic_dataset) + + base = "new" + start = 10 + + renamer = DimensionRenamer(base=base, start=start) + renamer.fit(synthetic_dataset, sample_dims, feature_dims) + transformed_data = renamer.transform(synthetic_dataset) + inverse_transformed_data = renamer.inverse_transform_data(transformed_data) + + is_dask_before = data_is_dask(synthetic_dataset) + is_dask_after = data_is_dask(transformed_data) + + # Transforming doesn't change the dask-ness of the data + assert is_dask_before == is_dask_after + + assert inverse_transformed_data.identical(synthetic_dataset) + assert set(inverse_transformed_data.dims) == set(synthetic_dataset.dims) diff --git 
a/tests/preprocessing/test_dataset_scaler.py b/tests/preprocessing/test_dataset_scaler.py index 90ce905..c992d68 100644 --- a/tests/preprocessing/test_dataset_scaler.py +++ b/tests/preprocessing/test_dataset_scaler.py @@ -2,64 +2,35 @@ import xarray as xr import numpy as np -from xeofs.preprocessing.scaler import DataSetScaler +from xeofs.preprocessing.scaler import Scaler @pytest.mark.parametrize( - "with_std, with_coslat, with_weights", + "with_std, with_coslat", [ - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), + (True, True), + (True, False), + (False, True), + (False, False), ], ) -def test_init_params(with_std, with_coslat, with_weights): - s = DataSetScaler( - with_std=with_std, with_coslat=with_coslat, with_weights=with_weights - ) +def test_init_params(with_std, with_coslat): + s = Scaler(with_std=with_std, with_coslat=with_coslat) assert s.get_params()["with_std"] == with_std assert s.get_params()["with_coslat"] == with_coslat - assert s.get_params()["with_weights"] == with_weights @pytest.mark.parametrize( - "with_std, with_coslat, with_weights", + "with_std, with_coslat", [ - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), + (True, True), + (True, False), + (False, True), + (False, False), ], ) -def test_fit_params(with_std, with_coslat, with_weights, mock_dataset): - s = DataSetScaler( - with_std=with_std, with_coslat=with_coslat, with_weights=with_weights - ) +def test_fit_params(with_std, with_coslat, mock_dataset): + s = Scaler(with_std=with_std, with_coslat=with_coslat) sample_dims = ["time"] feature_dims = ["lat", "lon"] size_lats = mock_dataset.lat.size @@ -72,48 +43,37 @@ def test_fit_params(with_std, with_coslat, with_weights, mock_dataset): assert hasattr(s, "std_"), "Scaler has no std attribute." if with_coslat: assert hasattr(s, "coslat_weights_"), "Scaler has no coslat_weights attribute." - if with_weights: - assert hasattr(s, "weights_"), "Scaler has no weights attribute." assert s.mean_ is not None, "Scaler mean is None." if with_std: assert s.std_ is not None, "Scaler std is None." if with_coslat: assert s.coslat_weights_ is not None, "Scaler coslat_weights is None." - if with_weights: - assert s.weights_ is not None, "Scaler weights is None." 
@pytest.mark.parametrize( "with_std, with_coslat, with_weights", [ (True, True, True), - (True, True, False), (True, False, True), - (True, False, False), (False, True, True), - (False, True, False), (False, False, True), - (False, False, False), - (True, True, True), (True, True, False), - (True, False, True), (True, False, False), - (False, True, True), (False, True, False), - (False, False, True), (False, False, False), ], ) def test_transform_params(with_std, with_coslat, with_weights, mock_dataset): - s = DataSetScaler( - with_std=with_std, with_coslat=with_coslat, with_weights=with_weights - ) + s = Scaler(with_std=with_std, with_coslat=with_coslat) sample_dims = ["time"] feature_dims = ["lat", "lon"] size_lats = mock_dataset.lat.size - weights1 = xr.DataArray(np.random.rand(size_lats), dims=["lat"], name="t2m") - weights2 = xr.DataArray(np.random.rand(size_lats), dims=["lat"], name="prcp") - weights = xr.merge([weights1, weights2]) + if with_weights: + weights1 = xr.DataArray(np.random.rand(size_lats), dims=["lat"], name="t2m") + weights2 = xr.DataArray(np.random.rand(size_lats), dims=["lat"], name="prcp") + weights = xr.merge([weights1, weights2]) + else: + weights = None s.fit(mock_dataset, sample_dims, feature_dims, weights) transformed = s.transform(mock_dataset) assert transformed is not None, "Transformed data is None." @@ -140,41 +100,21 @@ def test_transform_params(with_std, with_coslat, with_weights, mock_dataset): transformed, mock_dataset ), "Data has not been transformed." - if with_weights: - assert s.weights_ is not None, "Scaler weights is None." - assert not np.array_equal( - transformed, mock_dataset - ), "Data has not been transformed." - transformed2 = s.fit_transform(mock_dataset, sample_dims, feature_dims, weights) xr.testing.assert_allclose(transformed, transformed2) @pytest.mark.parametrize( - "with_std, with_coslat, with_weights", + "with_std, with_coslat", [ - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), + (True, True), + (True, False), + (False, True), + (False, False), ], ) -def test_inverse_transform_params(with_std, with_coslat, with_weights, mock_dataset): - s = DataSetScaler( - with_std=with_std, with_coslat=with_coslat, with_weights=with_weights - ) +def test_inverse_transform_params(with_std, with_coslat, mock_dataset): + s = Scaler(with_std=with_std, with_coslat=with_coslat) sample_dims = ["time"] feature_dims = ["lat", "lon"] size_lats = mock_dataset.lat.size @@ -197,7 +137,7 @@ def test_inverse_transform_params(with_std, with_coslat, with_weights, mock_data ], ) def test_fit_dims(dim_sample, dim_feature, mock_dataset): - s = DataSetScaler(with_std=True) + s = Scaler(with_std=True) s.fit(mock_dataset, dim_sample, dim_feature) assert hasattr(s, "mean_"), "Scaler has no mean attribute." assert s.mean_ is not None, "Scaler mean is None." 
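
test_inverse_transform_params above pins down the scaler's round-trip contract: inverse_transform_data applied to transformed data should reproduce the original input up to floating-point error. A short sketch of that contract, assuming a Dataset ds shaped like the mock_dataset fixture used here (time, lat, lon):

scaler = Scaler(with_std=True, with_coslat=True)
scaler.fit(ds, ["time"], ["lat", "lon"])
restored = scaler.inverse_transform_data(scaler.transform(ds))
xr.testing.assert_allclose(restored, ds)  # scaling round-trips
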
@@ -222,7 +162,7 @@ def test_fit_dims(dim_sample, dim_feature, mock_dataset): ], ) def test_fit_transform_dims(dim_sample, dim_feature, mock_dataset): - s = DataSetScaler() + s = Scaler() transformed = s.fit_transform(mock_dataset, dim_sample, dim_feature) # check that all dimensions are present assert set(transformed.dims) == set( @@ -235,25 +175,20 @@ def test_fit_transform_dims(dim_sample, dim_feature, mock_dataset): # Test input types def test_fit_input_type(mock_dataset, mock_data_array, mock_data_array_list): - s = DataSetScaler() - # Cannot fit DataArray - with pytest.raises(TypeError): - s.fit(mock_data_array, ["time"], ["lon", "lat"]) + s = Scaler() # Cannot fit list of DataArrays with pytest.raises(TypeError): s.fit(mock_data_array_list, ["time"], ["lon", "lat"]) s.fit(mock_dataset, ["time"], ["lon", "lat"]) - # Cannot transform DataArray - with pytest.raises(TypeError): - s.transform(mock_data_array) + # Cannot transform list of DataArrays with pytest.raises(TypeError): s.transform(mock_data_array_list) # def test_fit_weights_input_type(mock_dataset): -# s = DataSetScaler() +# s = Scaler() # # Fitting with weights requires that the weights have the same variables as the dataset # # used for fitting; otherwise raise an error # size_lats = mock_dataset.lat.size diff --git a/tests/preprocessing/test_preprocessor_dataarray.py b/tests/preprocessing/test_preprocessor_dataarray.py index 1388104..805e5f2 100644 --- a/tests/preprocessing/test_preprocessor_dataarray.py +++ b/tests/preprocessing/test_preprocessor_dataarray.py @@ -55,28 +55,22 @@ (False, True, False), (False, False, True), (False, False, False), - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), ], ) def test_fit_transform_scalings(with_std, with_coslat, with_weights, mock_data_array): """fit method should not be implemented.""" - prep = Preprocessor( - with_std=with_std, with_coslat=with_coslat, with_weights=with_weights - ) + prep = Preprocessor(with_std=with_std, with_coslat=with_coslat) weights = None if with_weights: weights = mock_data_array.mean("time").copy() weights[:] = 0.5 - data_trans = prep.fit_transform(mock_data_array, weights=weights, dim="time") + data_trans = prep.fit_transform( + mock_data_array, + weights=weights, + sample_dims=("time",), + ) assert hasattr(prep, "scaler") assert hasattr(prep, "preconverter") @@ -104,9 +98,10 @@ def test_fit_transform_scalings(with_std, with_coslat, with_weights, mock_data_a ) def test_fit_transform_same_dim_names(index_policy, nan_policy, dask_policy): data = generate_synthetic_dataarray(1, 1, index_policy, nan_policy, dask_policy) + all_dims, sample_dims, feature_dims = get_dims_from_data(data) prep = Preprocessor(sample_name="sample0", feature_name="feature0") - transformed = prep.fit_transform(data, dim=("sample0",)) + transformed = prep.fit_transform(data, sample_dims) reconstructed = prep.inverse_transform_data(transformed) data_is_dask_before = data_is_dask(data) @@ -130,7 +125,7 @@ def test_fit_transform(sample_name, feature_name, data_params): all_dims, sample_dims, feature_dims = get_dims_from_data(data) prep = Preprocessor(sample_name=sample_name, feature_name=feature_name) - transformed = prep.fit_transform(data, dim=sample_dims) + transformed = prep.fit_transform(data, sample_dims) data_is_dask_before = data_is_dask(data) data_is_dask_after = data_is_dask(transformed) @@ -150,7 +145,7 @@ def 
test_inverse_transform(sample_name, feature_name, data_params): all_dims, sample_dims, feature_dims = get_dims_from_data(data) prep = Preprocessor(sample_name=sample_name, feature_name=feature_name) - transformed = prep.fit_transform(data, dim=sample_dims) + transformed = prep.fit_transform(data, sample_dims) components = transformed.rename({sample_name: "mode"}) scores = transformed.rename({feature_name: "mode"}) diff --git a/tests/preprocessing/test_preprocessor_datalist.py b/tests/preprocessing/test_preprocessor_datalist.py index 0d17627..8569e41 100644 --- a/tests/preprocessing/test_preprocessor_datalist.py +++ b/tests/preprocessing/test_preprocessor_datalist.py @@ -57,30 +57,21 @@ (False, True, False), (False, False, True), (False, False, False), - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), ], ) def test_fit_transform_scalings( with_std, with_coslat, with_weights, mock_data_array_list ): - """fit method should not be implemented.""" - prep = Preprocessor( - with_std=with_std, with_coslat=with_coslat, with_weights=with_weights - ) + prep = Preprocessor(with_std=with_std, with_coslat=with_coslat) + n_data = len(mock_data_array_list) + sample_dims = ("time",) weights = None if with_weights: weights = [da.mean("time").copy() for da in mock_data_array_list] weights = [xr.ones_like(w) * 0.5 for w in weights] - data_trans = prep.fit_transform(mock_data_array_list, weights=weights, dim="time") + data_trans = prep.fit_transform(mock_data_array_list, sample_dims, weights) assert hasattr(prep, "scaler") assert hasattr(prep, "preconverter") @@ -110,9 +101,10 @@ def test_fit_transform_same_dim_names(index_policy, nan_policy, dask_policy): data = generate_list_of_synthetic_dataarrays( 1, 1, 1, index_policy, nan_policy, dask_policy ) + all_dims, sample_dims, feature_dims = get_dims_from_data_list(data) prep = Preprocessor(sample_name="sample0", feature_name="feature") - transformed = prep.fit_transform(data, dim=("sample0",)) + transformed = prep.fit_transform(data, sample_dims[0]) reconstructed = prep.inverse_transform_data(transformed) data_is_dask_before = data_is_dask(data) @@ -136,7 +128,7 @@ def test_fit_transform(sample_name, feature_name, data_params): all_dims, sample_dims, feature_dims = get_dims_from_data_list(data) prep = Preprocessor(sample_name=sample_name, feature_name=feature_name) - transformed = prep.fit_transform(data, dim=sample_dims[0]) + transformed = prep.fit_transform(data, sample_dims[0]) data_is_dask_before = data_is_dask(data) data_is_dask_after = data_is_dask(transformed) @@ -156,7 +148,7 @@ def test_inverse_transform(sample_name, feature_name, data_params): all_dims, sample_dims, feature_dims = get_dims_from_data_list(data) prep = Preprocessor(sample_name=sample_name, feature_name=feature_name) - transformed = prep.fit_transform(data, dim=sample_dims[0]) + transformed = prep.fit_transform(data, sample_dims[0]) components = transformed.rename({sample_name: "mode"}) scores = transformed.rename({feature_name: "mode"}) diff --git a/tests/preprocessing/test_preprocessor_dataset.py b/tests/preprocessing/test_preprocessor_dataset.py index 6ffb859..c93a581 100644 --- a/tests/preprocessing/test_preprocessor_dataset.py +++ b/tests/preprocessing/test_preprocessor_dataset.py @@ -57,30 +57,21 @@ (False, True, False), (False, False, True), (False, False, False), - (True, True, True), - (True, True, False), - (True, False, True), - (True, False, 
False), - (False, True, True), - (False, True, False), - (False, False, True), - (False, False, False), ], ) -def test_fit_transform_scalings(with_std, with_coslat, with_weights, mock_data_array): +def test_fit_transform_scalings(with_std, with_coslat, with_weights, mock_dataset): """fit method should not be implemented.""" - prep = Preprocessor( - with_std=with_std, with_coslat=with_coslat, with_weights=with_weights - ) + prep = Preprocessor(with_std=with_std, with_coslat=with_coslat) weights = None if with_weights: - weights = mock_data_array.mean("time").copy() - weights[:] = 0.5 + weights = mock_dataset.mean("time").copy() + weights = weights.where(weights == True, 0.5) - data_trans = prep.fit_transform(mock_data_array, weights=weights, dim="time") + data_trans = prep.fit_transform(mock_dataset, "time", weights) assert hasattr(prep, "scaler") + assert hasattr(prep, "renamer") assert hasattr(prep, "preconverter") assert hasattr(prep, "stacker") assert hasattr(prep, "postconverter") @@ -106,9 +97,10 @@ def test_fit_transform_scalings(with_std, with_coslat, with_weights, mock_data_a ) def test_fit_transform_same_dim_names(index_policy, nan_policy, dask_policy): data = generate_synthetic_dataset(1, 1, 1, index_policy, nan_policy, dask_policy) + all_dims, sample_dims, feature_dims = get_dims_from_data(data) prep = Preprocessor(sample_name="sample0", feature_name="feature") - transformed = prep.fit_transform(data, dim=("sample0",)) + transformed = prep.fit_transform(data, sample_dims) reconstructed = prep.inverse_transform_data(transformed) data_is_dask_before = data_is_dask(data) @@ -132,7 +124,7 @@ def test_fit_transform(sample_name, feature_name, data_params): all_dims, sample_dims, feature_dims = get_dims_from_data(data) prep = Preprocessor(sample_name=sample_name, feature_name=feature_name) - transformed = prep.fit_transform(data, dim=sample_dims) + transformed = prep.fit_transform(data, sample_dims) data_is_dask_before = data_is_dask(data) data_is_dask_after = data_is_dask(transformed) @@ -152,7 +144,7 @@ def test_inverse_transform(sample_name, feature_name, data_params): all_dims, sample_dims, feature_dims = get_dims_from_data(data) prep = Preprocessor(sample_name=sample_name, feature_name=feature_name) - transformed = prep.fit_transform(data, dim=sample_dims) + transformed = prep.fit_transform(data, sample_dims) components = transformed.rename({sample_name: "mode"}) scores = transformed.rename({feature_name: "mode"}) diff --git a/xeofs/models/_base_cross_model.py b/xeofs/models/_base_cross_model.py index b2ea454..d5d1f93 100644 --- a/xeofs/models/_base_cross_model.py +++ b/xeofs/models/_base_cross_model.py @@ -17,12 +17,12 @@ class _BaseCrossModel(ABC): ------------- n_modes: int, default=10 Number of modes to calculate. + center: bool, default=True + Whether to center the input data. standardize: bool, default=False Whether to standardize the input data. use_coslat: bool, default=False Whether to use cosine of latitude for scaling. - use_weights: bool, default=False - Whether to use weights. n_pca_modes: int, default=None Number of PCA modes to calculate. 
sample_name: str, default="sample" @@ -39,9 +39,9 @@ class _BaseCrossModel(ABC): def __init__( self, n_modes=10, + center=True, standardize=False, use_coslat=False, - use_weights=False, n_pca_modes=None, sample_name="sample", feature_name="feature", @@ -54,9 +54,9 @@ def __init__( # Define model parameters self._params = { "n_modes": n_modes, + "center": center, "standardize": standardize, "use_coslat": use_coslat, - "use_weights": use_weights, "n_pca_modes": n_pca_modes, "solver": solver, } @@ -64,9 +64,9 @@ def __init__( self._preprocessor_kwargs = { "sample_name": sample_name, "feature_name": feature_name, + "with_center": center, "with_std": standardize, "with_coslat": use_coslat, - "with_weights": use_weights, } # Define analysis-relevant meta data diff --git a/xeofs/models/_base_model.py b/xeofs/models/_base_model.py index d5a7ca2..0848e6e 100644 --- a/xeofs/models/_base_model.py +++ b/xeofs/models/_base_model.py @@ -1,11 +1,22 @@ import warnings -from typing import Optional, Sequence, Hashable, Dict, Any, Self, List +from typing import Optional, Sequence, Hashable, Dict, Any, Self, List, TypeVar, Tuple from abc import ABC, abstractmethod from datetime import datetime +import numpy as np +import xarray as xr + from ..preprocessing.preprocessor import Preprocessor from ..data_container import DataContainer -from ..utils.data_types import DataObject, DataArray, Dims +from ..utils.data_types import DataObject, Data, DataArray, DataSet, DataList, Dims +from ..utils.xarray_utils import ( + convert_to_dim_type, + get_dims, + feature_ones_like, + convert_to_list, + process_parameter, + _check_parameter_number, +) from .._version import __version__ # Ignore warnings from numpy casting with additional coordinates @@ -24,8 +35,6 @@ class _BaseModel(ABC): Whether to standardize the input data. use_coslat: bool, default=False Whether to use cosine of latitude for scaling. - use_weights: bool, default=False - Whether to use weights. sample_name: str, default="sample" Name of the sample dimension. feature_name: str, default="feature" @@ -40,9 +49,9 @@ class _BaseModel(ABC): def __init__( self, n_modes=10, + center=True, standardize=False, use_coslat=False, - use_weights=False, sample_name="sample", feature_name="feature", solver="auto", @@ -53,15 +62,12 @@ def __init__( # Define model parameters self._params = { "n_modes": n_modes, + "center": center, "standardize": standardize, "use_coslat": use_coslat, - "use_weights": use_weights, "solver": solver, } self._solver_kwargs = solver_kwargs - self._preprocessor_kwargs = dict( - sample_name=sample_name, feature_name=feature_name - ) # Define analysis-relevant meta data self.attrs = {"model": "BaseModel"} @@ -76,26 +82,39 @@ def __init__( # Initialize the Preprocessor to scale and stack the data self.preprocessor = Preprocessor( + sample_name=sample_name, + feature_name=feature_name, + with_center=center, with_std=standardize, with_coslat=use_coslat, - with_weights=use_weights, - **self._preprocessor_kwargs ) # Initialize the data container that stores the results self.data = DataContainer() + def _validate_type(self, data) -> None: + err_msg = "Invalid input type: {:}. 
Expected one of the following: DataArray, Dataset or list of these.".format( + type(data).__name__ + ) + if isinstance(data, (xr.DataArray, xr.Dataset)): + pass + elif isinstance(data, (list, tuple)): + if not all(isinstance(d, (xr.DataArray, xr.Dataset)) for d in data): + raise TypeError(err_msg) + else: + raise TypeError(err_msg) + def fit( self, - data: DataObject, + X: List[Data] | Data, dim: Sequence[Hashable] | Hashable, - weights: Optional[DataObject] = None, + weights: Optional[List[Data] | Data] = None, ) -> Self: """ Fit the model to the input data. Parameters ---------- - data: DataArray | Dataset | List[DataArray] + X: DataArray | Dataset | List[DataArray] Input data. dim: Sequence[Hashable] | Hashable Specify the sample dimensions. The remaining dimensions @@ -104,8 +123,17 @@ def fit( Weighting factors for the input data. """ - # Preprocess the data - data2D: DataArray = self.preprocessor.fit_transform(data, dim, weights) + # Check for invalid types + self._validate_type(X) + if weights is not None: + self._validate_type(weights) + + self.sample_dims = convert_to_dim_type(dim) + + # Preprocess the data & transform to 2D + data2D: DataArray = self.preprocessor.fit_transform( + X, self.sample_dims, weights + ) return self._fit_algorithm(data2D) @@ -126,7 +154,7 @@ def _fit_algorithm(self, data: DataArray) -> Self: """ raise NotImplementedError - def transform(self, data: DataObject) -> DataArray: + def transform(self, data: List[Data] | Data) -> DataArray: """Project data onto the components. Parameters @@ -140,6 +168,8 @@ def transform(self, data: DataObject) -> DataArray: Projections of the data onto the components. """ + self._validate_type(data) + data2D = self.preprocessor.transform(data) data2D = self._transform_algorithm(data2D) return self.preprocessor.inverse_transform_scores(data2D) @@ -163,9 +193,9 @@ def _transform_algorithm(self, data: DataArray) -> DataArray: def fit_transform( self, - data: DataObject, + data: List[Data] | Data, dim: Sequence[Hashable] | Hashable, - weights: Optional[DataObject] = None, + weights: Optional[List[Data] | Data] = None, ) -> DataArray: """Fit the model to the input data and project the data onto the components. diff --git a/xeofs/models/eof.py b/xeofs/models/eof.py index 7b8bcc3..72cc899 100644 --- a/xeofs/models/eof.py +++ b/xeofs/models/eof.py @@ -22,8 +22,6 @@ class EOF(_BaseModel): Whether to standardize the input data. use_coslat: bool, default=False Whether to use cosine of latitude for scaling. - use_weights: bool, default=False - Whether to use weights. solver: {"auto", "full", "randomized"}, default="auto" Solver to use for the SVD computation. 
solver_kwargs: dict, default={} @@ -42,7 +40,6 @@ def __init__( n_modes=10, standardize=False, use_coslat=False, - use_weights=False, solver="auto", solver_kwargs={}, **kwargs, @@ -51,7 +48,6 @@ def __init__( n_modes=n_modes, standardize=standardize, use_coslat=use_coslat, - use_weights=use_weights, solver=solver, solver_kwargs=solver_kwargs, **kwargs, diff --git a/xeofs/preprocessing/__init__.py b/xeofs/preprocessing/__init__.py index e69de29..54b31f3 100644 --- a/xeofs/preprocessing/__init__.py +++ b/xeofs/preprocessing/__init__.py @@ -0,0 +1,12 @@ +from .scaler import Scaler +from .sanitizer import Sanitizer +from .multi_index_converter import MultiIndexConverter +from .stacker import DataArrayStacker, DataSetStacker + +__all__ = [ + "Scaler", + "Sanitizer", + "MultiIndexConverter", + "DataArrayStacker", + "DataSetStacker", +] diff --git a/xeofs/preprocessing/_base_scaler.py b/xeofs/preprocessing/_base_scaler.py deleted file mode 100644 index 44b19eb..0000000 --- a/xeofs/preprocessing/_base_scaler.py +++ /dev/null @@ -1,32 +0,0 @@ -from abc import ABC, abstractmethod - - -class _BaseScaler(ABC): - def __init__(self, with_std=True, with_coslat=False, with_weights=False): - self._params = dict( - with_std=with_std, with_coslat=with_coslat, with_weights=with_weights - ) - - self.mean = None - self.std = None - self.coslat_weights = None - self.weights = None - - @abstractmethod - def fit(self, X, sample_dims, feature_dims, weights=None): - raise NotImplementedError - - @abstractmethod - def transform(self, X): - raise NotImplementedError - - @abstractmethod - def fit_transform(self, X, sample_dims, feature_dims, weights=None): - raise NotImplementedError - - @abstractmethod - def inverse_transform(self, X): - raise NotImplementedError - - def get_params(self): - return self._params.copy() diff --git a/xeofs/preprocessing/_base_stacker.py b/xeofs/preprocessing/_base_stacker.py deleted file mode 100644 index 54c4958..0000000 --- a/xeofs/preprocessing/_base_stacker.py +++ /dev/null @@ -1,153 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Sequence, Hashable, List - -import xarray as xr - - -class _BaseStacker(ABC): - """Abstract base class for stacking data into a 2D array. - - Every multi-dimensional array is be reshaped into a 2D array with the - dimensions (sample x feature). - - - Attributes - ---------- - dims_in_ : tuple - The dimensions of the data used to fit the stacker. - dims_out_ : dict['sample': ..., 'feature': ...] - The dimensions of the stacked data. - coords_in_ : dict - The coordinates of the data used to fit the stacker. - coords_out_ : dict['sample': ..., 'feature': ...] - The coordinates of the stacked data. Typically consist of MultiIndex. - - """ - - def __init__(self, sample_name: str = "sample", feature_name: str = "feature"): - self.sample_name = sample_name - self.feature_name = feature_name - - def fit( - self, - data, - sample_dims: Hashable | Sequence[Hashable], - feature_dims: Hashable | Sequence[Hashable] | List[Sequence[Hashable]], - ): - """Invoking a `fit` operation for a stacker object isn't practical because it requires stacking the data, - only to ascertain the output dimensions. This step is computationally expensive and unnecessary. - Therefore, instead of using a separate `fit` method, we combine the fit and transform steps - into the `fit_transform` method for efficiency. However, to maintain consistency with other classes - that do utilize a `fit` method, we retain the `fit` method here, albeit unimplemented. 
- - """ - raise NotImplementedError( - "Stacker does not implement fit method. Use fit_transform instead." - ) - - @abstractmethod - def fit_transform( - self, - data, - sample_dims: Hashable | Sequence[Hashable], - feature_dims: Hashable | Sequence[Hashable] | List[Sequence[Hashable]], - ) -> xr.DataArray: - """Fit the stacker to the data and then transform the data. - - Parameters - ---------- - data : DataArray - The data to be reshaped. - sample_dims : Hashable or Sequence[Hashable] - The dimensions of the data that will be stacked along the `sample` dimension. - feature_dims : Hashable or Sequence[Hashable] - The dimensions of the data that will be stacked along the `feature` dimension. - - Returns - ------- - DataArray - The reshaped data. - - Raises - ------ - ValueError - If any of the dimensions in `sample_dims` or `feature_dims` are not present in the data. - """ - raise NotImplementedError - - @abstractmethod - def transform(self, data) -> xr.DataArray: - """Reshape the data into a 2D version. - - Parameters - ---------- - data : DataArray - The data to be reshaped. - - Returns - ------- - DataArray - The reshaped data. - - Raises - ------ - ValueError - If the data to be transformed has different dimensions than the data used to fit the stacker. - ValueError - If the data to be transformed has different feature coordinates than the data used to fit the stacker. - ValueError - If the data to be transformed has individual NaNs. - - """ - raise NotImplementedError - - @abstractmethod - def inverse_transform_data(self, data: xr.DataArray): - """Reshape the 2D data (sample x feature) back into its original shape. - - Parameters - ---------- - data : DataArray - The data to be reshaped. - - Returns - ------- - DataArray - The reshaped data. - - """ - raise NotImplementedError - - @abstractmethod - def inverse_transform_components(self, data: xr.DataArray): - """Reshape the 2D data (mode x feature) back into its original shape. - - Parameters - ---------- - data : DataArray - The data to be reshaped. - - Returns - ------- - DataArray - The reshaped data. - - """ - raise NotImplementedError - - @abstractmethod - def inverse_transform_scores(self, data: xr.DataArray): - """Reshape the 2D data (sample x mode) back into its original shape. - - Parameters - ---------- - data : DataArray - The data to be reshaped. - - Returns - ------- - DataArray - The reshaped data. 
- - """ - raise NotImplementedError diff --git a/xeofs/preprocessing/concatenator.py b/xeofs/preprocessing/concatenator.py new file mode 100644 index 0000000..1e138dc --- /dev/null +++ b/xeofs/preprocessing/concatenator.py @@ -0,0 +1,117 @@ +from typing import List, Self, Optional + +import pandas as pd +import numpy as np +import xarray as xr + +from .transformer import Transformer +from ..utils.data_types import ( + Dims, + DimsList, + DataArray, + DataSet, + Data, + DataVar, + DataList, + DataArrayList, + DataSetList, + DataVarList, +) + + +class Concatenator(Transformer): + """Concatenate a list of DataArrays along the feature dimensions.""" + + def __init__(self, sample_name: str = "sample", feature_name: str = "feature"): + super().__init__(sample_name, feature_name) + + self.stackers = [] + + def fit( + self, + X: List[DataArray], + sample_dims: Optional[Dims] = None, + feature_dims: Optional[DimsList] = None, + ) -> Self: + # Check that all inputs are DataArrays + if not all([isinstance(data, DataArray) for data in X]): + raise ValueError("Input must be a list of DataArrays") + + # Check that all inputs have shape 2 + if not all([len(data.dims) == 2 for data in X]): + raise ValueError("Input DataArrays must have shape 2") + + # Check that all inputs have the same sample_name and feature_name + if not all([data.dims == (self.sample_name, self.feature_name) for data in X]): + raise ValueError("Input DataArrays must have the same dimensions") + + self.n_data = len(X) + + # Set input feature coordinates + self.coords_in = [data.coords[self.feature_name] for data in X] + self.n_features = [coord.size for coord in self.coords_in] + + return self + + def transform(self, X: List[DataArray]) -> DataArray: + # Test whether the input list has same length as the number of stackers + if len(X) != self.n_data: + raise ValueError( + f"Invalid input. Number of DataArrays ({len(X)}) does not match the number of fitted DataArrays ({self.n_data})." 
+            )
+
+        reindexed_data_list: List[DataArray] = []
+        dummy_feature_coords = []
+
+        idx_range = np.cumsum([0] + self.n_features)
+        for i, data in enumerate(X):
+            # Create dummy feature coordinates for DataArray
+            new_coords = np.arange(idx_range[i], idx_range[i + 1])
+
+            # Replace original feature coordinates with dummy coordinates
+            data = data.drop_vars(self.feature_name)
+            reindexed = data.assign_coords({self.feature_name: new_coords})
+
+            # Store dummy feature coordinates
+            dummy_feature_coords.append(new_coords)
+            reindexed_data_list.append(reindexed)
+
+        self._dummy_feature_coords = dummy_feature_coords
+
+        X_concat: DataArray = xr.concat(reindexed_data_list, dim=self.feature_name)
+        self.coords_out = X_concat.coords[self.feature_name]
+
+        return X_concat
+
+    def fit_transform(
+        self,
+        X: List[DataArray],
+        sample_dims: Optional[Dims] = None,
+        feature_dims: Optional[DimsList] = None,
+    ) -> DataArray:
+        return self.fit(X, sample_dims, feature_dims).transform(X)
+
+    def _split_dataarray_into_list(self, data: DataArray) -> List[DataArray]:
+        feature_name = self.feature_name
+        data_list: List[DataArray] = []
+
+        for coords, features in zip(self.coords_in, self._dummy_feature_coords):
+            # Select the features corresponding to the current DataArray
+            sub_selection = data.sel({feature_name: features})
+            # Replace dummy feature coordinates with original feature coordinates
+            sub_selection = sub_selection.assign_coords({feature_name: coords})
+            data_list.append(sub_selection)
+
+        return data_list
+
+    def inverse_transform_data(self, X: DataArray) -> List[DataArray]:
+        """Reshape the 2D data (sample x feature) back into its original shape."""
+        return self._split_dataarray_into_list(X)
+
+    def inverse_transform_components(self, X: DataArray) -> List[DataArray]:
+        """Reshape the 2D components (sample x feature) back into its original shape."""
+        return self._split_dataarray_into_list(X)
+
+    def inverse_transform_scores(self, X: DataArray) -> DataArray:
+        """Reshape the 2D scores (sample x mode) back into its original shape."""
+        return X
diff --git a/xeofs/preprocessing/dimension_renamer.py b/xeofs/preprocessing/dimension_renamer.py
new file mode 100644
index 0000000..7647ba7
--- /dev/null
+++ b/xeofs/preprocessing/dimension_renamer.py
@@ -0,0 +1,61 @@
+from typing import Self
+
+from .transformer import Transformer
+from ..utils.data_types import Dims, DataArray, DataSet, Data, DataVar, DataVarBound
+
+
+class DimensionRenamer(Transformer):
+    """Rename dimensions of a DataArray or Dataset.
+
+    Parameters
+    ----------
+    base: str
+        Base string for the new dimension names.
+    start: int
+        Start index for the new dimension names.
+
+    """
+
+    def __init__(self, base="dim", start=0):
+        super().__init__()
+        self.base = base
+        self.start = start
+        self.dim_mapping = {}
+
+    def fit(self, X: Data, sample_dims: Dims, feature_dims: Dims, **kwargs) -> Self:
+        self.sample_dims_before = sample_dims
+        self.feature_dims_before = feature_dims
+
+        self.dim_mapping = {
+            dim: f"{self.base}{i}" for i, dim in enumerate(X.dims, start=self.start)
+        }
+
+        self.sample_dims_after: Dims = tuple(
+            [self.dim_mapping[dim] for dim in self.sample_dims_before]
+        )
+        self.feature_dims_after: Dims = tuple(
+            [self.dim_mapping[dim] for dim in self.feature_dims_before]
+        )
+
+        return self
+
+    def transform(self, X: DataVarBound) -> DataVarBound:
+        try:
+            return X.rename(self.dim_mapping)
+        except ValueError:
+            raise ValueError("Cannot transform data. 
Dimensions are different.") + + def _inverse_transform(self, X: DataVarBound) -> DataVarBound: + given_dims = set(X.dims) + expected_dims = set(self.dim_mapping.values()) + dims = given_dims.intersection(expected_dims) + return X.rename({v: k for k, v in self.dim_mapping.items() if v in dims}) + + def inverse_transform_data(self, X: DataVarBound) -> DataVarBound: + return self._inverse_transform(X) + + def inverse_transform_components(self, X: DataVarBound) -> DataVarBound: + return self._inverse_transform(X) + + def inverse_transform_scores(self, X: DataArray) -> DataArray: + return self._inverse_transform(X) diff --git a/xeofs/preprocessing/factory.py b/xeofs/preprocessing/factory.py index 4a6632f..19c9a0c 100644 --- a/xeofs/preprocessing/factory.py +++ b/xeofs/preprocessing/factory.py @@ -1,57 +1,57 @@ -import xarray as xr +# import xarray as xr -from .scaler import DataArrayScaler, DataSetScaler, DataListScaler -from .stacker import DataArrayStacker, DataSetStacker, DataListStacker -from .multi_index_converter import ( - DataArrayMultiIndexConverter, - DataSetMultiIndexConverter, - DataListMultiIndexConverter, -) -from ..utils.data_types import DataObject +# from .scaler import DataArrayScaler, DataSetScaler, DataListScaler +# from .stacker import DataArrayStacker, DataSetStacker, DataListStacker +# from .multi_index_converter import ( +# DataArrayMultiIndexConverter, +# DataSetMultiIndexConverter, +# DataListMultiIndexConverter, +# ) +# from ..utils.data_types import DataObject -class ScalerFactory: - @staticmethod - def create_scaler(data: DataObject, **kwargs): - if isinstance(data, xr.DataArray): - return DataArrayScaler(**kwargs) - elif isinstance(data, xr.Dataset): - return DataSetScaler(**kwargs) - elif isinstance(data, list) and all( - isinstance(da, xr.DataArray) for da in data - ): - return DataListScaler(**kwargs) - else: - raise ValueError("Invalid data type") +# class ScalerFactory: +# @staticmethod +# def create_scaler(data: DataObject, **kwargs): +# if isinstance(data, xr.DataArray): +# return DataArrayScaler(**kwargs) +# elif isinstance(data, xr.Dataset): +# return DataSetScaler(**kwargs) +# elif isinstance(data, list) and all( +# isinstance(da, xr.DataArray) for da in data +# ): +# return DataListScaler(**kwargs) +# else: +# raise ValueError("Invalid data type") -class MultiIndexConverterFactory: - @staticmethod - def create_converter( - data: DataObject, **kwargs - ) -> DataArrayMultiIndexConverter | DataListMultiIndexConverter: - if isinstance(data, xr.DataArray): - return DataArrayMultiIndexConverter(**kwargs) - elif isinstance(data, xr.Dataset): - return DataSetMultiIndexConverter(**kwargs) - elif isinstance(data, list) and all( - isinstance(da, xr.DataArray) for da in data - ): - return DataListMultiIndexConverter(**kwargs) - else: - raise ValueError("Invalid data type") +# class MultiIndexConverterFactory: +# @staticmethod +# def create_converter( +# data: DataObject, **kwargs +# ) -> DataArrayMultiIndexConverter | DataListMultiIndexConverter: +# if isinstance(data, xr.DataArray): +# return DataArrayMultiIndexConverter(**kwargs) +# elif isinstance(data, xr.Dataset): +# return DataSetMultiIndexConverter(**kwargs) +# elif isinstance(data, list) and all( +# isinstance(da, xr.DataArray) for da in data +# ): +# return DataListMultiIndexConverter(**kwargs) +# else: +# raise ValueError("Invalid data type") -class StackerFactory: - @staticmethod - def create_stacker(data: DataObject, **kwargs): - if isinstance(data, xr.DataArray): - return DataArrayStacker(**kwargs) - 
elif isinstance(data, xr.Dataset): - return DataSetStacker(**kwargs) - elif isinstance(data, list) and all( - isinstance(da, xr.DataArray) for da in data - ): - return DataListStacker(**kwargs) - else: - raise ValueError("Invalid data type") +# class StackerFactory: +# @staticmethod +# def create_stacker(data: DataObject, **kwargs): +# if isinstance(data, xr.DataArray): +# return DataArrayStacker(**kwargs) +# elif isinstance(data, xr.Dataset): +# return DataSetStacker(**kwargs) +# elif isinstance(data, list) and all( +# isinstance(da, xr.DataArray) for da in data +# ): +# return DataListStacker(**kwargs) +# else: +# raise ValueError("Invalid data type") diff --git a/xeofs/preprocessing/list_processor.py b/xeofs/preprocessing/list_processor.py new file mode 100644 index 0000000..e683d37 --- /dev/null +++ b/xeofs/preprocessing/list_processor.py @@ -0,0 +1,106 @@ +from typing import List, Self, TypeVar, Generic, Type, Dict, Any + +from .transformer import Transformer + +from .dimension_renamer import DimensionRenamer +from .scaler import Scaler +from .sanitizer import Sanitizer +from .multi_index_converter import MultiIndexConverter +from .stacker import DataArrayStacker, DataSetStacker, Stacker +from ..utils.data_types import ( + Data, + DataVar, + DataVarBound, + DataArray, + DataSet, + Dims, + DimsList, +) + +T = TypeVar( + "T", + bound=(DimensionRenamer | Scaler | MultiIndexConverter | Stacker | Sanitizer), +) + + +class GenericListTransformer(Transformer, Generic[T]): + """Apply a Transformer to each of the elements of a list. + + Parameters + ---------- + transformer: Transformer + Transformer class to apply to list elements. + kwargs: dict + Keyword arguments for the transformer. + """ + + def __init__(self, transformer: Type[T], **kwargs): + self.transformer_class = transformer + self.transformers: List[T] = [] + self.init_kwargs = kwargs + + def fit( + self, + X: List[DataVar], + sample_dims: Dims, + feature_dims: DimsList, + iter_kwargs: Dict[str, List[Any]] = {}, + ) -> Self: + """Fit transformer to each data element in the list. + + Parameters + ---------- + X: List[Data] + List of data elements. + sample_dims: Dims + Sample dimensions. + feature_dims: DimsList + Feature dimensions. + iter_kwargs: Dict[str, List[Any]] + Keyword arguments for the transformer that should be iterated over. 
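To make the iter_kwargs routing concrete, here is a hedged sketch with synthetic DataArrays (the import paths follow the files added in this patch; data and names are illustrative):

import numpy as np
import xarray as xr

from xeofs.preprocessing.list_processor import GenericListTransformer
from xeofs.preprocessing.scaler import Scaler

da1 = xr.DataArray(np.random.rand(10, 5), dims=("time", "lat"),
                   coords={"lat": np.linspace(-40.0, 40.0, 5)})
da2 = xr.DataArray(np.random.rand(10, 3), dims=("time", "station"))
w1 = xr.ones_like(da1.isel(time=0, drop=True)) * 2.0

proc = GenericListTransformer(Scaler, with_std=True)  # init kwargs shared by every element
scaled = proc.fit_transform(
    [da1, da2],
    sample_dims=("time",),
    feature_dims=[("lat",), ("station",)],
    iter_kwargs={"weights": [w1, None]},  # i-th entry is forwarded to the i-th fit
)
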
+ + """ + self.sample_dims = sample_dims + self.feature_dims = feature_dims + self.iter_kwargs = iter_kwargs + + for i, x in enumerate(X): + # Add transformer specific keyword arguments + # For iterable kwargs, use the i-th element of the iterable + kwargs = {k: v[i] for k, v in self.iter_kwargs.items()} + proc: T = self.transformer_class(**self.init_kwargs) + proc.fit(x, sample_dims, feature_dims[i], **kwargs) + self.transformers.append(proc) + return self + + def transform(self, X: List[Data]) -> List[Data]: + X_transformed: List[Data] = [] + for x, proc in zip(X, self.transformers): + X_transformed.append(proc.transform(x)) # type: ignore + return X_transformed + + def fit_transform( + self, + X: List[Data], + sample_dims: Dims, + feature_dims: DimsList, + iter_kwargs: Dict[str, List[Any]] = {}, + ) -> List[Data]: + return self.fit(X, sample_dims, feature_dims, iter_kwargs).transform(X) # type: ignore + + def inverse_transform_data(self, X: List[Data]) -> List[Data]: + X_inverse_transformed: List[Data] = [] + for x, proc in zip(X, self.transformers): + x_inv_trans = proc.inverse_transform_data(x) # type: ignore + X_inverse_transformed.append(x_inv_trans) + return X_inverse_transformed + + def inverse_transform_components(self, X: List[Data]) -> List[Data]: + X_inverse_transformed: List[Data] = [] + for x, proc in zip(X, self.transformers): + x_inv_trans = proc.inverse_transform_components(x) # type: ignore + X_inverse_transformed.append(x_inv_trans) + return X_inverse_transformed + + def inverse_transform_scores(self, X: DataArray) -> DataArray: + return self.transformers[0].inverse_transform_scores(X) diff --git a/xeofs/preprocessing/multi_index_converter.py b/xeofs/preprocessing/multi_index_converter.py index db70e1d..626bb15 100644 --- a/xeofs/preprocessing/multi_index_converter.py +++ b/xeofs/preprocessing/multi_index_converter.py @@ -1,22 +1,26 @@ -from typing import List, Self +from typing import List, Self, Optional -import xarray as xr import pandas as pd -from sklearn.base import BaseEstimator, TransformerMixin -from xeofs.utils.data_types import DataArray +from .transformer import Transformer +from ..utils.data_types import Dims, DataArray, DataSet, Data, DataVar, DataVarBound -from ..utils.data_types import DataArray, DataSet, DataList - -class DataArrayMultiIndexConverter(BaseEstimator, TransformerMixin): - """Convert MultiIndexes of a ND DataArray to regular indexes.""" +class MultiIndexConverter(Transformer): + """Convert MultiIndexes of an ND DataArray or Dataset to regular indexes.""" def __init__(self): + super().__init__() self.original_indexes = {} self.modified_dimensions = [] - def fit(self, X: DataArray, y=None) -> Self: + def fit( + self, + X: Data, + sample_dims: Optional[Dims] = None, + feature_dims: Optional[Dims] = None, + **kwargs + ) -> Self: # Store original MultiIndexes and replace with simple index for dim in X.dims: index = X.indexes[dim] @@ -26,7 +30,7 @@ def fit(self, X: DataArray, y=None) -> Self: return self - def transform(self, X: DataArray) -> DataArray: + def transform(self, X: DataVar) -> DataVar: X_transformed = X.copy(deep=True) # Replace MultiIndexes with simple index @@ -37,10 +41,7 @@ def transform(self, X: DataArray) -> DataArray: return X_transformed - def fit_transform(self, X: DataArray, y=None) -> DataArray: - return self.fit(X, y).transform(X) - - def _inverse_transform(self, X: DataArray) -> DataArray: + def _inverse_transform(self, X: DataVarBound) -> DataVarBound: X_inverse_transformed = X.copy(deep=True) # Restore original 
MultiIndexes @@ -57,71 +58,52 @@ def _inverse_transform(self, X: DataArray) -> DataArray: return X_inverse_transformed - def inverse_transform_data(self, X: DataArray) -> DataArray: + def inverse_transform_data(self, X: DataVarBound) -> DataVarBound: return self._inverse_transform(X) - def inverse_transform_components(self, X: DataArray) -> DataArray: + def inverse_transform_components(self, X: DataVarBound) -> DataVarBound: return self._inverse_transform(X) def inverse_transform_scores(self, X: DataArray) -> DataArray: return self._inverse_transform(X) -class DataSetMultiIndexConverter(DataArrayMultiIndexConverter): - """Converts MultiIndexes to simple indexes and vice versa.""" - - def fit(self, X: DataSet, y=None) -> Self: - return super().fit(X, y) # type: ignore - - def transform(self, X: DataSet) -> DataSet: - return super().transform(X) # type: ignore - - def fit_transform(self, X: DataSet, y=None) -> DataSet: - return super().fit_transform(X, y) # type: ignore - - def inverse_transform_data(self, X: DataSet) -> DataSet: - return super().inverse_transform_data(X) # type: ignore - - def inverse_transform_components(self, X: DataSet) -> DataSet: - return super().inverse_transform_components(X) # type: ignore - +# class DataListMultiIndexConverter(BaseEstimator, TransformerMixin): +# """Converts MultiIndexes to simple indexes and vice versa.""" -class DataListMultiIndexConverter(BaseEstimator, TransformerMixin): - """Converts MultiIndexes to simple indexes and vice versa.""" +# def __init__(self): +# self.converters: List[MultiIndexConverter] = [] - def __init__(self): - self.converters: List[DataArrayMultiIndexConverter] = [] - - def fit(self, X: DataList, y=None): - for x in X: - converter = DataArrayMultiIndexConverter() - converter.fit(x) - self.converters.append(converter) +# def fit(self, X: List[Data], y=None): +# for x in X: +# converter = MultiIndexConverter() +# converter.fit(x) +# self.converters.append(converter) - return self +# return self - def transform(self, X: DataList) -> DataList: - X_transformed: List[DataArray] = [] - for x, converter in zip(X, self.converters): - X_transformed.append(converter.transform(x)) +# def transform(self, X: List[Data]) -> List[Data]: +# X_transformed: List[Data] = [] +# for x, converter in zip(X, self.converters): +# X_transformed.append(converter.transform(x)) - return X_transformed +# return X_transformed - def fit_transform(self, X: DataList, y=None) -> DataList: - return self.fit(X, y).transform(X) +# def fit_transform(self, X: List[Data], y=None) -> List[Data]: +# return self.fit(X, y).transform(X) - def _inverse_transform(self, X: DataList) -> DataList: - X_inverse_transformed: List[DataArray] = [] - for x, converter in zip(X, self.converters): - X_inverse_transformed.append(converter._inverse_transform(x)) +# def _inverse_transform(self, X: List[Data]) -> List[Data]: +# X_inverse_transformed: List[Data] = [] +# for x, converter in zip(X, self.converters): +# X_inverse_transformed.append(converter._inverse_transform(x)) - return X_inverse_transformed +# return X_inverse_transformed - def inverse_transform_data(self, X: DataList) -> DataList: - return self._inverse_transform(X) +# def inverse_transform_data(self, X: List[Data]) -> List[Data]: +# return self._inverse_transform(X) - def inverse_transform_components(self, X: DataList) -> DataList: - return self._inverse_transform(X) +# def inverse_transform_components(self, X: List[Data]) -> List[Data]: +# return self._inverse_transform(X) - def inverse_transform_scores(self, X: 
DataArray) -> DataArray:
-        return self.converters[0].inverse_transform_scores(X)
+#     def inverse_transform_scores(self, X: DataArray) -> DataArray:
+#         return self.converters[0].inverse_transform_scores(X)
diff --git a/xeofs/preprocessing/preprocessor.py b/xeofs/preprocessing/preprocessor.py
index 63f18dd..8da82bc 100644
--- a/xeofs/preprocessing/preprocessor.py
+++ b/xeofs/preprocessing/preprocessor.py
@@ -1,9 +1,55 @@
-from typing import Optional, Sequence, Hashable, List
+from typing import Optional, Sequence, Hashable, List, Tuple, Any, Type
 
-from .factory import StackerFactory, ScalerFactory, MultiIndexConverterFactory
-from .sanitizer import DataArraySanitizer
-from ..utils.xarray_utils import get_dims
-from ..utils.data_types import DataObject, DataArray
+import numpy as np
+
+from .list_processor import GenericListTransformer
+from .dimension_renamer import DimensionRenamer
+from .scaler import Scaler
+from .stacker import StackerFactory, Stacker
+from .multi_index_converter import MultiIndexConverter
+from .sanitizer import Sanitizer
+from .concatenator import Concatenator
+from ..utils.xarray_utils import (
+    get_dims,
+    unwrap_singleton_list,
+    process_parameter,
+    _check_parameter_number,
+    convert_to_list,
+)
+from ..utils.data_types import (
+    DataArray,
+    Data,
+    DataVar,
+    DataVarBound,
+    DataList,
+    Dims,
+    DimsList,
+)
+
+
+def extract_new_dim_names(X: List[DimensionRenamer]) -> Tuple[Dims, DimsList]:
+    """Extract the new dimension names from a list of DimensionRenamer objects.
+
+    Parameters
+    ----------
+    X : list of DimensionRenamer
+        List of DimensionRenamer objects.
+
+    Returns
+    -------
+    Dims
+        Sample dimensions
+    DimsList
+        Feature dimensions
+
+    """
+    new_sample_dims = []
+    new_feature_dims: DimsList = []
+    for x in X:
+        new_sample_dims.append(x.sample_dims_after)
+        new_feature_dims.append(x.feature_dims_after)
+    new_sample_dims: Dims = tuple(np.unique(np.asarray(new_sample_dims)))
+    return new_sample_dims, new_feature_dims
 
 
 class Preprocessor:
@@ -17,87 +63,136 @@ class Preprocessor:
 
     Parameters
     ----------
+    sample_name : str, default="sample"
+        Name of the sample dimension.
+    feature_name : str, default="feature"
+        Name of the feature dimension.
+    with_center : bool, default=True
+        If True, the data is centered by subtracting the mean.
     with_std : bool, default=True
         If True, the data is divided by the standard deviation.
     with_coslat : bool, default=False
         If True, the data is multiplied by the square root of cosine of latitude weights.
     with_weights : bool, default=False
         If True, the data is multiplied by additional user-defined weights.
+    return_list : bool, default=True
+        If True, the output is returned as a list of DataArrays. If False, the output is returned as a single DataArray if possible.
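Taken together, fit below chains seven transformers (scale, rename, convert indexes, stack, convert again, sanitize, concatenate). An illustrative end-to-end sketch, assuming synthetic input:

import numpy as np
import xarray as xr

from xeofs.preprocessing.preprocessor import Preprocessor

da = xr.DataArray(
    np.random.rand(20, 6, 4),
    dims=("time", "lat", "lon"),
    coords={"lat": np.linspace(-75.0, 75.0, 6)},
)

prep = Preprocessor(with_center=True, with_std=True, with_coslat=True)
data2d = prep.fit_transform(da, sample_dims=("time",))  # 2D: (sample x feature)
roundtrip = prep.inverse_transform_data(data2d)         # back to (time, lat, lon)
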
""" def __init__( self, - sample_name="sample", - feature_name="feature", - with_std=True, - with_coslat=False, - with_weights=False, + sample_name: str = "sample", + feature_name: str = "feature", + with_center: bool = True, + with_std: bool = False, + with_coslat: bool = False, + return_list: bool = True, ): + # Set parameters self.sample_name = sample_name self.feature_name = feature_name + self.with_center = with_center self.with_std = with_std self.with_coslat = with_coslat - self.with_weights = with_weights + self.return_list = return_list def fit( self, - data: DataObject, - dim: Hashable | Sequence[Hashable] | List[Sequence[Hashable]], - weights: Optional[DataObject] = None, + X: List[Data] | Data, + sample_dims: Dims, + weights: Optional[List[Data] | Data] = None, ): + self._set_return_list(X) + X = convert_to_list(X) + self.n_data = len(X) + sample_dims, feature_dims = get_dims(X, sample_dims) + # Set sample and feature dimensions - sample_dims, feature_dims = get_dims(data, sample_dims=dim) - self.dims = {self.sample_name: sample_dims, self.feature_name: feature_dims} + self.dims = { + self.sample_name: sample_dims, + self.feature_name: feature_dims, + } + + # However, for each DataArray a list of feature dimensions must be provided + _check_parameter_number("feature_dims", feature_dims, self.n_data) - # Create Scaler - scaler_params = { + # Ensure that weights are provided as a list + weights = process_parameter("weights", weights, None, self.n_data) + + # 1 | Center, scale and weigh the data + scaler_kwargs = { + "with_center": self.with_center, "with_std": self.with_std, "with_coslat": self.with_coslat, - "with_weights": self.with_weights, } - self.scaler = ScalerFactory.create_scaler(data, **scaler_params) - data = self.scaler.fit_transform(data, sample_dims, feature_dims, weights) + scaler_ikwargs = { + "weights": weights, + } + self.scaler = GenericListTransformer(Scaler, **scaler_kwargs) + X = self.scaler.fit_transform(X, sample_dims, feature_dims, scaler_ikwargs) + + # 2 | Rename dimensions + self.renamer = GenericListTransformer(DimensionRenamer) + X = self.renamer.fit_transform(X, sample_dims, feature_dims) + sample_dims, feature_dims = extract_new_dim_names(self.renamer.transformers) - # Create MultiIndexConverter (Pre) - self.preconverter = MultiIndexConverterFactory.create_converter(data) - data = self.preconverter.fit_transform(data) + # 3 | Convert MultiIndexes (before stacking) + self.preconverter = GenericListTransformer(MultiIndexConverter) + X = self.preconverter.fit_transform(X, sample_dims, feature_dims) - # Create Stacker + # 4 | Stack the data to 2D DataArray stacker_kwargs = { "sample_name": self.sample_name, "feature_name": self.feature_name, } - self.stacker = StackerFactory.create_stacker(data, **stacker_kwargs) - data: DataArray = self.stacker.fit_transform(data, sample_dims, feature_dims) - - # Create MultiIndexConverter (Post) - self.postconverter = MultiIndexConverterFactory.create_converter(data) - data = self.postconverter.fit_transform(data) - - # Create Sanitizer - self.sanitizer = DataArraySanitizer( - sample_name=self.sample_name, feature_name=self.feature_name - ) - self.sanitizer.fit(data) + stack_type: Type[Stacker] = StackerFactory.create(X[0]) + self.stacker = GenericListTransformer(stack_type, **stacker_kwargs) + X = self.stacker.fit_transform(X, sample_dims, feature_dims) + # 5 | Convert MultiIndexes (after stacking) + self.postconverter = GenericListTransformer(MultiIndexConverter) + X = self.postconverter.fit_transform(X, 
sample_dims, feature_dims)
+
+        # 6 | Remove NaNs
+        sanitizer_kwargs = {
+            "sample_name": self.sample_name,
+            "feature_name": self.feature_name,
+        }
+        self.sanitizer = GenericListTransformer(Sanitizer, **sanitizer_kwargs)
+        X = self.sanitizer.fit_transform(X, sample_dims, feature_dims)
+
+        # 7 | Concatenate into one 2D DataArray
+        self.concatenator = Concatenator(self.sample_name, self.feature_name)
+        self.concatenator.fit(X)  # type: ignore
+
         return self
 
-    def transform(self, data: DataObject) -> DataArray:
-        data = self.scaler.transform(data)
-        data = self.preconverter.transform(data)
-        data = self.stacker.transform(data)
-        data = self.postconverter.transform(data)
-        return self.sanitizer.transform(data)
+    def transform(self, X: List[Data] | Data) -> DataArray:
+        X = convert_to_list(X)
+
+        if len(X) != self.n_data:
+            raise ValueError(
+                f"Number of data objects passed should match the number of data objects used for fitting: "
+                f"len(data objects)={len(X)} and "
+                f"len(data objects used for fitting)={self.n_data}"
+            )
+
+        X = self.scaler.transform(X)
+        X = self.renamer.transform(X)
+        X = self.preconverter.transform(X)
+        X = self.stacker.transform(X)
+        X = self.postconverter.transform(X)
+        X = self.sanitizer.transform(X)
+        return self.concatenator.transform(X)  # type: ignore
 
     def fit_transform(
         self,
-        data: DataObject,
-        dim: Hashable | Sequence[Hashable] | List[Sequence[Hashable]],
-        weights: Optional[DataObject] = None,
+        X: List[Data] | Data,
+        sample_dims: Dims,
+        weights: Optional[List[Data] | Data] = None,
     ) -> DataArray:
-        return self.fit(data, dim, weights).transform(data)
+        return self.fit(X, sample_dims, weights).transform(X)
 
-    def inverse_transform_data(self, data: DataArray) -> DataObject:
+    def inverse_transform_data(self, X: DataArray) -> List[Data] | Data:
         """Inverse transform the data.
 
         Parameters:
@@ -111,13 +206,16 @@ def inverse_transform_data(self, data: DataArray) -> DataObject:
             The inverse transformed data.
 
         """
-        data = self.sanitizer.inverse_transform_data(data)
-        data = self.postconverter.inverse_transform_data(data)
-        data = self.stacker.inverse_transform_data(data)
-        data = self.preconverter.inverse_transform_data(data)
-        return self.scaler.inverse_transform_data(data)
+        X_list = self.concatenator.inverse_transform_data(X)
+        X_list = self.sanitizer.inverse_transform_data(X_list)  # type: ignore
+        X_list = self.postconverter.inverse_transform_data(X_list)
+        X_list_ND = self.stacker.inverse_transform_data(X_list)
+        X_list_ND = self.preconverter.inverse_transform_data(X_list_ND)
+        X_list_ND = self.renamer.inverse_transform_data(X_list_ND)
+        X_list_ND = self.scaler.inverse_transform_data(X_list_ND)
+        return self._process_output(X_list_ND)
 
-    def inverse_transform_components(self, data: DataArray) -> DataObject:
+    def inverse_transform_components(self, X: DataArray) -> List[Data] | Data:
         """Inverse transform the components.
 
         Parameters:
@@ -131,13 +229,16 @@ def inverse_transform_components(self, data: DataArray) -> DataObject:
             The inverse transformed components.
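Continuing the Preprocessor sketch above, and mirroring the test suite's convention of building component- and score-like arrays by renaming one axis of the transformed data:

comps = data2d.rename({"sample": "mode"})               # (mode x feature)
comps_grid = prep.inverse_transform_components(comps)    # -> dims (mode, lat, lon)

scores = data2d.rename({"feature": "mode"})              # (sample x mode)
scores_time = prep.inverse_transform_scores(scores)      # -> dims (time, mode)
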
""" - data = self.sanitizer.inverse_transform_components(data) - data = self.postconverter.inverse_transform_components(data) - data = self.stacker.inverse_transform_components(data) - data = self.preconverter.inverse_transform_components(data) - return self.scaler.inverse_transform_components(data) + X_list = self.concatenator.inverse_transform_components(X) + X_list = self.sanitizer.inverse_transform_components(X_list) # type: ignore + X_list = self.postconverter.inverse_transform_components(X_list) + X_list_ND = self.stacker.inverse_transform_components(X_list) + X_list_ND = self.preconverter.inverse_transform_components(X_list_ND) + X_list_ND = self.renamer.inverse_transform_components(X_list_ND) + X_list_ND = self.scaler.inverse_transform_components(X_list_ND) + return self._process_output(X_list_ND) - def inverse_transform_scores(self, data: DataArray) -> DataArray: + def inverse_transform_scores(self, X: DataArray) -> DataArray: """Inverse transform the scores. Parameters: @@ -151,8 +252,23 @@ def inverse_transform_scores(self, data: DataArray) -> DataArray: The inverse transformed scores. """ - data = self.sanitizer.inverse_transform_scores(data) - data = self.postconverter.inverse_transform_scores(data) - data = self.stacker.inverse_transform_scores(data) - data = self.preconverter.inverse_transform_scores(data) - return self.scaler.inverse_transform_scores(data) + X_list = self.concatenator.inverse_transform_scores(X) + X_list = self.sanitizer.inverse_transform_scores(X_list) + X_list = self.postconverter.inverse_transform_scores(X_list) + X_list_ND = self.stacker.inverse_transform_scores(X_list) + X_list_ND = self.preconverter.inverse_transform_scores(X_list_ND) + X_list_ND = self.renamer.inverse_transform_scores(X_list_ND) + X_list_ND = self.scaler.inverse_transform_scores(X_list_ND) + return X_list_ND + + def _process_output(self, X: List[Data]) -> List[Data] | Data: + if self.return_list: + return X + else: + return unwrap_singleton_list(X) + + def _set_return_list(self, X): + if isinstance(X, (list, tuple)): + self.return_list = True + else: + self.return_list = False diff --git a/xeofs/preprocessing/sanitizer.py b/xeofs/preprocessing/sanitizer.py index 64c27b7..d72c1cb 100644 --- a/xeofs/preprocessing/sanitizer.py +++ b/xeofs/preprocessing/sanitizer.py @@ -1,88 +1,90 @@ -from typing import Self +from typing import Self, Optional import xarray as xr -from sklearn.base import BaseEstimator, TransformerMixin -from ..utils.data_types import DataArray +from .transformer import Transformer +from ..utils.data_types import Dims, DataArray, DataSet, Data, DataVar -class DataArraySanitizer(BaseEstimator, TransformerMixin): +class Sanitizer(Transformer): """ Removes NaNs from the feature dimension of a 2D DataArray. 
""" def __init__(self, sample_name="sample", feature_name="feature"): - self.sample_name = sample_name - self.feature_name = feature_name + super().__init__(sample_name=sample_name, feature_name=feature_name) - def _check_input_type(self, data) -> None: - if not isinstance(data, xr.DataArray): + def _check_input_type(self, X) -> None: + if not isinstance(X, xr.DataArray): raise ValueError("Input must be an xarray DataArray") - def _check_input_dims(self, data: DataArray) -> None: - if set(data.dims) != set([self.sample_name, self.feature_name]): + def _check_input_dims(self, X) -> None: + if set(X.dims) != set([self.sample_name, self.feature_name]): raise ValueError( "Input must have dimensions ({:}, {:})".format( self.sample_name, self.feature_name ) ) - def _check_input_coords(self, data: DataArray) -> None: - if not data.coords[self.feature_name].identical(self.feature_coords): + def _check_input_coords(self, X) -> None: + if not X.coords[self.feature_name].identical(self.feature_coords): raise ValueError( "Cannot transform data. Feature coordinates are different." ) - def fit(self, data: DataArray, y=None) -> Self: + def fit( + self, + X: Data, + sample_dims: Optional[Dims] = None, + feature_dims: Optional[Dims] = None, + **kwargs + ) -> Self: # Check if input is a DataArray - self._check_input_type(data) + self._check_input_type(X) # Check if input has the correct dimensions - self._check_input_dims(data) + self._check_input_dims(X) - self.feature_coords = data.coords[self.feature_name] + self.feature_coords = X.coords[self.feature_name] # Identify NaN locations - self.is_valid_feature = data.notnull().all(self.sample_name).compute() + self.is_valid_feature = X.notnull().all(self.sample_name).compute() return self - def transform(self, data: DataArray) -> DataArray: + def transform(self, X: DataArray) -> DataArray: # Check if input is a DataArray - self._check_input_type(data) + self._check_input_type(X) # Check if input has the correct dimensions - self._check_input_dims(data) + self._check_input_dims(X) # Check if input has the correct coordinates - self._check_input_coords(data) + self._check_input_coords(X) # Remove NaN entries - data = data.isel({self.feature_name: self.is_valid_feature}) + X = X.isel({self.feature_name: self.is_valid_feature}) - return data + return X - def fit_transform(self, data: DataArray, y=None) -> DataArray: - return self.fit(data, y).transform(data) - - def inverse_transform_data(self, data: DataArray) -> DataArray: + def inverse_transform_data(self, X: DataArray) -> DataArray: # Reindex only if feature coordinates are different - is_same_coords = data.coords[self.feature_name].identical(self.feature_coords) + is_same_coords = X.coords[self.feature_name].identical(self.feature_coords) if is_same_coords: - return data + return X else: - return data.reindex({self.feature_name: self.feature_coords.values}) + return X.reindex({self.feature_name: self.feature_coords.values}) - def inverse_transform_components(self, data: DataArray) -> DataArray: + def inverse_transform_components(self, X: DataArray) -> DataArray: # Reindex only if feature coordinates are different - is_same_coords = data.coords[self.feature_name].identical(self.feature_coords) + is_same_coords = X.coords[self.feature_name].identical(self.feature_coords) if is_same_coords: - return data + return X else: - return data.reindex({self.feature_name: self.feature_coords.values}) + return X.reindex({self.feature_name: self.feature_coords.values}) - def inverse_transform_scores(self, data: DataArray) 
-> DataArray: - return data + def inverse_transform_scores(self, X: DataArray) -> DataArray: + return X diff --git a/xeofs/preprocessing/scaler.py b/xeofs/preprocessing/scaler.py index 381d039..d90143a 100644 --- a/xeofs/preprocessing/scaler.py +++ b/xeofs/preprocessing/scaler.py @@ -1,25 +1,14 @@ -from typing import List, Optional, Sequence, Hashable, Self +from typing import Optional, Self import numpy as np import xarray as xr -from sklearn.base import BaseEstimator, TransformerMixin - -from ..utils.sanity_checks import ( - assert_single_dataset, - assert_list_dataarrays, - convert_to_dim_type, -) -from ..utils.data_types import ( - Dims, - DimsList, - DataArray, - DataSet, - DataList, -) -from ..utils.xarray_utils import compute_sqrt_cos_lat_weights - - -class DataArrayScaler(BaseEstimator, TransformerMixin): + +from .transformer import Transformer +from ..utils.data_types import Dims, DataArray, DataSet, Data, DataVar, DataVarBound +from ..utils.xarray_utils import compute_sqrt_cos_lat_weights, feature_ones_like + + +class Scaler(Transformer): """Scale the data along sample dimensions. Scaling includes (i) removing the mean and, optionally, (ii) dividing by the standard deviation, @@ -28,162 +17,132 @@ class DataArrayScaler(BaseEstimator, TransformerMixin): Parameters ---------- + with_center : bool, default=True + If True, the data is centered by subtracting the mean. with_std : bool, default=True If True, the data is divided by the standard deviation. with_coslat : bool, default=False If True, the data is multiplied by the square root of cosine of latitude weights. - with_weights : bool, default=False - If True, the data is multiplied by additional user-defined weights. - + weights : DataArray | Dataset, optional + Weights to be applied to the data. Must have the same dimensions as the data. + If None, no weights are applied. """ - def __init__(self, with_std=False, with_coslat=False, with_weights=False): + def __init__( + self, + with_center: bool = True, + with_std: bool = False, + with_coslat: bool = False, + ): + super().__init__() + self.with_center = with_center self.with_std = with_std self.with_coslat = with_coslat - self.with_weights = with_weights - - def _verify_input(self, data: DataArray, name: str): - if not isinstance(data, xr.DataArray): - raise ValueError(f"{name} must be an xarray DataArray") - - def _compute_sqrt_cos_lat_weights(self, data, dim): - """Compute the square root of cosine of latitude weights. - - Parameters - ---------- - data : DataArray | DataSet - Data to be scaled. - dim : sequence of hashable - Dimensions along which the data is considered to be a feature. - - Returns - ------- - DataArray | DataSet - Square root of cosine of latitude weights. - """ - self._verify_input(data, "data") + def _verify_input(self, X, name: str): + if not isinstance(X, (xr.DataArray, xr.Dataset)): + raise TypeError(f"{name} must be an xarray DataArray or Dataset") - weights = compute_sqrt_cos_lat_weights(data, dim) - weights.name = "coslat_weights" + def _process_weights(self, X: DataVarBound, weights) -> DataVarBound: + if weights is None: + wghts: DataVarBound = feature_ones_like(X, self.feature_dims) + else: + wghts: DataVarBound = weights - return weights + return wghts def fit( self, - data: DataArray, + X: DataVar, sample_dims: Dims, feature_dims: Dims, - weights: Optional[DataArray] = None, + weights: Optional[DataVar] = None, ) -> Self: """Fit the scaler to the data. Parameters ---------- - data : DataArray + X : DataArray | Dataset Data to be scaled. 
sample_dims : sequence of hashable Dimensions along which the data is considered to be a sample. feature_dims : sequence of hashable Dimensions along which the data is considered to be a feature. - weights : DataArray, optional + weights : DataArray | Dataset, optional Weights to be applied to the data. Must have the same dimensions as the data. If None, no weights are applied. """ # Check input types - self._verify_input(data, "data") - if weights is not None: - self._verify_input(weights, "weights") - - sample_dims = convert_to_dim_type(sample_dims) - feature_dims = convert_to_dim_type(feature_dims) + self._verify_input(X, "data") + self.sample_dims = sample_dims + self.feature_dims = feature_dims # Store sample and feature dimensions for later use - self.dims_ = {"sample": sample_dims, "feature": feature_dims} + self.dims = {"sample": sample_dims, "feature": feature_dims} + + params = self.get_params() # Scaling parameters are computed along sample dimensions - self.mean_: DataArray = data.mean(sample_dims).compute() + if params["with_center"]: + self.mean_: DataVar = X.mean(self.sample_dims).compute() - params = self.get_params() if params["with_std"]: - self.std_: DataArray = data.std(sample_dims).compute() + self.std_: DataVar = X.std(self.sample_dims).compute() if params["with_coslat"]: - self.coslat_weights_: DataArray = self._compute_sqrt_cos_lat_weights( - data, feature_dims + self.coslat_weights_: DataVar = compute_sqrt_cos_lat_weights( + data=X, feature_dims=self.feature_dims ).compute() - if params["with_weights"]: - if weights is None: - raise ValueError("Weights must be provided when with_weights is True") - self.weights_: DataArray = weights.compute() + # Convert None weights to ones + self.weights_: DataVar = self._process_weights(X, weights).compute() return self - def transform(self, data: DataArray) -> DataArray: + def transform(self, X: DataVarBound) -> DataVarBound: """Scale the data. Parameters ---------- - data : DataArray + data : DataArray | Dataset Data to be scaled. Returns ------- - DataArray + DataArray | Dataset Scaled data. """ - self._verify_input(data, "data") - - data = data - self.mean_ + self._verify_input(X, "X") params = self.get_params() + + if params["with_center"]: + X = X - self.mean_ if params["with_std"]: - data = data / self.std_ + X = X / self.std_ if params["with_coslat"]: - data = data * self.coslat_weights_ - if params["with_weights"]: - data = data * self.weights_ - return data + X = X * self.coslat_weights_ + + X = X * self.weights_ + return X def fit_transform( self, - data: DataArray, + X: DataVarBound, sample_dims: Dims, feature_dims: Dims, - weights: Optional[DataArray] = None, - ) -> DataArray: - """Fit the scaler to the data and scale it. - - Parameters - ---------- - data : DataArray - Data to be scaled. - sample_dims : sequence of hashable - Dimensions along which the data is considered to be a sample. - feature_dims : sequence of hashable - Dimensions along which the data is considered to be a feature. - weights : DataArray, optional - Weights to be applied to the data. Must have the same dimensions as the data. - If None, no weights are applied. - - Returns - ------- - DataArray - Scaled data. 
- - """ - - return self.fit(data, sample_dims, feature_dims, weights).transform(data) + weights: Optional[DataVarBound] = None, + ) -> DataVarBound: + return self.fit(X, sample_dims, feature_dims, weights).transform(X) - def inverse_transform_data(self, data: DataArray) -> DataArray: + def inverse_transform_data(self, X: DataVarBound) -> DataVarBound: """Unscale the data. Parameters ---------- - data : DataArray | DataSet + X : DataArray | DataSet Data to be unscaled. Returns @@ -192,243 +151,181 @@ def inverse_transform_data(self, data: DataArray) -> DataArray: Unscaled data. """ - self._verify_input(data, "data") + self._verify_input(X, "X") params = self.get_params() - if params["with_weights"]: - data = data / self.weights_ + X = X / self.weights_ if params["with_coslat"]: - data = data / self.coslat_weights_ + X = X / self.coslat_weights_ if params["with_std"]: - data = data * self.std_ - - data = data + self.mean_ - - return data - - def inverse_transform_components(self, data: DataArray) -> DataArray: - return data - - def inverse_transform_scores(self, data: DataArray) -> DataArray: - return data - - -class DataSetScaler(DataArrayScaler): - def _verify_input(self, data: DataSet, name: str): - """Verify that the input data is a Dataset. - - Parameters - ---------- - data : xarray.Dataset - Data to be checked. - - """ - assert_single_dataset(data, name) - - def _compute_sqrt_cos_lat_weights(self, data: DataSet, dim) -> DataArray: - return super()._compute_sqrt_cos_lat_weights(data, dim) - - def fit( - self, - data: DataSet, - sample_dims: Hashable | Sequence[Hashable], - feature_dims: Hashable | Sequence[Hashable], - weights: Optional[DataSet] = None, - ) -> Self: - return super().fit(data, sample_dims, feature_dims, weights) # type: ignore - - def transform(self, data: DataSet) -> DataSet: - return super().transform(data) # type: ignore - - def fit_transform( - self, - data: DataSet, - sample_dims: Hashable | Sequence[Hashable], - feature_dims: Hashable | Sequence[Hashable], - weights: Optional[DataSet] = None, - ) -> DataSet: - return super().fit_transform(data, sample_dims, feature_dims, weights) # type: ignore - - def inverse_transform_data(self, data: DataSet) -> DataSet: - return super().inverse_transform_data(data) # type: ignore - - def inverse_transform_components(self, data: DataSet) -> DataSet: - return super().inverse_transform_components(data) # type: ignore - - -class DataListScaler(DataArrayScaler): - """Scale a list of xr.DataArray along sample dimensions. - - Scaling includes (i) removing the mean and, optionally, (ii) dividing by the standard deviation, - (iii) multiplying by the square root of cosine of latitude weights (area weighting; coslat weighting), - and (iv) multiplying by additional user-defined weights. - - Parameters - ---------- - with_std : bool, default=True - If True, the data is divided by the standard deviation. - with_coslat : bool, default=False - If True, the data is multiplied by the square root of cosine of latitude weights. - with_weights : bool, default=False - If True, the data is multiplied by additional user-defined weights. - - """ - - def __init__(self, with_std=False, with_coslat=False, with_weights=False): - super().__init__( - with_std=with_std, with_coslat=with_coslat, with_weights=with_weights - ) - self.scalers = [] - - def _verify_input(self, data: DataList, name: str): - """Verify that the input data is a list of DataArrays. - - Parameters - ---------- - data : list of xarray.DataArray - Data to be checked. 
- - """ - assert_list_dataarrays(data, name) - - def fit( - self, - data: DataList, - sample_dims: Dims, - feature_dims_list: DimsList, - weights: Optional[DataList] = None, - ) -> Self: - """Fit the scaler to the data. - - Parameters - ---------- - data : list of xarray.DataArray - Data to be scaled. - sample_dims : hashable or sequence of hashable - Dimensions along which the data is considered to be a sample. - feature_dims_list : list of hashable or list of sequence of hashable - List of dimensions along which the data is considered to be a feature. - weights : list of xarray.DataArray, optional - List of weights to be applied to the data. Must have the same dimensions as the data. - - """ - self._verify_input(data, "data") - - # Check input - if not isinstance(feature_dims_list, list): - err_message = "feature dims must be a list of the feature dimensions of each DataArray, " - err_message += 'e.g. [("lon", "lat"), ("lon")]' - raise TypeError(err_message) - - sample_dims = convert_to_dim_type(sample_dims) - feature_dims = [convert_to_dim_type(fdims) for fdims in feature_dims_list] - - # Sample dimensions are the same for all data arrays - # Feature dimensions may be different for each data array - self.dims = {"sample": sample_dims, "feature": feature_dims} - - # However, for each DataArray a list of feature dimensions must be provided - if len(data) != len(feature_dims): - err_message = ( - "Number of data arrays and feature dimensions must be the same. " - ) - err_message += f"Got {len(data)} data arrays and {len(feature_dims)} feature dimensions" - raise ValueError(err_message) - - # If no weights are provided, create a list of None - if weights is None: - self.weights = [None] * len(data) - else: - self.weights = weights - - # Check that number of weights is the same as number of data arrays - params = self.get_params() - if params["with_weights"]: - if len(data) != len(self.weights): - err_message = "Number of data arrays and weights must be the same. " - err_message += ( - f"Got {len(data)} data arrays and {len(self.weights)} weights" - ) - raise ValueError(err_message) - - for da, wghts, fdims in zip(data, self.weights, feature_dims): - # Create DataArrayScaler object for each data array - scaler = DataArrayScaler(**params) - scaler.fit(da, sample_dims=sample_dims, feature_dims=fdims, weights=wghts) - self.scalers.append(scaler) - - return self - - def transform(self, da_list: DataList) -> DataList: - """Scale the data. - - Parameters - ---------- - da_list : list of xarray.DataArray - Data to be scaled. - - Returns - ------- - list of xarray.DataArray - Scaled data. - - """ - self._verify_input(da_list, "da_list") - - da_list_transformed = [] - for scaler, da in zip(self.scalers, da_list): - da_list_transformed.append(scaler.transform(da)) - return da_list_transformed - - def fit_transform( - self, - data: DataList, - sample_dims: Dims, - feature_dims_list: DimsList, - weights: Optional[DataList] = None, - ) -> DataList: - """Fit the scaler to the data and scale it. - - Parameters - ---------- - data : list of xr.DataArray - Data to be scaled. - sample_dims : hashable or sequence of hashable - Dimensions along which the data is considered to be a sample. - feature_dims_list : list of hashable or list of sequence of hashable - List of dimensions along which the data is considered to be a feature. - weights : list of xr.DataArray, optional - List of weights to be applied to the data. Must have the same dimensions as the data. 
- - Returns - ------- - list of xarray.DataArray - Scaled data. - - """ - self.fit(data, sample_dims, feature_dims_list, weights) - return self.transform(data) - - def inverse_transform_data(self, da_list: DataList) -> DataList: - """Unscale the data. - - Parameters - ---------- - da_list : list of xarray.DataArray - Data to be scaled. - - Returns - ------- - list of xarray.DataArray - Scaled data. - - """ - self._verify_input(da_list, "da_list") - - da_list_transformed = [] - for scaler, da in zip(self.scalers, da_list): - da_list_transformed.append(scaler.inverse_transform_data(da)) - return da_list_transformed - - def inverse_transform_components(self, da_list: DataList) -> DataList: - return da_list + X = X * self.std_ + if params["with_center"]: + X = X + self.mean_ + + return X + + def inverse_transform_components(self, X: DataVarBound) -> DataVarBound: + return X + + def inverse_transform_scores(self, X: DataArray) -> DataArray: + return X + + +# class DataListScaler(Scaler): +# """Scale a list of xr.DataArray along sample dimensions. + +# Scaling includes (i) removing the mean and, optionally, (ii) dividing by the standard deviation, +# (iii) multiplying by the square root of cosine of latitude weights (area weighting; coslat weighting), +# and (iv) multiplying by additional user-defined weights. + +# Parameters +# ---------- +# with_std : bool, default=True +# If True, the data is divided by the standard deviation. +# with_coslat : bool, default=False +# If True, the data is multiplied by the square root of cosine of latitude weights. +# with_weights : bool, default=False +# If True, the data is multiplied by additional user-defined weights. + +# """ + +# def __init__(self, with_std=False, with_coslat=False): +# super().__init__(with_std=with_std, with_coslat=with_coslat) +# self.scalers = [] + +# def _verify_input(self, data, name: str): +# """Verify that the input data is a list of DataArrays. + +# Parameters +# ---------- +# data : list of xarray.DataArray +# Data to be checked. + +# """ +# if not isinstance(data, list): +# raise TypeError(f"{name} must be a list of xarray DataArrays or Datasets") +# if not all(isinstance(da, (xr.DataArray, xr.Dataset)) for da in data): +# raise TypeError(f"{name} must be a list of xarray DataArrays or Datasets") + +# def fit( +# self, +# data: List[Data], +# sample_dims: Dims, +# feature_dims_list: DimsList, +# weights: Optional[List[Data] | Data] = None, +# ) -> Self: +# """Fit the scaler to the data. + +# Parameters +# ---------- +# data : list of xarray.DataArray +# Data to be scaled. +# sample_dims : hashable or sequence of hashable +# Dimensions along which the data is considered to be a sample. +# feature_dims_list : list of hashable or list of sequence of hashable +# List of dimensions along which the data is considered to be a feature. +# weights : list of xarray.DataArray, optional +# List of weights to be applied to the data. Must have the same dimensions as the data. + +# """ +# self._verify_input(data, "data") + +# # Check input +# if not isinstance(feature_dims_list, list): +# err_message = "feature dims must be a list of the feature dimensions of each DataArray, " +# err_message += 'e.g. 
[("lon", "lat"), ("lon")]' +# raise TypeError(err_message) + +# # Sample dimensions are the same for all data arrays +# # Feature dimensions may be different for each data array +# self.dims = {"sample": sample_dims, "feature": feature_dims_list} + +# # However, for each DataArray a list of feature dimensions must be provided +# _check_parameter_number("feature_dims", feature_dims_list, len(data)) + +# # If no weights are provided, create a list of None +# self.weights = process_parameter("weights", weights, None, len(data)) + +# params = self.get_params() + +# for da, wghts, fdims in zip(data, self.weights, feature_dims_list): +# # Create Scaler object for each data array +# scaler = Scaler(**params) +# scaler.fit(da, sample_dims=sample_dims, feature_dims=fdims, weights=wghts) +# self.scalers.append(scaler) + +# return self + +# def transform(self, da_list: List[Data]) -> List[Data]: +# """Scale the data. + +# Parameters +# ---------- +# da_list : list of xarray.DataArray +# Data to be scaled. + +# Returns +# ------- +# list of xarray.DataArray +# Scaled data. + +# """ +# self._verify_input(da_list, "da_list") + +# da_list_transformed = [] +# for scaler, da in zip(self.scalers, da_list): +# da_list_transformed.append(scaler.transform(da)) +# return da_list_transformed + +# def fit_transform( +# self, +# data: List[Data], +# sample_dims: Dims, +# feature_dims_list: DimsList, +# weights: Optional[List[Data] | Data] = None, +# ) -> List[Data]: +# """Fit the scaler to the data and scale it. + +# Parameters +# ---------- +# data : list of xr.DataArray +# Data to be scaled. +# sample_dims : hashable or sequence of hashable +# Dimensions along which the data is considered to be a sample. +# feature_dims_list : list of hashable or list of sequence of hashable +# List of dimensions along which the data is considered to be a feature. +# weights : list of xr.DataArray, optional +# List of weights to be applied to the data. Must have the same dimensions as the data. + +# Returns +# ------- +# list of xarray.DataArray +# Scaled data. + +# """ +# self.fit(data, sample_dims, feature_dims_list, weights) +# return self.transform(data) + +# def inverse_transform_data(self, da_list: List[Data]) -> List[Data]: +# """Unscale the data. + +# Parameters +# ---------- +# da_list : list of xarray.DataArray +# Data to be scaled. + +# Returns +# ------- +# list of xarray.DataArray +# Scaled data. 
+ +# """ +# self._verify_input(da_list, "da_list") + +# da_list_transformed = [] +# for scaler, da in zip(self.scalers, da_list): +# da_list_transformed.append(scaler.inverse_transform_data(da)) +# return da_list_transformed + +# def inverse_transform_components(self, da_list: List[Data]) -> List[Data]: +# return da_list diff --git a/xeofs/preprocessing/stacker.py b/xeofs/preprocessing/stacker.py index 574e170..e0ef1e9 100644 --- a/xeofs/preprocessing/stacker.py +++ b/xeofs/preprocessing/stacker.py @@ -1,19 +1,24 @@ -from typing import List, Self +from abc import abstractmethod +from typing import List, Optional, Self, Type import numpy as np import pandas as pd import xarray as xr -from sklearn.base import BaseEstimator, TransformerMixin -from ..utils.data_types import Dims, DimsList, DataArray, DataSet, DataList +from .transformer import Transformer +from ..utils.data_types import Dims, DataArray, DataSet, Data, DataVar, DataVarBound from ..utils.sanity_checks import convert_to_dim_type -class DataArrayStacker(BaseEstimator, TransformerMixin): +class Stacker(Transformer): """Converts a DataArray of any dimensionality into a 2D structure. Attributes ---------- + sample_dims : Sequence[Hashable] + The dimensions of the data that will be stacked along the `sample` dimension. + feature_dims : Sequence[Hashable] + The dimensions of the data that will be stacked along the `feature` dimension. sample_name : str The name of the sample dimension. feature_name : str @@ -35,33 +40,33 @@ def __init__( sample_name: str = "sample", feature_name: str = "feature", ): - self.sample_name = sample_name - self.feature_name = feature_name + super().__init__(sample_name, feature_name) self.dims_in = tuple() self.dims_out = tuple((sample_name, feature_name)) - self.dims_mapping = {d: tuple() for d in self.dims_out} + self.dims_mapping = {} + self.dims_mapping.update({d: tuple() for d in self.dims_out}) self.coords_in = {} self.coords_out = {} - def _validate_matching_dimensions(self, data: DataArray): + def _validate_matching_dimensions(self, X: Data): """Verify that the dimensions of the data are consistent with the dimensions used to fit the stacker.""" # Test whether sample and feature dimensions are present in data array expected_sample_dims = set(self.dims_mapping[self.sample_name]) expected_feature_dims = set(self.dims_mapping[self.feature_name]) expected_dims = expected_sample_dims | expected_feature_dims - given_dims = set(data.dims) + given_dims = set(X.dims) if not (expected_dims == given_dims): raise ValueError( f"One or more dimensions in {expected_dims} are not present in data." ) - def _validate_matching_feature_coords(self, data: DataArray): + def _validate_matching_feature_coords(self, X: Data): """Verify that the feature coordinates of the data are consistent with the feature coordinates used to fit the stacker.""" feature_dims = self.dims_mapping[self.feature_name] coords_are_equal = [ - data.coords[dim].equals(self.coords_in[dim]) for dim in feature_dims + X.coords[dim].equals(self.coords_in[dim]) for dim in feature_dims ] if not all(coords_are_equal): raise ValueError( @@ -80,19 +85,18 @@ def _validate_dimension_names(self, sample_dims, feature_dims): f"Name of feature dimension ({self.feature_name}) is already present in data. Please use another name." 
) - def _validate_indices(self, data: DataArray): + def _validate_indices(self, X: Data): """Check that the indices of the data contain no MultiIndex""" - if any([isinstance(index, pd.MultiIndex) for index in data.indexes.values()]): + if any([isinstance(index, pd.MultiIndex) for index in X.indexes.values()]): raise ValueError("Cannot stack data containing a MultiIndex.") - def _sanity_check(self, data: DataArray, sample_dims, feature_dims): + def _sanity_check(self, X: Data, sample_dims, feature_dims): self._validate_dimension_names(sample_dims, feature_dims) - self._validate_indices(data) + self._validate_indices(X) - def _stack( - self, data: DataArray, sample_dims: Dims, feature_dims: Dims - ) -> DataArray: - """Reshape a DataArray to 2D. + @abstractmethod + def _stack(self, X: Data, sample_dims: Dims, feature_dims: Dims) -> DataArray: + """Stack data to 2D. Parameters ---------- @@ -108,45 +112,9 @@ def _stack( data_stacked : DataArray The reshaped 2d-data. """ - sample_name = self.sample_name - feature_name = self.feature_name - - # 3 cases: - # 1. uni-dimensional with correct feature/sample name ==> do nothing - # 2. uni-dimensional with name different from feature/sample ==> rename - # 3. multi-dimensinoal with names different from feature/sample ==> stack - - # - SAMPLE - - if len(sample_dims) == 1: - # Case 1 - if sample_dims[0] == sample_name: - pass - # Case 2 - else: - data = data.rename({sample_dims[0]: sample_name}) - # Case 3 - else: - data = data.stack({sample_name: sample_dims}) - - # - FEATURE - - if len(feature_dims) == 1: - # Case 1 - if feature_dims[0] == feature_name: - pass - # Case 2 - else: - data = data.rename({feature_dims[0]: feature_name}) - # Case 3 - else: - data = data.stack({feature_name: feature_dims}) - # Reorder dimensions to be always (sample, feature) - if data.dims == (feature_name, sample_name): - data = data.transpose(sample_name, feature_name) - - return data - - def _unstack(self, data: DataArray) -> DataArray: + @abstractmethod + def _unstack(self, X: DataArray) -> Data: """Unstack 2D DataArray to its original dimensions. Parameters @@ -159,49 +127,17 @@ def _unstack( data_unstacked : DataArray The unstacked data.
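For intuition, the 2D layout the concrete stackers must produce corresponds to plain xarray stacking; a small self-contained sketch with hypothetical dimensions:

    import numpy as np
    import xarray as xr

    da = xr.DataArray(np.zeros((3, 4, 5)), dims=("time", "lat", "lon"))

    # (time, lat, lon) -> (sample, feature): 3 samples x 20 features
    stacked = da.rename({"time": "sample"}).stack(feature=("lat", "lon"))
    assert stacked.dims == ("sample", "feature")
    assert stacked.shape == (3, 20)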
""" - sample_name = self.sample_name - feature_name = self.feature_name - # pass if feature/sample dimensions do not exist in data - if feature_name in data.dims: - # If sample dimensions is one dimensional, rename is sufficient, otherwise unstack - if len(self.dims_mapping[feature_name]) == 1: - if self.dims_mapping[feature_name][0] != feature_name: - data = data.rename( - {feature_name: self.dims_mapping[feature_name][0]} - ) - else: - data = data.unstack(feature_name) - - if sample_name in data.dims: - # If sample dimensions is one dimensional, rename is sufficient, otherwise unstack - if len(self.dims_mapping[sample_name]) == 1: - if self.dims_mapping[sample_name][0] != sample_name: - data = data.rename({sample_name: self.dims_mapping[sample_name][0]}) - else: - data = data.unstack(sample_name) - - else: - pass - - return data - - def _reorder_dims(self, data): + def _reorder_dims(self, X: DataVarBound) -> DataVarBound: """Reorder dimensions to original order; catch ('mode') dimensions via ellipsis""" order_input_dims = [ - valid_dim for valid_dim in self.dims_in if valid_dim in data.dims + valid_dim for valid_dim in self.dims_in if valid_dim in X.dims ] - if order_input_dims != data.dims: - data = data.transpose(..., *order_input_dims) - return data + if order_input_dims != X.dims: + X = X.transpose(..., *order_input_dims) + return X - def fit( - self, - data: DataArray, - sample_dims: Dims, - feature_dims: Dims, - y=None, - ) -> Self: + def fit(self, X: Data, sample_dims: Dims, feature_dims: Dims) -> Self: """Fit the stacker. Parameters @@ -215,26 +151,28 @@ def fit( The fitted stacker. """ - self._sanity_check(data, sample_dims, feature_dims) - - # Set in/out dimensions - self.dims_in = data.dims - self.dims_mapping = { - self.sample_name: sample_dims, - self.feature_name: feature_dims, - } + self.sample_dims = sample_dims + self.feature_dims = feature_dims + self.dims_mapping.update( + { + self.sample_name: sample_dims, + self.feature_name: feature_dims, + } + ) + self._sanity_check(X, sample_dims, feature_dims) - # Set in coordinates - self.coords_in = {dim: data.coords[dim] for dim in data.dims} + # Set dimensions and coordinates + self.dims_in = X.dims + self.coords_in = {dim: X.coords[dim] for dim in X.dims} return self - def transform(self, data: DataArray) -> DataArray: + def transform(self, X: Data) -> DataArray: """Reshape DataArray to 2D. Parameters ---------- - data : DataArray + X : DataArray The data to be reshaped. 
Returns @@ -251,16 +189,16 @@ def transform(self, data: DataArray) -> DataArray: """ # Test whether sample and feature dimensions are present in data array - self._validate_matching_dimensions(data) + self._validate_matching_dimensions(X) # Check if data to be transformed has the same feature coordinates as the data used to fit the stacker - self._validate_matching_feature_coords(data) + self._validate_matching_feature_coords(X) # Stack data sample_dims = self.dims_mapping[self.sample_name] feature_dims = self.dims_mapping[self.feature_name] da: DataArray = self._stack( - data, sample_dims=sample_dims, feature_dims=feature_dims + X, sample_dims=sample_dims, feature_dims=feature_dims ) # Set out coordinates @@ -274,19 +212,18 @@ def transform(self, data: DataArray) -> DataArray: def fit_transform( self, - data: DataArray, + X: DataVar, sample_dims: Dims, feature_dims: Dims, - y=None, ) -> DataArray: - return self.fit(data, sample_dims, feature_dims, y).transform(data) + return self.fit(X, sample_dims, feature_dims).transform(X) - def inverse_transform_data(self, data: DataArray) -> DataArray: + def inverse_transform_data(self, X: DataArray) -> Data: """Reshape the 2D data (sample x feature) back into its original dimensions. Parameters ---------- - data : DataArray + X : DataArray The data to be reshaped. Returns @@ -295,11 +232,11 @@ def inverse_transform_data(self, data: DataArray) -> DataArray: The reshaped data. """ - data = self._unstack(data) - data = self._reorder_dims(data) - return data + Xnd = self._unstack(X) + Xnd = self._reorder_dims(Xnd) + return Xnd - def inverse_transform_components(self, data: DataArray) -> DataArray: + def inverse_transform_components(self, X: DataArray) -> Data: """Reshape the 2D components (sample x feature) back into its original dimensions. Parameters @@ -313,9 +250,9 @@ def inverse_transform_components(self, data: DataArray) -> DataArray: The reshaped data. """ - data = self._unstack(data) - data = self._reorder_dims(data) - return data + Xnd = self._unstack(X) + Xnd = self._reorder_dims(Xnd) + return Xnd def inverse_transform_scores(self, data: DataArray) -> DataArray: """Reshape the 2D scores (sample x feature) back into its original dimensions. @@ -331,12 +268,111 @@ def inverse_transform_scores(self, data: DataArray) -> DataArray: The reshaped data. """ - data = self._unstack(data) + data = self._unstack(data) # type: ignore data = self._reorder_dims(data) return data -class DataSetStacker(DataArrayStacker): +class DataArrayStacker(Stacker): + def _stack( + self, data: DataArray, sample_dims: Dims, feature_dims: Dims + ) -> DataArray: + """Reshape a DataArray to 2D. + + Parameters + ---------- + data : DataArray + The data to be reshaped. + sample_dims : Hashable or Sequence[Hashable] + The dimensions of the data that will be stacked along the `sample` dimension. + feature_dims : Hashable or Sequence[Hashable] + The dimensions of the data that will be stacked along the `feature` dimension. + + Returns + ------- + data_stacked : DataArray + The reshaped 2d-data. + """ + sample_name = self.sample_name + feature_name = self.feature_name + + # 3 cases: + # 1. uni-dimensional with correct feature/sample name ==> do nothing + # 2. uni-dimensional with name different from feature/sample ==> rename + # 3. 
multi-dimensional with names different from feature/sample ==> stack + + # - SAMPLE - + if len(sample_dims) == 1: + # Case 1 + if sample_dims[0] == sample_name: + pass + # Case 2 + else: + data = data.rename({sample_dims[0]: sample_name}) + # Case 3 + else: + data = data.stack({sample_name: sample_dims}) + + # - FEATURE - + if len(feature_dims) == 1: + # Case 1 + if feature_dims[0] == feature_name: + pass + # Case 2 + else: + data = data.rename({feature_dims[0]: feature_name}) + # Case 3 + else: + data = data.stack({feature_name: feature_dims}) + + # Reorder dimensions to be always (sample, feature) + if data.dims == (feature_name, sample_name): + data = data.transpose(sample_name, feature_name) + + return data + + def _unstack(self, data: DataArray) -> DataArray: + """Unstack 2D DataArray to its original dimensions. + + Parameters + ---------- + data : DataArray + The data to be unstacked. + + Returns + ------- + data_unstacked : DataArray + The unstacked data. + """ + sample_name = self.sample_name + feature_name = self.feature_name + + # pass if feature/sample dimensions do not exist in data + if feature_name in data.dims: + # If the feature dimension is one-dimensional, renaming is sufficient; otherwise unstack + if len(self.dims_mapping[feature_name]) == 1: + if self.dims_mapping[feature_name][0] != feature_name: + data = data.rename( + {feature_name: self.dims_mapping[feature_name][0]} + ) + else: + data = data.unstack(feature_name) + + if sample_name in data.dims: + # If the sample dimension is one-dimensional, renaming is sufficient; otherwise unstack + if len(self.dims_mapping[sample_name]) == 1: + if self.dims_mapping[sample_name][0] != sample_name: + data = data.rename({sample_name: self.dims_mapping[sample_name][0]}) + else: + data = data.unstack(sample_name) + + else: + pass + + return data + + +class DataSetStacker(Stacker): """Converts a Dataset of any dimensionality into a 2D structure.""" def _validate_dimension_names(self, sample_dims, feature_dims): @@ -438,242 +474,34 @@ def _unstack_scores(self, data: DataArray) -> DataArray: data = self._reorder_dims(data) return data - def fit( - self, - data: DataSet, - sample_dims: Dims, - feature_dims: Dims, - y=None, - ) -> Self: - """Fit the stacker. - - Parameters - ---------- - data : DataArray - The data to be reshaped. - - Returns - ------- - self : DataArrayStacker - The fitted stacker.
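The rename-versus-unstack branching above mirrors what a plain xarray round trip requires; a compact check of both branches (hypothetical dimensions):

    import numpy as np
    import xarray as xr

    da = xr.DataArray(np.zeros((3, 4, 5)), dims=("time", "lat", "lon"))

    # multi-dimensional features: stacked, so unstacking restores (lat, lon)
    stacked = da.stack(feature=("lat", "lon"))
    assert stacked.unstack("feature").sizes == da.sizes

    # one-dimensional sample: only renamed, so renaming back suffices
    renamed = da.rename({"time": "sample"})
    assert renamed.rename({"sample": "time"}).dims == da.dims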
- - """ - return super().fit(data, sample_dims, feature_dims, y) # type: ignore - - def transform(self, data: DataSet) -> DataArray: - return super().transform(data) # type: ignore - - def fit_transform( - self, data: DataSet, sample_dims: Dims, feature_dims: Dims, y=None - ) -> DataArray: - return super().fit_transform(data, sample_dims, feature_dims, y) # type: ignore - - def inverse_transform_data(self, data: DataArray) -> DataSet: + def inverse_transform_data(self, X: DataArray) -> DataSet: """Reshape the 2D data (sample x feature) back into its original shape.""" - data_ds: DataSet = self._unstack_data(data) - return data_ds + X_ds: DataSet = self._unstack_data(X) + return X_ds - def inverse_transform_components(self, data: DataArray) -> DataSet: + def inverse_transform_components(self, X: DataArray) -> DataSet: """Reshape the 2D components (sample x feature) back into its original shape.""" - data_ds: DataSet = self._unstack_components(data) - return data_ds + X_ds: DataSet = self._unstack_components(X) + return X_ds - def inverse_transform_scores(self, data: DataArray) -> DataArray: + def inverse_transform_scores(self, X: DataArray) -> DataArray: """Reshape the 2D scores (sample x feature) back into its original shape.""" - data = self._unstack_scores(data) - return data + X = self._unstack_scores(X) + return X -class DataListStacker(DataArrayStacker): - """Converts a list of DataArrays of any dimensionality into a 2D structure. +class StackerFactory: + """Factory class for creating stackers.""" - This operation generates a reshaped DataArray with two distinct dimensions: 'sample' and 'feature'. - - At a minimum, the `sample` dimension must be present in all DataArrays. The `feature` dimension can be different - for each DataArray and must be specified as a list of dimensions. - - """ - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.stackers = [] - - def fit( - self, - X: DataList, - sample_dims: Dims, - feature_dims: DimsList, - y=None, - ) -> Self: - """Fit the stacker. + def __init__(self): + pass - Parameters - ---------- - X : DataArray - The data to be reshaped. - - Returns - ------- - self : DataArrayStacker - The fitted stacker. - - """ - - # Check input - if not isinstance(feature_dims, list): - raise TypeError( - "feature dims must be a list of the feature dimensions of each DataArray" - ) - - sample_dims = convert_to_dim_type(sample_dims) - feature_dims = [convert_to_dim_type(fdims) for fdims in feature_dims] - - if len(X) != len(feature_dims): - err_message = ( - "Number of data arrays and feature dimensions must be the same. " - ) - err_message += ( - f"Got {len(X)} data arrays and {len(feature_dims)} feature dimensions" - ) - raise ValueError(err_message) - - # Set in/out dimensions - self.dims_in = [data.dims for data in X] - self.dims_out = tuple((self.sample_name, self.feature_name)) - self.dims_mapping = { - self.sample_name: sample_dims, - self.feature_name: feature_dims, - } - - # Set in/out coordinates - self.coords_in = [{dim: data.coords[dim] for dim in data.dims} for data in X] - - # Fit stacker for each DataArray - for data, fdims in zip(X, feature_dims): - stacker = DataArrayStacker( - sample_name=self.sample_name, feature_name=self.feature_name - ) - stacker.fit(data, sample_dims=sample_dims, feature_dims=fdims) - self.stackers.append(stacker) - - return self - - def transform(self, X: DataList) -> DataArray: - """Reshape DataArray to 2D. - - Parameters - ---------- - X : DataList - The data to be reshaped. 
- - Returns - ------- - DataArray - The reshaped data. - - Raises - ------ - ValueError - If the data to be transformed has different dimensions than the data used to fit the stacker. - ValueError - If the data to be transformed has different coordinates than the data used to fit the stacker. - - """ - # Test whether the input list has same length as the number of stackers - if len(X) != len(self.stackers): - raise ValueError( - f"Invalid input. Number of DataArrays ({len(X)}) does not match the number of fitted DataArrays ({len(self.stackers)})." - ) - - stacked_data_list: List[DataArray] = [] - idx_coords_size = [] - dummy_feature_coords = [] - - # Stack individual DataArrays - for stacker, data in zip(self.stackers, X): - data_stacked = stacker.transform(data) - idx_coords_size.append(data_stacked.coords[self.feature_name].size) - stacked_data_list.append(data_stacked) - - # Create dummy feature coordinates for each DataArray - idx_range = np.cumsum([0] + idx_coords_size) - for i in range(len(idx_range) - 1): - dummy_feature_coords.append(np.arange(idx_range[i], idx_range[i + 1])) - - # Replace original feature coordiantes with dummy coordinates - for i, data in enumerate(stacked_data_list): - data = data.drop_vars(self.feature_name) - stacked_data_list[i] = data.assign_coords( - {self.feature_name: dummy_feature_coords[i]} - ) - - self._dummy_feature_coords = dummy_feature_coords - - stacked_data: DataArray = xr.concat(stacked_data_list, dim=self.feature_name) - - self.coords_out = { - self.sample_name: stacked_data.coords[self.sample_name], - self.feature_name: stacked_data.coords[self.feature_name], - } - return stacked_data - - def fit_transform( - self, - X: DataList, - sample_dims: Dims, - feature_dims: DimsList, - y=None, - ) -> DataArray: - return self.fit(X, sample_dims, feature_dims, y).transform(X) - - def _split_dataarray_into_list(self, data: DataArray) -> DataList: - feature_name = self.feature_name - data_list: DataList = [] - - for stacker, features in zip(self.stackers, self._dummy_feature_coords): - # Select the features corresponding to the current DataArray - sub_selection = data.sel({feature_name: features}) - # Replace dummy feature coordinates with original feature coordinates - sub_selection = sub_selection.assign_coords( - {feature_name: stacker.coords_out[feature_name]} - ) - - # In case of MultiIndex we have to set the index to the feature dimension again - if isinstance(sub_selection.indexes[feature_name], pd.MultiIndex): - sub_selection = sub_selection.set_index( - {feature_name: stacker.dims_mapping[feature_name]} - ) - else: - # NOTE: This is a workaround for the case where the feature dimension is a tuple of length 1 - # the problem is described here: https://github.com/pydata/xarray/discussions/7958 - sub_selection = sub_selection.rename( - {feature_name: stacker.dims_mapping[feature_name][0]} - ) - data_list.append(sub_selection) - - return data_list - - def inverse_transform_data(self, data: DataArray) -> DataList: - """Reshape the 2D data (sample x feature) back into its original shape.""" - data_split: DataList = self._split_dataarray_into_list(data) - data_transformed = [] - for stacker, data in zip(self.stackers, data_split): - # Inverse transform the data using the corresponding stacker - data_transformed.append(stacker.inverse_transform_data(data)) - - return data_transformed - - def inverse_transform_components(self, data: DataArray) -> DataList: - """Reshape the 2D components (sample x feature) back into its original shape.""" - data_split: 
DataList = self._split_dataarray_into_list(data) - - data_transformed = [] - for stacker, data in zip(self.stackers, data_split): - # Inverse transform the data using the corresponding stacker - data_transformed.append(stacker.inverse_transform_components(data)) - - return data_transformed - - def inverse_transform_scores(self, data: DataArray) -> DataArray: - """Reshape the 2D scores (sample x mode) back into its original shape.""" - return self.stackers[0].inverse_transform_scores(data) + @staticmethod + def create(data: Data) -> Type[DataArrayStacker] | Type[DataSetStacker]: + """Create a stacker for the given data.""" + if isinstance(data, xr.DataArray): + return DataArrayStacker + elif isinstance(data, xr.Dataset): + return DataSetStacker + else: + raise TypeError(f"Invalid data type {type(data)}.") diff --git a/xeofs/preprocessing/transformer.py b/xeofs/preprocessing/transformer.py new file mode 100644 index 0000000..9ccc74a --- /dev/null +++ b/xeofs/preprocessing/transformer.py @@ -0,0 +1,67 @@ +from typing import Self, Optional +from abc import abstractmethod + +from sklearn.base import BaseEstimator, TransformerMixin + +from ..utils.data_types import Dims, DataVar, DataArray, DataSet, Data, DataVarBound + + +class Transformer(BaseEstimator, TransformerMixin): + """ + Abstract base class to transform an xarray DataArray/Dataset. + + """ + + def __init__( + self, + sample_name: str = "sample", + feature_name: str = "feature", + ): + self.sample_name = sample_name + self.feature_name = feature_name + + @abstractmethod + def fit( + self, + X: Data, + sample_dims: Optional[Dims] = None, + feature_dims: Optional[Dims] = None, + **kwargs + ) -> Self: + """Fit transformer to data. + + Parameters: + ------------- + X: xr.DataArray | xr.Dataset + Input data. + sample_dims: Sequence[Hashable], optional + Sample dimensions. + feature_dims: Sequence[Hashable], optional + Feature dimensions. 
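The new Transformer base class pins down the interface that Scaler, the stackers, and the other preprocessors share; note also that StackerFactory.create returns the stacker class itself (not an instance), so callers instantiate it. A rough no-op subclass illustrating the minimal contract (invented, not part of the diff):

    from xeofs.preprocessing.transformer import Transformer  # assumed import path

    class IdentityTransformer(Transformer):
        """No-op transformer covering all abstract methods."""

        def fit(self, X, sample_dims=None, feature_dims=None, **kwargs):
            return self

        def transform(self, X):
            return X

        def inverse_transform_data(self, X):
            return X

        def inverse_transform_components(self, X):
            return X

        def inverse_transform_scores(self, X):
            return X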
+ """ + pass + + @abstractmethod + def transform(self, X: Data) -> Data: + return X + + def fit_transform( + self, + X: Data, + sample_dims: Optional[Dims] = None, + feature_dims: Optional[Dims] = None, + **kwargs + ) -> Data: + return self.fit(X, sample_dims, feature_dims, **kwargs).transform(X) + + @abstractmethod + def inverse_transform_data(self, X: Data) -> Data: + return X + + @abstractmethod + def inverse_transform_components(self, X: Data) -> Data: + return X + + @abstractmethod + def inverse_transform_scores(self, X: DataArray) -> DataArray: + return X diff --git a/xeofs/utils/data_types.py b/xeofs/utils/data_types.py index 84b3583..4e77a82 100644 --- a/xeofs/utils/data_types.py +++ b/xeofs/utils/data_types.py @@ -7,17 +7,25 @@ Hashable, ) -import xarray as xr import dask.array as da +import xarray as xr +from xarray.core import dataarray as xr_dataarray +from xarray.core import dataset as xr_dataset + +DataArray: TypeAlias = xr_dataarray.DataArray +DataSet: TypeAlias = xr_dataset.Dataset +Data: TypeAlias = DataArray | DataSet +DataVar = TypeVar("DataVar", DataArray, DataSet) +DataVarBound = TypeVar("DataVarBound", bound=Data) + +DataArrayList: TypeAlias = List[DataArray] +DataSetList: TypeAlias = List[DataSet] +DataList: TypeAlias = List[Data] +DataVarList: TypeAlias = List[DataVar] + -DataArray: TypeAlias = xr.DataArray -DataSet: TypeAlias = xr.Dataset -DataList: TypeAlias = List[xr.DataArray] DaskArray: TypeAlias = da.Array # type: ignore DataObject: TypeAlias = DataArray | DataSet | DataList -DataX2 = TypeVar("DataX2", DataArray, DataSet) -DataX3 = TypeVar("DataX3", DataArray, DataSet, DataList) - Dims: TypeAlias = Sequence[Hashable] DimsTuple: TypeAlias = Tuple[Dims, ...] diff --git a/xeofs/utils/xarray_utils.py b/xeofs/utils/xarray_utils.py index 212c9d9..5610b05 100644 --- a/xeofs/utils/xarray_utils.py +++ b/xeofs/utils/xarray_utils.py @@ -1,4 +1,4 @@ -from typing import Sequence, Hashable, Tuple +from typing import Sequence, Hashable, Tuple, TypeVar, List, Any import numpy as np import xarray as xr @@ -7,59 +7,112 @@ from .data_types import ( Dims, DimsList, + Data, + DataVar, DataArray, DataSet, DataList, ) from .constants import VALID_LATITUDE_NAMES +T = TypeVar("T") -def compute_sqrt_cos_lat_weights( - data: DataArray | DataSet, dim: Hashable | Sequence[Hashable] -) -> DataArray: + +def unwrap_singleton_list(input_list: List[T]) -> T | List[T]: + if len(input_list) == 1: + return input_list[0] + else: + return input_list + + +def process_parameter( + parameter_name: str, parameter, default, n_data: int +) -> List[Any]: + if parameter is None: + return convert_to_list(default) * n_data + elif isinstance(parameter, (list, tuple)): + _check_parameter_number(parameter_name, parameter, n_data) + return convert_to_list(parameter) + else: + return convert_to_list(parameter) * n_data + + +def convert_to_list(data: T | List[T] | Tuple[T]) -> List[T]: + if isinstance(data, list): + return data + elif isinstance(data, tuple): + return list(data) + else: + return list([data]) + + +def _check_parameter_number(parameter_name: str, parameter, n_data: int): + if len(parameter) != n_data: + raise ValueError( + f"number of data objects passed should match number of parameter {parameter_name}" + f"len(data objects)={n_data} and " + f"len({parameter_name})={len(parameter)}" + ) + + +def feature_ones_like(data: DataVar, feature_dims: Dims) -> DataVar: + if isinstance(data, xr.DataArray): + valid_dims = set(data.dims) & set(feature_dims) + feature_coords = {dim: data[dim] for dim in 
valid_dims} + shape = tuple(coords.size for coords in feature_coords.values()) + return xr.DataArray( + np.ones(shape, dtype=float), + dims=tuple(valid_dims), + coords=feature_coords, + ) + elif isinstance(data, xr.Dataset): + return xr.Dataset( + { + var: feature_ones_like(da, feature_dims) + for var, da in data.data_vars.items() + } + ) + else: + raise TypeError( + "Invalid input type: {:}. Expected one of the following: DataArray or Dataset".format( + type(data).__name__ + ) + ) + + +def compute_sqrt_cos_lat_weights(data: DataVar, feature_dims: Dims) -> DataVar: """Compute the square root of cosine of latitude weights. Parameters ---------- - data : xr.DataArray + data : xr.DataArray | xr.Dataset Data to be scaled. feature_dims : sequence of hashable Dimensions along which the data is considered to be a feature. Returns ------- - xr.DataArray + xr.DataArray | xr.Dataset Square root of cosine of latitude weights. """ - if isinstance(data, (xr.DataArray, xr.Dataset)): - dim = convert_to_dim_type(dim) - # Find latitude coordinate - is_lat_coord = np.isin(np.array(dim), VALID_LATITUDE_NAMES) + if isinstance(data, xr.DataArray): + lat_dim = extract_latitude_dimension(feature_dims) - # Select latitude coordinate and compute coslat weights - lat_coord = np.array(dim)[is_lat_coord] - - if len(lat_coord) > 1: - raise ValueError( - f"{lat_coord} are ambiguous latitude coordinates. Only ONE of the following is allowed for computing coslat weights: {VALID_LATITUDE_NAMES}" - ) - - if len(lat_coord) == 1: - latitudes: DataArray = data.coords[lat_coord[0]] - assert isinstance(latitudes, xr.DataArray) - weights = sqrt_cos_lat_weights(latitudes) - # Features that cannot be associated to a latitude receive a weight of 1 - weights = weights.where(weights.notnull(), 1) - else: - raise ValueError( - "No latitude coordinate was found to compute coslat weights. Must be one of the following: {:}".format( - VALID_LATITUDE_NAMES - ) - ) + latitudes = data.coords[lat_dim] + weights = sqrt_cos_lat_weights(latitudes) + # NOTE: the earlier fallback that assigned a weight of 1 to features without a latitude is disabled here: + # weights = weights.where(weights.notnull(), 1) weights.name = "coslat_weights" return weights + elif isinstance(data, xr.Dataset): + return xr.Dataset( + { + var: compute_sqrt_cos_lat_weights(da, feature_dims) + for var, da in data.data_vars.items() + } + ) else: raise TypeError( @@ -69,10 +122,28 @@ def compute_sqrt_cos_lat_weights( ) +def extract_latitude_dimension(feature_dims: Dims) -> Hashable: + # Find latitude coordinate + lat_dim = set(feature_dims) & set(VALID_LATITUDE_NAMES) + + if len(lat_dim) == 0: + raise ValueError( + "No latitude coordinate was found to compute coslat weights. Must be one of the following: {:}".format( + VALID_LATITUDE_NAMES + ) + ) + elif len(lat_dim) == 1: + return lat_dim.pop() + else: + raise ValueError( + f"Found ambiguous latitude dimensions: {lat_dim}. Only ONE of the following is allowed for computing coslat weights: {VALID_LATITUDE_NAMES}" + ) + + def get_dims( - data: DataArray | DataSet | DataList, + data: DataList, sample_dims: Hashable | Sequence[Hashable], -) -> Tuple[Dims, Dims | DimsList]: +) -> Tuple[Dims, DimsList]: """Extracts the dimensions of each DataArray or Dataset in a list that are not included in the sample dimensions.
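The area weighting used throughout reduces to w(lat) = sqrt(cos(lat)); a quick numeric check with NumPy alone (values rounded):

    import numpy as np

    lats = np.array([0.0, 30.0, 60.0])
    weights = np.sqrt(np.cos(np.deg2rad(lats)))
    # -> [1.0, 0.9306, 0.7071]: equatorial features weigh most, polar ones least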
Parameters: ---------- data: list of xr.DataArray or xr.Dataset Input data. sample_dims: Hashable or Sequence[Hashable] Sample dimensions. Returns: ------- Tuple[Dims, DimsList] Tuple of sample dimensions and feature dimensions. """ # Check for invalid types - if isinstance(data, (xr.DataArray, xr.Dataset)): - sample_dims = convert_to_dim_type(sample_dims) - feature_dims: Dims = _get_feature_dims(data, sample_dims) - return sample_dims, feature_dims - - elif isinstance(data, list): + if isinstance(data, list): sample_dims = convert_to_dim_type(sample_dims) feature_dims: DimsList = [_get_feature_dims(da, sample_dims) for da in data] return sample_dims, feature_dims else: err_message = f"Invalid input type: {type(data).__name__}. Expected one of " - err_message += f"of the following: DataArray, Dataset or list of DataArrays." + err_message += "the following: list of DataArrays or Datasets." raise TypeError(err_message) diff --git a/xeofs/validation/bootstrapper.py b/xeofs/validation/bootstrapper.py index 6404f49..b8c155c 100644 --- a/xeofs/validation/bootstrapper.py +++ b/xeofs/validation/bootstrapper.py @@ -78,9 +78,7 @@ def fit(self, model: EOF): bst_data = input_data.isel(sample=idx_rnd) # Perform EOF analysis with the subsampled data # No scaling because we use the pre-scaled data from the model - bst_model = EOF( - n_modes=n_modes, standardize=False, use_coslat=False, use_weights=False - ) + bst_model = EOF(n_modes=n_modes, standardize=False, use_coslat=False) bst_model.fit(bst_data, dim="sample") # Save results expvar = bst_model.data["explained_variance"] From 183779a188e68550f886d08648075bc9d9ba106b Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Mon, 16 Oct 2023 18:08:16 +0200 Subject: [PATCH 24/43] refactor(BaseModel): move input check to utils --- xeofs/models/_base_cross_model.py | 16 ++++++++++++++-- xeofs/models/_base_model.py | 19 ++++--------------- xeofs/utils/sanity_checks.py | 13 +++++++++++++ 3 files changed, 31 insertions(+), 17 deletions(-) diff --git a/xeofs/models/_base_cross_model.py b/xeofs/models/_base_cross_model.py index d5d1f93..cec95fd 100644 --- a/xeofs/models/_base_cross_model.py +++ b/xeofs/models/_base_cross_model.py @@ -6,6 +6,8 @@ from ..preprocessing.preprocessor import Preprocessor from ..data_container import DataContainer from ..utils.data_types import DataObject, DataArray +from ..utils.xarray_utils import convert_to_dim_type +from ..utils.sanity_checks import validate_input_type from .._version import __version__ @@ -117,10 +119,18 @@ def fit( Weights to be applied to the right input data.
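A sketch of the calling convention after this change, with MCA as a stand-in for the cross-model subclasses and invented toy data; the constructor argument is an assumption, only fit(data1, data2, dim) follows the signature above:

    import numpy as np
    import xarray as xr
    from xeofs.models import MCA  # stand-in cross-decomposition model

    sst = xr.DataArray(np.random.rand(20, 6), dims=("time", "lon"))
    slp = xr.DataArray(np.random.rand(20, 8), dims=("time", "station"))

    model = MCA(n_modes=3)
    model.fit(sst, slp, dim="time")  # dim is normalized via convert_to_dim_type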
""" + validate_input_type(data1) + validate_input_type(data2) + if weights1 is not None: + validate_input_type(weights1) + if weights2 is not None: + validate_input_type(weights2) + + self.sample_dims = convert_to_dim_type(dim) # Preprocess data1 - data1 = self.preprocessor1.fit_transform(data1, dim, weights1) + data1 = self.preprocessor1.fit_transform(data1, self.sample_dims, weights1) # Preprocess data2 - data2 = self.preprocessor2.fit_transform(data2, dim, weights2) + data2 = self.preprocessor2.fit_transform(data2, self.sample_dims, weights2) return self._fit_algorithm(data1, data2) @@ -136,9 +146,11 @@ def transform( raise ValueError("Either data1 or data2 must be provided.") if data1 is not None: + validate_input_type(data1) # Preprocess data1 data1 = self.preprocessor1.transform(data1) if data2 is not None: + validate_input_type(data2) # Preprocess data2 data2 = self.preprocessor2.transform(data2) diff --git a/xeofs/models/_base_model.py b/xeofs/models/_base_model.py index 0848e6e..be266ee 100644 --- a/xeofs/models/_base_model.py +++ b/xeofs/models/_base_model.py @@ -9,6 +9,7 @@ from ..preprocessing.preprocessor import Preprocessor from ..data_container import DataContainer from ..utils.data_types import DataObject, Data, DataArray, DataSet, DataList, Dims +from ..utils.sanity_checks import validate_input_type from ..utils.xarray_utils import ( convert_to_dim_type, get_dims, @@ -91,18 +92,6 @@ def __init__( # Initialize the data container that stores the results self.data = DataContainer() - def _validate_type(self, data) -> None: - err_msg = "Invalid input type: {:}. Expected one of the following: DataArray, Dataset or list of these.".format( - type(data).__name__ - ) - if isinstance(data, (xr.DataArray, xr.Dataset)): - pass - elif isinstance(data, (list, tuple)): - if not all(isinstance(d, (xr.DataArray, xr.Dataset)) for d in data): - raise TypeError(err_msg) - else: - raise TypeError(err_msg) - def fit( self, X: List[Data] | Data, @@ -124,9 +113,9 @@ def fit( """ # Check for invalid types - self._validate_type(X) + validate_input_type(X) if weights is not None: - self._validate_type(weights) + validate_input_type(weights) self.sample_dims = convert_to_dim_type(dim) @@ -168,7 +157,7 @@ def transform(self, data: List[Data] | Data) -> DataArray: Projections of the data onto the components. """ - self._validate_type(data) + validate_input_type(data) data2D = self.preprocessor.transform(data) data2D = self._transform_algorithm(data2D) diff --git a/xeofs/utils/sanity_checks.py b/xeofs/utils/sanity_checks.py index 280d14f..ddc5bb9 100644 --- a/xeofs/utils/sanity_checks.py +++ b/xeofs/utils/sanity_checks.py @@ -80,3 +80,16 @@ def convert_to_dim_type(arg: Any) -> Dims: return tuple(arg) else: return (arg,) + + +def validate_input_type(X) -> None: + err_msg = "Invalid input type: {:}. 
Expected one of the following: DataArray, Dataset or list of these.".format( + type(X).__name__ + ) + if isinstance(X, (xr.DataArray, xr.Dataset)): + pass + elif isinstance(X, (list, tuple)): + if not all(isinstance(x, (xr.DataArray, xr.Dataset)) for x in X): + raise TypeError(err_msg) + else: + raise TypeError(err_msg) From 2307169a58d42357cca9edb2a60de82a17c95718 Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Mon, 16 Oct 2023 18:09:40 +0200 Subject: [PATCH 25/43] refactor(GenericListTransformer): remove inherit --- xeofs/preprocessing/list_processor.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/xeofs/preprocessing/list_processor.py b/xeofs/preprocessing/list_processor.py index e683d37..1d413df 100644 --- a/xeofs/preprocessing/list_processor.py +++ b/xeofs/preprocessing/list_processor.py @@ -1,12 +1,10 @@ from typing import List, Self, TypeVar, Generic, Type, Dict, Any -from .transformer import Transformer - from .dimension_renamer import DimensionRenamer from .scaler import Scaler from .sanitizer import Sanitizer from .multi_index_converter import MultiIndexConverter -from .stacker import DataArrayStacker, DataSetStacker, Stacker +from .stacker import Stacker from ..utils.data_types import ( Data, DataVar, @@ -23,7 +21,7 @@ ) -class GenericListTransformer(Transformer, Generic[T]): +class GenericListTransformer(Generic[T]): """Apply a Transformer to each of the elements of a list. Parameters @@ -60,14 +58,14 @@ def fit( Keyword arguments for the transformer that should be iterated over. """ - self.sample_dims = sample_dims - self.feature_dims = feature_dims - self.iter_kwargs = iter_kwargs + self._sample_dims = sample_dims + self._feature_dims = feature_dims + self._iter_kwargs = iter_kwargs for i, x in enumerate(X): # Add transformer specific keyword arguments # For iterable kwargs, use the i-th element of the iterable - kwargs = {k: v[i] for k, v in self.iter_kwargs.items()} + kwargs = {k: v[i] for k, v in self._iter_kwargs.items()} proc: T = self.transformer_class(**self.init_kwargs) proc.fit(x, sample_dims, feature_dims[i], **kwargs) self.transformers.append(proc) From 6f8a9cf192e7e660aa4bec112b624ceccea8df93 Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Mon, 16 Oct 2023 20:24:03 +0200 Subject: [PATCH 26/43] style(decomposer): use structural pattern matching --- xeofs/models/decomposer.py | 130 ++++++++++++++++++------------------- 1 file changed, 63 insertions(+), 67 deletions(-) diff --git a/xeofs/models/decomposer.py b/xeofs/models/decomposer.py index 3a0df3b..05ebb39 100644 --- a/xeofs/models/decomposer.py +++ b/xeofs/models/decomposer.py @@ -65,35 +65,24 @@ def fit(self, X, dims=("sample", "feature")): is_small_data = max(n_coords1, n_coords2) < 500 - if self.solver == "auto": - use_exact = ( - True if is_small_data and self.n_modes > int(0.8 * rank) else False - ) - elif self.solver == "full": - use_exact = True - elif self.solver == "randomized": - use_exact = False - else: - raise ValueError( - f"Unrecognized solver '{self.solver}'. " - "Valid options are 'auto', 'full', and 'randomized'." - ) + match self.solver: + case "auto": + use_exact = ( + True if is_small_data and self.n_modes > int(0.8 * rank) else False + ) + case "full": + use_exact = True + case "randomized": + use_exact = False + case _: + raise ValueError( + f"Unrecognized solver '{self.solver}'. " + "Valid options are 'auto', 'full', and 'randomized'." 
+ ) # Use exact SVD for small data sets if use_exact: - U, s, VT = xr.apply_ufunc( - np.linalg.svd, - X, - kwargs=self.solver_kwargs, - input_core_dims=[dims], - output_core_dims=[ - [dims[0], "mode"], - ["mode"], - ["mode", dims[1]], - ], - dask="allowed", - vectorize=False, - ) + U, s, VT = self._svd(X, dims, np.linalg.svd, self.solver_kwargs) U = U[:, : self.n_modes] s = s[: self.n_modes] VT = VT[: self.n_modes, :] @@ -101,39 +90,13 @@ def fit(self, X, dims=("sample", "feature")): # Use randomized SVD for large, real-valued data sets elif (not use_complex) and (not use_dask): self.solver_kwargs.update({"n_components": self.n_modes}) - - U, s, VT = xr.apply_ufunc( - randomized_svd, - X, - kwargs=self.solver_kwargs, - input_core_dims=[dims], - output_core_dims=[ - [dims[0], "mode"], - ["mode"], - ["mode", dims[1]], - ], - ) + U, s, VT = self._svd(X, dims, randomized_svd, self.solver_kwargs) # Use scipy sparse SVD for large, complex-valued data sets elif use_complex and (not use_dask): # Scipy sparse version - self.solver_kwargs.update( - { - "k": self.n_modes, - "solver": "lobpcg", - } - ) - U, s, VT = xr.apply_ufunc( - complex_svd, - X, - kwargs=self.solver_kwargs, - input_core_dims=[dims], - output_core_dims=[ - [dims[0], "mode"], - ["mode"], - ["mode", dims[1]], - ], - ) + self.solver_kwargs.update({"k": self.n_modes, "solver": "lobpcg"}) + U, s, VT = self._svd(X, dims, complex_svd, self.solver_kwargs) idx_sort = np.argsort(s)[::-1] U = U[:, idx_sort] s = s[idx_sort] @@ -142,18 +105,7 @@ def fit(self, X, dims=("sample", "feature")): # Use dask SVD for large, real-valued, delayed data sets elif (not use_complex) and use_dask: self.solver_kwargs.update({"k": self.n_modes}) - U, s, VT = xr.apply_ufunc( - dask_svd, - X, - kwargs=self.solver_kwargs, - input_core_dims=[dims], - output_core_dims=[ - [dims[0], "mode"], - ["mode"], - ["mode", dims[1]], - ], - dask="allowed", - ) + U, s, VT = self._svd(X, dims, dask_svd, self.solver_kwargs) else: err_msg = ( "Complex data together with dask is currently not implemented. See dask issue 7639 " @@ -184,3 +136,47 @@ def fit(self, X, dims=("sample", "feature")): self.U_ = U self.s_ = s self.V_ = VT.conj().transpose(dims[1], "mode") + + def _svd(self, X, dims, func, kwargs): + """Performs SVD on the data + + Parameters + ---------- + X : DataArray + A 2-dimensional data object to be decomposed. + dims : tuple of str + Dimensions of the data object. + func : Callable + Method to perform SVD. + kwargs : dict + Additional keyword arguments passed to the SVD solver. + + Returns + ------- + U : DataArray + Left singular vectors. + s : DataArray + Singular values. + VT : DataArray + Right singular vectors. + """ + try: + U, s, VT = xr.apply_ufunc( + func, + X, + kwargs=kwargs, + input_core_dims=[dims], + output_core_dims=[ + [dims[0], "mode"], + ["mode"], + ["mode", dims[1]], + ], + dask="allowed", + ) + return U, s, VT + except ValueError: + raise ValueError( + "SVD failed. This may be due to isolated NaN values in the data. Please consider the following steps:\n" + "1. Check for and remove any isolated NaNs in your dataset.\n" + "2. If the error persists, please raise an issue at https://github.com/nicrie/xeofs/issues." 
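The branches in Decomposer.fit amount to a small decision table; a condensed sketch of the same dispatch logic (pure Python 3.10+ for match/case; the function name and return strings are invented labels, not library calls):

    def pick_svd_backend(solver, is_small, n_modes, rank, is_complex, is_dask):
        match solver:
            case "auto":
                use_exact = is_small and n_modes > int(0.8 * rank)
            case "full":
                use_exact = True
            case "randomized":
                use_exact = False
            case _:
                raise ValueError(f"Unrecognized solver {solver!r}")

        if use_exact:
            return "numpy.linalg.svd"           # exact SVD, small data
        if not is_complex and not is_dask:
            return "sklearn randomized_svd"     # large, real-valued data
        if is_complex and not is_dask:
            return "scipy sparse SVD (lobpcg)"  # large, complex-valued data
        if not is_complex and is_dask:
            return "dask svd_compressed"        # large, delayed data
        raise NotImplementedError("complex-valued dask arrays are not supported")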
From 731eec2825dbe091634cf41a52f38187cce3109c Mon Sep 17 00:00:00 2001
From: Niclas Rieger
Date: Mon, 16 Oct 2023 20:42:57 +0200
Subject: [PATCH 27/43] fix: score coordinates in T-mode EOF analysis

---
 tests/models/test_eof.py         | 101 +++++++++++++++++++++++++++++++
 xeofs/preprocessing/sanitizer.py |  36 ++++++++---
 2 files changed, 130 insertions(+), 7 deletions(-)

diff --git a/tests/models/test_eof.py b/tests/models/test_eof.py
index 7d12b04..1630fa6 100644
--- a/tests/models/test_eof.py
+++ b/tests/models/test_eof.py
@@ -100,6 +100,21 @@ def test_explained_variance_ratio(dim, mock_data_array):
     ), "The sum of the explained variance ratio must be <= 1"


+@pytest.mark.parametrize(
+    "dim",
+    [
+        (("time",)),
+        (("lat", "lon")),
+        (("lon", "lat")),
+    ],
+)
+def test_isolated_nans(dim, mock_data_array_isolated_nans):
+    """Tests that the EOF model raises an error when fitted on data with isolated NaNs"""
+    eof = EOF()
+    with pytest.raises(ValueError):
+        eof.fit(mock_data_array_isolated_nans, dim)
+
+
 @pytest.mark.parametrize(
     "dim",
     [
@@ -122,6 +137,50 @@ def test_components(dim, mock_data_array):
     ), "Components does not have the right feature dimensions"


+@pytest.mark.parametrize(
+    "dim",
+    [
+        (("time",)),
+        (("lat", "lon")),
+        (("lon", "lat")),
+    ],
+)
+def test_components_fulldim_nans(dim, mock_data_array_full_dimensional_nans):
+    """Tests the components method of the EOF class"""
+    eof = EOF()
+    eof.fit(mock_data_array_full_dimensional_nans, dim)
+
+    # Test components method
+    components = eof.components()
+    feature_dims = tuple(set(mock_data_array_full_dimensional_nans.dims) - set(dim))
+    assert isinstance(components, xr.DataArray), "Components is not a DataArray"
+    assert set(components.dims) == set(
+        ("mode",) + feature_dims
+    ), "Components does not have the right feature dimensions"
+
+
+@pytest.mark.parametrize(
+    "dim",
+    [
+        (("time",)),
+        (("lat", "lon")),
+        (("lon", "lat")),
+    ],
+)
+def test_components_boundary_nans(dim, mock_data_array_boundary_nans):
+    """Tests the components method of the EOF class"""
+    eof = EOF()
+    eof.fit(mock_data_array_boundary_nans, dim)
+
+    # Test components method
+    components = eof.components()
+    feature_dims = tuple(set(mock_data_array_boundary_nans.dims) - set(dim))
+    assert isinstance(components, xr.DataArray), "Components is not a DataArray"
+    assert set(components.dims) == set(
+        ("mode",) + feature_dims
+    ), "Components does not have the right feature dimensions"
+
+
 @pytest.mark.parametrize(
     "dim",
     [
@@ -197,6 +256,48 @@ def test_scores(dim, mock_data_array):
     ), "Scores does not have the right dimensions"


+@pytest.mark.parametrize(
+    "dim",
+    [
+        (("time",)),
+        (("lat", "lon")),
+        (("lon", "lat")),
+    ],
+)
+def test_scores_fulldim_nans(dim, mock_data_array_full_dimensional_nans):
+    """Tests the scores method of the EOF class"""
+    eof = EOF()
+    eof.fit(mock_data_array_full_dimensional_nans, dim)
+
+    # Test scores method
+    scores = eof.scores()
+    assert isinstance(scores, xr.DataArray), "Scores is not a DataArray"
+    assert set(scores.dims) == set(
+        (dim + ("mode",))
+    ), "Scores does not have the right dimensions"
+
+
+@pytest.mark.parametrize(
+    "dim",
+    [
+        (("time",)),
+        (("lat", "lon")),
+        (("lon", "lat")),
+    ],
+)
+def test_scores_boundary_nans(dim, mock_data_array_boundary_nans):
+    """Tests the scores method of the EOF class"""
+    eof = EOF()
+    eof.fit(mock_data_array_boundary_nans, dim)
+
+    # Test scores method
+    scores = eof.scores()
+    assert isinstance(scores, xr.DataArray), "Scores is not a DataArray"
+    assert set(scores.dims) == set(
+        (dim + ("mode",))
+    ), "Scores does not have the right dimensions"
+
+
 @pytest.mark.parametrize(
     "dim",
    [
diff --git a/xeofs/preprocessing/sanitizer.py b/xeofs/preprocessing/sanitizer.py
index d72c1cb..8b9ad95 100644
--- a/xeofs/preprocessing/sanitizer.py
+++ b/xeofs/preprocessing/sanitizer.py
@@ -47,9 +47,20 @@ def fit(
         self._check_input_dims(X)

         self.feature_coords = X.coords[self.feature_name]
+        self.sample_coords = X.coords[self.sample_name]

         # Identify NaN locations
-        self.is_valid_feature = X.notnull().all(self.sample_name).compute()
+        self.is_valid_feature = ~X.isnull().all(self.sample_name).compute()
+        # NOTE: We must also consider the presence of valid samples. For
+        # instance, when PCA is applied with "longitude" and "latitude" as
+        # sample dimensions, certain grid cells may be masked (e.g., due to
+        # ocean areas). To ensure correct reconstruction of scores,
+        # we need to identify the sample positions of NaNs in the fitted
+        # dataset. Keep in mind that when transforming new data,
+        # we have to recheck for valid samples, as the new dataset may have
+        # different samples.
+        X_valid = X.sel({self.feature_name: self.is_valid_feature})
+        self.is_valid_sample = ~X_valid.isnull().all(self.feature_name).compute()

         return self

@@ -63,28 +74,39 @@ def transform(self, X: DataArray) -> DataArray:
         # Check if input has the correct coordinates
         self._check_input_coords(X)

-        # Remove NaN entries
+        # Remove NaN entries; only consider full-dimensional NaNs
+        # We already know valid features from the fitted dataset
         X = X.isel({self.feature_name: self.is_valid_feature})
+        # However, we need to recheck for valid samples, as the new dataset may
+        # have different samples
+        is_valid_sample = ~X.isnull().all(self.feature_name).compute()
+        X = X.isel({self.sample_name: is_valid_sample})

         return X

     def inverse_transform_data(self, X: DataArray) -> DataArray:
         # Reindex only if feature coordinates are different
-        is_same_coords = X.coords[self.feature_name].identical(self.feature_coords)
+        coords_are_equal = X.coords[self.feature_name].identical(self.feature_coords)

-        if is_same_coords:
+        if coords_are_equal:
             return X
         else:
             return X.reindex({self.feature_name: self.feature_coords.values})

     def inverse_transform_components(self, X: DataArray) -> DataArray:
         # Reindex only if feature coordinates are different
-        is_same_coords = X.coords[self.feature_name].identical(self.feature_coords)
+        coords_are_equal = X.coords[self.feature_name].identical(self.feature_coords)

-        if is_same_coords:
+        if coords_are_equal:
             return X
         else:
             return X.reindex({self.feature_name: self.feature_coords.values})

     def inverse_transform_scores(self, X: DataArray) -> DataArray:
-        return X
+        # Reindex only if sample coordinates are different
+        coords_are_equal = X.coords[self.sample_name].identical(self.sample_coords)
+
+        if coords_are_equal:
+            return X
+        else:
+            return X.reindex({self.sample_name: self.sample_coords.values})
"""Transforming the original data results in the model scores""" - model = EOF(n_modes=5, standardize=True, use_coslat=use_coslat) + model = EOF( + n_modes=5, + standardize=True, + use_coslat=use_coslat, + solver_kwargs={"random_state": 5}, + ) model.fit(mock_data_array, dim=dim) scores = model.scores() pseudo_scores = model.transform(mock_data_array) From d4b5a77bc91b7c13426d424dad72eb94a4f1c30f Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Mon, 16 Oct 2023 21:56:03 +0200 Subject: [PATCH 29/43] test: isolated NaNs are invalid test cases --- .../preprocessing/test_dataarray_sanitizer.py | 35 +------------------ .../test_preprocessor_dataarray.py | 5 +-- .../test_preprocessor_datalist.py | 3 +- .../test_preprocessor_dataset.py | 3 +- 4 files changed, 8 insertions(+), 38 deletions(-) diff --git a/tests/preprocessing/test_dataarray_sanitizer.py b/tests/preprocessing/test_dataarray_sanitizer.py index c125705..ac46af2 100644 --- a/tests/preprocessing/test_dataarray_sanitizer.py +++ b/tests/preprocessing/test_dataarray_sanitizer.py @@ -17,7 +17,7 @@ N_SAMPLE_DIMS = [1] N_FEATURE_DIMS = [1] INDEX_POLICY = ["index"] -NAN_POLICY = ["no_nan", "isolated", "fulldim"] +NAN_POLICY = ["no_nan", "fulldim"] DASK_POLICY = ["no_dask", "dask"] SEED = [0] @@ -30,25 +30,6 @@ for dask in DASK_POLICY ] -# ============================================================================= -# INVALID TEST CASES -# ============================================================================= -N_SAMPLE_DIMS = [2] -N_FEATURE_DIMS = [2] -INDEX_POLICY = ["index", "multiindex"] -NAN_POLICY = ["no_nan", "isolated", "fulldim"] -DASK_POLICY = ["no_dask", "dask"] -SEED = [0] - -INVALID_TEST_DATA = [ - (ns, nf, index, nan, dask) - for ns in N_SAMPLE_DIMS - for nf in N_FEATURE_DIMS - for index in INDEX_POLICY - for nan in NAN_POLICY - for dask in DASK_POLICY -] - # TESTS # ============================================================================= @@ -90,20 +71,6 @@ def test_fit_invalid_dimension_names(sample_name, feature_name, data_params): sanitizer.fit(data) -@pytest.mark.parametrize( - "synthetic_dataarray", - INVALID_TEST_DATA, - indirect=["synthetic_dataarray"], -) -def test_fit(synthetic_dataarray): - data = synthetic_dataarray - data = data.rename({"sample0": "sample", "feature0": "feature"}) - - sanitizer = Sanitizer() - with pytest.raises(ValueError): - sanitizer.fit(data) - - @pytest.mark.parametrize( "synthetic_dataarray", VALID_TEST_DATA, diff --git a/tests/preprocessing/test_preprocessor_dataarray.py b/tests/preprocessing/test_preprocessor_dataarray.py index 805e5f2..2873423 100644 --- a/tests/preprocessing/test_preprocessor_dataarray.py +++ b/tests/preprocessing/test_preprocessor_dataarray.py @@ -18,7 +18,7 @@ N_SAMPLE_DIMS = [1, 2] N_FEATURE_DIMS = [1, 2] INDEX_POLICY = ["index", "multiindex"] -NAN_POLICY = ["no_nan", "isolated", "fulldim"] +NAN_POLICY = ["no_nan", "fulldim"] DASK_POLICY = ["no_dask", "dask"] SEED = [0] @@ -92,7 +92,8 @@ def test_fit_transform_scalings(with_std, with_coslat, with_weights, mock_data_a "index_policy, nan_policy, dask_policy", [ ("index", "no_nan", "no_dask"), - ("multiindex", "isolated", "dask"), + ("multiindex", "no_nan", "dask"), + ("index", "fulldim", "no_dask"), ("multiindex", "fulldim", "dask"), ], ) diff --git a/tests/preprocessing/test_preprocessor_datalist.py b/tests/preprocessing/test_preprocessor_datalist.py index 8569e41..8602d08 100644 --- a/tests/preprocessing/test_preprocessor_datalist.py +++ b/tests/preprocessing/test_preprocessor_datalist.py @@ -93,7 +93,8 @@ 
From d4b5a77bc91b7c13426d424dad72eb94a4f1c30f Mon Sep 17 00:00:00 2001
From: Niclas Rieger
Date: Mon, 16 Oct 2023 21:56:03 +0200
Subject: [PATCH 29/43] test: isolated NaNs are invalid test cases

---
 .../preprocessing/test_dataarray_sanitizer.py | 35 +------------------
 .../test_preprocessor_dataarray.py            |  5 +--
 .../test_preprocessor_datalist.py             |  3 +-
 .../test_preprocessor_dataset.py              |  3 +-
 4 files changed, 8 insertions(+), 38 deletions(-)

diff --git a/tests/preprocessing/test_dataarray_sanitizer.py b/tests/preprocessing/test_dataarray_sanitizer.py
index c125705..ac46af2 100644
--- a/tests/preprocessing/test_dataarray_sanitizer.py
+++ b/tests/preprocessing/test_dataarray_sanitizer.py
@@ -17,7 +17,7 @@
 N_SAMPLE_DIMS = [1]
 N_FEATURE_DIMS = [1]
 INDEX_POLICY = ["index"]
-NAN_POLICY = ["no_nan", "isolated", "fulldim"]
+NAN_POLICY = ["no_nan", "fulldim"]
 DASK_POLICY = ["no_dask", "dask"]
 SEED = [0]

@@ -30,25 +30,6 @@
     for dask in DASK_POLICY
 ]

-# =============================================================================
-# INVALID TEST CASES
-# =============================================================================
-N_SAMPLE_DIMS = [2]
-N_FEATURE_DIMS = [2]
-INDEX_POLICY = ["index", "multiindex"]
-NAN_POLICY = ["no_nan", "isolated", "fulldim"]
-DASK_POLICY = ["no_dask", "dask"]
-SEED = [0]
-
-INVALID_TEST_DATA = [
-    (ns, nf, index, nan, dask)
-    for ns in N_SAMPLE_DIMS
-    for nf in N_FEATURE_DIMS
-    for index in INDEX_POLICY
-    for nan in NAN_POLICY
-    for dask in DASK_POLICY
-]
-

 # TESTS
 # =============================================================================
@@ -90,20 +71,6 @@ def test_fit_invalid_dimension_names(sample_name, feature_name, data_params):
         sanitizer.fit(data)


-@pytest.mark.parametrize(
-    "synthetic_dataarray",
-    INVALID_TEST_DATA,
-    indirect=["synthetic_dataarray"],
-)
-def test_fit(synthetic_dataarray):
-    data = synthetic_dataarray
-    data = data.rename({"sample0": "sample", "feature0": "feature"})
-
-    sanitizer = Sanitizer()
-    with pytest.raises(ValueError):
-        sanitizer.fit(data)
-
-
 @pytest.mark.parametrize(
     "synthetic_dataarray",
     VALID_TEST_DATA,
diff --git a/tests/preprocessing/test_preprocessor_dataarray.py b/tests/preprocessing/test_preprocessor_dataarray.py
index 805e5f2..2873423 100644
--- a/tests/preprocessing/test_preprocessor_dataarray.py
+++ b/tests/preprocessing/test_preprocessor_dataarray.py
@@ -18,7 +18,7 @@
 N_SAMPLE_DIMS = [1, 2]
 N_FEATURE_DIMS = [1, 2]
 INDEX_POLICY = ["index", "multiindex"]
-NAN_POLICY = ["no_nan", "isolated", "fulldim"]
+NAN_POLICY = ["no_nan", "fulldim"]
 DASK_POLICY = ["no_dask", "dask"]
 SEED = [0]

@@ -92,7 +92,8 @@ def test_fit_transform_scalings(with_std, with_coslat, with_weights, mock_data_a
     "index_policy, nan_policy, dask_policy",
     [
         ("index", "no_nan", "no_dask"),
-        ("multiindex", "isolated", "dask"),
+        ("multiindex", "no_nan", "dask"),
+        ("index", "fulldim", "no_dask"),
         ("multiindex", "fulldim", "dask"),
     ],
 )
diff --git a/tests/preprocessing/test_preprocessor_datalist.py b/tests/preprocessing/test_preprocessor_datalist.py
index 8569e41..8602d08 100644
--- a/tests/preprocessing/test_preprocessor_datalist.py
+++ b/tests/preprocessing/test_preprocessor_datalist.py
@@ -93,7 +93,8 @@ def test_fit_transform_scalings(
     "index_policy, nan_policy, dask_policy",
     [
         ("index", "no_nan", "no_dask"),
-        ("multiindex", "isolated", "dask"),
+        ("multiindex", "no_nan", "dask"),
+        ("index", "fulldim", "no_dask"),
         ("multiindex", "fulldim", "dask"),
     ],
 )
diff --git a/tests/preprocessing/test_preprocessor_dataset.py b/tests/preprocessing/test_preprocessor_dataset.py
index c93a581..9c047f3 100644
--- a/tests/preprocessing/test_preprocessor_dataset.py
+++ b/tests/preprocessing/test_preprocessor_dataset.py
@@ -91,7 +91,8 @@ def test_fit_transform_scalings(with_std, with_coslat, with_weights, mock_datase
     "index_policy, nan_policy, dask_policy",
     [
         ("index", "no_nan", "no_dask"),
-        ("multiindex", "isolated", "dask"),
+        ("multiindex", "no_nan", "dask"),
+        ("index", "fulldim", "no_dask"),
         ("multiindex", "fulldim", "dask"),
     ],
 )

From ffd11fcf21041b7defba6adb9973807d39678678 Mon Sep 17 00:00:00 2001
From: Niclas Rieger
Date: Mon, 16 Oct 2023 21:58:51 +0200
Subject: [PATCH 30/43] fix(Bootstrapper): avoid perturbing sample coords

---
 xeofs/validation/bootstrapper.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/xeofs/validation/bootstrapper.py b/xeofs/validation/bootstrapper.py
index b8c155c..710c126 100644
--- a/xeofs/validation/bootstrapper.py
+++ b/xeofs/validation/bootstrapper.py
@@ -56,6 +56,8 @@ def fit(self, model: EOF):

         self.model = model
         self.preprocessor = model.preprocessor
+        sample_name = model.sample_name
+        feature_name = model.feature_name

         input_data = model.data["input_data"]
         n_samples = input_data.sample.size
@@ -75,7 +77,11 @@ def fit(self, model: EOF):
         for i in trange(n_bootstraps):
             # Sample with replacement
             idx_rnd = rng.choice(n_samples, n_samples, replace=True)
-            bst_data = input_data.isel(sample=idx_rnd)
+            bst_data = input_data.isel({sample_name: idx_rnd})
+            # We need to assign the sample coordinates of the real data,
+            # otherwise the transform() method will try to align the sample
+            # coordinates with those of the bootstrap data
+            bst_data = bst_data.assign_coords({sample_name: input_data[sample_name]})
             # Perform EOF analysis with the subsampled data
             # No scaling because we use the pre-scaled data from the model
             bst_model = EOF(n_modes=n_modes, standardize=False, use_coslat=False)
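The coordinate reset above matters because `isel` with a resampled index drags the now shuffled, duplicated sample labels along, and any later alignment against the original coordinates can reorder, drop, or reject them. A standalone illustration with toy labels (`assign_coords` is the same call the patch adds; the data and label values are made up):

    import numpy as np
    import xarray as xr

    rng = np.random.default_rng(7)
    data = xr.DataArray(
        np.arange(5.0), dims="sample", coords={"sample": [10, 20, 30, 40, 50]}
    )

    idx = rng.choice(5, 5, replace=True)  # e.g. array([4, 1, 4, 0, 2])
    bst = data.isel(sample=idx)  # labels are now shuffled and duplicated

    # With duplicate labels, alignment-based operations such as `data + bst`
    # can fail or silently mis-match; restoring the original labels avoids this.
    bst = bst.assign_coords(sample=data["sample"].values)
    assert (bst["sample"] == data["sample"]).all()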
From 58c2dbedea9e72f62a80e11415338aa2862e778b Mon Sep 17 00:00:00 2001
From: Niclas Rieger
Date: Thu, 12 Oct 2023 13:41:42 +0200
Subject: [PATCH 31/43] feat: add GWPCA support

---
 docs/_autosummary/xeofs.models.ComplexEOF.rst |    1 +
 .../xeofs.models.ComplexEOFRotator.rst        |    1 +
 docs/_autosummary/xeofs.models.ComplexMCA.rst |    1 +
 .../xeofs.models.ComplexMCARotator.rst        |    1 +
 docs/_autosummary/xeofs.models.EOF.rst        |    1 +
 docs/_autosummary/xeofs.models.EOFRotator.rst |    1 +
 docs/_autosummary/xeofs.models.MCA.rst        |    1 +
 docs/_autosummary/xeofs.models.MCARotator.rst |    1 +
 docs/_autosummary/xeofs.models.OPA.rst        |    1 +
 .../xeofs.validation.EOFBootstrapper.rst      |    1 +
 .../1eof/images/sphx_glr_plot_gwpca_001.png   |  Bin 0 -> 239423 bytes
 .../1eof/images/sphx_glr_plot_gwpca_002.png   |  Bin 0 -> 224661 bytes
 .../thumb/sphx_glr_plot_gwpca_thumb.png       |  Bin 0 -> 50794 bytes
 docs/auto_examples/1eof/index.rst             |   18 +
 docs/auto_examples/1eof/plot_gwpca.ipynb      |  169 ++
 docs/auto_examples/1eof/plot_gwpca.py         |  174 ++
 docs/auto_examples/1eof/plot_gwpca.py.md5     |    1 +
 docs/auto_examples/1eof/plot_gwpca.rst        | 1812 +++++++++++++++++
 .../1eof/plot_gwpca_codeobj.pickle            |  Bin 0 -> 28006 bytes
 .../auto_examples/1eof/sg_execution_times.rst |    7 +-
 docs/auto_examples/auto_examples_jupyter.zip  |  Bin 38877 -> 49273 bytes
 docs/auto_examples/auto_examples_python.zip   |  Bin 22145 -> 29508 bytes
 docs/auto_examples/index.rst                  |   17 +
 examples/1eof/plot_gwpca.py                   |  174 ++
 poetry.lock                                   |  296 ++-
 pyproject.toml                                |   18 +-
 tests/models/test_gwpca.py                    |   55 +
 xeofs/models/__init__.py                      |   10 +-
 xeofs/models/_base_model.py                   |    1 +
 xeofs/models/gwpca.py                         |  432 ++++
 xeofs/preprocessing/stacker.py                |    4 +
 xeofs/utils/constants.py                      |    6 +
 xeofs/utils/distance_metrics.py               |  134 ++
 xeofs/utils/kernels.py                        |   34 +
 34 files changed, 3270 insertions(+), 102 deletions(-)
 create mode 100644 docs/auto_examples/1eof/images/sphx_glr_plot_gwpca_001.png
 create mode 100644 docs/auto_examples/1eof/images/sphx_glr_plot_gwpca_002.png
 create mode 100644 docs/auto_examples/1eof/images/thumb/sphx_glr_plot_gwpca_thumb.png
 create mode 100644 docs/auto_examples/1eof/plot_gwpca.ipynb
 create mode 100644 docs/auto_examples/1eof/plot_gwpca.py
 create mode 100644 docs/auto_examples/1eof/plot_gwpca.py.md5
 create mode 100644 docs/auto_examples/1eof/plot_gwpca.rst
 create mode 100644 docs/auto_examples/1eof/plot_gwpca_codeobj.pickle
 create mode 100644 examples/1eof/plot_gwpca.py
 create mode 100644 tests/models/test_gwpca.py
 create mode 100644 xeofs/models/gwpca.py
 create mode 100644 xeofs/utils/distance_metrics.py
 create mode 100644 xeofs/utils/kernels.py

diff --git a/docs/_autosummary/xeofs.models.ComplexEOF.rst b/docs/_autosummary/xeofs.models.ComplexEOF.rst
index dad48cf..5cdb7ee 100644
--- a/docs/_autosummary/xeofs.models.ComplexEOF.rst
+++ b/docs/_autosummary/xeofs.models.ComplexEOF.rst
@@ -24,6 +24,7 @@
       ~ComplexEOF.explained_variance
       ~ComplexEOF.explained_variance_ratio
       ~ComplexEOF.fit
+      ~ComplexEOF.fit_transform
       ~ComplexEOF.get_params
       ~ComplexEOF.inverse_transform
       ~ComplexEOF.scores
diff --git a/docs/_autosummary/xeofs.models.ComplexEOFRotator.rst b/docs/_autosummary/xeofs.models.ComplexEOFRotator.rst
index 1f5267a..2fe12b8 100644
--- a/docs/_autosummary/xeofs.models.ComplexEOFRotator.rst
+++ b/docs/_autosummary/xeofs.models.ComplexEOFRotator.rst
@@ -24,6 +24,7 @@
       ~ComplexEOFRotator.explained_variance
       ~ComplexEOFRotator.explained_variance_ratio
       ~ComplexEOFRotator.fit
+      ~ComplexEOFRotator.fit_transform
       ~ComplexEOFRotator.get_params
       ~ComplexEOFRotator.inverse_transform
       ~ComplexEOFRotator.scores
diff --git a/docs/_autosummary/xeofs.models.ComplexMCA.rst b/docs/_autosummary/xeofs.models.ComplexMCA.rst
index 92206b3..8dbc52d 100644
--- a/docs/_autosummary/xeofs.models.ComplexMCA.rst
+++ b/docs/_autosummary/xeofs.models.ComplexMCA.rst
@@ -33,6 +33,7 @@
       ~ComplexMCA.singular_values
       ~ComplexMCA.squared_covariance
       ~ComplexMCA.squared_covariance_fraction
+      ~ComplexMCA.total_covariance
       ~ComplexMCA.transform
diff --git a/docs/_autosummary/xeofs.models.ComplexMCARotator.rst b/docs/_autosummary/xeofs.models.ComplexMCARotator.rst
index 518c069..61b4a59 100644
--- a/docs/_autosummary/xeofs.models.ComplexMCARotator.rst
+++ b/docs/_autosummary/xeofs.models.ComplexMCARotator.rst
@@ -33,6 +33,7 @@
       ~ComplexMCARotator.singular_values
       ~ComplexMCARotator.squared_covariance
       ~ComplexMCARotator.squared_covariance_fraction
+      ~ComplexMCARotator.total_covariance
       ~ComplexMCARotator.transform
diff --git a/docs/_autosummary/xeofs.models.EOF.rst b/docs/_autosummary/xeofs.models.EOF.rst
index 07069dd..20b35dc 100644
--- a/docs/_autosummary/xeofs.models.EOF.rst
+++ b/docs/_autosummary/xeofs.models.EOF.rst
@@ -22,6 +22,7 @@
       ~EOF.explained_variance
       ~EOF.explained_variance_ratio
       ~EOF.fit
+      ~EOF.fit_transform
       ~EOF.get_params
       ~EOF.inverse_transform
       ~EOF.scores
diff --git a/docs/_autosummary/xeofs.models.EOFRotator.rst b/docs/_autosummary/xeofs.models.EOFRotator.rst
index adcca8e..93c0df2 100644
--- a/docs/_autosummary/xeofs.models.EOFRotator.rst
+++ b/docs/_autosummary/xeofs.models.EOFRotator.rst
@@ -22,6 +22,7 @@
       ~EOFRotator.explained_variance
       ~EOFRotator.explained_variance_ratio
       ~EOFRotator.fit
+      ~EOFRotator.fit_transform
       ~EOFRotator.get_params
       ~EOFRotator.inverse_transform
       ~EOFRotator.scores
diff --git a/docs/_autosummary/xeofs.models.MCA.rst b/docs/_autosummary/xeofs.models.MCA.rst
index 59a40b7..b8de506 100644
--- a/docs/_autosummary/xeofs.models.MCA.rst
+++ b/docs/_autosummary/xeofs.models.MCA.rst
@@ -29,6 +29,7 @@
       ~MCA.singular_values
       ~MCA.squared_covariance
       ~MCA.squared_covariance_fraction
+      ~MCA.total_covariance
       ~MCA.transform
diff --git a/docs/_autosummary/xeofs.models.MCARotator.rst b/docs/_autosummary/xeofs.models.MCARotator.rst
index eabd0bc..4c604c0 100644
--- a/docs/_autosummary/xeofs.models.MCARotator.rst
+++ b/docs/_autosummary/xeofs.models.MCARotator.rst
@@ -29,6 +29,7 @@
       ~MCARotator.singular_values
       ~MCARotator.squared_covariance
       ~MCARotator.squared_covariance_fraction
+      ~MCARotator.total_covariance
       ~MCARotator.transform
diff --git a/docs/_autosummary/xeofs.models.OPA.rst b/docs/_autosummary/xeofs.models.OPA.rst
index 478174a..4d029bd 100644
--- a/docs/_autosummary/xeofs.models.OPA.rst
+++ b/docs/_autosummary/xeofs.models.OPA.rst
@@ -22,6 +22,7 @@
       ~OPA.decorrelation_time
       ~OPA.filter_patterns
       ~OPA.fit
+      ~OPA.fit_transform
       ~OPA.get_params
       ~OPA.inverse_transform
       ~OPA.scores
diff --git a/docs/_autosummary/xeofs.validation.EOFBootstrapper.rst b/docs/_autosummary/xeofs.validation.EOFBootstrapper.rst
index 902cc3a..8a68c2d 100644
--- a/docs/_autosummary/xeofs.validation.EOFBootstrapper.rst
+++ b/docs/_autosummary/xeofs.validation.EOFBootstrapper.rst
@@ -22,6 +22,7 @@
       ~EOFBootstrapper.explained_variance
       ~EOFBootstrapper.explained_variance_ratio
       ~EOFBootstrapper.fit
+      ~EOFBootstrapper.fit_transform
       ~EOFBootstrapper.get_params
       ~EOFBootstrapper.inverse_transform
       ~EOFBootstrapper.scores
diff --git a/docs/auto_examples/1eof/images/sphx_glr_plot_gwpca_001.png b/docs/auto_examples/1eof/images/sphx_glr_plot_gwpca_001.png
new file mode 100644
index 0000000000000000000000000000000000000000..14d3f633e6e3740b147ac979b4ec42b5b570b92b
GIT binary patch
[literal 239423 — base85-encoded binary payload omitted: sphx_glr_plot_gwpca_001.png, the first rendered figure of the new GWPCA example]
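For orientation, since the body of xeofs/models/gwpca.py itself is not shown above: the new model appears to follow the usual fit/components pattern of the package. A usage sketch along the lines of the added plot_gwpca.py example — treat the parameter names (metric, kernel, bandwidth), their values, and the tutorial dataset as assumptions; the authoritative signature is in the patch:

    import xarray as xr
    import xeofs as xe

    # Any (time, lat, lon) field works; here a small tutorial dataset
    sst = xr.tutorial.open_dataset("ersstv5")["sst"]

    # GWPCA treats grid cells as samples and weights each local PCA by
    # distance, so the spatial dims are passed as the sample dimensions.
    model = xe.models.GWPCA(
        n_modes=5,
        metric="haversine",   # great-circle distance between grid cells (assumed)
        kernel="bisquare",    # local weighting kernel (assumed)
        bandwidth=1000,       # kernel radius, in km for haversine (assumed)
    )
    model.fit(sst, dim=("lat", "lon"))
    components = model.components()  # locally weighted components per grid cell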
z9s@A@`+?kML~Ay*xlEIj+pa;xv3IE=ksgwlqeTmx6Bv5n)d!inacqUR8X!Fj^ozQ^ z>8Xxg@3l4J>IYjQx6^j#f1&cz3I2Tj7m0etbwB{WY@XaQSJ$NrS+1Iv^ z4fa7th$t&@q!`*Yc}50la}=190uSmuj~czz&gfb3Quc$yY{3@z7EoYDG)FyWD2h2h zf-K?nlMVSD?sopHJRylto-sJifwn^{@}$j=fdxwKipH`yQ{LIOd}h!|pc6 zQKQs)`TKMlwd>oC;>C=e*c&&9XnX@^;q2>SO3~o{KG}KYi|9kV_O^w)0^Acu9OPiH znR1);f&o-G52*%FB?-+HG`rlt-Z;VA-tY)|8^Y^hust_}42VkgOP#`UjxxNDaK^2d zzh=Ah(qDNN+E5t51DW?vSBa!f_=5VewO6Ch z1>i%^a3QOu1s@wR>e*4!X+1$%_5D9cP{_w zEAUW7us^_1O#zYh=;&&@UdSPYntGE*tx!XeB(RpsXQ@BT#vN`j_rc)~^GnlGM zW0%i_X0PpTtM^=s!rH#mXoTl+_j>ZW=9ywfZ(dOh?k7W3m9eX2RUBgiCOhF=!0S{#yi~)e|Ko?iquq7yh(7;*qG}0J})p&^KmD9%VH)$ERDwr<&`#8qg z{ik=X*608a(}p+@-42k5i2{}+_uUq55STG%Q!nfrk>ghF1qqm_Nwzc%$fn^F13@Lt@2%~G|s*FlUvCRp0rSsjUm2&bW2{`K`6ED_lt zw=VOJG895o@=j=i@rQ=q}BlKgPK`kIaIaDKLMBTKx{tai9`GKwg2vr!Dn14ipV zE*ogK#fhA(ru_bQ$Lh3X#5K~<*;hQ)sxY@OMl|7!A5q~db5FiIzC^FIOKjREgN^*LnUiQD+zI3IY7AsdoD^v#b_~1t~$>}M7#cujxKuP-u&?W9|05zey1K9dzk=9 z zb8ukrP{zs1uJ~(xqr7Sz*F7|*y?vxn0%o$RyEkL+4SZ#2RL29#-E4A1^yZKr$XrX4 zA6z~qKgJ|?B-+rlAQ|zGyvNh&l4$6Q%KhCq#fr9}VcZP8mchR+1>yAJ;CW=}#<&ri z;PT?)GbJSgXdeb~Yw0(XI(#rOZH|6^l_C7t3dHjFWT2k0AD!$&UUz(>!Ef#VWiF=e zZ2MeO{kEN{gZyWh^~cU3bOLfWA(mMu`=4$Za%?Fvnliz3n?iSax03>t$2709P3lp3 z&zYX4ri`T}3v`nIJ|5SZ$jQ#m%*~wZ;h%Ye4i(dhttBm(nwn{JaQAqhPrN=d&P_XL zb`ZK0_e4-rOAfcro0;<0GX2EAA#?A^3h#m5Q%tZn+`55fNQUjo%1pX(N4$}J5KT#I zMH;U*HsA{kfVWn?I*;3mWch@t{BGP93J-Fc@>Zw` zBhEroS66>~yk#@qjfI1gJJ@F_C4~XV6X_!$bbZsWj)vP=JB<;QN$x@(gLS5!Dyn&| zw+Fjj$c46Ll53+jr9VtzR_W$M*02?+_Qr{@mWt%NK-28lFI(NqS z!6N^yRB7Xkt%_m0kJ;J$>fiL}P3dNG)y2`2o&#}3IfMF7Mx;a^Vq0X!$`h8oFbp`f zu)icw`rC5+@zvkRkGK?n7H(0NsvUFhlKFyqw`gi1!0 zLisKcQ4^r4c~hB>j3lqk`(C{BIq+m?`D%BspZ5#i+jmch>KRFb|L#3K(JD&M$s|X6 zmOiy)A}J;D#%%3X@X~!Dbhkg_E4dHZ@Q}{CE&r@@V6M zWU9);e_)6sa$>|Fi57D?z4h}0s)SnuatKvOLWaERaIiu#-X7o zcHJrzd-lz{y{~$!6h{XJD$|i?(|0iAUI0sV=BC9fLAD|!R8D>rNeYpu;a=xQAWH#H zb&P9qMKZq~4Mb434h%#9?wYWpW&EVg*jyy}dOYtOt`<=E6X0z1uM({61!T$BC8BK?De?q4 z3J3~vS}suwOcmol5JrC>OhR{$aA05<`eM4IUXRQ3DG_Js*NcTx@)mj4{FKg>)96TS za>||PBTF>vta_MRZIq}k%T`^9W7RF7@EKFLb{>Wc{(2yD|9?-dQ*Dil&vB8h2vycnHV z@ZE6l>Qh&+QBNH2yNPvWAvCvs;z_8Yu^ko&Gre0{(qht5DCQwrk8_4@RM|!)sXF$p zCZgQ6R`RWc5DMKEUCqcU?*v*|p=Y15q}1yNbz&#HKF!w>zA8@;bG;&eFX(ELYI)mM zR#v98Kr_v0)QS`HSpOA7Afck7mROHRG9|nFfd)?95?3?kbSX;V1m0yqk3ELo-d^X8 zv4~i`x1XSxMc?IJIE^TX*Q2JdAdkN{QBZkYw#pQCR{N&qkxEx*-~2SEkT>4nA2Tx> zoF8o>?`^oO2%+W~GO8h_eh#wriwd!%AlrI%@WIjctOD@B6`OIff^|0B8Lrmo-z9?B zc{Zl<{y23NHLp%j++IG)JB#{2U9D6s@vPZr&NRE7%hBSYHrZA8K+pO^HkSLjl2$_D zH2!uyTKo?Rbxsd?)+R!1u)T{`otzzhNSycbsks+VIzk_YUD$ca{b#j$UrL@;i>DUY z6bV!k<93&mB230lqaRu%kY-yt28C;5OPPIKx#Ktg()5^0LNh|mbf`f`iUXR-fuMt> z*L>_Z9cVy)l6gdUE-^s(e_ojy>b{h3D$A`~CDzk}uXA*|s%(fuSw$)ptyqdeiZwk< zANa4~7Rf9#KH{>98rj=eHe<6zm^t?Trt`SD?d%z}(r-=`!Sax>jVo;Ty^FC&ajSgeqv|-~6t~V!WIm_xwz1aIil8pAMh|1Igj63sa7^s)i%U znyj}q4Y})oO0z0x*goj(N$eKs7_3PDoGJdtahq6`j!xJYsTnlRw?|3r5$=MP_BSn? zk9M4nhNkQ}K4LKD$G30otMhTIGvpe`c9N3b(F}-M``LEAzB2U(DawQ=U~zM^2R_rX zAkc;gM3cj{VPu)eU2*S=Y^TiZPS6Hwf=dXr%`{LY!U&43KJn{B8qv4E?dPB7Wb+=PhNs}ITgZIrs- z;b$=XM!T}OurOBVB^=2>00*b8+jI-|;~K?z0eyXa7`s|P@L#zYCTu>NPaRr)g!Uxj zL357A(>oaSZDkX)^P*)YH%sqRO>XkXs^&-=W2d&KQap}$yVn_RsI%c3iAy_I=O*4* zEcHIxo<;C|`dg|0O3gX?ZO5p8CX2MjU3#)Eetz`|=k10j4sMxdURM>{ zEb+0$0Lms)(-owT8BD8}x0#^kIrJ3~ZuZ4_w0h5mJvO+cQq(ekySZ?)+{+@^r9k>; z!s0Z^ZG8C{H_59Pf38OLef~i)n1GN_YBme9JqjYCVQ}GqfLKZ%^PO@;J}c58f3z_! 
z7tiwvUQeBUcg={0F(Dp4$LALwX9`&9Pbp7$<_6Tl2StBYhuYUD+Ba_LtfH@ekNB*P zk0v&D=_{F-prmwa-xqX%y>7-Wr9?ubcgzvj5~pT^{fhN_otxK|xqVO%h95jWVx?m* z6J!64F4k*B>kG$uM1D@i=`mU?TBuO8NA*$n$#G3HH`ZR`>)}M1d|l3&JwTGDd-nhGAMmk_yi82!c-S#NNk7Y#H_cPM-ajyqbtMmjLNxO__E>qTH*jJbYXl{gAvoZ;WUe_wxFd*lv9U#Fm(C+9K82M<&2axksX zDB1rWr_|=wR`BNe1qHoG&nq!88U#;5dwi+Y>SzHia`ZGc(SDgW?K)|5a(YTYMrM}P zXx;0WCw;VjuPeO_ufc$@Hapk;tv+kIq+~$3Wk^U+#->A&+~AK(wS_4;nK!7_-uiMW zpOjf#@}eaoO!m0A$Lh>!GLBmp=9ZqPv5Q)WU%bj`cJO27S~wk*7IpaC{OsIe_u!{e zERBFbwYjb3)B(;G3U2<+9LCrgXQBqG6q&gzW=V>(pqjRkoTAhD{_x)wWrnC|Z0?4Tky@WM@`?!&CYsKU1ZngyCQGQ$ZX&RIt-?^P&vZibUBgMx3Cz-pO;f7pFs;ftspeZqIzs3k-XBVif8qbwgRmF00Iynu@$X)1iyEuB( z@dB4?t<;Ux#*~1Dc;J1*+h4Esjv~9J|1Q~^a0gzW^t6yMr(Lv-8#CyVdmEKj8bz{ z`8fV8#Ob>p?U1Sey)hk7+&{-hgO4H7HUX;?Y-0q(#Lh!}(vS{eT=+fV5fg0V3mY2{ z6fFW?hthYXod`xnl|Unkw%q1Q3Q5(IUf)<`o)Vj7B>EDqtLoTZAmDT)ts{Iy7D}k584bHhu(XeiIT?g@i-`W zSTn)9T7;A3wT6Z?xIjP`kF0_MmP6O;J2h}hkg@ltvC$Vojedg<4^#wAP5!|7^4Ux@ zN2I;57_8-Qykq0@@sDkRR1mY=5>2qpw=aGh4nE_wQO(qeP1jQ}NQp!K zxfP4MJ^@`rPOe*Z^zAeOnzn=&@0LDlXppWTUj^^fIn~Jv%j#rAag62yLcvftH0lVxg}@@9U08$K0kO|aj+frI=Hlmhl5 zZ)fBmAZFs=*OMob25M%mxKE58JaQ0HGEr*v0%k-4G+zKNmU|M~YCQHKK>i*J3l_XL zvYNb_5rWRUU#2U&+BB)2--JdvFsR(myX-j;W-_mtkXYtM6M z+|-**Yj>JD49*Sk>^(0hZAR47eXe0~MfsbrO9cUIUG?R&-rD;DPMQ-rHWzl_@J1@9 zbJW;GL_{j!Iz#@J`e_&0qS@75Cvfeb0#B0n@^DRsx(dBE7z+3#+4pyRKr454wAs}j zLC+hmAT4b$*WimRvAZ}JQm~$`ibi_X;GG6FmhDK9OE!V&x`98&oo|5lf=3(qm*9t) z?95YBC9bGJ{mEQ3jb;mbIkHLtPIpK_m4w7lTwL6kqNgZybab~;8@tnr$#9DDXPsD#*<{IfBBm5{ALQq!JSR3)$+Dy zY_x_N%^Ye;nt|EbFqpmg_~PU8Tg!^S2DbH;HALsQp1s==Pre)3ztA(k_S5Bmi^Bhc z-_#|grKJ8>gnGPLCBNH?nV#=e{?=jqK?J(8qMp>$N$avu$eEn1{%55m7lCrPKKcS5 zEsR>&bo5dL_88xrm|Pxv#xw4Vci?CTBLFrLqulqBzr=f^a6^XCl*((}w%-`z?5fP> zhC;445JZD)t%M_gPLaL7*RK9jP4dyXT`Kkg3kVv7R(oLT&QdR+nXa}nlfMFFCsR2C zm)7fmz`!wXyw;N+da>%*`>)fGV-$p}wT4oiZVV~x)+MHE#W3{#`Z))!ZQ0Dmzmkl6 zx*Z+a6PY+_XxP#)=a*p9$?Snq4!J>sLlwehA>no+KR(rWx%Pa3YvSO2c6;RVJHLBh zV$-Rcu)gT$D@2&wO-!OVJHTeWKB?tm-^G}~txWnrU1ct>Y^o_`7|T}Hqk*dZY<> zWs-)2h?PVqO|@4bI(k6Rh?Kxy?eaNhLqxiObCTBXp$Y*g1vaLuV?jj@J`h2tWgKcD z=Q((NKGTTCgCA3i&juV(&-L_@A4t_X<@NqO%9MAsE$XR3jzc}mdNDaIPpKs7QS(tR z1TPdu81mOb6X56riqfEqMycnJara30Mg5HBFf@OoZ#S_90$&|%`P`uG`wtLjMs21qn^h7`ni z|0*ktyZDaRc~yr>=Ihsc!^;{3J9$D9d(t&VrJ&#xjOur_sw3$WMwMSHJ~7`{E1`15 z3}%gR-$9X`^_*VFrmpv7I#;{w$Q+$%WMUr6m*y8&Q6mm#h#_!Doz@y>p^K0o+t0Ac zVt7CIAykP3U4mFiHTr;imw{UBNtd^O+`%v&!^Wh%j1t{Eo7UpmKiw?P$cRMa^6jUF zH%b$DZsN2TJs(Q;%Ht}Itxf(VdS;NBiE-jXAc0a&$?=9QHvyMP&2Qj#eAs)jf;aq@ zFJrz>-?X=mvq|xPWSQeB^Ge`_Mv~SqcNb5ctuDxwYzZbAX6YT>@a6i?)*HuT5&7U^ z2__P?r#&^dO|M?5s|13EX@9AeNif3W28U)=8xKtr^_{H>TPq&yfw~`FO3zfQ|4e;m z{s_nWS5js7lHs%%SDA>7U?*;q^PU7ub)xBivQ4TO68QGN%O9%X?w@wGma3c5H{2uV zAiwiGNHT#BuM?rc+{X4#QaMa{wUYItJY&x9ue}7kb%4-2zUMcEax+`>Tr;>Q~m+U4yO<4_TSYu3Nsv(b9LN^EvzttQ54qxbpez zK7UA6CpKX4Q!l|4Ent*hlx`-wucIfl%sNS@#@f zOMZ*6v=G_!zEx_Ks2aJ=B6+b!x?8a=evJk{-vqHIQaMw3s3inhahJC2%@49!mr{Wq`Cq z#>aO8Lc{ul>0d1?TsQGsyqY}D1WfEnbB6PhS|{ZV(*xeW_I#e2 zIbi%o0*Vkuq!EavX2u|6xY!yB9lK8O4;JU=C4h#3vm_?cQHVe2dbz-w*9cHX9CD8L za|V+dO+{a?9dhLyL*O|{7}fLKOnLk|W5S@9@ZC8mC4Mnz1N?wKuDNmMMK|3h*F4-c z$w)8}=YP)3_OP{4Ntk{8Ex;>jdPyBIr{a$9x}=ojN=KBODMT^cX!X$~p2s}kyV?nr zWFT8!#;>;KEbJmx&QT(gQt&~rm~ql|5!ztfS|7$~SqD-3C=0#*7YEPbV zywSX}^xXeQ*WTW-D!+8Zjv<@j2Qf*+X;b);?bOGWr2xd7g!}B9&bQvr^3!S*3D7En zB}q7jE$u`}U)k|g4h6ZP=QEV;@9wsc;~RF~tKTGAFO=x{u}FW9lFh+qI1c9J<=&(& z=#~T={)rNCaB%Pj;|<=B`VVz?ETo$fDpiuGn)AL6h|(7Xl&g)!l#4{hH^n9_coZqz z#HkV+J&k=`Y$^{0j@-yUJy%RA?;_#PV73j!gQ^ukdNq84VJ`Ba`2ZFW*P*CVa0U+6 z`}jmc3MV`JF9`f~cX!_bB;~b=%1@?WbsZ}=YJ*{}GKMf=b@@7cV&dnJ39G6N@;azM 
z{X=^N?di!G$!|ONgpj_?QmgA?$#s>7%dCyhF!7eVQH42c8{%$yuDa9{%xaa|pjFwQ z)+hLybIbuc(s#}{Xf@>@b5vx~*PA3H@g~2_?L}uH{a0erxs}%ram*Z=E%d2EgX>ov z*7^q_CcRn_^5iG&D%FKOG~T@CBh~d5xWX}Js;X>@53-0cUoa&M+Nk2${_r9FHY z6_9(6FQz6mA@lMd8A*7pwaeoK>75VvKhCXS+`&ZgldpA#{R4?jcdhU%-aorXbJFtl zR5TT@qy|$)NwdhF4lcB|;o(xPYW&B5l0fa2<=;%-d$+**@3h<XuY_xIj3K*n6`hZ8<3kj+@_9oZz&c1pr z?;s}w_)8A+FE)H*MaU0}_|-{MHXFocKqIdjxsPqZQCe?ah~`&-3)PslsCDIcUln~Q z<;*p)`WzOpN&N9@U&|7=K(z;Oh#Eh>k)r+h zGiU)ZFWD^Gi0k%7mz;;4T`iA);!@kv${QnWM@!1qKe5Q@oSm8hY{F;PLnl5Js&3@p|G^VE(x)_71wNeGRK_Zj=wMg7cJRdioCubn{PhpcCZ7((bc0=RJ69)@U z@DlBpYoHSgFWi0dnx!54M_QV)1?;3x4GmLGXTht3EH<>=3(o&-(nSg~++d?hvnC)j zK%KGid9YGRr9i*?nmjlai9&L@ZfJb|NAo?!=gI@V4(mQG$~O@YF378)P!K&N#uM2= zfpQ8E@(Duya{)_CzVe;+$E-i!WY}6?S+ExwYd@-xDc6G)DJtS!YxeYTS3wrO(gr8d3~tv;TIfo~|1!3nth=OSwOhv-V54I9 zV^*x!&0wjF^&60$`PJik?lhX$#@ksHQ_3RqmEl2z`>|-dre@5MtoX%~c0|hjNY+pf z!7Al9S(VM%`V_O|gbx`ROdw4u7AZMc8;1Ij?9zXC<@_FJ>k=pQ+8WLVdX?as_f$A6 zvZnWUre$IkS$2}VA^xm>X#9ovHXya{T)B5Ocb}g;?-%_k9GpEy5@Jf{akB8nQP$Cd zq#ujPUpWvFN&JbhGQecFOS)J7SDk_v#s|FT#LZafc^-=f!*Qz3W-UZpB)#uST)xCC`4S)zzX$L%_Oxcb@g>l$b-omQPL!x<~C1p#*F5r zIOWMSzHMi(ARsKQN3t@p$Yj}zPW~Elg>YFAjeS6!zxjTxTyx$4e`S{EBY!Ndq-@5q z6=6VX=Eu~n#r(8=1&u<(z-j!vJ&#|ue8H(2(LQ`mM;n^3ne#}kOi5>t7Bu^O2FDJv z#Pno$(dGe1*JXdb3`J6^XOBc-6akH_7kti+!<62WQtTzJh@oFFQO!>so%#KA)IDd>IMv+Jw0 zAtRBHu!zXr{jw?XaYfPmUwAiZk52G^$Q7{_YluX91iQ$r+j9wUR%{vE(7CyRWhN6* zq2V6!iIYgCyVCH}?R^}hSM`D?Lgy2=p2$Re?E1VI*VAXuh`>Ry#kCcg+I!Q=Qx=xS zD$FSMC>j0{v=A4S_!M4a*KJ>mUgclKKUIFsFt|5V;d6NT^Pm3ht~2M^3}g$X!LdP) z!y`hqo!yymL4Es~*J&59%b4qhQ>dycDY2!VS(eT4nD1vR}!a?_k15mHiW4a@$Ph0Wx2B`y5q zY`ws@%fxYi&m9_3CcND8*nGkyK;{+DbAP$vH@f z1&y?QNwUozpQ+G({M`S?%h6mGtPI2Q{WeUyNzvs`DX|k3?R`MK1NuAwCJHJ>7FC)1 zo~MqzqWtp3^;?}J0v|qT1(LUWoF;w_E}{yjEwe{u?};CGW#%F_X-wXx6CVFslGkF6 zPrWfJf1Ln2x8KgIuTf-|_qEX_Rml$~@AWPxm98fAJKqQ|_3nQ8B!(;`_1>QIWJYEA zK&`9fRP!*=cwE`guUch$Ce!02n;|_05`tRh`0fR{gu1@BK2ktE$KO9Wnzciyj66&@ zVlS=QF-dvMJBoosRt$#>Z6>raOP0@3XSGXZf}4AtgyU*li+XnFljm7$ZL)sN#Snio zto$~PXIV=bt`l2!Bs{-8z>}(?-pwag5&dLoW>If%al!W z1hhfxd4j9J0~teLQF$x|(Hd0LHujvZ#0jd37~fsdnK=$7mz zuhkfG`-u@K`(Zt4p3-g!M?;z!ye)zaBVY5DVwSXqiTIrWef`q8VxUhy)5MhOY0CMJG{vjiO- zvcUR|HYdl+?P#H5C(ju9aGZn{z0>)@T^0 z=ermC68zE1b(^MDMx$p2q+&)}58O6|o|`e%e86quED2jK#Bk%+nJmvR$jEEw#=uy} zH4!CyuE8~~1zbX8-JT%%W$|T87Ry)CCx7(kmQa6CKOU7U_x9x?s%*G8G28XIU=vgFBpQ@_tz1+)16Kbfe zadK@NBT5c@{lm2SR3yE~^{z?8Pd2$8Vrg_fn_bN^8QiWT7u+rDVaA7qT|_a+t<7w| zm+QLuT`M0=h(yVy*zZ_OlqaukoIKuJaDR``2j)lU*ZAyM!EUsX(nHmaSkw}?*=+3A zu34k-p2@D@XBN`Ior|^W`^K{v@9H;koXQx!IA^_odu1y!mPeD8T9Wfey6CCIr|#;U zuwrK)7Rhw3X{xh*oqFjt^a`W?#c>zsY-)CPcJRaMaUZB5mQ%5vsgo?_2@|LL&UDwoKCs-U2tBFU+lNP<338`B9egwfViB{CWnaA;!9<&A+RR*!9$ zp9_Xv4wGcpohtx2b@WV2eVdU+Q(Ec^pDE0~^j>bz3$xbi%Us=_;_%IH=elGRe`RWk zMdg$DY%9?D#+p=rs)A4Al&I~RD0BF@*L1qa{-gX=lH3>33^(-O`8IPN|1dVwU$wWt zXMW~vvwk){&B&SG(h;qwg@)^DIA89CMK#O!$r+_!!oE!>b9mzA&jissz?mcK4FE+b z|4cGnQ`rxTNUEje8+@FYpLQlNL$$8C=za1kOwx}pI4##{E5-#l@+aBP$piFCbb9*2hIYH=HYqbZ9B{xQs?GO3ho5` znuhmV!4}6tRsBpeX1M5MIDvT)wK_I#4Wo$ntB2blX6oi}bhjVjm4G|0Y9I<86U{0~ zW83l{k zY(w5UOd+9b$ydoD9e__GLmu|V(A@t;t7n_5!d50u4^P8O&_jZ=?Pq!%AzS*dUxT%t zZooV@K}w{8o+X!f{RuJc6z|u^v^BAy*yN19d5-zO0XX#@=tce}dj9h6dT`%NIUxci z_I2d=!H^D`#2f%ROziCW7b>IFk})!3j_sh6kO>j)_~DYH$iwv|=0&Zw#G}rx2mhKm z_qZNwE|-Vg!zZgwqD}S+@Ol(WQ0|F2NqW)jrkwG`>q;lR*E5X(2Lml_;Gp1!FFSdt zz;K>yElF3_ZhKS3@mA<|Md9D`eTAuNx1ruS_SiA;!u$Ro$R*hWqjKK#bg4HBehawj zbNlw(i=vwbvpn^YuCp@_?DkEwv$UZ2km4h^aQdvovuEEG6hRzi9z_zzUb+#hyRMd# zP*$oQ*rp|FI>dT!$33U*VTxKSGvzAHVA9#lp-rp6N8G8|18-x=^L;QJ+2oe^uQYHg z^=ag%iA3Jtbxz3Oyfuhj>iEl6JjGybQVnk#itBmz 
zaVosyd|uR+t_Dv|P~=D5mGH9`m4fV2>wl#C$ad*m{y78R!nRG=vD2>6>kt{fP2YT9 zISavmj(UwV=3z${2sCln1kw^-${6ibm0oe~B&0=GwmP#fPeDuwraxAF7iYL=| z_ieuEsd=`wPHJT);Yup(eIA{=k03u7;|8t42WX+zFAlNS&>;Wx=@T%>5UC{d0yi=x zox1w1;iUdS!U6f#)>e0taBTU^+jUxfC&^oW4OzO{n@4ReTi;c$=}3CvpJb;Nxo(-= zo%u~F8H0}&@m6V*VrNI09R0=fJLMWG(JoGPB?Jrq0i9H~KHg`ul|NaYaZqxeohmK< z2*Ss~==v3=`$oT-y5j6<cND} z%Dz>OWzs|eEOX$|WBg^3*3_sF>BrI;fj-zY)>rp>E)%rT3M6>!71BI4Nb5iUoMOm1 z1yO3Ma^9`^i<}@H_+fEpTPKIx&e_|)@5sxv@TXg@MdyNT!NK_Ep&bHR zV=RNG2@YS3u?_r6-Fp*`r}=+)`pT#(u6Gs76r@u?y1Tpk-FJTf_0GqcSu?JC&pl`FC*2L$bNX^2HqhnUv4OTzi;6#b^5=B+ znEi#{&pQ}<+NcSvS-4)%7N^r*E#ig2vgzFODQj)&FEbTQOm7+Vf&&Y%681>%W0*iF^;xl^6?JJ@A_1_DVO{dxgLaLDjFmt zef%ft3XCGC2xXGI8t4SX#l>H6aNt2wD1&A#MC{ztoSx1--BOZLT3ndm7+mXh-P_+U zCZDWU%_=B~_=StBXJlWp z@3>;*Ybd0Pzpxl_P~3YVoKc^|O}BK4g;rd`cG3H6G(aB8fBN^t~%kc{!ZOE9NLyqn@o{L(kiC9XDnO?JW9$=x|uC4AuQq6^TM#&HS2DnbFUWpc%}* zM3?kOJ9{thO@-`?+BrOyMxjFaf;pStUh_j?tQAco;+TD6uxdDb)Xdi0j{bWxmLsa; z^!7PNful=2b{Y~VDrXOSctb?Ghm>={g=<84&JY@ESWA_k*5m0N>ol|Is@^L8um6ae zez*v~?GW?R2VBhpNgNtE9ArgPQ;df2Zj;c;Qy4U9pddfkLOmiR8yQ_rbBOfAb zi;sqT+A6u#$9@c8uNZ)I_LqCl@rYf}`X_SQTDD9q%*(>qS{=VzGE_)Hyyq?bB=cJ) zS5q0;k5-KvE_IVK29Gq0gMxYm$-#jdLS|mqV3++>ZDHiK*ovSpB*DlCUa^5G^us%vQE1?3FpQixJf z5#l?3D#XjfLPvqXlVcf%l7Y-uDZNu;2P@b;*I(nS0gwKQ#!^k|uS3z>g^ge1UE3{L zd12Httl2VtV9AN%haxA=e)D#eov=7G$>722E|b&nzuwvW>~yY!!;}~~n#Q~;tGPS9 zz3HfDutMHG7R(sJbv@ed3Sf3sY2kmy?^L<;hi`a%vD_n1rzrWXO9w9$0|oUNgJrt> zGT#!ik$zsoQkw#~Uf1zAXCIYu8r4LSd*4mC`~iYd4y$a%*3$fwCYm;^JM z>xf<-nQg-o^*w}43^?FFiyh?@jP@1+6}^mKgL*xq6!TQAAq*7dA(M*Rq`P}ZPodN_ z((Ig?H<*fsEk|EzY7)a=P;Q2Z#sy6mXe-LFAHxL<&G0EHDS>K5-eIg7qJbY5FlUs@ zc&jV_F6CcPzhr3bUt3ldej`g>Xn|yoifQP`9imxa?h1+s=OI0Fku&F?IO7H(C}9-* z7j7}>uL^L-p1=YoMeE{}Q~rd=f`8+Toy?a~R^Zac^*jE#8S77%dnO$0JDRfDfdVMvmBIi)D zqDp_7O!DaIqEMJNBe}@O;r0l=-31E=3ibQ8Yl;mhmea#HDzfIG&CXoByGQQkPV*yI zdW*3(%3M!Hh&MD6iP4W-I^ly1gz;3<<4oUSb5sZzSu3l;V}I98=7~uv9!`v_k#}Ml zG0&+55V$+ab0Ze3>+UU_Io15LqsENLqQrL;C?R0w5PY}sVEXMx zPoA`cQ}XHm!k#!t>wA(Wb_*W&UPmo08mOllT%nW_I}s+ZUs@^;ANsEf7~JV`5wX=f zaPi-_$$CB%@)s|UHh;_gDZV3S&Fcwmwt4(y4VHUCNju)b`v7^Qv@7W_Yt+@QF*}g@ z(6D7#Rvz}V1NJl zzrTdx7$_WlxGv!`@==m!KIWsfKq)3A%V7zSp*PUo7k%~y6=jI%%}wETX6DE&@<*Ot zYX3^^KVITQ+zt%iDi8Sxh7L(cf8SM+Ws^*Q`sTCrkxAi!&$O9E_mYkD*Ax|S<&V~_|1J%Glr~Gx zHc4wKN~YetUf{ewU*e6eUv@tqJozlP8Ki*gv-x1{nR1C&+=Dfw;lL#4#A_~|nn{~^ z^>FepUCbByH#c9cxCJ!ACEg`#;&9uXGEqq<=s*TK=XNcppYy>oBF!7f9>*>-mdtDu zKf#8Wjk5;7eZ@t>r}&!Ti2;D;Sm(4_18SUe?D6X=cmK{Mk-UP4tPoD7T^5&!f~>;f z_{zvzdk;<4g00oZwD6g}#a0zr#9J9WO}Ra^pRl&7{@`3O9d>WAu4?Lgcdm$C;-np? 
zh1dO3DldkU6!mm*R0ePYmbrn6@vf=i1$9VNl&`n6#QD8^xg0}0;yAMrjZ z0kl1?Cn+0K3-1+>8-wh{ABoi7xwGnU!X-C#ruNq)Ujkb~7Av~p^7XZr+M}tdT>=Nf zgsY3G%JJ^VyjWMvXY=ruXJ-4;bycMD*5K*$E6}x5q&K@{C&%;c@Q~87(9obXS`D4_ z#m&;w3kGT!*MP1+MK>=fGuuV7`dch(W}9IP8Oi&R8VjFfSI}M z_vGB?DCf|*8-_40U)+X*n8bhvp|cI<$mtS8S_IB4~QF+to1Z=aQ4S*)I z?+|>6FlmA3cod{Zzyp)lO1VB`G<<|l`}PNll7zB80FhvxhxXPe4)d567D4-U`5u?Z z!~-FQ*_iyeudXXS{^185P#EFEg#Xd6BrVy&(C~f`R?@y46c| zpYBq5lYbcODuZ43wSbL2Jr9WEwVxa)U+JT0Zrh%nb`)r}3dEb`@cuSv@Yft?d1-!g z*?=aG@?NI@Yh=F?srjq@ogsRvUIpB=q))6VqOze@tpf=aAIZ+h#6BDZj##dq{b_1K zlr(6cKbtnIm>~?L_NTPG{ppV+$<{5WkZNd<|(_8Bv}8-Y50h=oTX?N2mupit7)r1;|bwj#)A15)2c9BG-; zgPjN{63v-C9zSlsTVGti9!Rt)8R7ozTvvyG#PX4%&(e8%Aj1Adz};93t6#Hs9677i zImSYG5*aJWu@dye7B++Z+ZGBC-p7)3gy9Cz0BCp+WUqntatyQ z?kjJ!Sut0SO7T@PJEu%>k5J_0Fm=`86_bX=M0Qq))i3>R5k{&;VFC4j^;f&PR@wnj z_4k4s1hW>h*#H(r`-Fl{u-nbr#{?azG@4_cnEy?;au9rk@Nj@|AO%_(Av+;1K7MVw zju-R=B#exh5M>)1O9+OYnrsomeB}XF7M9(EgA%`BwwoJR&Y_mGQlu6^yn^p+Fjs|D zQtBAA*%$e-G;nb>!3r&_7o8G2GHARwg@cc`JNQ7xMqv6-cXsXhs}ttOnKSueOLIyQ zZ^aV&01RqJZM>=;vq%jfzRVZ{edcz+>+d(2p!4YZm&v)4#j^(EZ&M zTR7OoQZ94# z7tlGZq>SxI@p9?&1JtNu3$(+eC0L|du@H^%#~R6?qNHe#l2`=Jqs{tD!#4GE{= zb!)CFOirJtif~&NwKgcfr@n30Z((*=Qbr?}^XgfM3@TaMq(mquPdMweDvH-uFCUyD zM;J5^ zUajQD-_fjwnc(XK@t|+=TZkzE1?W;<#dP)|H;qImyIuREZ36z7k{Kixi^w-4Sv1ttncHAwnH#jUa+@~)RJ-muU(WOEx@OB1T9=_pJdV`NI zuhI$va`0wICkjZpxKvB2G^m9G%k)fRQu|uZ6rw?YA~*2g3f`(&=R1(;UWpZ+vHG=p zCn%W=Y-sam$9)UFDdC0mvJy^aSGtQ4%c|p${y)9v%ywwpUg-NVMt>A9K|RxX`hgGr z=+9a|i?SM(`_du`KXCji9P4|YXWS$yGqeu&hq)3={rMRoXSNKkBDa|p1cEOaoBH;EcU^jTfXOGYg(0wY`j(ZsHX`EMjb%a7T_C zy-_DBvy@t6Up+$d;5$zd0n3u!viLA%UYcY4G{=c3z=}Rcy5MB%aYh{?(%ZMmOic&; zT44Mj1o7{7-PK~DvH7W>;RL9US{STsKN%Y?Loc>#`Ub$#$b=BG!=r2e*(0n!;Qjil zJZpDEq9T`{gBp{7uyk%IkEpC`($&RTo#EF_N}lYwB>=5_h*C zkofq*af%xarcxxKzjPapv^Epw70hm@R{sWGa}~Y5NRrHWp|#l7XkvY$Yv@ zp+$TedP8)X*fFn#q7n{IQT_5I058LQ4S+1pN8sl*bHCD&CM~dVRX9+6BUD;hW(xKp zBmFr?TdNWpeI?@OhR7<^k*4F(XYU6X}2{T zST^I<`t@QT_@bS7KZ-tn3MT2@*s0f8Dnwgqw!KIu&K%`2;rbp`@c4}#(JRX z`T6jRQ&&{pfv4UmE2C_N@quZvoj=N5`g08JzWp=~iXL@7K5HBMg%2kF$J(<4RkZFj ze@hLuyO0@lY;PpH0U&(P_yvuQn`s7U(?MBb*aB@CW%;eQV^L^?! zSOqVHlP39b9-^>#o{8Aj&p!s?E5wyV(9^vZ3Mb#^u%tYSHY|v%DE+xP$e+mn|{pAp~OII;&(Ym(-w#iLE4_?eA(CLVc^EP5!kpnZ)PK5!xkK=5Dk#t zrF!nn0bCKs>86&NW>RAkiq?43($gHaBByT(1)S@aEAQ>xqopnTmV$}^{>=weV|7!! 
zlJD6(C^p7N4tX{5s^{#sTfBP#ja`o>Iv|OqrSToyf2vdM-Nr5?$5G1i{w~#iHATO0 z2Vh9G2^k47V}WH6I6|Tz6fppgUWXA#(Wn>Tq4a+OuY^Z0^pkX=eAm#!?O4{~rs~UGX4=!OEhPkOOnDR_UTP72 z;vLR~`a{BBWv|0Ev$IS|*UrjjIe%m(1xUerUcMy0;5O`99hnhUP%!!OuZXwYERov( z6y@9BgL>3lpAwS44!c(CMrT-OK21IQs~%(Gxl(&*#zL)>YX}mf)|>i%zH*StH3@&akM?j8@np*Peo;d+ zl(|Si>W#CReflI|fxbu~f4n6_8q1 z(4ewwIxpcOGU5p>Ya1cTSp4St${CV5ovw}uW8WyY;r%!DvI6?FyT4Bg0n?AwILG+6 zN3{qap`&=5&Y=>Xm?$W8@I^{kL!3?@Ws!S7U+f+jMQxlGRQ1GPgLlJxEYmrmY4Ta` z=8Si;{AQ>MF^B^O1wTzBxcjtus(0%OGRz+v5AI<+eEmp-z<$FoHn^abi_9El%D`Sg z=#de-_B~9GJtF=#fwvW|+lEbTHz^lU_eV-80g@cP`%TBH5SNC5IDu_r=p=@2HQf5F zfWw#6e%VcUYfNhnl)0z;r?>2t@_+AKUu1POuB)4!oXjpc;&45IkNM#rDh zH_E(MUkwsSMNe4fQ&g!WnUaL5y<5K8-ZAy>VJcv%S*mr5bKA8#EHrtH98yC#mFIjp z>*CF>(E&#w?Q1}J8W%8)+Kv+!r1$RB_tv{TzCxEWWh*P?1S*01YI$}|Pvd5kgkaZd zSNnSQ%xz*vExjzZRmr_xr=yVKwCZ#Ww;EU|@jjV*Cuj{e7D%91)3XWa@KtjD!ecl0 zW9t2vl*ph^x!f)DTiTBv+GP#-h^g`Wr#MkuHp2*{!f!CQw5c#x)wme_;$D+rwqrr^ zzWhXn(d^)FQ2VWBN6Y<&3AbB-X(Ao`i2~Cnd-N*9GGZ$;-Qe3Ag;IESlYiQ418E3D z>c31`ZvbN9+1<_7oc6@HBK!bo92PWLSr@&WmZN!mqDGvRio{fRj=Us5F_)&miE5Zy$J#DQtw$Jw5{h~{`TTxgyTlL63%o1 zWIujbhHfV5{@nEBHCRY&yRW6sg2?XRS>7Gh{-y@xWD?cuu|L!V z_Z=_|#>~4$8rxy{~Ax>jhNFB=aVA`i(!3&EAgxhTZ9&9l9R;u@EL`K~pi-pz*1=x9!ri zALc0BG~N-GF1xDqyMm6@b^(A^biu1bIWWikk1FErW?sb~7|fdB@qau~sw=utZY*|w zcCfe@mCFRGRc?gr9(ioc>~fAUAW4xe^jCC9m)R|NL4+^2<2l-%d>3s}Ej?39YwL{k z{h)W|Z^ZdzZy!_dUyh~RkN7}&RWCzyPaKtUtQCD`6Xp5>Tv?p^#{Yg=m(-J7G&w&+ zA=|F^pf1+4kM}oovFh%r^bL#UmCnys9C;s6Ja(e1t7|SXsz61fMl8idu(vsZDscJf zL87OSxcal?Z>ZB`7Hv0K8Sm^Dx@wiO$>(^NHGAt$LwnSvk2V&DfALpURXI70;xkmR z5OSKQ+vh|$VVs@%JB4jo7DqGtqO`IR3&Xg8AP4Vc-Malb|AmHgPL~=7sRGU$z;e?| zqnc9k^34+7`r9c;8av~k_y>Vu=Bb9ktdW%-Xi?w#f63_&?+;0dp42H$e}#+m`J1Xh z^WC23By}VpNEM7j?}Y?SB_qrN!8kx?V8bB0&wc-mlNeL^o-zn2!^tR zSiiG4ev^|Vu`S}vOoy(n=$DC{NtbTK(&;;&a_8wBQn!Eq>G#HHBzl!MSvLeBJlE~T z-B1b2<*@n0i{x*nEE!NkU-t-_?%C&Ko)aNP4+Q26f^?nX+>ig_9c_%a`K8 zl#cdxZSY7TBLKF{8rnp9en5sZiv%zETc>9=JBe&pA*i!w0EL2#OtZBJN ziAD-@AA)4;t8h&?Kwin(k|jhamz&1;7@EnN2OQV~EMd+IpQulLJ46#QI#Z-7>EB#4 zHNEsX3N;t|Lc~dI7u1Eh(`qHS+y&&AV8^8WMRU(fUpI-SDxyy*O6f_D3EKi-V;t^` zWuEs@`#%StcyzpMlbG+UL;j!Qg znS~Ke2V_VQA58V>m&G{pQnqI2-Z*Uc?%C{ZUjLda-Q;X1Wpg(fo`;|l{gdYnAuj7& z7SjTytGqv4%r~hjiocO|rN(h}&R1v_DX$Gap69NzCA^~KmtNSe%ca{GLKlhf4VV(| z-jwMq?TDK!@~M@?ui6MaF>j3uYYs0>LCeR{oma_Mv^2A6ho-&imV2|agv zh%4t}ar8OwZ~CBK5_MWg$Mw{FU{CS0eEjtvVblI#*V*z9`_~-+HXuS!OvUnBMC_^< z{>vqK1_iUJ&`XLCcszeDSN{VV0ReVw^Trft2Wel9lg|x3QkmPFT)ZbcJdm-v zF3mH|0A3=j{_;A8r696x(?C;I{ox%hE}k)a{+pW1%XzufmiT|0H5WZ9NJBYVzi8`& z`~cr7x15m%m>?&w>*i?IL`Q0624T5J1h4?tSCd`wE=y-T6X#Lxo5Exo7+GQ&G2A|~ z8t9bx@r|#`=8SIclb-MRn}~-{|3o{xZa*=5H|-10+Gl%IKqxbFT9>uDxRHdk<@PB< zZNiKeCwzh3zsm#xR>yHODcsv-(gn<^(?4>m`)?+)dizY5%ZS`{D2S!Yhj6m}C+FP= zJlT>|r%Tp@KxbFuxRWmac*Ev9Na`tRXgc@Rvu$lNuhss6NDB)Q`oeCC2Q`DGM*Us@ zA46D>O0nL3h+-lmCr1KdUTY9hKw@HIdTb$yuM;XtKP*HJH+zJ)Y!h948-w|U-^inq z@;ah!KCp~UmGP8|!(U!9@wffd6ld0(C?I0xE$V4%ArjH?PE!BR$qgA;g^`e&RLj#> zDpBErtF<8IC*$t0(6VSnzSFev@sUVxOMEIdW29|`1tNy61AlL)Vn^lqKsboa7%9L;n{dJT8-<~$O)V2^L%KmI5L7ex)^A!1w_W zDF@+6dAfzZS|`W&keTBYhg<2g@!X6T7#MmM)KE(Qu~tLsl^&?U=!t=&PnSuQ{)Rzx z3u>=XL2$;iQ+p6Z@q#(Ym1+{{Kk!?M+B8t5%7$X0EjUAMB%h0i`$ZQ^o@yB>lr+J& zdh2hoz9b%_W;fVc*c~~AW6MZu_0>LYY+t*xKa1DMoS>9orLDEh8wa%eLhXRYzx#Tq ziykmNa8y21ukq*bwzy~;$!^EaL&eY?!hb$~XkWX)@J2|l|9d2)^c9yz;^7zRPoJFr zdzcOx5l<|3Dcb`@!^x3Su-2N8Q@h{AHT`MS{yTNv^-&sQb$h$_lh9ozf6-$9V(QQU zKye1Kr3^WvS+ZwH^h69^m)y^lQkM)-*Bcsv47e6_O);^R zWoD;c?OWTm2hv?9S=^h}h=8mSFA@?hX@*?eh03x-65wGeewH;oeTX7D>A{YoGI(=3{l@J%!7VanxBo~>erk2Gw z#eEqn)489 z;{qe;%~6CK4hmSf*SZR@AS#;b0N)4!$FlL&U+n`^RdBww?NtMMP*Z#6EaesZ%SW 
z{Bbmuvb_QB0cu}GRto(~?2XW2X9-suDiI^f(oz*MF~rDl41tEd;XM>APn-zK z@TL+b`^v&&!*-|KzAP{~gx4{0#8;_j`!7a{qkxRbPJJFLKabpQ&(4IC1c~lMb5jUpSZj4J>&YF9Z^lwbx(hK zF!%ezaxLiur~*es1m1t}Ko@!nD*m}=K7>#UHp}T}VpL+=S}uYs5AxkOqzx07`;UV6 zH=VhsJgWb+$`Y88H zi4YqT7$D&P0<;yv??{$MT*QN^j`b(y`k9DYJSS527Qw=xt|z*4hgCVA7-DVSt?bxOY{97mKb!_tuaK@Ib9d}lud6J%?8 zNk9GXbwURV972T!h3M#*GBO^CXW9SZ+>*+iD-T*d582~+>~$b(2CUDQxN!hT=_}(>DR9>%$g%PCGm-wES_RE9v^#DwI zuQBjV5$irrf+)Rkk5imCSl-mwj}%WNVo9*&3qDUVf3`2EeZCxsUukcWoa6s`Zn_RZ zT}67F5*PeZnw)yCz~FwvAANvN5-%7m8Gv`n&lc^`-|u$XFZPVj=?B!%%`Ys#B7LpY zn#^hT@5j>pt~^LP6AwAvTMe!qTawCgxc#}IcN|hS)IO9mRki5laNvdmgixcqdF1ES z9`c>cp^XS`9A1LSk3$BJ&|@N8STk5;jM1{fB1>tX-+I==B>{+OPvv0~8unLQ&A)uX zpMKw9T=jvt0ijifB(+rV*5-^arP;hJHkn~(ZD5;Ow|B<;>VJ--Fb8q6qm@aCJ^Wp{ zQzcF{8xt`HzRAAc>%T9iW~k~&;!ks(n(CVF1BZgW;-77$)@<(PxCkSH*OZqr{=fWz zreSJ=r;h|aL6ogBo@a~ct5@146Imb>QZz|0Z2|Wc98|cFbUlgvaynHjzvtXnUlg+ighfc&(>aTb-^9GR_JoIUFR2c?M_->p3jQJhx zWv~Tz@kMs}q_V;HJoUXy{RiCkAF=+Nn#~v@tWZ}e1VP1wC+dTF=~K2tb~(li zigY`X|$H=+FSV&B|@K8T8V#Hn+Vrv)^kgYn_5K`9z971 z2bJ)rgH8`^F?x!eEp>XEnNqqQ)-(-H63-H7`*EY@a^NE&AsBfoR+(aY``EN8SgeCx zJW|gz<-cTScdxQ|yAn=y0Yk?1H13JIW#N$Fi;Ct&>$$mA>#1xcgcs=Oe742=S*_7N zT9(m)ILs)m#U?{EkRKrcdUm71jJEh0xBuM>Ob;Nwbw)xy66@Z>hltJF0ofj)4v>}W zPF34OxoV`kKp#@|~vj z(6HDpe_UGMprCqvx)lQ?@7oz)(YKCyhim3vymz_gQ+a?tMoiwN=XT8fvEdQ{{ByY; z(*fjjql(Xi8_tt(++r4G7$p@;?fWp`N$aL}3Hwydw_&2}m;RIAE!?IrY!P#TWEKs6 z$^*;_w8g@IQ%dp&7R9p3W0>rWJOFgiRzCM&tIhQ zX+G_a%|dr0d^D$f9?OB2ZsQI{k=n8YddY5AxKkKQJwG$`6?&2nC@7@azcQ6JoKS3I zO3ZMh_C~CC`hZ%^GV~AT~a49lD#%Q8E_qZkYfZ)7~a*JBENr|aP2 z;{Mf+?L}Rz)8db&F`*zX%0%#9|EWA~qfx1zRDlVSY;&0cZJb(Z((3Qqu^tWg7i1~! zBYpOoa(&g$kfv1W$e5k{h?p3xzqGf)GTIx860$+_P&)m-e)PsCupI|E1UME`-Cuaf zA*$PDP8{nh(-T(-6xVp#R#S~|QM{cX6Jh&HlZ4Cc*j&hK&WW6c3`8k0e#+H=4@&s$U>gze=|3o^Y73gMNfTxz3`W!)uW5#X4z5eN7<-}Uz6zxz6G5^`sZY5J-Bc#Z@pda zSP1TYS_m{t^K|kwas9VkW?oRV`Y#-c*0R*jw*FbVjok1OTbwcNwAP!hZD`iGrsYl; zFs8zSh}arvXkc9K&o|%=c#mqdIO6+N09`6sgH2VUjbfSHM&O1o@QPqzd(~s;;B$wB z*6@c!xwt=-3JbY5n9b=eEP!=gKf9&^0Yi9<(!u%r=)Y!nz@toBz}A;t03J0S6vxX6 zTYz&fSpl+v$Q*FHJ(1G&d!GS z5E*qiw{-1+>DX7Sz6QlHH7kt}3co*u!-dDOee4%%n9oFmnIF?FH7UC9y#UO(gu8c8 z@P}Wb$3aCXg~HEg(&6N(5AeBedTSQ*)liGSyPj+#@tx2Gy?8P7IyEQsOK4(Wm6GNQ z$2{Y}*59v8`1p_$d3V@|QnGP2r=

F4$I>PR|FlClsOUSEcax;vFrJN9Os0;#7}f zcVIY@E}0ps4h)kMepwo6)!dO%Q0T>vTj?A0{6L302o5jM=#&$(eDI)qzt)$TOalsK z_^9Wpc?Kvm(KF?z_vSCpB_g_!^$mV9z3pOA2QrI>0V|w zT}*Q@nZBYNUpYRo3@o6*Nc3Uu;NUhtqoAe!WQC-@oLLpMFCsBL-g7$3c~UozSl4dR z$c1rJ>X*E4MHr_b#%ai-;2#nyiCWX=#ZsatjrOP$fgd>|gIrw(2REf=9z@lWwz3iY z(an1r~7+wSJRYg*5>>tk>Oqrd*^!TUGVf2woa0k_*nsBL4aag|bNh1bzP zN_4&C@;~m{y~6(=I9kWeS3OSQ=V~neuB_H@2ys6Rf*8qDKOssDOC#x~Ewfka^hN>V z2HNtwo)+{BD~d$C?Ce{3)s4nLNj3jiJpe)y7nT=Hapmrpd)0!__Rlv=wH~GG(TxxV zb()uR2Y!w(aPcP`(C%Qmap3n6T*v4tDBK>>%TvHbMw+WOKxd$5Se~Gw3X8RF%${l- z^ReFX3=#_@xaZYDbUj!^79wG;`|h@A1H?Lz5^7c0-=;=JZpI||Znqt$P}A7HSPYV&D&Vb_QLl6RWCDkV21nu+~YiiSPT zqF2nFtzol&h@V4pIxj+@HY0u23W){~W!(b$Mqf1lVpq?E(OsmCP%}=t+vi`8m%NFe z%R3Q;|GGpGKT|#Cq0Bxwh=GVpcN`oXCY!~#mseND!@1E?(@_wDnCq{2Jd?UE#SSQ| zbt$%=AIe?GhASoFxq~VuCLt*?TJFvZB?vm8wZgMTtUX{BZSU;VLCijtm5(4XQy7!~ zjZvmKrDh53)lpIa-IT+uCFz{?w<~1XidhaQ`)+>*VB3F|lh7`F4OZye;mOPBjyjJcY11mLBEux_KHI*NN< z41X;lM-s@<*ASnNh^*^?4>Z-dzqg}DANI{u=<2gx5RHqA^Y!y9^4$bt3T`R@G8&-< z1=65>&yP03T-QkPrQ1LWP82uA?;8+MlfGyf-Z(V)0JDK$(O}fzl8uLFPOHtq`&Ed3 zFEMXZX!Ia`)9K-~@^OJ?FA-zC(EB&u8>9mw-dK<{XW_c6FCpnKtB)*(kB_5L95b^z z`!-Kh>MgfH?9dqe(#8X06;7=*hRJSMW;=Rmvwd({Eu1<-!z3QSSutSro;=#2l3T#}{-EiyMxp6>WuGn;YtV6! zB4_{KDCQ4=$N!n#1qBlk3R@_DZ%uE=J>~Vh<^x2#>Cb}1m4>Yr1iV^USe&{z7adNG z_{|VL$o7X#3^b1}Q-Xe#r>7ePQbtg?H_=mde1EurYNLYUi^mDQx=5V5Y3_cRcKJMSxw+&=MZC4K zvF1EwAxN*jDcf5}Ann{0Mj5iNChIo}ag= z64+lvSNg|0YOfiR8((#;3O)Ib(afOsX1Pj!(*4S4$n&18u$YlRy9Q6>>*Xq(z%dg3 z7YV@gJDm|O(=rgRENP0^8YaC+sOXDF+CVRCB1ACO*H&u#GuYK72 zkZFmP__ahf_{GfwS&Vky^_v$VrXQfn4i2?K}CVcTP>Nn z+-w?XNr6Nu6(^e)&S}n0+|JfYiyDygy_7vKE{m(zBEP?3Q0X;eL^ndF9aePZl4vNr zk7BSL({bi?@Xm}Q1loF}gU9(1?sl(%JJloG(tYFnXEny|KuK%6q-6@!&~ioWZ9dT9z( z0Jzo1FcVIWe<$LNfPlw?rU#7jM<49|OPC3lRh$eDe3{5?O9>lespSlURE8L3DUf^v zKV}G8Ludr)Fvvex=}W=Gw{g+RU9Q5&)ez6%-Kl%Q+z3M9bd>@ly(}E>{@WrETWCHx z_kI1(yM`4b_3wMnox8dFCHq=hy{#HwJ=T!wYOvH7Ssg;kBDu%)FIT)>?77YMlF|>x zXbBs?gajm*f2Gk0$EWXlf1Vu5VK-n6sWCtoNP#8?=o&xX>EgN>R->*;FIGjdwDi(+ zMGvlb&*ivn)_NVNd;+z1Xb>uMr>-f`-}8kCg-B&(bs?ECswdIa0~moIP@QamabWvN z-lB3b%>?diFOuEud`En7l2|KSD_zKt zQ2)*lKK>{Kr-MaIKYszN1EBOQO-nfA{8m3_)GFJ`@w5xnp`>V}p`_8hSC|jG@7zf| zI{yxMpC9*Q?_?Ut@a{rv%>^>y(L?TdJ6j&XkbDR9_*hu^H*lOk-o$%n;*s4G6@o6R zcuE{IlS9cK^HU=Z_SS|cDiO{Qd8$(9!VceKg*f&}BkB#GP^$D&|9=QtSah^;N0R_Z z&Y`OBB@N9yP=51chKY!xKk!*vZ%=Zs59NqKFaS{cB{V5`1O%ODm5(D-7nff4_GP3N z-og*wNF4e{7lST}nlqw$iiEtN#QXER-bC(<5Ei=CNtwMLhv3^pevdD@p60I@=+iVO zF#cK2+<%W%Al$mK&!DlSG1s*(GZlp_+gvAxxo$cB_BvmKM7+{r_0#NZQk~GR?Dzfb zCb^+$Zw^iuRpLxpW`WrPHP%eC%JgKG{Hz{a6VeBca}>TYsAi?jzJcFeaM#TY#PcxS^xdf=&Jge2{%X3l1tM{)`j;@KG|b z25~yp8(TM604*Ci-tgv&%$MalC;bgtIzKfX=d0d)M<i7&k3o^Np2ydX5{trzPiPA_r*`AVw7*<%+(kd&XAg2(K;Yvrh2yrtjft=IOjB|^MiVB9D zXkJGKsBM6xVPCKl{no2GDtP?Wh$*SPxTgv(doU_Bj71uGlXkrkJf#jWsC{QF`zvj# zV<=GI@$>TH&{%le%53XrVdapVD5594Z$2D4VfjAKULiv0(fbDvu+-IGDHP3t94mNG&J}K^7-H7&$|F=PqGezlcf|(qJCD zWZ&4Uu}G&Sy^1Ml)LI=WSV1Zd5)?upCU*U0T4NT2aav zpw$YQa!)6ma+eMHGq}F(<+O#V#h4oNTFu>BPzqiNp7T`Bllef7IF^w9BVvO!cpcMv z9<#A$vtP{pE%hw`aLo{CYo)7G3J@zwnhN%Hh&ZzKZ!YcaKd zH^6Q1tww_(j=R>H-UfMl)id%tx2@kszc;j{V2|W*MtWSXRVUtk*L0a}aRW;cz1loT z%{kn+r)n11?J@%&RFm9$yV5uD%u-Z>*U*$dlH6t_A3j0@w396=*D)6Pwaz*Y&xB$; z2GY4>thn{@z>;9WAXib8VE+AkcTq8NcdM_Z!1|ceH;O5&98!2kq^?+n^d)vf+g1L{ zOZf5(ul>#3u`O*&Xf*hdsK{9hYZ>`PER!R?f( z?3r=zo?H12D^PXIVReZ=)B$ZV#bNJKVsM^5X&O`B#<@whHe8vfW&HNlZd z4{(%5d(}fv z8Wc-&=kqnE?Vd0b$l}Q2jOE8xfBL;=9=p}+ZqtSrbF!^8ccviDQswucNkKPDL1C#$ z?0FcoYk8WKgd~~!^1FZYq?g1_w}f!=SgK~ezgx=A#y;%qa>97_>iD^5RA=tdnzw5( z@pxMo3qwzAScP?pL4)K09!G>_qY=5~l}n9jyG8}^4i;JdGsb(Vb$ck$f?9sxdNqiI 
zr@sb%xjrOI2qe^+5SgA=m-M594l;0c)wJ#KH$*~T%Xq0Pw=*+|+bz$y>p$t1&}Zn_=nGjf{?h1lfX61wF$ zn9s&q#vFnxMjVTFNhkWuM>Z$GStcnGnajb|&5a`=AqlteLpVj!emWX!jWaEYzmC3Y z;-4H7b_fOrLRaS533&Tp$DxFP4N@>P-@?SiOqY)Dgq-|LL&t^=`5i4>qIgec{gTf$ z)z!uB7Y@KDM)TOyz+reM;1&g8p4eDeAB|ZB*-R)Fu#vFdgj+ZUr*y4ey#Iepy=7RH zTNgHJ(t;qNgdioYbc0F>2#C@x9n#VbA`P4F5ClO=K)R&6ySp1C7jedVzweyu{M>FY z)b%`T&N1#(^j%WtjicZFSl zumq28wmNa4)x1ubC0}h}vMk>uKSG~-AW`S#yxXZJG7fq<7%Nvu@BPDBhv5DiRPRXp z%i^3qwae(NpOL(K%85(E5qK=FhxB%J0|h0QmZD1f&LbNwzO+TPV^{ED7ni{b6b)TN&y{b_;Z#fSfr4K|K)RDq1KW)?{!uikZTl zFOgU5=YXIWoqc*UNiQm+65bSA<7nAo6GTIU$#2WdvLFk{p^(?_pv_5S=xFEVr4Ff1 zU5r%z8X)o(&8`kX=$A$z^#spZuG8Je(=+Ia zr83!a}N2DMJh9jlIc1K?48FB6drA% zW);8G$$gCHp>(PEqT7_@nLu&FTjDdW-%o5MuK1C}cf#&~16RdI_M=L=K%Q*unwews zlo02b5+@_{W3-l`jv=Y$7_nFL*R-c4i~lgdKl*!fOD&ht{La*PZ59m3Y^VDg7}Q(! z=o!Y#yW$R0pTR-#=4Su8n_sBUPzbm$!L5IPinuV*fyb+I@6Oh%I%atom$f`mQIq1c3<{B}L5&@m2E5R}uKaiuBy;SpA*q zsQxgqZHKHTzF&89?NWEXO7abx%+$neApL%+>4b@jaJ=j1fEs|0kX<(SG zxjGFot)uvagW+@VotguMVP((7pNC?|LQ1pN(J|BEuZFehoa_ad;&)97Wsy07nb3WF z(amn(lFv%NMZS}lm zzE#=aX{Mm=+B$7Ev}T8}5^z4LVUxvYJ#(BAEN_@{pQ|U3@SzgMxr{g2$r|9TV0RNi z97-0-Ic6>~a%oYdQ{aJ7dn;M1tz});1U};fjbU4kBTK__rg+t_2p$A7I|4!}@+Z5f z(C%o-YITzfq*hbyWra$RQMyq%RqYJ94sJzaEd8uas)?I%;DS8gm@=Jm9ihzwTzD!8 zYp#$i_0`2!y%fX{m^<3Pt452rFs;z%k7iTV1C}SBmY##>=U&A<71a%~*Ufz`Vdp+; z?CIBNcQo(jtZ9ZlR}*e%s}(BPcZO{sd*&o}W&I_lHG~w_B-i9mT2Qa<`@X>W7Jj~8@W~#5D$E;lvBRgjn9x0{nwfS> zF#=H5gz4c2C?6lJ30Cve*$u6*ne7B@PXmz3p|`bCBOqJ0cZ)dj?ycj|=6MzMDP8-M zj-TzCm4jI`X3c2cMm4JO()BC!0w*QLeu*o4eE{_$ta1fa;PfWyU|R{a^OXT?f=tl8ELmCXJr- zs^U@YBWnnMe;X(YSZ>z)C;TL65tQE{UAy(BBjukUr2pE$Igq`hTHW}0ZS^Q_Xd5~hUuhOr1)H~BQC^i661#J;}faV96|x)LjcXFxw&H(-WhCLa2jaAQ_2($`V!q4ffNivx_fNA13#PP+D8) z*G5`Up(sTE*C#~dbSTnpU_UlfWuMvFgwy0f_x+u?__vk4B1nLOektOLDqnCuX^?3I zV22b8AKZLBypvPB#zwB(S3Miuit*+9`Pp||k;>YJ9WIi)$|TBMdd^~IOv2g*5dEn; zT>b4!X-z?|lGf{oalf7KKs~X_Bz%P&EskvA;SSRNmMOI^0^l7z_xbOeMtA8cZF-L) zTI&a?|1O)aUx8QNv;lg7yV73!s6YWQ>@Cn7iS&Cw6B(n@xYFQ2WznTWO4^q%zmO$a z!@0^~@cC0qQbO%wd;42c-(gyo(QB3!7)lS0kMzzDSE!#qUs_%k1=BK8&-~g4g@>R2 z(c`CHL4GH_MIOdH1zKgC-j?@%r6XPOJwnJ%mcY+8VrgA$M!RHscrCFrXF=FcsW+|< zqEZBde$hd+T3|D;r-%*(c5K&Jbldz+h^@Xzk5GfoD=Yh~lUVH!mOI3zeb<5h$6ILU zXL|P}Bp9tzfR91e64J$f-EscNmG@e4M9P=<^5Z?9hRxJfR&lD{5Rykl)^{69W}oX! 
z+J0qL6HUHxGbdIK$#daO_&-#FwS&V?OYNOLSFy)pdZoB(W0*&P6{zIr{ao3zwKqJG zIp?oa0-@c}L=QB_1_ySYO4Qw`r^X+W;En0|y!k-w9(FSWo5ck9?Sc0Cbu122lh7R9 ze$eJW7INi*gkp`VF9B1Q^~qpe>4)xuT)8w`&2tAqTtF2mp>{1C+JPoYj9^`d2F!0* zPwpsaWQi@XrmIeWj!)=Hpf15LW%`_}nz=MW5pJ zadgX@wE?{fN3H;N#HmVx;HBxy?F}O6$%2nCnz}faSwu>vR8UzSH?|Iu@>f}_Zj}>7 z;257?PT&(mkl23G22DHqR#B-WqA1Oa8TY#xw?qoJz8=3r@hbiGBP~;HRv>$)ytO4rYv$t zozTC0uACrv!Vm;_YD{WSU*L`Uk%i1eCys;%1wD7b#q4oq{l?Xw2cHpPT*H9e9!s7` za}-Fc8LgD2!QCL3qnI0$f>n@^h6%255=HhB~1C%L&)bSiBdJ=cjWO}INpv~{l!#iHfFQ& zGnHOOvY}F*>hh`UEjSv?R|)Rn4(RAN9~D{)JxdvWByq5x|9Ym&Vl!q*EPQdPZ6~F0 z{jA8$-4Q=n98QK0Yt@z=XNK5dxn6us7`W$lx-c-swJ<%mAZ}!%Y!DBr6Fi9VC5rew zIbWBP|7!KoOY5(R$I8q(#L5{YA={oft`?dGczZaGst4&kolabF14jXV9-+s}e_2UORSJ2*!e)mi5d8q*MP zzkA$iWq}_8$3R};R6m7KdUA5pTbg*pVtf*S&AnZX9I7vnRVs@h_=hUX{7E7fjkoaR zp!_R8J^G6K|Dc@@A#>2xRRDZQB`W5ytg6(z3pxZpe%&NH!&78lG7{I`I|Ck9jF+nO zd!(O!88B;BAnk&ZzYGb@pM@miP1a-@)KIFA_0%b0=0cgEE4v>ixvP#z7r#SjO;2LZ z)ktiG-MS9DU)nGHWIkWBO4NU}fc}2)rjKymQ!yPr6P0`K}dO(VQ|a7@;S# z9$4&5X;-3bTvV#Y@S(=XXiyuujnib=M>Sw5|odG22xkBLu z*Us(Rw*?@K>3V*9DEbZ zS-CA9;*^og!AZc&PXedbM}j;2BxHu)lcOR<)!d20>%twu#&~r}|I7Q=FJzL4dl(`a zz(>B}?Z4m*{}JPok0F**sUz*BfDFBXlF`MbZ9UutwKaxlX(x*=+dN4%AQ?b4cXtaL z=|iEIB=Rwk_EOYWW$pv?$k1f^L)TWheO(KvsWPSC67_Hes@O!mMM2!YX}Y)qx?NEuLhD^mhe%8d}zG zam|m#hshiM$qq4=%4Qf-^D!m3OJ1qC8I~%}&4fD)v8B=cERh9ytFs7K1ngbcyvQ_c zKfkFze&4=9=Z?aC#tX{~44ZAasYig{`M+OTV8bZn* zV=B{Yg6Vm(>`BOSfu@lIpe-X@6BDpjpAFOF?U4jUB$}D@U2Z&oQ+Ii%{Dkabu(C(? z$&}us`HCs(EG!|RVaMf9u6vbjtB$}mv-GZR$jGTT z$3muh@`JtQiEJZWjt`kZ-tEIO9|I(Vi~d|az2PT`;C|_`?`m2#_J>hlQNeI0r3Kox z!F2T!4F#FI4;nsWwH&e6`WIyzd9pQ>71WBls+Vk#Mo`m#eSgYUV^-yDr=+gip)(L> zSZL&)NWM)vkD0f$St`lLgna?(e}20!J5Q6%n74KWb{9T(H-PTen6lt>EF?N#nOP}7 za-26~>%GCgwX)Q5_;m#aoo>sY?s~>E-~V74tI{+MLau_FG@&{>qCkJ&jIkFMGZ3MAtIg|Nd(Q?*~2@k1?4k$OgX3 z8b5n=2c72AP>D$i`yP20%fz+?8eSkB|ckL$kKRBj+=}}^rR?DbLvp)^JmEQw91^Lf zD7HfT3>tW0YC$&BN5sS!nuG-hFV~*V>H*RKiKPF8Vv%-l@Z4%&|_yTt(NL z`#wHetf4cS&iGa(OOcxz%y*RJNNG`$qVN zJ#^NYB>oX?1gk5LiNkS*0CP`)nJcJ9G^@T;nillPKA^I)7-+OeRsX`FCM;a-#_5bc zUi1x9joC=sr3YSy4#dJXW1+WW075%6X=WewDFqkT$!ahsff(CUas$}0T5AM)Yp+p! z&Bi8Nzx_#%*&xYM56F|MjA-#Ad=!gkRP-tlEPYLte&DExZw!1xxUKHFJ}(E7x$Zgk zR7-4kk@%wSKU^*C3I8|+>Q|hq7faG={Ge6KX%I5vX3nP+4v!=JUZ@Y3_mI!1B+&)nhcF?Wb z5#%i4b33-{d1G1j4Ym6IQ1egpV#*_<(Nv{W684F2pX+${AF!33@=dcR*?aA5r(`H! 
zX*u${l+X|Hol5FD>xS&9L#xb>4+YIKTdhZ$DMsRJPog9Z-f0R)5I{u!R8I5Z=t@ow_Sy^K2n_6d5fp*=R|^zV1?lgj;?`fvPGuTMCna)~ zPOWjK3G3Q<6%5SJmIgK1RoQLo*Op;64rVGcPV?DwsQxuu&|>zdG`hB=J(rG2i;8dd zgR+|Q%2XTohC7V6H_*f)=u|2rlEQ?`x^KUa4{zug(EBWvs2TLtIFK6mVl>fzq;t{a z&K)$T9mVRsNJ>e^ldUx?S$O4&a^zT6R^+Q(!e*8j<&xQa-}9)X7FEO@9j)2wwm%&7 z6aM<_nODQcOYcG-q^eU%{GAhanxxR5Vq7z3ki@om65XtZqXnS#+cR-jGQ|+pzYWwgWF$ zHgECgaO&qt?U`!^-fyISi5|DkSI$Ebo!h4?u`7}0?PXKS7EI-d8`E0xMZ2G7$Wo?K z#%oj1&^`;7%zV0;GrB3;5_{Hxw>N!)eJrr4TFQNF(ixT1;_Xg<1A1W9TkM$Ux(`}gn2ltG|Ci}V9Fo=wrG zdo`UW6N`^W5#~zC6byOG8a+`DkC$|sWR3KlG!_k~-gM(ZHGlu#$m7`T(R=l0=^+mZ zIfyl;N}Fb6uiiEEgm|JqGNnm*;rnCkV+FM0)lFO0<&ThhD@j98kv$^VSx_ujS-l4D zpR*t`M+We4y_@S?BV4>cY8nZgR5mkw-f$_VqLNde-Dpv>SwDc_mZ{PxI1WI}?g^>Y zTyED(t&7tfhk+$$iSnIYlH?b+*4EsmCcJ#2c4vDk$L1((2lbOQH8z(?x%pQ2^5i43 z-wH46S8O(FD(SOf>;yTzRDHv^VoIoYyC#XOnjX+co^8egCVM(h0{m*ToIO=?<^0?k zRzM-|RjS=!59{COW)OGhJ;4-jxjxmD$FKUzGuypIINDP{Fa;R&0`dKOZ)QZ83o$V< zSv$o~^!GjuR!L4@H|+@gS$w9HtkQxkWOto9uwM%Z@XskFI9E$co8Q^q`8_wV_KMLI z-5TdpOD*Hs{s~U}tTo@!Zd}{oYw3;`U27hR=1sSEEnH6xoNy{kH8=35J1kOTBZ?~xqnw8l=RcVVhfXYT@oxoxI{#Lk;(t@Mdt<)`xK`j)*-3nvpy)i z_zQsp{>nG*NjJlxh;pSM<(unvl)F!G1ob63vT}*U`;YpTCVt|`;^K;47lk&Kn&otN zH(JeLT&j1w8{$iu zk7m&CJRo$J9Jn~fFPzh1DB*H^SztajDX6iv6=c>H8nruVF!a-`|9{KzBtb^_bW2bU z&nHGH>9Wt?9k;lqEjOGMO$+DV#?_ZN{M!vtFEPXp%iVPXeNO&eH7bfUo$C0}@#-R( z0M_K}43CWKn#4<%%!qs%8>?ji4mc~Lh@yzbLvm3g*GY%$u>zh1;47)D<*h%NjV$vz5w7NwZ>tSIJ-k}j zGrzFFtPg?O!Y(s;#$skOk|C~lC7!Q1I51n&*z0^6MS%cOb+}p`Ix}BAEN+QMSTn$F#f+R24K#wIUZFf z#DvJ0V44_SKE4Eq1cJ2K9_XNfpAXapNQc_MA_9SQi#J-X;r*(6{~gUD-tzL^;wk?u zIiBZWiZA9Z?sF5zif=g~1)b}PeD}OlT?pYVdlDv~8-2fqZHq&7C^czd7HR9-xZFyqGPV^xrmB2`y~l zIU*LYtS2k1b7Ni^B87|M*^7G7R;n;HntzXylF{9c392Oz?}dC@>?xBh?Z&s9_H=Pj zXRQ06YBIUr@2_kivZSVW-CO14I6s?D(%X~x(S6zf__ep^T(&&dX;F%Vpf06uUc~SC zCqst8n&B(y)}QPPCj@FYOt1!9zJ^?p92+?XErjML2TCgRP4Kp5<-&V*Ry~O`6gS|6 z!lFjx&U1M8{>pubg*bqwq$gwcNj*hyTQPqx|9w;ARu};n%Z`P(r%4m#ICC70!;6Iy zx735R4dH_?hx+d$Uki+0u=*Ly;Nz$4UUIA=47haxY5ZL%; zTEbs_dSt&qx>vqdbndyE@TaAYkk8%I{)wX@UNR_n1P|>d%!UHh)GkW(C;l3^+U}EJ zZ{pys;u zP>T`mDk|Qgm7&x*{2?9qy2bd%cK@eAKb&&Oy0yVF<`DPGhQo_)=@;QQ8Foqe^Q+=b zXM5J6!GD-PWzi|52#8%D8npY0RUEH}8l?)Q(QyaARisAO`}EA|K$=cY(EFng%9+r= z|L95Td^l4`xlp{!+7BHzl6b9vDn5?fU!ZF5Vy>T@KSTm?4YITWz$55Tq$% zbR_iLF(tHa4}1xH|7DN)yiILaL+|K2b?Yrht*5CMASVjJ^r1pMs$akPPEdMs|95&? 
z8U-U8{BB2nCm+w5$3|lEbaXG~ujh8xk{bB3Y5^4>oGty`S{h%N|I46`>v*PTEZMw$ z)oS+>u`}lnFXg+rMJ^}X-z9@+zuYuf=bd}=6In^GGsht2+cDfS;Agi)x8(5_ePh#m zQa)Cne35c|bKttVrHDU-(7dWk+_RN5KsS=^g_q=8P%D0??q11*OS9dTkj{|H8bhrq zlGE0Cn}9&7v54YMsq4M9W|G5{oNNv3UqGr>gnR7YY$>>N53i1p5J0E#sxZgO(r_Ttv#lH5AV zE()H4XZdgOd~?UFE=&*x=fs6-9lY|*O+$~gEiJJmCG}NN!&?o zV=Y8RA?z4Z$*TS|s+9EtsV`348dmdqDoiz=^gG@PD|5yVNvmAV>;!)zSQ7L=XYh3t zGt{D*J<(Zj4t1G)Z_kc9=tGa2_j= zA8-1NA0{PbpUzfuOqE58*Hia@o>KBoaccd<#zB?K ztyv#b<*kQk&KD+0B%F(DU+;yI3x{f`O0GVVgG}C+Ex%H4E{{ldpAHkc7@Ty^(TDte zUe*78$Stoy`VGFQ=xfpEn@j=qre-F?CSTS$s=DwDR&9Nqzb>ZRy?*$WkHAh%J}5Pa zywBb#M)5RX_k4Z@qBE?MILhos3%Q(YQygZU{2<_ae<@5@ZF+QPpu2pc{zFpJiU)RZ zAZ~{4AOGD+3Jl7^n`1udFqYDa;aI6mn5dhRG0S!78%j004r_>lHb@(?2&(2PDOa9y;{el#} zaa5~)sbN}$M&PQ3?m5*w3(qEx)SE?V8^gY`8kWmr<-5x;av~%P~)%%*!#_4yux|FiA zvS$|;KTe;Ab(2Pt- z?CgBRm=ndgF_rcrS%goUO(6Yu`hRm_Hx}i4ZB)W+l~?*n9Hucw(H_r_x5kV*B4{9j z0##Z0qoRLtF&kWMCjEb{TS#3lkH@tC?M%hixgo%(i{b6#19zS-d}L7Ll>&_>!*RF} zeX$F<1+9n97ymZEp-^l*Kq;MgN+7VLV5B{wAsU;O?;~2mpp2@RmJ3gZRyL^}{^bFI zfyKpD6C(2XKej*Htd7&#F-Y^inoSq`LL!(zz^DXOHpVoYh4)_nq*|P^-9r{gEvkh8ykUnTuf+pS!CDy?mVE^7`^ zhsZ0g#MGhhF~q4%Z~QbToVj_-C{Cr(i04idw`ONDaVM6rM@H)o@dLQpVfA>a*NU~= zDYcyb#?SUKCfVuCdR&u&Cm-TEXP9htPNjq!FD_!+jelOsL&Ogop06Jy z_Kz;8f&UA_2#ad$?d&pZYNmhXvzW}yM%Bp6M@b7YKEBDEwYX6#xEU%7?38%}S@0@< zjOBNjW+U_if7<`&R&6z6nxvswxm8fjLVxlC^>wZokye<$W!?9UvV2hs6}d=)`X%=+ zW@W{sM6-NpC#S5qL*na)Ak2h{@y=oTk7#sEl(w53p|i7z{}!6)H7YlM<@ISGYSN1% zeuoLY$s?Rv9mplRz0|5&&2Gj4s;w<6**>A#{KcCs?}(kLjwHU2e+xL_@XvjG?2L~} zcv*!!WdEq$OU8bUe{gbQc(%WAhCoDWD}3!@F6Y7GwA$O{F&PU96(l+}3y|mfV3Jq! z-Smq#`s()P$_L(agL6;%tg8=t!NuWGNA#p!!iJo!b8~Ztyc~LE-sPB|+x7{YHWcfL zh~@Rr86Q4;TxQeZt)tK{IBXp<5@R*isokvXQGAS(nvnmb<4qm$jYx zL@xPf6T>YAh8x1C9+%dPjCRLt6}RLBkMZGbT7r@w17HD>DO8$@osqi}C4N4rIETX@ z1n+FP?2_6&<{`4x^ROnA_KxCnAFR* zonIVhiIl<@Z904SkGj+A@fqtjGCOy*lKV>PD-9>6qa{lE@kd9(RD0EXFIQK-9~?F9 z4M^;h3(xL`S^)aV7xJw*xtfR5K96Rr>dOgJX7%a&@Nn6`QS^4Eyt41!EyGvWwZswR zfg$M|t1|cnYQFNRK@h=KQ5H~)JLOHch>z>FM-}$p=}x(lTh+eRWm`yf-s5xW@S_cZ zUXR0o6}yeyndKKZ@!y26Jz1!itMr2%?Ce@N1xQm-zeU_Mg(*dnj(Tk%Fs1U_kGwE} zL$p5O8vzh2L(>e%MpPir5z3bDy&FA3cB}5y-^?CPm%NHdd+yE zmhA%qMuk?I!%UJaxxnpet2HE@R-EUN|*wE-|GOS-E4AaD?CFWK-*O z>MzDJegE?(3es!9;LChUf0cn_E|q?Jy6o-Rb~6@W zYqdRF@D?u``mqGpL;|@#gQ}$!s!Y&bb*tznbV?Dw=rCH;%Wd3eT{};9L`d-c`+RWl z;Lr@D7|Lk3zzeq zY*KfxEkCpQhVOxwYW&DYam;Yw9OjUnlVX#%)w6U(6j6&VcGTPXm<$2 zKfVnKE5Q*-zCfJ5oY7g0~TtcI!%bdFHtN3DtP zn4&mDlLTEH=SySPfrl6wCW0M?gN8N_fiAz!VL4Jo_ zj(`(uy@3~J)26{6BWcgV$jorRl{PSS2VI}tl}rtCf#607uF74z?C3+AL&B*STQLxD zd=^3yL>Gt*cLvCi)pVGyH&s~f3*rP&R3LvuY}lUGy703+vj1e;^j}i&Gu|iWuP9RP z?!EqE&5FKn8aHE*Ys0(JAmDu5c+^aO|3wzxg1 z7hr8}xN$)l3nkmywmpEv9h>V*CtXzR9|L!BGz2`N`%<(DpFewc`)@I>Efr7`J`Fm5 zbzY{8ZOPd66WrI+db)vw-4qv#>iI!QeD-usZ#?{6#!6TI>U-*d#eDUSB%Y39^yda^ zR@af03WkPwCnr-}Ya;=CDbr^^LR6*`N~0(E4UgW85p5ToZiL8{(CUS(91|hxb!-qF z?~K=YB)`-7*Zf0EUjgVZt6ldF4Opy_)*WP`sYZ&PGSIO8aC)-=hxcU!cXPnrVWH_MwVt~v7S*}<}_ zZu^K{+5^?OzID>Ui3JpqksiNaDLAWjLJux}zc3=l+|}@`?oGCPvc+S>BcbpLUvDxv z$NH$zwO(ucf>O@TIq({r9s!b(Xri`gUTECk2RLerKK&EBt{G=1DfaLE~Fwh#hHSLtJCb$4h^W} z`;qRt*fNio|F!5~%gP zCQq_8`~BI_(>SNDckYNCZe-^t39I@3!#!q}JLa2cD6mMRU~xcE#3><2=>ThlMOS33H+;g1cdpw?15rEjmz{bxmS+|=fb zc;a8K*zT`dB>ys<;cU#1sg`GDbLi8kBiT=uNzH3-5A5bftzQ_^tN)-O|E~PEMX)7* z%EQx#f6@u_voj@BaMCX`V06$uB#&hNbo&;WH{U+I$xK?PCojvZH>Tgk?fBc{hva9?3`#+eq2XmEQ{Ef287Yiap4%iSn ztlIK@Z>BodZfo?QEIDM1#J5if5$tsr^+Ub88`OoO#*H$g-KoFX)ohn*V$IBU`c8-JQ@OG12+aBA=y51`OWR?o#-HHUVBka)Q2SkncTC-sSt%p8LI^ zBV6z$a^#{8+|^gJ4s+&@$O{#eK2#WnMDvvYQ&dO`3(6f3$+%c+99V0fn@fe_OLpgf z>(Xj1$;rvEs#ID?jgylQVPmkk9YzYUR4Zvg;xWrho^poGwvdyJ?jz85;NvJuJ 
zB&B9iZ61OykEUg*Tdo~UkUXtj7eumlgvhZOt&xVVkS`@IkfNKQW2v(Se-25N|%3+fG6b2h=|wLXjkJ z}PbsG%O?5hyZ$z*s17(EMm@q0>|%mPM1t zdhs68lVv^yUbRFnBtZguLt|&D_R3@$l6Kt^U8%HPzS+NfxXCHXZP{B}RAkIS3w9qY%=i**-)FY3;`VH{1FW;p)gushuW@J{dcM9IL`v3(!V5n?1%Sl= z5%&Q~w&ybGRx470qxnnSK`$Z>*4z2|*tEz(Vu@xQaDGp%2^v2nc5t{A5gBQ1%Z~mY zY(v*>2(*hi_6mDrOF`{dw-(6VRF+!3f!4n~44M(ra~GmfFYBKS<&RCLYp0>X&qw$~ zFsr{KdCdKG0=HozzMp&c&&0Ns#YBW>&c|)6hjjdo?F+63e%QPxul_UW#D2Owe1=ZC zLE2MvebsTbnn=ve(!J;R`6WecluD^r$DokT-&-jzyOkufwKKuI+YK5hsIN%y*Y_RG zUwJAjqP)f*PzbR8!z)BRY#8~&Tt_`g6v6o=@q(j8bIoGNLhfxMePri$T^}+60oTyz zD>^i`Rjbz8O55&`nx`u;WHzdJLBRXU$_k;w`;hDf7y^x#4M-4Z`#_hNi>3#6w<}TY z6g(Rii~=&_NIKza&rxR7IX}B)ZY1@-1B=d(Poj$59D~wq`Cvpg3!tL0*RCCgf?Ki$ zO90_$g~t&*M6q4p2jE(9vM>#K5$nToX(m-VUg7lO|J(qPdf zbg=S#D(JiqyXyA=F~(Wkz&gI(FgcZ+~t~i)OgN8!e~% zl{Mb!oM!J%MKT(4J%qiDuE||aw;N`fd?1eDWN)qy6uIXx;yT{I-}3lmiX%AeCpCS* zprFfuukV7LG8S4R%@`vQ5&teD!y6+b*WIzX|BePE=rS{J8DCzZ1BM5@@yI%c1!1}} z`=dJWlg$!{=(l0Vh<+|e=1<)9OCN8IWW-yq5~Zp+rn?QLs=DyQ?7URZ4MU~VsNISy6_&x zja#J8Y|jZcxu``y$dyV(aU-oV^-v5iiFE$5oU|2RA zxd|`TlI>aZoW2AbFI65YyJfs&yh5v7j_`!0UWMOMpP){qM;%MuS=Ctr@%@k8=Ww!; z2JUNzU3Kmg-dwUXq1^#V@1$K3Y6bV9R^q9Yl73n0?5eGv-kFIdfP`H$>%|PD6SG8Q?u*{&5*d+{)n>n$hc*1$Q@e7i$w-rTgZxW z1U2syZi}Q55FSnb{<1M>R$N!L3I+lLD77OMc2C-wDu0BF+W-Kj@pJ_%oUe6RW|b52 z%Ycu#$3u|`Y;VU88lZZ>|fvY~YCMHRJ}e&thGpS98YVA$ox z4Ld7~Z3_Z2e(T%@IRrid0k{|fm2xw)v;D(ZqVR3EhS6`@$eVKKG_%Gp)$+z5qXI!j z{NQQ%tReFc>VSy)8(IX^xGs{udosxW^IeNdOYV1uKqfsVhV=PU0{qKs{?^9E+)d&a z{*;qx6n9#~s6fQh%(yBm9b%KHXUA8ghf3w_GL_tQ8jfpU`|w_V%#rPyTCHV!K3|*~$IWIekDeKDY~MX=yR3{qZ+Sary>2=?TA$xX3J}NnQ$>qOS`FA z&#fs^JsS6AJ7bm{7bsop4)69fTA#RK_lTM98CZyeKMM|KX;K--0n!s~Lj(TSEY%W2 zCC+a_7=c#d8fO-N9WL3^2}sDN+bhaOaq8d+kb*P$d5WXAoWPyx!QXfCqQS@rK4CDnzQq@anQ`6_;|rL8Ch*wZBdU*oE}IQvp3AKslzO-s6}~@zOTIXRnEJF z{q{#%IFTgJeJ_&BmdxV)cQJ|^65BSsz5DOqSDn<<(9BUTd}?4|pl@i1^wyUc_O#dP z9Fwyf2AkA~>(OMts8o`qTo2fId_8Fqnbr$%Vn2#nbv%X|v(sGLRBCQsFkm?QFMPG^{*?ww6WvZ&Dg8oBh=ak8MD%(Y;mCjCjVwJD{@=n*{d+gY1vH zX$J?-_H&+F7e1Tq{@wICsWe^Z3}G2LU-KJ9ETaA9;Cb~kLYq&|?$5Oixivc0oh@Pp z1_kj#Z4`hPMw`Pq_Fc8Zgtc0IFu6dDQrcmoVIyS!1GQU9!_FS*tS7!C_|F|ny_}MV zcVRY{C{Vnqv4RIOE8~I4zrMak^Y)}f``Y8|_jhCH<@Rg{3lmm3HW`cRriF3Iccsxu zsmj4mZ@p!Q4`w{~RBRm4No39csxEbVw%&bA%3IOeZ1{h;vjc}VTNRJE?!|Y=;EHx; zG}Umj6iawTzPfxZS>DH!jZMlb^)PGw^F*X=J`@r@_4TPOYmW(mU4z;e$hdm@?j1ER zZ#*PT50#oJH@o6yzIg^C0d(}Lb5UGfU4d2#fGRlH%+IfE!!_SZ%QTIRjVavD(8fxg zIe5}%=`yPua$<9+3+nnW4|dd+T-LKuAO><;X^%^2<|t>zS`c8iMNw-tdFi`_D0!sC z5$38>JnA+v*ssZqH>ZYo1;50T$6{6$FOS^pIVrih ziNMe0N2vU=0HWFcL!D5lj|LCO&9_V1?-M8bK zinOi}>IM$w38!ylzn5@wt)+vDNUjLILhv_h3~4JL{W@QH|) z78bk;3-^Q*l>1GsO{e4h4Z7N|7Nw;ec11}Z`}f@yA)`%+Ebik?lH+q2r9D@6QX@9f zcI#mfDgPkf02h99zMe!73`ltmLS3gDbWprN|%LCYf?fI;3!yLTceW1Q_GJ* zhF<=n){rUtJkbbuQN>;z!q{dlc^OdtXoF5H6nLj0T#1sEbzreKH8HjgQoJ`NN}g+7 zU8o94#8&NYDp|S|jVp$Y1geRbJ?+TmqRlO(mX5*GDCL#IydmT`GBdf#!*mQ*fi55z$15-IwZix)(A~bUbGqEsS?UIs`aO2*GNguXa*2h>Cm6b)7 zb3%XAn8h=~L7=6+dI6XllioKRa#?qGp<~U>BSdX|WJKhf%A%w)FLN`4``0_;maca5 zjG;uN&l?%NCDa6wRfD2brp9Fb3Dv~2EeT(?^3J>pyn-!0a+ADTb(y8g^Ha>>kT{*z zT`6GWKh09~=mnK_-y@Wxd}knUnfXKC=%Too31i65pyWx;eEmBqM`O z_W98^A3y)c4>l`L-!R1b{SODlg@=Pxg|L{m*f9O2ZjO3tF$1_Dt6u9%k84+CYMoM+ z2Lnqtp=rGYc}snudQRoja_JJoGoj0ePC@nM#B1(mI5m2DWI^}E7Pb>DBg&Uir5{+V zWyzqYBG9Xxk2ghsBzE;j$HXjdZb}tu)vCbw0CXMYvu6wB>SCaZXQlqyKc-zWO)kOG z63DbPS(e;XwNB?R);Shpz)DuIpF*~DyZIW>(};%A^iGm2}eLs5ZRjn#v(9jvIEun_n&4>#9kCt z_vm~?1RkwyJo02HpRp%_x7i42=9r@d)>wt%tz$0MGw0em*)dhcBt9FnLUflqMVrL8 zt4p_)^HdrdeK9uxfGeN230gS=D=N(Ua`N)Hu*$}4arY5CdZb5iqk2{TSEI4;;_)qV za&nkb;N^sjxu^0}=_7Fk6Y{l|8M@Th?*&RCj8rGtFxVZJ3b{dFr1Op>vaPpTaE^wL 
zIsq^Uad|OZI4E@#hUr|B+zKkX)f73>Q66ww(#3Vv&1l`&a|!(R;@mK!`$d=TqT-Nn z!L_oMAzVVYk{&xUEoi#|GYe8#rxWR=bOh)1!5zeR*;CHi!dAWw5KWi14Gle#lk3~6 z#Udf*G;N0A68e{an`)Pu66C9m>^^y&x%=M396)wl&|Ex!5+BduGJpGUF?Kwv!p35j zw9sW&yx&w(My3UTgyv>ZsTa+H+9r7Uo8A*lKk&t#>94*M*sBU^ob!;#$^M%mM=r2e z(X^luNF9!t(LJnF|2(4Q{*Uf?TgcCjU(w2cl-O6b^C*IwRr>2voC-n^1ad_9<;X zeK%ow{`^kXyJtGn~jY_^)zQE>np`P|MqK}`isuR zO%Ln;2guC9)2Oy6>-(G_Pw_m0rf zmCc`L%69KYxGc3Tr5L5p4d=M?giD_SBwFyzfi-;vjl;WjGQHq$ETaAo|IHX`-D=Tj z*uoFi7E2rxzOSf>!e}~@f0bevFF(Z9#-6Nh8^~M`*Cf|g7H+s^zz77IH&5#YQJrW@G} zr2n(#Co@u41^eTUb^@;f&8wJk`VJ7j;G|tH)FPj10!jB?OFQwrAzgJXK*MBMEXx&j&;6juG`(baYXwsrm5^i$xE*a-O1I`sIGy$87U7jWA|zb)-bE>~$+Qm%udWX= z`1ig>O*@4;_`TmD^-6a*cq3)-ao(EkOrr=VN%+6zWBRnKyyj@SswihRbb~>XO5}h( znTF^zCLdvFT6oOAzomZqvCMjs8#3Z;@S1Uw zU>&ZHVU}?OZv?Ak$sn!j@B#TaCg<4=4aqQ}ovv~huJe5y zx86(;)1@F&ME1snR4h-tCw73%+avDi*4p35QNE+rK>~GxNR@a=Reja+#)6y2o){Y7 zkE*p{0Bte0`qXR9$#3-BqxQUCjf7=0_yAd>Esl0Ww}-_BCiV{OaMSv|5t5u z{&kP>`hs4zS88guSV$iNo&2KT^*I&v2T0~GTqJuVvQ%8wzsfcm8;Qzn#z%O4n{w81ps9^aRkPD-wbcst!(Ke`oGbGQ8?Wx#2i79BU-XW=bXf~mH$xz-HMq-ucM%&ukgXSQ zTG*;eX^ZozcUYPhX&~&WPp8Xt+3emG0{G^q8ZKZWIz4 zW@TnN8gaR9zU~Tt&=3FPUrZ9xnS;Dum9P~^AXPXu1grj_v4I9A1m(HtrcE!yZPu_1dmN&B;KhjH-~Qgx|G6yk9X|bI6hc4yc=h?sT%`);Ruf5r zFOH5BsoH&lb_iW|7FwTQ_8}%@=?@o(v3h%Xu8t?T~dC=MrfbLG6=gx!klJ0mfCCM54K(1;Y-kWcUh>e4f|Y3jOew(7sIZ+CdinCDIZSEL?@|2T z>!xg9doo!M)9deF6^WVqtsgsSNUIy8q7o2*FEJ(0iK&H7Gjrzk(0*3GV79nayetVM zBz#K3Srk!@LC~}41k@Cq=R#!8+ON`>72T4 zD|`>usy4B4`qfNth3O`xgS7-R{f7)RA7dD;+nqoWXq!J;XCNtJ(^9c=3o}(ueJu~e zsfM{FmmIhFA(TrLcu#46FqV%yELR2jX{e~2+KuEYQ^j)Vhet$2yfQrla3%CY5SqJm z(U~3mugFwmCzp=gP2;3&A4(ZNnn8&W^Rc%zoDbV~EMvqg>9}hv!$qcKHEzB>fYvO8 z45@Sne%8=qI2_Y>w)Tpk`_8a2DGa}R&#%0u#agPKq9?s{&A~Gj2h-@|7|qA9=<5QhgLhH+5x`oMRaR{Q z{%yEl1|^&me0owU*)ScGl9pC+mHZ7~cr0v{N=`|Ey(9Tf2!{iP*1w~^9eo6a0!y*S*&?G=jh_tf8$y?fd#ymGPljM>vNJQ=avF<@LLnWi6G%8alh-^Bm-pwop#kqkI6Y_T-59;Ez%Iod zR~}6uWoPI85sP{^=&$QWriK#xD{_R76KRe(m?$(fG+K;#OKrcX8j}uC-{g&9kD|jt z4d%Cz5lRa{+AE}`bX2g96U+7@aZm%*4d4_85%G_>zJv#?xfKhGxbGVgR!PbNJ?3_K z*fb2}y!hLUXl;ZEJ`D6mJ(?>U`4LTy`1Dr(zkJI!6pGa3Sj%izsgc=qJYr%eVw^uq zrY~1$%&Xk*Hw1$G&NNrnS}^R^(Ox51R+qGQeB3=a7`DIf_~gkGC}h*YQUuch2Y4vx zbw6Qd6My2%V}zoM$#=TF1Fz- zi@bwtnepR{^65f*tWgNRO3{!*j`vvDGPSuG|a`So7E? 
z;j2x9Z2m7MMo97xs7j?Nn}UFI*^HV%rZ5-f+P7YtdDm8w{DUjQ=(IrGmYN> z)du*(v}#+%-`8SA(aFQAu&LLggX1&%S+b928_8|VnJ5vnroot{w!E=)eJ@qPk#gtn zOlRV%-tmmmuOiK#gr?u@SeddHjc?bT9gUow{Pe_sxLCi_lW;(d#-jco#PaEP{86R7 z<*}%dM|WYG9Oz^baFcB^mCcAQ$p1Ym(jair;Ngx<@>X?8W<1tYvnxJrI0&7xvcONT z(vlD*iV;^}tSPUV74_Y&N7~DB4@_yRiNwAl>;`m1_yacHGONr2Gigu{pzzKChHoDx z*QMETYG;3cNKsJ{i+cqxCUAqm_mT(qJHJP6ef`y{K(B# zj`DYEF_$n>PyN)lB3Zw|kQRQsoR=kZwtf^l^H)t@i{Y$a9T;?-h_>l5J+7TaeTBNP zZCfsY=`R8O&8N^%I&N+}xbtJ;N32!s=9@%8o~q9%S~Z2d(cF>O@89s`Sj%Zoo&MK67HJz5bb*)9&N1PCubL8lb>bMtMRdW&N=ddWWW@lSI@9-k zr+RtDXQjB|6c{7Gj|}Bpbca{fyB_e8DJtMN3rR6D22NO1FQ@rjx`52jN$)*sxd%$l z6Fo0N$-+kC5)?0T3f`2`i$5yQJLar5Zn6jST&)l%C-PKJlJvN5I$-%tn!4Wx=jadt zqkeJhDKg>8AvWjwR_5vQ$0PGww>CNq&70=K@pi{AFZYnA+4qd|BoCyl>=(xC1 zP~(N>DGC$+6kB5NPEq8_;Z4E}(sqWX-bhEQDsyDtMBN5TWF!uqYUk?Lc0A`X=lG1@ zNShXW41S{k8QQtuc5}@ErjC=2p!;sRNz~H(#k6r(gQN@VHHp>Z@l1S|L%W}8oDpS0 zqQldE&4b5*93KH_D>AZoN2aP)h@4nv@{|9QCt5e*hi~HG(vjhNtJR6tlCgVa3dGs#as!n!=gacVs>&!S9Wg}MYyPs#z zjt#4AXvIE=UFI^YZb~jXTs=1-x(gB#AgeJLQ^LWUSsA2VGSElkv`;|ppx0m$q6>80 z>(Q5iH-D8mF*1G>ld#Mk%646UHa@Bhsa2lZehpbz;b%p3Dt_h6MP5&*vfIy*?6VT1 z6{I9dV3i{A0!KPIiT82PH8YXv$ZQyBMeUEugLDfUv!3l4FmIUdh}^3vA`c@d)qh8yF0sfIuR$r?#qI$wt!irA!$E z-kN}5bu+Pjq#-L-=&wsqZLEj2F)P^7S|IGjDd5PdrBWE|L`WFZ4-Q&?SQA3i&kYg+ z0}V~vzP`D69`jHsenCh*c*fET3eA<+clgxTF-w;Jm37?V(0uZDThu(jfA_(^heK>y zKP^PEY2?AN^0dn+Li@d@iOlUuK#g_=IX{_qpI!LHa`X_EtYknh`ALiIv_E!QG-vU| z0jvJ__jpEQzGN(yP>=t;hbya7dRZfK9+u3xU7=MNS`h4No7Z@eny8S4ss5sn%f#f~ zgxc=;T?nL7hPlmd|H1TsJN%j2eclIQQqi{bZyGl_WvgioS%2m7e-(Cvb&?f!`sZu; ztxp{DBfW!1FoONxOt9Yo?!kL4jNQyk8ubz#c_4ONe|ZgPA0#K85hxvi!}^k%8vZ<& zR#5OXF^0V;ytb)Uy^vm1G$l9HGZ^rKk$GN5y?}~~#w=NSwMY5yp(dm9OD&aF7=*sA zFvW&f8mrC?XJ^MfZH9Y#cqf33QYw;Oy;!R=&F@OSFi%}%^psSlgi`*EbjFH6So%UO z`!Mxgng>G8H3bvCqPS=y%ocNfF)YQtZi5-JtJ&_RU~&0XWkn2&JD?1ciJ-LMEd@Jk z|2KV#RDR%JkfxaZ+#7qHHb2*lJVvkXq ze6iY?k?>%=?&E`4R%*N6`*LQ(i&G_RDEJJ&r}Cr1Ow_o@zLZxe(Id(^82tOL_hgaB zU>fz)bynQO^s&f;UPjmAoYVd|d`=I%sn0m|H%B%-lcS@hfQVaLTMOK|ks{5Llm;JyIAq zwi=W@k)`HQgMbu7-{ag3o%D9J!ullgRARBGyyJ8emU}*bsPFoo&oZn|Zfv7~Wu2xU zs1wTDZ6B%+1O<`)1wbP3qh!+b6WIJ}nbgqhbf2q~&%A^2a5*M!Z*#Ppfu%8h>}9Ge z77tBo7}*F1!$&x^{{GA#6?d%X^BV>^3^{%VYPL&i>!l-1n<}IISXfxHva{bKeL#X0 zX82csb)Jh~Vs&B0YJvOatvvo--KA^O#qV24hi(*ulD(_zYRkJ@9dpF}NLK*(0>5Y` z-+2pe>h3H`Gl?ve8#6G-*_=j1loI<4Mk$NRxBW(4yx;Bg!tXO@1n0*p5}9n}>=^e( z(mXxvwu|cm@<3A%YT*&6j?zi5DX-~j^7FJSWb{~mi1k|p!>l0&RpRA{QpG2!TNl;EnV&6E<(yd0SO z>GizVBc`x?VA7Xt8(5}Uv0LZ$HpZPdgdb#`dB1BItHEIp1!5!}ta_D#lMDVDNaQV) z!L0_5mgn*IAe1}VMGF5^Fohl{@!LpkteK4^9}QeHU2O3u8PPVK-k#u7;R{*`$F4~V z2H0H*wZWnk1bHhx?=J`64gXg|Sew_g>fYM(@ z0536^Rn>zz%IJ9J))>E#vrr;90qV{cFfuII&kVZT%&6ztUUcyHdk8F1g1ch+K~pRR@`pvHCM<9L6_mfvM3Sj2`t zQ$PJx_U0BO!8*srVqn_y*kj!|+4(sb-ju*^43;_oglVOWCZE5Vsd0G!{(bt0<#!90 zoMXb39Q?I7MWs_TI!(j9jLwGUC7*gD@jpi>jpT>b9akZr;+!SpqwEqR!{~b1e!ddOVK>9=I(xkMx4?T4Eusznv6ouSJ_wg#6}w zU$SR?IRH-TL~)II@uk3<+75}m)j{=de>H-~=ZD{1c_8&=0mdyX{+wePP8zY zfYMS97_H&b%KU~z8Jv$WJ!>6cn1bC%D!yVUqdEQ$a5sd#Yr+J?!E~3g)1dTQRXL+j zpQ?2b`*_+G8C|otnHaYfwbR8mk={JIgXD#XTOYfFlSjKVNJEy3jEjgk3OsaY98KBI zrH_P<$9kwAnpb@)DLDyqkN;Fkq;g$xT64VBKcp+%+xS^+aNQlU3a&W|5#l)iuDniW z{KT(}y1yHbDR_Up_qD^o0NUt_)3r3YNaLV+w4RXbId$lG)DzFxb&O0*84{uxv^xYP zq_xNrjJltw;KKhYBQx`QomTV4%?fF8ZtloOsyUoM1xI3KVc6`kRE*$^j*C)l7W@0= zjWtBmixKmypq*vwHF~mYmcMVlzDfcr@KCB~KimZ*{H7Fp^WFGrxKcZ3jW&Q=%aLIKABCNqxw>_Ew+eFr232t3~x^(HyGvvAQh+ZYJY#9M9BHi zk>1BXG&R3R$nZ;JxfLYW->f|k3_>Wl?R-m{tj`fR5c0;+2$k{Prs_NOIIuo*K3ZP@ zM1nt)R;`GJ8+{7wU4}WD;c%)bawG5R04bm$O$3`5Shq)K>s7^VscSLCa-DbdDl;-p 
z?s?$=^~1Ug!gvmUcHd})dnUhiFyhb)p_^3BW{^J#caI0pqV*Y1Q+j>H@?>`W4eyDe+2=yyEV7y~a5#b+nTje8-ELG#pT^438cK zfjb>WPa*&J5o{_a4?jZ!(08K@PV$_BmJP7>1*wQ|;QC7pGj#%uURHY`?2L5ql14 z5nHz5Fb_+Jd`y>3|Lah`yrV(%;6!BgfTi9Z`foyEtOr8T(DpGri?;^^rT>2uRu)&P zRB2or?E9ga2!!sjS!w-t=5ONZ_tKex=#-6~<5MP66xX)kf}&ZXtMhzg?&HJJidhfwZU7Ow^b@9z4MqaOb6m6pFFtgXl}&VQ$%xf^K78KSFDp$y;-RqDrC9@)4ct4tb-rBSpNLv=_HoN zRpHqZ-HZAx`E7JkNY})Kb2uii%vOe)+GpM+i=>kycrw2G?a^N?bqlhh1Tq=QDRcp4 z;+#!)LA?FuvX8#4*N zY-v=5o}oN5)KWJ#^b4v2+?aNG_3q$1JNL??EIzIknNGR=N#nqkUDxDkl|n0??m|d@ zGX?u64GsPdvlaLgV@G z!F1uzkFSbhsc5g9Zi7J_lu_g!KKjY8&;(whAId^f5+g^6M?O~Ko&FEsZ+-FlG5^KQ zW$NC(Z7y7zXA&pw&X6{x)1)aB2ti!?CW$Lw2l=-FE@pS1jRSv`w5w|%Y(OEg6V8Ae zJO?&i8=`u-33iWPWkZs5h)=@*UjIf z5#kINQOW{>z75iEQQQyLrGZ_-w}cvYySS7i%VDC_BTz0Xdg{ z!wEf~9x5=18Up8rKZ;)$Eihd1tLToFQfPkRVwNdfwtls<@s)40z!OC&fxGiZ6QoX% zbxrftr9h?7=piXdttH)Ad5DJ+46TVZ={2L}=tUa$Ea+AYPt+{+n88+4?%88mFNlR~ z+Uk?@TD_E3&N7jq1ad6!3Dsef*FesMW`f}*tRerAP=Q>*rr1x!fG`mD2iS#DP~XS< z-8)}ouT-qdY2+cpkTu)X_NwrX?*dvOsf{*vNAKm=gP`!5#m?rVE2rzhFs0D zq4za?(fkBO*lqZrmXaU7P4Oq&7WMp9`6woapyV~I#wv^?riuWFnCm%=9G-(29rlHt zczCF2s=Q+gM*1<~rGz(Q#ImZRqoWL92vBj}V5RxG{;>xHYYS{T%S;=EtyFvsO zw>EIZ!kn?f`nJG_Kvln1v!0ce8vuY%-+_^sc|d>#4&t*@Y4e+2f}N=hQK;ViyV?UF z9|I#Ej*aoFq)%aCRgVyPbvf6Mp$RfsG5LvEC=1`nx#K_FRmv{5HwZx1WTwnNt@tCV z>z$b8AsciUl$~lOy4?t+i545|Ncv{nP6f|YqvsI~6Wu49T*O|-M@CkEeqN3k$mY49 zS0ic*7j$bjX10R&D33f}KY3D+#dJ6A@alc<24j>>&Fi_Xp-;joUaQQkux!kl}^_n?0FmWx{0U_3~FD%JQf85 z8Kgl7^2Z5qRY4OtkoMn7RlK*sv-Q35Cup@l-^vcBy5FbNF(Q2|9r?l-P15N|AteD< z3ODyQ_AaTXI0#?HV~n02xSSr5v+v)=n1MHMR8JqaL&4EG^W!TfHe4Nt->LWgu1F(1 zkK7IZyfBtGTuJ0bX7b^d1tTe3c+?LczJ+}lI8JLR?@`e*h6klp2;iPFf^zp`QW7~* zx6aJGS*)%&QKF07aR_!@$c(;&43xH5L}tZ7=SE4jJdHUI)0ZziT1}zlMWdgD*q%#D z5>bmoZnnP(am}E{rRN5>8UFagQ27mpZ}c06k=?gbvE%36#7fEwV*(?JdSGZFg4h$} zOP(?ax^6F=jTO)RZ?d%kpgsIkX`}kteou4$j*66xXy*+S$6^SI9cf> z>~+h&d^cMuEddC(AxTYu#2V}dGqMRn7Y7wedJxTxcYn8@oazewlv(Nf&z6ZTb%cHd zy3ZsYhsE$(svXQ5;=nb&$6F4w3f3EK4u=i7~O+Vw89&1Ii~P%#2I zcNhn~8!#0`JB&Iw+^V~VV9w&E}t@) z-Zz{vm)J~2HhYu4w58nEt^X}hQpADFKu^O*^ql5204}V}5 zlcGnAD;dBcmh1Nn3(j=x;7rK%l0v3F0R3J^Zzr0rDMP2<+>wtzDk$|vYy?AL!jf~hmB`k^A6<)>h1#JG2l0d_sW^RX!PAP_(A!mMmf zG;3l5T%?!*x9=~`F1rC-u-cK35%Y#>Q ze^2kdrT?P;`ZWps?#b*TgF!dXw=;ms8ou&iDYfeu`~+YJfmiScZzW4srp&_HqJ~kB za0Ow#nY*&Fafp4Bnx=^6m;ALi+Bdloti)N?bU*kbbtmgL!)?|_39<~OSn}ot31gDJ z%+|YASWU3Qpc{00SrvkCO3`4MtPEtTF^N9{a7bk?8|qqBmM)EP=V`M4d%?NHr4o$- z7ML*&!fwyY${<6|e7bYTvsN+mGpQ`+RiEN`F--;8u!m`7cdcefj$vGTeBlA|Z4Cs{ z9De-eq!0qdb`U%q%5#gICt(y8xNn2$RAL?9s^T-ews~GN@5!=2tdJtu*XH{*e(k7< zp9s3|0YRt{HlDUxz12ufG{l*fJY~-mEHA!c7}6kUwgEp?I^j zVgGJv<3BPel(Y1oVUcMvEbXa~RNEmac7EpJ9*OC;RG$_ zO!5p44uz?X+h{E{#?Ak?*xn@GNSmm>YLNMvp7Ad!;O5Qy)$X4m#jW_k`Fow=hcm)HD0xyuSl|m-AqLjsY z6M3yl4lJ+Ff9g6IgCH*Sv~&WWit2tms)x1pCjH}k8nU%7WM|{kBEF@!6b$0}GeOCU zhoYMM&@*X+E)`rPxY?rs?V~)K2F_e$p8Rr^BvmEaHyuf80VhpRMDC zw-KgFOACMH{}pM%E~@m!jypplrv$$5-c?#ngahg6rIwZg>}UZ?{ox^$bGmo1v5Sw^ zco`U0%f;uJHHZ)FKI@grmQGYalpKZ>=BHQsrM!b+VS|ns$bL7X*1`ox>!1dOC`o6` zze1WD%h9X9mzSL#hF&&4I;eX@*l(d+T`D&~FmeFKL}47-k+*v?v?~ z`@gnwxKakZ)kDNXvGq&bR$JmiInd zjLgjac_cV{30O>sPfj;DI5_k?RtV={McvHIjQiiOalfF}aM4*A45-ufBt^clEB$dx zYir%WRYx|fUv~he3Qp_~uwF*Lo3P+QBX3z=0&IsjD-b{&o%(BaO*9wuBkZ& zJMvYs68qoZxe1Lj9;YqEkJAJ5iwl?;-lGz1gFXYfEv{6%p(uUCe)VhIqvC()nbz07 zu-xe85wUj{j1(edvhumEnmmK*7TL5vmpg4zYL{J@`KNl9+p?#JLqj5^q2aHeDBsi1 zks&98THHlYvBgx~l-cIsiz3m-rRxVE8cIsa9;nPN&XQ)0`d_|eQc^Pco-Wx_{tRk; z7_c$KF~UR>Xttqg5EcWY^_ANC`` zM?F22*qk34ARou8sA+*y~0{Ek(dM4aPH0NUQ4ko|>fkU3uJqgBpgA6CN?l*My+9OaeI< z=5Olv`>Jrcf-?;U23MA|Xp7ga$RneA-IrAoi}b>}y6d|H2PP-!si?-U_Rztsp9^Vq 

>N8)jlb3MlK4z8tGcg<+Y&3Qc^uW@meK;%J;O?leP0%2?M_RCzS>$> zl_Gypb|(-2r@ukS=2MCW&_SV1dr{X79d{|jrqX?&&{z}UBcBsUsMa3FC|9qMo_%QZ6B%~uoafGIR`RaAi2Fn-WEk1UOx-Q`B#;3}(p_p?c~qlNp&StNI` zs+)H%MY$s);G{JRP*)_;*3oHOcY#_v9_1hLAvdY6q2ZgQC8pnh{>WkTHW;%DQaOKT za%#U9uhrdGiv{x1mgQ1~tk+l!4CW{bX&Ap=TOOvBVMXh*Q^)#j99{|3a^l3R6D%Hc z?g-m5_fH&8dNH_%yW=7P>~=zPzhV87PDN|mcq-7~=_}OmO)78;DG}7|!#w`RSPk>m0f0#cbhXGyI zGz;b;R8Iesze02vb(Qg<>3Ceu+8)tt5wJdm{_1gKqoWH$%Jlc-?T1I8$EgwG|HNdC-v4ZiXuwL#Th~hYkWmhzUjOS^xQ+gmBXs=SQU+E zgFN8RuOEYd!Z4SzeaLZjObGfafas=*!(RfxgHLvLc2;w--?QjQqu?C1?|pH)XCn7yIHSB=z}(xx zLE_WXIc4v9hi{uIau;XuTBIy}3*k-=VEO2TR(SKu|7~ zg`(`u@8X-XA{^4@^?<4W5P2A?d0UdgMnw|Q!o@JNI{%tcl0QY8JMvtTTfIg@>4URO z8#K$pFK4jB+XErTw$P}a9FT}VtE?m>Ll|!qW^BGAH-I^R>!M>uK>;3ooWReYQl5(m zp9-w6K7Q*X$-&KiPhX!BTJzv4uc4t@i6t>3qcKFlY`y#P@B>~91gI+B{;Er22%$y* zllH8C)0OYz{=d|6o)FQeMnS{iGoR0_&sqjz5b2p;! z(r{NuCc!e&@%bL)Q1{wW#Y-rJOY%vPs*t=rosY%2N{BAP_>0qtbeX`b)ri>8oxKO~ zJ7--jwmC#Ppm3|Qn?gm1zWR*~I5ROl{XjH76H_MME!ZHB0CAcueU62Ti@O`j>%&;W z=Ca%1(qBp=p{HHv58pW9G34W0z=ZSE=oZ zkzSdHbMwi`q}Kvya#Tpwfb&VP6V2_qs~3MCw{S`-+j)U&KPQ!#LR9A2XOXwKI7wDn z%MM-VhS(}K;uLLM{odGh_lv`LB#cYN8?$v4(y7fJdN3~7ylS~pm6H4>HE7w{>hnV} z?}ke-ry=|IkI{kHv@}-(KX0(BFXSY@Yy*evq?nSTqD$a&WIqb~1ZDsS13d`(fDBR? zN94&cxWOA`{Nzb6fF*P=)uZNU{OMVT;7`#dv$f6Rm%v^D{YhAVDLZ*pSUA1tcCrB2 zT^MMv@}Kx?HbnP~MIKBz)VAsLkBa9f7s^*0KuydVQr00X5EV@%hJ$~M5uyW^OVJ4m7f4(Lvl zez{x{q%1lpC`ekt&-Y;Xtr9bG-@T74C@25}BgtcIn9zrh{Lp05`{X=sv5poC~=pg{w7__o7sBb_62XRwt}qlqnf`Y5tsvTH%o<2 z%Lh5n6J{LjurEkYeG#peaQtwGAtYd84sS7zF=XxJbip}jns4|`D2K}VMD2&&Q>N^q zPXw30L(Hh}^WK$`Anfb+I9GBawv zrESUm?n!2n>AsiUA z@TU@0=}OB3oI|_m>6usTPlv87-xi-5gfJL&qo$41RWv%?dOP3~owMrRURqz9ZpCmo zbLF?AF(2U0=R6lK|5$Y}r;G6{alKp5_wM8ScxE3y-P-dw`N7j|^b=!qhFzSw|0xJc z`q93v{qo^=sTC`hJx>vEd&!W zK{mR0Dj|66YT#PX1Z~8tnFI~+0%kISvXz;en}AEV=zD807M9(_-f|a~-84w7?xtv9 zbTAM)4e!;@UBmVx5FK^f1^fXbJ0c0F=e}1? 
z7{ltx=S$ATJbn;=bM3?5tu48i+s)u)QNSjlT3}`64(oXJC___^Pychkr}VXTcFn>U za?@e$R@Fy8*XG8tK^^wap&wU98Gv+&!NY=CtEj3<(M9R;*K=MHq{YdZKD>+wN~A<= zdn=bwLUM3bKi1KLuwp$&u5W^lz;}Acgk84zV2)KWC7Cy0+tFt(&nFF0$ECsinWfA9 ztw4~>4Uh#~2nnXpeuKcV;zebde~(lU_JUMq!pZOhZ5rMi(x3ROw8&xt80C`yeW!=BOJd*!3m zE+myK!EYq8!+Uwd=mRh!c~oj~r3t8O`&Z)X}+cvF}!BIg&RLu-mJYh z_s`}Bv9bPLwAnMc)Fm;gbAzGjrI5}S*pzeJ`7AD1mm$yYxq`wZ!XyzxyMf0}1!!;U zvv%pjO(_Y*daGiBA_aSudVd~4o4DY^g(@zoXq6D+fVJGy8P8#LK7~}f08sSUrJKDV z)$*!r)0Zw|ex>a(HAGFCf_8BM3-KlRp#ra~^?Z)hf~rxP7`KrKc9Bs%0)dQ(gn(DD zGu#k$8_rfm_Dg)Of3;@(uP;xh=dUmk9xxD2q38$>S|(VP)71`L(4jL`J@}>*UeRtR ztdKPnQu&t04k>3KP-eR=n$W{!krl4r;H;~q6#~|vmTr>frXFjtRP{V9&O0W496U{@ zuHCO3OjYW8{tgiJf`ugKVRD!lo@Lf~Ip(3TxO`DD6w5a1U!PU!!hRiz zF4^*H%-w9_-1Sx85*sb@3Bo_5u?x2GGf>$R`477R^jx5CBft2yKGw!WE>{&1B4v!* zQD++Q9(^R`<}FKubApCTIUm5SB&+Dw%9cN?3M(3RZl&NboE2bLdzOlubGvCb5WW^s zdp-FPs^ZIciZ#)0w~cjVgodiMj0c^&2PBy?*WxWGi<|y&zO3J>Dlwh17F;s9J~G)m zU5?4BtR#amI%ccxE~nB5?~h52Iso+T z0OQUpy*OGny7~P@d-i;{oFK?BJOvK{{r1}CU-`ej6s=c_9HJ+`5IB7O>T+-lo1s*t z17NB?+&;bXc&1qOE@v7SbwIR~#_2CD+sAJ>)&?m-DCz)%k}>GA1LzJk_80`$15SQC{nWY}>yL z{JZZ&VI@19$3z&Iq($2MfGwlGjAZV3aMd`8mT0-~T}gBZ!28btwgK!D$jGshiu8yz z`LU@f%Kdud;&9_)nPOXE5s@r#j0jlXzP|i;_c>+=F^r0k17^Kjnh_m2MDUJL-A!-_ zt&R~=(Zf#skoE4zpWTf(aP|j65)Jx)RSiPHhm8AJOQ5IPm?-XoNFlTb))-iZzEK=X zq=r3am;a6c#jQEtu8;HglqN0eMF~I==#(c1VkaQlJvwx2#G*?<Bx9Iy{@_+2#5Ib~Nfp>!u*~!Wtj@gAqunID+ zCEmUu=+7<_ydsR}`Rri@v&@SVv~jhSw~)p<-XjDR3!_2>Ybw=`YIWT^Z`ks+`N(oI zKmbaN`@ZiXMQ-@)gU1D+6Ma4DW~GhyW~40~=U7;5L3$|PF|7FX)1Oa2f(^WV^DYLC zOjo-eydcxE5lruu*;X|Yl z45E5-Ypx-|3)I|Caze--Tpm|9zXgvK9sr+mIy&Q?)xn)um}@R_yI5Puw~!r!C>V+%l^RLIFl$2B+^>8ucalRvL)#dlP#;wiQ z=gr~03hv^mr(?x_p`ZX2=j#H6o`e7Svu)3ezV>)l?QuXx8-H%Ob`Vfh*}iHl3@RySXA?|$qU z;A6_1+=G;O4KtK4v&Y0CScnoO6x1cIim`%})9xSdn^ ze7)H8oi9Hnq`t&C44jq;` zNggMCZ*!l~^y_~A(KcvX8r-U%<)R_5V`L8wyye3dy+6@dd)n}NCDX21?_i(xE?M0o zd91>@VF#au|Mv49Sk@D1Yt8yaq z6x+HmRx#Uw)J5*^=4MF8GdX4Dt`ef_*JQ026#%w=^v%Reyk4#c5v7a&9QM>4o^Kk) z3Yk9f-ir(85v-b(PqvveKb@D;6K2Z?R(i4@1ngyM-O6QE!2Wz|@;p7JsE>W9v660% zP^99cQ-nzz!N|S*%lQE~!|EyR{UyQPDJVnmLqy|=sB+8kx$kG&!2(tm-!Cm9n=*NC z;i$W^K@*>o{_1fJ)SF5JBauVH85`2Rk1swXO3^bvfogwlCP{kOZZffK=vs$stV?!j{dJ+{w5X$d@x7#smY#ule*4hy(hE ztqN1;T-oHWqMN_|BmPp`rIXHEYhA-gNW+?kgw_kR~1`Xc~&T+p5()LR9Zd32g;l z??`AFcwiVRPY>hptwcRj#)$N;Ytc z@f~uhw;X8Y1_E_t5p_SvS&Wmg>@5w|EM^B@BYLCwhtB-$CwYw2Q9-NsIUElKn!~zk z;oHXjqRe7;-$YdllzvuQW^HMN4?dkIKxhJz2uhQ|>`$Fn&MJzEX#7-f!Ppn$CU83p z1i*NhtG3aE(OMA%V6s@g!^M(?*|v@|O<&C*#fMezu&V7)j+RFLXK|*HV0O0x$;XA; zQ5XlVxYjRjxnNb*#<*sjiFn6eHDYK!bs-72I>&4BoHmSg;mqr+Lz%dub0sDAKV$Kz zpIIxCBuL30EJ{v6Y}gCjRK{g7OeC;!#rs_}SXggkY@jDgbQ;!SJMvKFy5FbfQ$32l zZ2C*W22iCbDnePS_KaM_OY|{gSRRX>Lo&+r^;R%8)nj{b-<^`_2WAvItd7aHn$BM$ zYlll06-T=gb#4RrYe=z4Ga0aUJ8IdE5ml6jayv7&yoPqseUeC$5);FPz_e^?2#WMGSKJhMjSpeG<@t)UTmOs8w zh=HI#GIX=So(D=a)09+xJAIeyS1%MA(}VSbmGNSN;n-(AoT_V3*+hw+qFQjIJa=0K%wg<(q3CY*~Vr1;Um7Y6I(_+6M20>ets)>8!##j-h#rW>5&G(Td%mS&zgPLrfl)-+-YsXpIhA+-mb-tAkKh{{m;;Dyng)}w9rDf zf{-RZ6!0v|hlhmZZ5@=;Hz_EVe{vPS@p!X~+kI^8(&UfI&AWB){HZd7`-!{+W50-2 zQ~HALPsg4Jk(3Z|?r%5x<}2p09WjRhTEUw2DfQL9GM7&bU4XMx)@gb}3>9 zcZcu3WsUBG2ZR4&CWw{s-?T6;p?4OW_I>~_Zc6n=qxqNrk!fN~&xL-kb zb}Qpa?{fT$=U65Mr4QCCL1|ji>?_Cu7i~A{mVZ&8)A)k8*@aw#aG#6YG#~nE8&^iU z-J{-@Xy6UwUyDYlehU(R6a!dL&~8dTX7Qu5ae;|Lf%~#I{OqqJ7+{q;L(BW@Q=esU z`=Ts>G*RtuQSST}apP!oE1SlHbEIS0!kZVcv(M>;4N9lhv7@ALm6={=)6I>i#rl4_ z$*hJq9Bw#Ew|2t?uSJlT_BiG**}z@ncU0pzE&f=rGhK?{8Ps5mt*oo75HKrZ=3e-w zjLoGF_A05ZU24!%M4eDA%k}r>b7dTlDvo4p8V9rg@vor~MA74_GBa+O@XD{AR|YaU zbWU*qE^MV&b_mDE%S=iT{|{E4nRiP+H^WtPe!{3q`ai+}ft>DeO^aMeVsL2Pef#lt 
z=3=hkywCaZmR|KsTELQ|@932v5E;W?&2Fp6542X5*a8YZ_%O1|%HN9!%>TFrk9ivS z0VB6QUuLY?sPkFXJ1ha38{}hQ6-u7|ER{d2XQm0B#8wPf&!iKh61RgWqt=rDjq~Hh z$uaarGQR$@Ha=DoFlAN#DuI}5$m^2(^0*ap6lFn>lqpo+HpOs4m^^o{DdxR|3m=1J`%1A;A0r zF=sJO$MP+5um%wgXyj4qXQSV|!N2pg>J4oq@{{#{kYmpz0i65)HS~sP;+?nOs@3i^ zotHq4n$I%zyety`uP;&^WnNy48fyGTvi-!(Ivzjb={pqHAiDVbZD>T2#ND0eP^Ca+ z-))3THjZI>y>_#(x(|Ey4n%sq>35IG@;5I zs9XXTf8>N4Dr}u=JbXw1)6oLq)OTY8Tk4SKbIjCdm;yx~Swx~Yka{JjlAL%Mw4H#F1O#Vhu%_Go zV4yFE4@a-eYkfX(seaaJqDEGW~A^;Sc-%{S9|aFs|KxG5OV0u!mc zFjtW;ZSGcv@()0LNp2;sUqi@zgvSM32yM|xEI3$%G`Bi*V(xq~s}^+A|E2?B@>JkV zLlS!urkU2a2Yi8204WAg4D$Wn>7$JnzAOq%OGXGNc|=RpTJ<5 z<}NtUNC+^)Ym|oy^~-mm3&ir%Ef_;fgCQDn5Wa)%2XC1+uIe5dUYn6e5La+gQj)X% z+M^|2R3tk1^XGlAi-JEN?bc~+abaP|$hxA11uJj?ScW6@&>Vt@0e%4H0~9R0^^p`U zmnL`AIYQkv98QL-;n&1vqSP*Ux3N~OYHXQz4&XGKl(EsAVV^NZ_EX@dk9mHe-QMtVy1XgND&y36ane~7*oeEquMBK}BEF9IYma#AP0Cb@ye zA_Zf(2|q$06ctGU-G&Kd+q3nh`wv7Ij?%=eryWI066@GHu%Sd!kQk!T=`gY%QnpJF zXWVZxO*>ADIB(eEk^TItAr_;-Udr0bja@N@w})rid+2P=sm4|!?p0S|i8Gu_0Ec6Z z!5Dic&`-mEH=KyRGX&>G#>7Uc96UB9=E%lLh52)MXmWKRXWCA<(4?8Jr5mGPIN4#UAc0*&K0e#2dh@-^hhwY3 z@Zr5Ncd5hHLVKX`gTT!O=LOQ>2sn(vH}VPpJ+JIJDQe?-Y7hyt{aENpNymA2Nekj+ zM>Jr@hW7d&brWh#9xanwV2*3WC}N`+=}3BM{31KCEnh-mB-WWTLKTg`p#SP^M%g^~BWozxDdzbb5MK$lzsCKbP(1DL7+nkIBVNC{M4T1=@%mIUPF zm|;JLLks?^zH0?-w^HGpEd1NR#m0i@xN39E;{)Cf(bt261LZGeHO+G^@vINP${M+} z^vv+p57yYxrVJhXak&I@L_mqGJ7@)vKsw@m$h&%61%Y*>3Cx1|rLlpE&@3slD-@DtA#KIA~s5|UjLxBA$=4JU5=*u0Un_FJbO;_^syIM$N zK|Z~lDW=r@qS}>DE!P8SQq-{RJ7F-vZvFfvA(7OfCLlIB58EqjMo^o1A#n;2I1k`E zO6;b}B2GMi$0@@+Oh9K7dW(>mzC(tn>Y1}Qt^vnNyuXwLwP#A5?RA5x^4_N0SLdL6pJaaxq&RV#cySF zMby-Y_QE5V9 z)*!QH zD@OPRYs#SkY|Th$rUT@+Y2z*C|s)-8MysNe|RA(vpB&X7u)AmO)f1Z0$`|P1vxud$Df&Hqry#tB4$E-X$UI!7gs^pbV`FHW6m42xR$M=6Np3F*y<4y;v>1F|+URS7=RXYsq9{l-LY`ih zs0RMRzIi)00w`UWs60zAe7Og!=sTI^=%s(&HV`*LP0X!(wlBs^g^RhitXAl=aDcac z$~RnG?%(JPMOQ}@1=iPym}76yFk1J zlVzvU2ORHFD`O^wu)plefF;`JV(09DZ_TpmHtGJ22QW-dw(8h$MU3cpPZ5{z(zYJ$ z)-ZqIsuvw!)z{ZC-)&zt^;_9=efmU&{L&Eey^(Qas78CToQ~5h(oF!>JPEC(4^Swm zs{+&?-AxR_qZ0SWQZq9%%Xs1L*hw1jY=LgZY1u@)U}x}XL2Tvno4Yh=Os<3XWxuQa zNz*F9mt`jM;JMNqAVYxW2wd_C27yo5|9thfy1 zA2@?RyFDN&ZzfIutuH!)Q%)0TlH3tW>|^-Y@Ka2vkfF?iW*B8rH^MGoHAsC4W38p~%F?t&Qdd_9!At*csJuya~ z*P@<;s^p`wi9V&V>37)myfPK7S@-}xROo}iiV9_&BZQ+p`teNZuO`)PM;*>jrYAKYC^)J)Ns7l1P7I(m>8_tP?%tVIhaITV!U7%A3;h#!n3AYHbnL2ty?6S zPtaZf$1vO^g*U=(FGAO(F0{EiTYE&P;4Vq}<92%aN)neIbuXD8NOY5*Kc^zA7y+=! z5r-?EA}12I!4O#N@9z&oK=MUSl$@S-N%u*bp`pC|#g!)ll{2dK_pXxo6#4ieHG5>t ziU=Ftk+_q{>7E|TMK$FVd_XZ_DR63Oa!cy$uE@?6{yZV@G3dO2;4330GS~!@Ff`~Y*_^IF6xe{|0g^!~`)lbu`;|W{)VDoLO@c`G=un@nzGP>B((h@tC z%W(Q%pAp(@cO~E5T)W=7wS7SV)(!=pvGMU|%F4=OS9c)w@O|AddVtm|rF#rj_o(Jr zC93Z-Nx+ibnx5)$Xiflb0j;x13p@)a337qnBf^{H!gyFD;~f&q)ZTz>hf0Q;+%?Od z@YmEH$}K&rUEL|+Q^e39pj@>F?)D0+HawouxqB-5Pek{6;O` zPccONjrXNPcTW$b#bNM6ZdUUk^2uV63>*6Ts01?3Y4ikL&qCn`ED@*Sw#1ULJ~&^pG3U%O#8 z!gpDpv~&HEm=YE%+sZY@lQBCddCbXE!+h_1=UhiL!&6dGsm7&a|MNA9eT|5OLgtl$ z*>OTMi`Pd*v;9R#OeUd_$`=tMlQe|zkEI1f0Y-udW6IL-=sgh9($)4|*>SFHoH z_ok`o^0OemsG@tW9o;)i;g?wZi6Gv&dKaNkuuj-T-ZaIt@y(ksH0BY! 
zq`2dZZKfGWyfca>oLxcCK&eN()+OVa;>R1sQ{5_7*^d03U0n`8Uo(d{oM7v}>SH27 zqv9Iggr4GX;L&I4H2N$hoONzU@@bp5HGbmzZ6R5#x>95suj_y^Ib`fk0Gx!6c6TDDWjm8)(i&7xVSiv zBGGGAbC1+tILCN)DET{o)8%(k3WHJJ2!0?OcrfJ=l5upWJBhVN@J$Z;x&)V!uIW+O z|4*N4uh>201J<4rS1r^`>UWOI$;p9o(lAr2fddA;UVVN+M!A7?(Bt#Pf(!Aoy*5TO1MEdr1&*qT6rQZhLr}|C--fEw1MY6w7cKzN!Lv9#* zVX5zfZho9Dg=soOqNhbcI$9RDP2KJj3xkSedW$xCXsCb4*3iC5-dPnsDPMWTT1las zfg^v?tBHwmFEg{6>?cqK)azQu%h1iZJfcYW+(P5*(B`(n0J5d+K+B0`JpX*3W|vT< zP`7cV?ygc`m3#_WMV=+8&$zzIwbA5=P@|BAnF-5?t{b^qVXxYL|3OCCo_#jq3g3Ip zIPabog)1P}pZBFs&m?9IVmxMMD4Fh80|!EEklRyP#7ZiGfs@Y(VipAvK)46O{ax&8 z1q8A-cM0I&_WV(-^U*{?$g_mxsnFU`0Z1w5=or z9oidH8TPpJ9VUan_uHlB=j*Nb1Kn9|t-aqL&g+WNV*!ES)hilSwH90MxJ5(#Q!vy3 zAVj4X7O#v&FOaX_X0)<4F8|EnuMbGb<%W(YAk-I;+gV~X`To@jtWlVp5z6bQ296q) zmx~%LI9nQu_G5AgT@c(P8$#=dhR{|BDug~N^@Au8V3LNG7BA(EvnCFO*;kL(O@zqJ zn3zuEJ-5uj_t z7&rYs$xMwT6eDq0i5Lxue72lwv^m{HYUwnzXVD}C`Py_72M1hQ7e~R1vvZL#-AVh* z=3PgRu2hy3($&>9{?tRJY~f1p9(MWL#=DD}lnvfG=3|l956*Z9kOQK!;0N+R;;;cw zw|{CjTJt@6|B!==X4S(dR=;-)eK46j!>Q#IQCa836y@%uD z>l@U7vY#DHcSPaF?88%}acpLuKvxogS26q3|M|#&*ifgGRtnX|crInb(W6H%A222g zWZM+8VT#%87&mazG=Cn=G2OtlCxT~UeZ$+g{1iw^%*pL!0q+^v*x2BnW4LXeo3!&h z0_ZpP`y73IeUakw<$StH>ps*j)m4e7cW3BOy>paPi;CpgYwvG7rl2Y{Wbf57Z->5=U0vKiCHK~UYOD7 zL@=R=XJkyQlJdju083!#55W_&Sb)Oy*$)xtgQ4j+*|#es;-~Fb!9oaHDG&3WxinVA z;$8M-Ve<$Z3e@Shp;o;d;WV+vckj-T11=fKk8gAv)I*W!q(Ki!o3GePGeUHUnk3MW z1;rB_j)+3Jy()r8{2VC4NrPYzqF80w^DGeH1JZ}pZ#V6KdH=TM$CZY|cU;#B4-(iC z9|kVOD96ZSD-c-YZ?p?BAd9;-J`0RBJg+)>c*UZ97S#6@JZp6e{QAJIGH9$-yVo=L z+oM|@#oYd47W6M6MF1=}@ALf!^>5#5o>`j~_YL4DIb@5<;LSv$0iP84_ux~eVY{Cl zr%VdQ!$QtD-bQzRdRoUQfk~GNDpU|Pjqp<^Pyv-V-e2LP=s&cn`H) zAc1@4c%4s{L-{!iHyz=pw~M5vMCA3QM|1%9ePiLQ7JV6xi?tVH6XBc@Jf&|WMW^T0 zP=!;8#uGK(dQ-o=Tf+k?05vNp?LRps})YsQXxJx{sQ=^Psj^zz9ixt$5)>RO= zRJf27F9pGw-GuudULH6W8pL1aL%lHC+6?d2%&*MA5J&FRIMQe~_RT0r%{tk=O@I93 zC5P(ic0r>OqWRE_Rq#n`z;Iy`TC>71&|Q7}Zk*biu`*;sP9UOvJ3ak)eQK7|K-`LX zXjrHSx*-|1bTEuwwX-`d;CQRy9!~*7N7jza#)I&MI0yl!H2tirfcl~EO2>I9k;($J z+)DW>k6j}+mFjw(Vuw=C6 zhK2^dNvDBhuCAtV>YBbie*8Gtt`WAOKxVIUf>wSmU=})WXP1 zget;QZyLp+X5{i$zQn4Rdy58o+-3}<1&B2)brp$7dxS; z5V)RzZ9AeXaXakL(rDObG3j@mq(2uQOY6Gwq@hXu@>m&m*Z6J;+3SCcI*T1&St*ar zVECc}Ufh6H!bd>z69$Uf+G95|l3<;dH156$qp7Lw%^UG|o;w-ZO=D#@4FXO~jP_*( z)z;RU_}i$ftD|YmS)lAipQVe-A&_AH;H`tqYc6-k(L;xnCo05x2)lC1kzFwEGD!{;JKo-gVDqU#?V#eUw`hMgtLRIHr%Vt+_j<4i(C}sQC%!r&& zmv~VHa&vQ=a$TelMeq3PpA81Cfe*Pt-uVV6YQRav{&d})mnFPAfK@7(joSZ$@lV%5 zPgX^I0wkL;+VT7nX z5acA(E9CaV!Wb8|>>dc{y?q{pJ}Sw`>`aagSC18q86c+G*%mO&Pa#csIY_L?tmoqY zl`y|p^(LYY)7!;PL%6ty!kSgZzqg;Noi5ryS>n!Y7&sqQYH7LW6kh2Os5Vw)4gv(y&LRx+HU97*ibWy_bF z^kQ+)2gzd39?gm9-yIG=dS7w;X{#cRcTX8%Xq8jUVr$a#3!ZRnd+tx$U5CQc2;jF z&tz^Xk-ML)1^?SPLIh)0*ZqfD`#)&F{~T&ULRr=!-@hiRI`FR8S_Tf@^MS&&ImFNu z7Z=}<*|=O!PnN_=xmAl8iDkr6bNoQE0kASHJ(!RJz#K>xd^mV4pGY!j4L+d{_Vef! zDIRJ0`La?rUSgmfdhc0OI8cxzs8G6f&L99xRzNBqQx%I|gFHd4HnV~N z`Z%`jgz~(87?2P=nt*3c_}30QgcmR5bSce-}ZY^cbbAY61Hf>G4KG% zy?*_gpyR?_LAvA%2yT}1+trkk>8V%K?MP0W+1lEQ@84?b_0GnpH5~N3(xi?9-rHH? 
zI6%V|g+f~SC~d)$$5A%jp%OAKq`2#56eJ+9{{Yk2#l_|Fft{ud@RDO4f7_f|s$D$3 zqhR#)_PO$-UnlxA%Ll!?o;15fI`-4)#&U245R3LjFJ+Iuv;@b$F0UeKiEOVvOX5IN zLeUMmv*N;HGJrkuQl%>`!L|(Oc=nadz@Pp~Uzy_6UURi`7bDoS=uC{rIrV%wt(lo^UA=QY3cg4iQV-L$X=TcGdbAEri$or#O*Lf&N zp4B^P&^As%Q&T87N^mTy)I>bM*QDlvP#2O#e7bsigphIB%9VoqtUWe8zP`y+oE#;V zf1#7JvoUHtU*P%`ptQ$tUw68?s}u}JyghQdUUMmB5e|zOoEagl3PbN<^6?>@a)p`P zrcdiBsTErzydR8Q47zc>Zz4UZaXN`&+t-vqWnH93`1*h>VPzNp| zZKt^tjm5l_jDnGEIO-%C*!0YEB}pKXC=HrQ?`f*{T}HKowF75ghB@)>DajiX+6xq@ zmfzWWiFXnqN)VW{VSG_>$a!o71~n-q5X9RJeJO7bEqhMkZ!DkdT`lL+j$Jf%cUR$l zF`8biQattZ&Lmxa)Gt7zbV1|JJM!wLFoihvK-nKhn0hHlz8bAPg-H?!>Xi|^-c;NK z@g^}wV)m*2y+{hp0pNmUla0Tn;{+hsEuz;$&Q@)$T$&_+D`H7{|7JV??a%$g{I37^ fJpV~6P%P@!E->_5ReX)?(UM1Hc^pIrA4GW6=?zK?v_sPT=+ld zyx-n?$NhR3I%MG9i?yDZ^HUK|^-HaWdpcIXr?XB&ctu0KLT%R~PS=ia$;1=TMzs6+l>}>BO!oy?p-!I^{ zb2Q`O(4X^zi{RPcedL5fkr*R?(Xu78EKq1D)IC{g4fl_$V;PlkmyC+_t=T^gs* zN$y6^pkV*t)2NuOBA zb%$iYo4$6dRj7OrwCPK?c=d5V=A~(GI<1xe#Gta|O*XUgu6t@u3C9%$x}T=6qmdHKY_y`^1GvA-wV?q`P!s;a6}+f|hb&!0bEsItl85)!(k zo~0zGt^LlKAvic#JU{8(JNCRwch@U7BtGfuezSh@*08NW(eG!1sFuhbzm1MF-1Nbl zvBSNu!o%@DML8{WicfT0hFvdWMX^wcCVRHH>shtkDT=BDQ+mvKU01cm{>1ti9UOe(law}r}aqX zX__`X!*id1Z`R%0J6)1*W@%?(!C_lIM+sHVs5@D@Y$jS%#n5n~!0Ok8DJv`Mbf@UX zYlel{S*aA2kB*iOckbY5=4uC6^krVYd6R=L@)URb2b#UPR?AbFWVPUGRtu5_BBecEsF|Ye;-uq*2 z87n7EOK?s%^E++kdEGW6P8txs?pU)mjiFJ#9cXgkWD*Z2E(7#WMa6e;Gj zkdV~eTtbvl=?I_0v@C9*|Ky~R_utcfRF(R!I_8k`sN%cjG9ah_ou z(j7eDn)vc0;_GV7@p>Z;qqqOPSFF-7G(X?*9gIwz&69Tz4i1`re|h!py?X&VdGdGf z;yR1a59B_YDYi8=Wud&TgPErp5gSXY=e1UX;i>p{E5`5cn%7cBTvg)nWDrUE!dq^A zd{kRkS1UYqWKY^17G`F2m|H$RzOubxtCYc&>Teq>8w$!C>&JU*jW(rYAw{OG%$%I~ z@S+zlUc3|c5_EKQtje$dq&Jb7$trfb(^t0orJeM`1@sb~sPyzJC$M1lbT-;vuxMAg z^T|iiU}dO1-`uqS`ThnDqu+@`Z2=F`tWZ+v4|<)t#M#hE(Zcq}z3gIEs&@x;dp+S! zdN)u_rQ@CtDCATl!@@Ro)(>wOH((Z}rO6CW#M{+y{CLb?yCQM2**N`z<&Nj*C6vSV zd`BvKc<;=!XP6cfY)T(VTchbmX1&jUlZP(hUwP3t3OeMqqC0bgpw{PCuTk+d!zLcu=X{T}>2I*R!+jfW0epy#{w@T61 zuNpdVvm3h}`X|+b^z`&5-riNXQjyWols1C#U)Qgfjxy)LS_?2JX!QGgY{*W4Ni%-!0-t#|Q<4u37S{6#-{ZWm zzMh^HJEQP?>2t`s@KO}A8y{UcCi6d-3T3>%5*HU2ArnHlwDpdF@>=Xb$;jIDSBpOH z0X!0tOENMt$GZ~NmX;U#ElY5v6z<*I@`S39|7uTFO^rD`mHlkZ&TnP6c-4d3=QLsK zq%t-!k=thfJ6qC@XJnj#=hEwf4fw@PQ;ng=`aI0cw{;4Vlcfj=2v&-S+TUJZ*{!$$ z6X;>$@B91@VfX22S&(ZNPbaEL%fLg<+S;1J%F3!4M*ca>2&3;oiZ=Yx4;@d7ii_37 zTvy)-+l`A^#4AglYQXg7=I2vB{l@s78&FM+WPv}4V*dzdjEtnc;3~SxN-M`s9V1F!-rSjzJ47RghTT7?cl(` zo+mwyr$X1t;iyyB?n=p@OFvI{3;f=^ReaAcckTLhWLY#-u2($J@ogj$3QPTc@uyd! zx~Aq^ZB^CF-@bhd*DbZZIGFp0WzDTIglIaNUbHDB1RHsVo9`8-Z*+;V*c|^hjoCe< zIrOqI@R+-3;o<7~jXhi+E$KM3eze@ls~TG~=OKO~IXSspS`Yj1n(NkNeNZpag%X|X ziNX%Ihv(Ar~J0Co;V_~ojeI8od+EN<&ow`^IxeV&F<^AnTI@7>p;-tXenu(F5%+t(q zKRIyN`Zi+wY5@9Gmz*j^^T&^L7DL&pX(x%MhG-S+;nQPo6W8;sZqgwCIXtYR${lx% z*;VPbK`8|FY@omYxof4qwUre)QnD*a2$ckPu{QCz#%*%-r?WoYWF?|j;^ozF}(E4;37#Vr4?-RZh z8%&b)tNH2;Wv%*~Q*x`AI@(e0qvG@@e@{33GUlS9qSTl^tFX()cd#P&WPq04IJLN? 
z4HqK2bWNLym{CHHkeGPS1Sz9QrJsv@-fmuY_NJFOF}=vIg8dA$7Hrt!s&?AV3HBS+`_pTs z6YEaZ&|43$9q-rvjT*77%o|)AZJrftl~evG>|hkN8amS&^PraP%9XpdUeW6NdaZqB z4m0T!GPiH1PmGL=bPe+<-%)Oj3J<^S?k@ZxK7Ptxxcm3-MVGPG7)FwEy>+*wH*em& zyXC|>JUr|vw*H9{8wR(vz;~A`SGx$k(siu?R@Wx%2NVjMgdtMbuDWD!?bwS*yw$U^ zwaV{sF0S{BTmBrlOjNYAa8xOJy1V_u!fpFI~bC5t?+vv*cbRzb`?oJ?MjStm}!}+3&7B>biHoiv-_|L$~_7NP4#el0RyGS zeJQ`@XxX5(Y=39y&%RqG8{E z)nfKuv<==ou)VyEorRfO+ zsIH>Y%|UEHQTn50|o%&`ocaMU-9bN*7;UaaxdTw6K>e^=SvQi0p7$%X$AXfnmNI@dtCeku3aN^tsL7B zINWR`L95GId-(97Z&y*R?_s+kI!fuIZ~*iGEZ8z=-7aa$pES@>;o;%wlVXB`l(41k zh3O=MX->EH-C7u-&SNdy&$s|(R?f_fO;&Yvmz9vWaAjn8I6Uo6%+XlP!ZH8VtIZo$ zOyQ}@s^U-zQWJTlvihdaE)Y2T6cOX&HL!IXxDzJLvM=yRo#-mgtTyARv&Pm$&;kure+9~T?EgnU0sfhB!sQ=^Me2ZbQNsW>o;$ZQ4{vuVTUyeBOo~m_yBYAR0kUe zhvmu@%(=Na_o?7Zop<9yiD*ND2`-^8e$Kug&it64+IKg{aJPI73Jx#yd6a*>QC;{& zE+hW;fDfT72gVutQs29G&tODsr(5QCt)C=n5?W&5{fxklr;V`t#!cTMZz!~zka*Ye zRP5|vMvZ3~ptaYkI1cvSEn#wik@Fq#ILTc8KY#qVEhmR1A}Xr-@L{mqxCc)0(~g(6 zfsY?QzB_`nei>EO7hSjZ;>?)NS|!g^b=HYT(Kc2t$F-Bu(1g#=&#TuMPsJ<{;_|;I zG&VLa9I|#_FJFMY>aKIF`AMT}Dugz@mEaP$tg-QBYeCxk4<2N6$*QScP*Yb&LtVXk zRlCBO!_vyCXK3hoV<^c?l+bwHlQ{i(mzhx55ZPJ_ya|cuZJ#t8_Jy8$LZA0-)njF2 zJ08m~;9R#|8OYnNjE+ApS`c11I`VV?+$$c9L4V_Eq!t;U-;BRp(Z<$BBDUwx8v@v4 zmL=ggp8j0iDudbC`&>4j#JL_y!uU4aYIjG+Gx{%m8>=uZ6p|(z}HLy2#PY`*WPQFGkY!0eu}>r~~8TLUIA$;+W*Cv-~AML8spa zw_8sub~Kvx(|X*i=TCPd3{M)YB%S7eZh4MR+23vM^Q>S1K)K@7(?{jp#q6E9aoW~) zH)+s3d9V~<=fuO&vE7TFM_aiMrGR_;Zn2+sFkU(c zyWaffG_070gRPkqwNJ?!oP4&U!eWQBuXt&qsxvu~!nYk1P%1h7zpFfMz-Eu;P|uV~ z>p{p)ntW7)o$od+5Jg(xOfLp%VTKF6_H9S~{+`}dP>{8^=WS?c;I-(z^2yC*w(Z5i zpLQNVBwFx(qqf+~$b6px&-hs63ax+)DKa}{$KJiwSSK1wjMSWANddmZozDQKrWO{G`W-Yfrtc@}7O5o8&vO~n|n=GP7a9F%;%i^+Xe=Suix{MjukUhNWN{KJ6J95ihci{qhIcwq@RSK z&9L(_+iEK?>QMN5DF~Y=P(H1`WY8euy6(4~4{P0qczAg1*RD0GD~ksMjLGh|gd!2i zAWq{p>Ja)hQ7Nkts?Aon49R7n&2Y~tDJfJpo)X$tt_fD1?u}51I4@e1B71?GD3Cqz zXXsUCk#-Rg5niu7YpN*uNV;1WLW${@i_FK$9VdMD?|oW~7qV*sMA>EK1#b#l>}}>; zmvtU^sHHWtoKw)UHPdP=7>|bfyDjCSxHHWac}y)vcG4?e12yq>cg#2PYGeJ z0LK_jH$6AEwUw8VX{5I0IKQo)=9ukdaug72Z-9VpKYzY{>*DM@`Q9uM5P>(UcqkI4 zqUz*N`#VuLZWItO41|@c=;=iQeN9XX`KOxdUwJpJBwhi(a|>{0ZO zB7tE?M|tlJnNbN?Qb_zgaYE(-lpXiU034+JCF=f@VV$Rp{gr&*#Kp0I+yj)BFTd)~ zePjVVh~sWv9V*@$(6vKD0bq>$@#DVt$yTd7CizKuXza8J2?+u0Sa+D1ec*bpZaAg^Tw$v*#x=l-bx4T84y6> zV&dCu(wFEp9VC^%*-b=)L_-bA8VibvkB_Xpy!>~O>7AI76BsO5GAzj8qb(j`d@*e$ zhANRdfmYEzcUw`B2zm??Zi7V<4suHgk5buFOG+-vgc1iUP`cw4tgWprE-ngBbZlM@ zPwm>*%}~S3P+Kat1=-Jaef0U0_O~9_LEEW=u6g8AwYqAp2NHon@g_N}- z#&nvH%|qDT^$ObP3@}kPKrb&oevB0N+A{*|QSIwq3J(Cduiy#1oc>F3F#=FHA3ag9sP)6JI*n;L=%yhby#NU9pAq-10;V7pHjcfjmkI$2Wo`}4>E z0N>{3W}hS(vVBF;V5)e5+&l{oPT0sifbo;kuO@#ZN>0W=DO`PgJ&*f}LeA!4^h&}0 zsQNyGn_uTGLzCB@d>irU<(c3WToBgyS-0%>OL(I}DQd&c3Lvf&$^J zW4UsdtB+5$a;UajDmyb98;;wAk5FJxP<3KNM1*LD&xLZVPMd-UKS9cfhS@c)55#&W z?7+NUb8IViw3t>+nZCG$QJ}(}+;4emo2J2;A*n(51NnuE3ii^^xRTFkwZ~RKb5)2J zBm$A7WbjWx?HSDtv(8nQk*X>&1kCrO%1Lx}yyI&$OTrFH>4INbgmWrZq?0EhCpqir zQP7tQ|A~?0v-f_GA39SzPwnW&!Fhva{Uh1*yBq7xj<$J-UUtPD6gDmhP? z`uir1f@GT58#hG!Q>O6~5r zx(a4lGa>U%lZ?Pd)>nAvj7C>C0$QUt@ME=yd$8J)ZuP{+#~0a-iJXA&v0vHs{)U+e zbV5XW-vlUR0`k)V%s8*l(N3|igtNn>*y6&y|A_TS^UW?OXzfmsDYBoseFCuWb<3Ag zn7Zuj?7Fwtb<@}W{Q0v9Z#S8!@d2pAp6EV}x5RDe2ZRi64!xtJL&1#=ZW3QA;KZk6 z?AuWg9-7t_+s)rU{HvbIV$`EfFOawSZ6XuNURYbeC&eaGiN2sNcV0|oo$+RWmBF!v2Z>>H4!g6FveD<7KIBL3KN;qEUYxw+X82^4Tb1JHi!tLKZkY184BNeuo z3;0Eu+E>r;KQ93Eb^gVU`Y()y-GoKfiMr2a7A~rP?f-HMh2f$-L;ttSafI842fWf` z_%JTQ_;Q@dnlqLn&3|Kn$F9KU$DiD)G_x4<0zkY258-|^oy$*y;cmWCp zbXW|-3_u-4+48|H;mZyBoFGD3`^@w-6E6Dg+qVm? 
z2DuUN`2PKS#MF^blLmb(no%-3<3SuaNV$Wq?t3qF^E4HbB_&{S`qLZGL#R7DTCJLJ}0t?2d?fdto5+ay8EG#Ve zUfRKd58e-R%u3G!J|8?g(wB^H(!5AYiUE>j1k-6C?fD>!h>_utfrrv3+U-ZJOj`At zwSfpT`XqiGV`>E2`QGH_gQo`cc?Zf1J^=v;A?R@{tp%k3r?T_&vs>ghV~kA5TV)YBN@HiSPe%pC7kM6yz5A>vTzcs( zEY6!ycbJ(g!VYsH*QqgK^nb6p5;98A!R;E1fcbbkL#^IgFfBiy7*IPhCNO89@&Jw7 z1aI&%j9y;e`Lexz8~vAM2}R_>{5-RyBqJ)h$2{F(*0Y-7@bK{64Kw1GFJD%*)LWJ4 zByG(D#*Lu7{zoJUoCdK2eZgyAl)_MSCWDIDmBr)_+~EKr@rT*t zhb>rSM6Lv`f|@3tKePKP{bgt42+%*0cDmiTl8>?^=vg|CpE$qZf3LvK%^lKj89Zby zZ)=;cOu9;)U7%Nf;b}272!YyP9^-x0$)hmBsGI%%3TZ`z^dc0n(!fLZ2gyq(4;#f1 zyWz`X1RWaL+S1Yizak{gK0NyU%NbLkL-X;%Z1U-2d>^|v!(*lcV;@AVQ4OAY6TL$c zn-TJ|vd#?urthw^vY8MSA#znF-0Ym3SD8vH447Wn3t-&w#&>9!J7!R%S7NNg{sHt| z|Fqb+C6d~}v{iiJ{(kG&iuerMQrX7He&J~onUMyszmW#l?M-#{Y-JL!eZh*>+{*nY zq2yP;G!|CUNvses^Kw`oNby{TYU9z2)c-~FIpQ`K5qfBdDxmP~?d2QHfF|gf5r^Rq z)~%x>KLgmISFT(^mZTb!U6D4JsDWU~ra=u#WuJ~9S|Jm+;{1HQgNpVRE_Ybr$Qtq_ zP#{2T15pX~MOE*#7tMYR(uvPPV^q6Urd$|Y5^N5XwQ+iDa&S-kFo6JH)D0OH2hcJ_H$K|;^J`Xgy@sPj=HY8o??be{m*;` zIL7cylvrItRK07A{T&Bkj66#F>fqNH+ z0cySge)RS0%|&OCcM7b(tp%YL4|Dx?$@+wdStYG8EPi@~KiOLCE1U9TE539@COpS; z-&hV-i?7H^z>v%=uKep#DsJ3Dku*8l_nRxz*!H*m>$tc^*leH`g z7o(dr)q#G}q`Z>GH1li3v2lvlF@ia;5X_NE z0BUip=i56m3{edYw-H4etaRW*D(n<$_1~boz+H3jDk~|q43}7`s;Hns8v_tf2g}Mp zd`wDw zqi>gk!U2| za`;R}sb;y9lX*umA4Q|zKrk(P{{_4Zp1}S~YvAk;7lK*`n zzz!r(q9Ahz>Wl>Md7D6)m*|VPuczAEuK#j|WeDc?Wq$q(z>T}jZ}xtD3Uk`}`C$&i z#DNiX<=Dr=qk=O7>bdXn$OJm-8HqyOB5CcK%8!dw7a6YGF#7d7ifRrv9;{0BrXM~lTLtg;kM+2^7*2pu%+Ag-ad8nq^+oF8 zYi>hu%qqu-<2&ZvXhe`r^c;~D%;xw0^|oLpQPGh&_#J_l39rAFa+XqX#cdvIDZ zF5kS1hibcZfMIa$1lv*CU!B6C%^N*QlX9km%Wi+9GV#TWfC1JUuW9#8mbYkr-=?@5 zNfT+-TOjvZElA7J5y6Wu2WEt63`tbCXNu%{vN-IeWdoLQhZ7Q1z6W_$*@Cm;4jC8} zN?ClD0w{mc(+(OS)|25}`Ot5`Ihv>$%Mqq0+l}N{mo*Dr^%erV5DMXAevs80K!^kD z9_d2>Na&*T%j<%If`Ho>LgRw12hZA>gMRd9u6+w~Vl%;)4C-LANiSUrg*41HgDPzi zc_`RtUJDz1`nWQ&xhpxhwPP>DD1HcPW$i#i-5kzy_vm6p>6`YU)m=p?#f3?(YsX|S znp@Ba?MGqdX%bW$#C-nD*%Wf;@;z#-JEmNO*ckr#iR*Cnw)spc(q{$)Kg#jg{EoO_ z8Uc>ny?a**AX;i#T9J7V^W&#anIR9Am#4qRcaD>Tzdxp_8wX;fL~>4UxS0UiDP@s6 zmeWD`0f+}gwHf4QG!)?AmI{{@UcWQ%*Kgl4+up{n8LuRu?lVhGPDZ)7xPUW+KHuW6B8M6IYr5YE zVcGO<34|M&Q`EPe-W;^@{Zn(4H+Hdq%&7$K7v2N2kJMs7g%YOC0r`Yz@0*FGiDMX? 
zr)DWE7j@WC{J6NP>_yHERq7S_XVGg0Z{y<$py<81kCoGp|LS|wYqwNey;M{j)qj&n z1Uaz#&&|#zfBj0)9>>!P`T}8|=E_4GF$iISnufq-Z*Q^J_pgHCW=xmf(qcMia4a%B z5IEC#n@{oLa~w)>rt3oQ6@J(Fh<^C+A@8W)z+l7zUzi4fKgam;55U{ z8??eAZA7Fk;0cqPQ;o=QZ-FR+vb;K=71wyxGiKTq&tEaEiOkva`k-Zr_BU(lmT3A2 zC{(p;4q5(PAjklP4Ac0$wKHpla?beRcFGl`N4nWNCnUv+t0p@3Z~=BFlxriw_!qBU z&Fl-q%c)FNP3}I?dW(`JBbCEV^>1bS*?8>a7v=U;+&{Ec-%B&lX}_FXuZ$ESVGkZ` zi$;MP(lbc$qNBB8q`!| zb3y^iAEZTlpd5|G$S*?8xc)n=`=R* z|9ZD{lau9?ktbX~XN*cRUS6WWu+#6)0&j$1LiJun1;OhjXOWX9?ef;&9;Gjwy{^U6 z`XsY8JDO7jq!o}>2NRA!oLc*l8URxU2*fg<-I;lKaTkg@q<^RdZhY^!`@)cq!3pPd0 z)e;I$Y|N>}=MYd;(w)*OB0M~>-bf0-!|=$t=N=e{J_^ApR7Id`wE@q?+bf+$yvb17 zk~StnZGzIg6DJ-QGo4l1y?FUD5d1R~N*)jxR9&bk6ciN6RaJBle=xVRYdzkm<#{NF zgNpA6nLe7yrlMGWc&5jVOLa+{s^a$nT0$jIaKQE8u&4Abb&ronfByVg!cG=xl0b++ zdK?@-9CxQ|7JZ|!O&o_&d$2C>TVFDBS5)?r#OD)YbWE|Aj%i=3yqY~gg#;)MdM;lF z)QL{K^Qw0`$vvOPev%|LYf%^&B~sxG(6VLQ4MmKBDdk{C1^p6TXSkFAI~h`<3d%`Au}MFlS#(lRkUXy&^m1)@T722Jjr6Cb zqtS~;(Om6{RDV0Tugq@*oSl;3IFGO2wu4l#$|nt!|IeQa0K!4g+|gboI&jddT=;5( zF<0|+{W2y-Nm@}=>jGUWxgtH~C#EE&tZh#G1*H-*d#H{J_~&q50(b^2A+dwWAjn6{ z!()O91cp7D^FuZgJiGt^!;tv^KMrZ%Ajf=K5C?S}s+a-Vz7`f9-T;BIXlaL{LU#Yy*%x=| z;>9M%t=&>eK^0z-m-hQylf)sCKx|o}^FxFxyDd4^Ac;gZJ3Y{QeJ~8|7L<1IO8_k# zB@eC?`JQ@Kfx(1mGf*v%5KR2U2{z~!($dns8s@~I7_OD{5T|I^@ZA>$hGl%PIn@Sn z6m@lSz~m~bssW>;dV2I1F;Ye}%=Ke1f4{k3H8MJRLqGoe<3IrRU<^cOvNm`+5?qo6 z9hMYRBa@YRyE9n3h>MZOZNO;1&76vyrO4OhTKJPa*jLAyx1T=Ee4U>JS_cTwee3ia zF`(^3SuyVTAYd@q0NNs=lY<8f$)16kz1sT~-&z+g$Y4un;D#t=1@&9LX=w+7BCG1p zoFXR)=u$@ozgj-vqIT0v`+NWh`i3 zVDtX^^$Q~GEF2v7mKFWhN(MK#S{Y{=|N3A&m&2JPZoi7o^ur&&C&2H_EaB#UdF5w! z2+zo8Mc+|(rjo4{-$XxqSVWQ&{>^Kxd~B&MZNfMDWhOa>h#Q~;C{0l*0BN?>jY~(W zIp;9+#g74mt8Gctc^P=_Z}cS8Yro?iF-x-a;7w$7Api{&x)l!GG$c?cgIJhTx5MS8 z5jYDWhv#{^rKx#&L_kg4kCw7Pv#SogwPqJlkY?+Egt{?@+5F!!i+a-nu@RxRl;4 zW$l<~TeLIO>Z^g@taYh%iHFJP#dv>A^0WJfD3EaV+m3JZuN}P+3x(*hJr*5z5 zkiU1AT=TS$#8jjqWn32rqLrC*)ariUgW+} z&(7A_ma-t}y?63yWGK*XL$are%A8hK%+aJGYDZqF&OY`fS;IyoEp)!6Iis`DON^NIlMIQAA$+vnDF+BV7ZqVW!(Mp%ql()k@ zw}t*5fMp9>RyP=}mt{f@c7A&)1?;KLx!!d7T0i%qdm}K+^*wpW+Y48kqc(@QZ{4B> zqZY9gsH=x!V`EVO+H?BDzkK1}%Oof3@jqwe%Zd1Mhl1&1QZ-rfAd|rDNrxsdgVi-Ooq*c zh6abBt+cz9w-i6mu5&=HXG3D!%(_54+;4R}q?*VjOf_^e|MmBSM|#YfK6`dmp>OlK zk6!UrFrjk>qt{Z2mK!ddh6Z+Z=WC!y170}=i_W3Hz33Bli=|bXL-MqMY~5jZoe*um zBLYBXN`C$dO=f!f0cKg^;Oy~B8H4)rcmn1cNe5h#t7dmw|5TCj+6;f~v#r&I;&;=q z((9uyKqxrI;A{YfiVuWSAYv9(xv32*Iq1uqaP9*!_K~oL3Sj-0FE6|k6)CiE58Mw4 zM1?UA(2Iq6^gn#(<9J`dZ8CFkr84u*4JybbUdBa-8VBaK!`h^KWP;OGr82rO5`xy0 z>xaBL!=Pq)<-L|LZNvRZ#ou6d=)L6#A<`(~S>t1D7*jK(>;hx-U=K(;d1Xfh(N6WbC{4iet{Cmx_aQn-CjvH?xkn zKvhjv`_$dtjY3JxB;SDq0H(B*ynG7>mg{U6|wGUnz8oxNOy4LIdLS}+gsb+7TmR~1N)6#8U0nJTWG{}XS`A^fi zwtS_F=}S!wLr+i7KQfZ>I{zccOVQE9!a;$aV{uGU%MV>};4a<+OV7^DZ3@I9D0E%Z zL2z113L17$Jy+~L;p$j$hyU6BNku8}MO=YrguEhr|@kWVZ~h!tds zLT;T70uYez0R!VCvykV}7fjFUny#%;A;S2;JT_vJ?Jb{J0ygz6xl2#wqUsafgpw;h zjTNU_0Qf+Jh!=UiVj(aV1Bw*(BFQdC3+hs;17M1p%rlHna#>WAc4=t|a+_5Ak8u$Ik3dD=)?6>joIcAESUr1Q z++&07pesWYyG9w&)j^#~5I}&ghx?Jj&Ci8Bj9V6gY>O{@e5}jk7rq+Q#h8YU^?3{2 zylDhz=pC;y`i`eK2neYHGzyw+DvM^5rfS91{d=x30@?k`3>Yc^v;iK1jy(hCxWM14 zf51~cTbyW?yiB~;6)b0(C6k8RLVH{*lb45;5^gjqFz(e@9MpX5+?+5JQh=k*Mhq~sR$DpNnqKWc{P0qP zxBr{W$fuVfd}VgHptGg`R>{#SXsJEh<*98X0)`29j)+9z%^u6XX-x^2C&w>=8GVk~ zxG%34%^|TZi1A!A0>Mv((VZHxtCa({ZoV@i!ee{hMe&WM z8(q#fzZh&QT9aQi3NFs5R;!-z@%4+Z%P}eesX#|8KsQ0G;;5iR7$BtSvc9sYt6t2y zPl|cMY{5wxors>3M$TD|?)Ydz{OMWm(3qIl%j3pv8VLTQjiqlA2C^AST+E4{JxHKs z;3o6fJ+`m56zlEn1w?%LiGurHvYTWn_>{RIcZ13wJZ5_b>H}i1Uc|)owjzL#)@}#_ 
zYV>!+yu{nv+k1?PS=^NIBtB)!WRHa#o8ZW*%ECJk-(RtLVWpe}YVJ#b4>ytMqzs4za2`ZEBb|#1p+R={A{21%o=}$vBu30Gg#$iJ?pbihMlR-jxLFPP)fOP8Ahz|=pI^b0k8TVvIGlFZ0 z=l>u@S|%I2@q=zU{)h!z)*UAtH6-SZze-Dk)XS=>Du&kZ%7u+}3Ag>FNS6K4@l&s4 zk$CmzyKH3}O9z8yKV|ZbNq+X23=(YIV>QyagNY;%;Up+66pOn`O6v|u>w?b8wdZ^t zFzjvaIGL=zL!Z1+Au9lk8T&NBd7Y8~m<`e~K-MeQi=Lh~Ie`#2j|S%(E&O7yvvsI6Nv~DTW54%E)S<2OdrOp1rg7dAoR6uvxcWXz%>HdMg2Fj#ZDVf; z;@Yt)$D+Z-y(c&2d4Fi1a9~Wx8i}X%^q@f$4yqbxgyfTMJXGLI%(fssf?FgJjjUTB zAr9PLt9O+8FW$S6e8!zD<0P#0+8jLSYGq{M;UNLFAP7uaIEV@kRUzo&z;htStD&KB zS6XsXe>FFY*;q3-l=kDEt(UspUgkF%=35YZZcpCceu{->3bPCy&eK)%Aol_5uTmbxw$dl>iM2 zAj@6#OpgH!s=Jmx0BE{g$6E88&YUW!gypC|noMdqxP|9VD_z1tv<_$0Ed>jl3iM0> zF+jPJZ(O4bWI9NQY{i=FA3TfR8=!P{b#1T~Yy_lz`7#=!`%gb-+Zk9NwRNkUzAn@?`i09uTViyNHzIBwRPPu{O>en3jxg9X_DdncA} zjFRCEA(moUm9A39y;|$de<^G?b-s1!MTnMGh5i-tL4(2sOQ*|A^(IVa15sMfkS8vR zDbagZ4s|DwZF4zl->7-KHnqB%9?mKtSuRhVwf(pyQcB7YAdhgY-17;QGs%D#EdwHF z2v`j;UA`>+ISLL7Tw*+b?s^Eh_-M8c{7bLEroH*p3&co>zy6;19ry)1r%MpsUnLGB z^Z|)U)8-VFkS!e;0N}=8LboG-D|m90Q&5yOjw#!WQstwfPOY~gyWCcF=1D;>87*l$ z947AKg7PnQl5Dan9SvOCYOIc)SyvSRCAX_9wBJ%Z$701#&^HJiq`~irSNFy0?cS_5 zC45(prRdq$o43s&@n*|-R(VT~ENV~i#HF#-ykp`y#N04tCl`liY+Qa9Rp*Un34X*S zqr4*;9Xy0&-J#Wc^%HFpzTNdawWu~=^xi}R%I&%~91Q2MIU4J#4c(M$K@$L?3_I+c zaD2OFSVKodL}%q&=rH;tW7eU=4w@J5cve&iZ&% z^xQ^Qgx}a`TEE3=%v3R&YW+(h*Xrk8Gd@)(zA})KKJ`rNcx(-hRiKav`hP-8X3J{# ze!9})E0(C)uisRb()$HsBRCDJBa&wy&Rx6m zohi9~JRHO&$f9>~iH{|`g~*XoIX%<{_vKGnz~1p-F5%=bI^s8oPr`9IHGFSi-v9vy z9d&WP7?bF_g=;e;urna>i96ZqyyHSERPUZUZ04Ot z;J>9Uz;HMz^VQAO6RUw46~{hpDC+82GBHN%CZ!EQsoq{SP`p1rZ=cN=_)5pJozMcIMowj=D?f2NlwyDqg-Zq1I9kda{tB56O>x)p=;_F|Nt8*+vZ2qT)aqQ@Ngyy0Ig0)q-y zE1bn%@Zm7d&ret8>7Jh{6Qxt@5$ZBs6A;=T{`Rs(zU=ldiS~rlurZes*q-nQapXlu z#j8M1yLIc90oM1U-u1;Mwz=aieZ{L9;Y=si8u zTFTm-9sJ73mi)EKQjo{+c&c~QYV&bWH%CSsj^oPNEkw|$F>%p1w^Eo#-|$oO{45(_ zyX$A*Mr1p(i3SEPC@ZspD5%3z?N2XXzV>;d70Vx|rBmunbL3yQ$EB~A85q(VCd@{M zO*f7X=R5Vfr9&q=PB_NJzuEoNv+v^&8>&LDn{ouby zBaS0Ke|q9NRAr=VEIGp`9sDp`-vyN)D#}ek!TX;!A>Xz`b3+m_TEPE?hTE%{vTh?B zzun)LQIAFa5$$#4${$hO(8UJ$=nM#LM4MgROHE}4ZxDqMpL!ooY(g3WWEyZb>?`yR zOti=b{0#jcsk2T|j!}RR416~&Ee!)s#DCS=TqRzosC2z7q8-$-iCe&=M=Ga4ND^H9 zg-~wTJZYvi0^PJ&^-x$iP8J^5LDMAg!EaadCJlgY^O)!?$r4%eO%?XG{K zXQYoPp{}iC(Up|*c);>8l|bgE@jFs_(Mi*GENX+Y6ZsR)C#{1~Xy%c%#?(excYjq3h8wKOx+=O(QegIQOvV>tHuR^4j3o5l3pBuLj7hcBvbW0pEJy1MIIB8tgQ;Xo99gqy~S5 z#TArOJ^_<5l|Vf;b#raOFj-852H^0z8|klVtzxgjp{X6i>-x{aTthdU3^m_Q-nB9< zb*XGI{A;sOnnjW9r&f-mym6%Gxf&Q&kn=#rQX-ls--}e5--Aat$wypY#&4r|yLm!y zY29HSZUc!=KrtC8Fg!o7NBQ4a?Wh4um?7H#$BdsKU5+ldfA@eeYoZhW#JqvUM ziUKOQ{K6HAH=vL+t6zt+%+zMA<@lwDAXW@jJCt;XqtqqM1 zy8?rEBcR*?=E#^pOKx&Tck`+2KH-G3-p=W>R2 z^l)G;K^gxnNGVDGe!6eoJcGuBB={9`;6n`jPPF8Xo&ZzL;)w+SHOC(;yS)6@-yb5* zE+p&#)iQH&Jy@0|aR!5!!QSO>U(a9^e7MnnE%Pa*Mnwp%LBs^MMKl->P;!yuilEQg zizFyCj2*8Jt*j>&&@3+B1C0s@YG~>hetA1q)=xjq`BYus`G*#jS*UecUD{cG zYC7(?E+_ibu1Wzfoxr#F1J=lou%l`(=%Dz%~R6BXS( zU7R!)G)Pd&QgFok3zQ6gPceonYmq$xhE^yIq#0^60b6k_*R+Fm3OLc$a=zSPuSDqsC8HS<1N61uFc z98qrX*{5Y2F;P`zz1Vu>Ag37}B2$X2RN$xp&^ym>UATbYEpR;3dT=4#y-{;a4CxiH z*r)`pwUfk?#=MT9XlfMCjdaI`)>< zxf7FmC{ji;G#8bwzYk7R=Bc7fz8ze13ukss5GHJS+jzOE&8larao??J7q=c;}A zW?$F1{PV-sn`eHjhs5of9nI6fmfWArM;$co@hE2&yU{zzTca~13RiZSx@5(Ew?A&< za4-0|c=|*eGjPuF#Myn>f8T@kU}dYLtu2d^`5**OB992swgJoRH4yGd}z?2kBQhB#n5!l-IN;-D(pb z76PJy-Jh$Oqa`f_Mi~qk;N#S3ldlY1GNyXydf`l%T(bv89C2#AzOH;8nDba$6@fAjG@ z|KIq=IAgqHoa1@jdq3-0Yp#3V_cc{X**Z+~xTctWgUvdZcyueLjl0~GXj`Y~Gybf! 
z1;mPO<93c)AGdYhDSi2Z*@ljXxL{Uh=4~o=_lSY)$kg;FdO^e5<#n3~#Zug0Eyd^B zXT;6W`FQiBh0N8f(KvPu!}?F05%vs%ylpa z_aX`1kWOlsu5`0KC-^gGZn zJLk@m1MZsy@7lKO=t`!CZFvoiH2I}a0TTCJ9S7LQ?iqOIjiwV&kL`Gy+|v;>VM9H9 zK#66j;^Y?5RaMDNxT)hIh-E2Z@AD?NX%9;R6hQ5QO{s~fv%?ZDLi5b`c9wtVaqyKB zqRt3EfUs+CpRdj(QgS7kb?EtBBxnxuEQApA|MW3514<1wMT*JSpiSt1)+c^T3T=Vv(#DO?}Yadu@E6Pd;fDk47&hH$^08TruR!qQu}RWbu=hR zg+r~eD^*VN7=*EC2x8p2j5yHMF*1{{8iRu!ZeVy)wklWp3y;$K6EFKSChZ4T;jr<& zVpJ!UU~Lz;-LxE$(5Rq)GrIZAO9pwWWPH(d@XwT9b>C}mm?5kHH(219!O`ChWDCgi zU1MccH~|k42+YlGZQy)G`lleh5K&Pf*Sj{33Oqz2=uK0o`d�aSbH79Y81o`#})o z0uXZsScZXC)9a3?kc;HW%1k26DT7nAhg<=WZ) zEhqO{>5a3`9`EMir{+)eiGsRtCRcUJ`Q(-c-v=9T-eB*}2=>o}5g#VF$G~A-H8t{B zinRG(n#C-zMt*hUs3QZcxk&fu(G5K6`#5mPf$U7wf|ob+yYISe=%B>F(+WzYl@1a9zGm!9CGz_bGGIS{wu$&mtKz^~_p6f~Sc$;HLRg=hzotyb$x684aN_J&bN zbdk0BN#M-hnh%qF=BuG)F))s<;bPvW?zpv=G?8$hnBfCCw31LlZb#2D+^~eNeBHHo zxvQtE8wMW@QOYS)um8Q`q*ZXGXx!11k12rL_xSkqk-way_`j;cEDW4e13B8D@rJ|4 zLgZXfJ}HG<|3MC*#LD#dfYb&En@n$*!IuQqa$k@#o_EIaBSsl*ZJO3LA6nb>;hiOd zUg3<)pM^yRM%&_lG_69eJd@0Qk_4d;-q^>~^&;-OO9j-mZk;XhU{3&fxBZbqeS6`3 z`1!JRE68EDe<&|6Fz;N+H!E;|XyixoN@U@qTk2B`>JzV~lZnk%RMEdIo{q!QzO9yx zSV}oQC2dQTf1YY77^i5BM2D1r)4x3g(G0-BN(aXd_;!%yHo#!Oc}7e_^A^Max&H64 zQzC^Ikm(+dg7+8M2@j<01$iLCyF_s@UnD*+V-!W@_o3`=@)Q!$Iq();D@_H{MN@2y0s z1a)J{EKI8`XXlS_J|wk!v0Q`3Eqo6#SJ%?PlWX0W%DlhCT#HXsR|SZOqaOS_1k}Ln zf_Qac5Bu-zysDO?HD9C%dvOnBW$>ihjtuXdA+szkpn+QuI*dQu$Rt$flS#|AyiQo}s`V?mL6h3AT|V!XnK zy6aeqZz{54{69-kT>b9=gIpXUAibl{0v<%a1+AzzZE!@t;d%lI-^dIGs<|+Rg{u^Q zP|xE|_4%iqObqspBpyUt?h0~55sq=@^A6;(^# zfW`q`f#9AOLacq7dw(RU$;bkMM+bk)>4$WU3evrPOP}O@8T}>K@z)`pp|xe}5SXN> z`cF(EBs~4Aelsepwud};H%=jP*3OKtdQ8_3mLx(cgp{>K5jEV1Btd;0tOf@n!Ux$~ z{U@a%XE8WXL-XHf?2iP|oe-c<@NPSHx}UgzcaDc9Cx~^Bd&c%GviBLwGdmbuuHcl*W@A-Wd(Qk{Ms%0H^K9Jl@Is4IqyV`MuCjrkxaHaB zZg8GK{RG}(NJfB)qa3kvsifMi*$@!OZ1t{3oAVq=?3&L28l9A)Xt&OGK(yUVKUc#ORUod)0 zIjXs-sX>S85=tXgM)C%ZcN1Rm(3-{xgRpY~JKwybB!mTrO3g_ox+eWP4UFtl`f=S< z?_JfsgB)X|*9hL?HQ|#q&1B6e=bc?aeE{7SL863+jW&be--!wg@UdkoH>%Qy-1+aMTYY{0%z-HR zEz3>EsrkIWKWx;w1mZa`fQ@oApXq8iL5p!4{ba(?2mMR+z%8D{+MLzNfR3Ka88;=c z&^SU$S(fiLi;TXJOiM$2dvKn?2Rz2J5VrkUnzca_*0@9N=p^cc<>%L?J1%_ZPKTM6 z)QTpfkG@06`i1l3fE$mzt7BR6iv_mZG6gUF{X&c{_1s5+36v-R@-003Jnk@zBnu#Q z4m&)&IZy^Lfz@3$oEA=!S5PFtfkR(;CNr0J_<+5bz;*mOx%8`&K^FFFRG^f?=7xM} zIGRS7g592247#)4hS1KqmTf>)pWpXZ+)V4Pvo#C$mQP``cgTNBze*nOedEO7?dm-_MdHr=58lWfwJTW5BOMO$oR~i?WTreO6*3aj3D`AK=Yq zEkMz*=RWhoaR#eq`HZ$gt^j9Z0_!4lp-ug2K|N4qIh zbI^Qw6fZ!AT$UkG2vq}Ye}SN(AchE#kbv$W2`*a@!>1gbpINHkByFDjdD*9Ik@}R$ zOgKAU)H?^)spz@HOLT%Zqivl|pQ&t-kT%xv`Z#_EyB}#JCl*$q4(7Z3(ohaBxj4s# zngmI+AkY+)uYcHt@R6^J9b{d<`WwMQF)K7H70p~42Lmwt-f}9n^Q4=#`k`)1c22sY zVazy6ieFwQT?#fR(t4_+g9qhzm`ng{D30YiL&^@6=e@JWDCc47wAN$Dte)Z8RFvF` zjBDjdcWCnBz7EO}h}U4?0t?=37#Ya-|9XMm+yL_+AdoROz6ROW9{|flo;29Up{EG! 
z0$*sJM>0two*%dvq2L|lLbx6vL874mJ3)YI_Fi%qDwx6N!@xAQOA@(vd9r_m=?}gC zO_Uve9i`h%YWaibYh(`FCLPuEqs46+sLyLn%;Mm&$3{WC6D1sR3BrQV@W4jF>z5dn z-=A=6`aFD?2L3iEzQ9*ldEq58@e^=Sx zR84~@usTHCTzN~yB2lmw%hyHJ2RpdiZ(}$kg^|&+QR$SvjqY28iK(glB6YL)-Nni3 zyRM22i$@~3zYj?FBCgykMgR8F{r@__%najiE3JorVnVPD>a04%s*?n9y&mwdz+nJR zYxv!y932Zuhujn+)Sf)?1&tPLxJaV8HbTFXRY&BKOU?OZ&vac9j8Y8DUoxSAjRU^P ztnyFk1PM4oh<@iT1{l|dq_emJ3IXZ&)Ik_{q5ZQf@C}prG>JGMo(uFcz~Shv-=oBX z>VKc@0}w^86%AHxi1mI0pDi?697zn5gqmXVdTTIyx|-!-2jqHqTX%6oe5F5}h?d;S zj%|)8FgJ@0z4W#lNd56M2O8ix1a%WEn|5V7NJ{G6bp`OqQ(^R(gPTL6@I}<%y#ZoI z=D+_!qn^m2HCe68_80kz0EI?O@G6>soQ8gx(biNQd_GE1Zy`VvzXjsNu$fPSaM6FA zds(nkkaOsSK*n@N?3q{I`fDZH*Qt%TuZGu4eH{EZN)-tun-6t4Sdi3jxk0%x@ zHBCNr2=M5IC=rYi(m$f2dW|Gd!6JkN3^P^rX1{r?P;FFuG72{*tR^V_Nba}bl1i6K zt;cPywAE?GL~D70YRW@{eTVq3v&R2D=Yv)FX zSZ7Ax{O_QZvmp%+BHxg#nZ}vp!^=4DFclljaB0sn38z2zh)X$g86nLo5m2fS%uF)) zF)t3AL=pTLXwQt`aYwkSbH2F}Mu__fVyU2=0Z|CKf#jVz5u(PM>}cpH!tBDX-chTV z+c*!o zn_Tn{3IBf1zFTcP(BFK#zr&CbEvar>`iRJ`>o8N;Cg$_!;nh!nCq}Blr=2Nq3M>G` zM+tWd)S1HIrXBzGR54-x`fr0~MN_Hlyorbl)s4nj?oFv5m8)67Dvr~$Z|F4C%fKnF z_NXVk_L7{sp_PRqLAZJ{>ehzK4F3XO850r#`V?mmztU5`tq% z=WH$L4>cUz&J53v_rP=n7t`KqSVxG$m~@*D42gGuMnLeC6na1T;Ka&g?c~Pc8zrk% za=B1vjc8&E^fjwFp?*xaj`9`%PL%zq2F-}sgFxB6B6;d%*_2(Gdpx0DR(}fows2yj zq09+Z&kw)8d{7+EH|E41(X)ezPj*=Pl46548tW};}96#woJbM@e#`VTV#YSAG;Yna}* zM1r>risi!M;$JWByL&xVl$U>(zDd25KStJNOHbmLLQWN)@FIx!QP><2Hf?myxAmo3 zcnJU+iVBoKi)r)FaXzT^0saiCe8J4+VC1BN%dYrsV*Kl~$@>rJ&!jNp zMc?LfEbrSqU97rgF5suXctI0Q&W&OediXjc{T##P+T;BSZ(s~}c^;f?&*yt7cdzg0 zJmZ3I9>naxQ~iwtU1-4J4M#|dZ}bW8RMl;Ur9H|xt@WA3^9lsL0ZRfh>Oiq8_(ls> zAf#6iMF^K`@Oj^imPe`tU{HcNRr$K9%jW!NfdQaMAyGE?UyO8&ujw6Ge^h;Z)<98_ zZj6hM-vF7$p59)>v;uaZ1In+E0D#OzDwyg(T@c>yjt`qL8~1SO`h6Z?{F1%={6_Ye zK4R8{i%YF(!JdXwRYlM~@7V4$0@|5~2fSJW>l*v@>Np=;?AyM*78#9O!KbKbn|{DY zp(KKhn(Sxr2gUQ2`{WCIaVY~8vg~jIh}8+xvqWaK_I;jqBaun%LE_{PB59AqVdTKE zbK+ccqsRO|Y3Q9}VvTl29|SUwJ()yE#4{H2p%j8E0T4ex2j4~lFc5Tv8!2>LDDS@@ z0{>JHh{{OgA8y9rB9j4%BK%*zTFfQ@mJD$kAgD9y=J=cmT>!j-`TOBud;G#?LOoy} zcU>zlhPT7i?KYM;8ujV)(4{AzB$4a?wT4e9b z+hU`rFm%Xb@RC-80+t?xVq@S$o_ww!C&_BnKRq3PwH-&WrE0-WM68R0JUrcBfg1EjS7cYV65mMt8EUsnV86jgf$=+&W$W!za};#ALYpQD zt#*giIB-f|Zc*Nx7e5mq*R3p{e6=h>#~92<)YE`??t8AcgZ>2mx`xIuntZF_p9Kj5 zYi^RTSO7hdR8~fXRIp&Z14;%F9bK4s02WflLlgn!3&j;FIk~(;=-ZSKkbarLy94WC zGym9w!>oL2sBCTADWb}yYji?|)BPdm^32A>iu&8XX&`#pTOF1-1?Q&~)2b8F^yTqN zV`OdjjA3u(_2y{kS23XM#(!*Ud=R<z!hb1QbF0e5?cK`%UJU+V>V>C>te>Bc(ZKvL_+reXkm`6C8f0U(XPofE+q;B!JdKzNUCl2j&MCnJw zzcn9`IxVqy@I@f|6D<$z4oNzb7FF-&Y4I&ax3uPK-d7UGY~{Wki0esQh=c4N9=rtX3idiLQBvh0eS%-X^<{C3Je3#T0|7- zG-U&5FoGb_0MXYp-#>Jp0FQzM29^Yuswr{9&5N`U%VdgH!UNWghhIXw*a)X8%5n47 zp=ETq7}IzF$J}hc$HYkp=#Jv09$z;pR%Rd4A53WiepVtK-_HjBy1_G!YAfQ2< zf-efbWg=SIP$-^jcD`h!r^`FI9Q4O}=abSp=m#e>E>`>ONmzXo2wA&?`GcRfl-C=N zUOxU7=HvqchF~9t;n}{o8>G5m{cYtJxRT;$Zx(^l<)#8D4_rqnV7-72mxv|LeH=Rp zELL>Z9HIXMDD`NI;6H>)=G+gYen=aCegqAk5I|PTWqwEQvWz&8ybc6y z)X7QRuK>CL2zVIMD*t@@+S;omjvo6#6)_b}W|t^za6No>?~4rff!AuLeA?*D=A!vG zKV+*hcm_;EP{#gnGN*@(UJe>;wI{T6b;Sg=z%{d|Y4|3X6`~m5 zcw@!&NN1&`VS?=up>Ddm9v;~c5bMq%coM**i4$Q!*BL%A_zVLj{9u#==yYQjS;0Td z^~vPJTe{?l29%y#Zv2=|{VNPg2zs)( z{3*KudM*_&mIOmzLOdq|2`sjzfqLvzu}C(C;2^9N?8o4sP*qby_5#r8U{n90!4Au= z>iGogf9NnWK~qUOYCbP7-nW+&I47wi2SZ;l{>KWVh%YVWgIgR7T9QMcDZ^HcU*+B- z0VvpM?hnP4UN{L`Ib-Xj;FLY`ZM!~`iK(ujpQ4+adsGnt=q0m6FY^C&QlKwspWgT_9=uf#(Wcm&XL05>hz<=eYN+}wcDeD~$tL%2^N zxI7p@VV$=+y6%ij_W+!LFT8k=M}#;vi|_>mwk3%$5*ZHgZa~%qp=-tSfPM)70g!tP z+nrG>Uc`w={u|iNYfn3B11CyAMdEX&5_GM`K&*r&(Z%Wcy0bk5}^n1{O*= zU9;Z^fb4K^w6TD&0G3arNnAg@rNs)gBWyzYU9E*`xyOGf{^_)8u%MGc^#)l66wi79 zVG33 z-x?%%2zZ?X6<@WTI& 
z=pP)74AYD}=$e>q{;aZPRX6zJh3zF2-j&Nu`etLknvz#N#${8--=s8ZNnvE!Y3@F0 zKK{0JF25~vn&l{HOf=;mBCkLqlE>CKsy-3eX5^lbn^U=mA;NchNnSggMUNyPaHH`?bW3Y+!IH&d|wm-mYSh->Fw32M`O^WmE$1D;o`4$5?pNQr~oeEO=U-?AaY>u4I^?)BN<>pkNFild;ag2A# z$@y;s_K9< z_U3aP`tsk1U@`ntdgbZ7T}^!^>QVaTW<^_R20e~ z74Pp069-Um)}YFJ=MX~Wtk1=P57b5oQXGgZ-f@?W0LF*VyY*nU3Gn@#&zFTiqX};A2R4p- zoDS5*i%4FU3bA|5QR!lrF=;(1pyB&Kp*qIj0mU~k5L)rTCuBhJ;#R8mN-JaSK@m7o zy&dSNNA6U2z2*?AI8lK(a_9S|mlr296M5Yu%i2e!1E!5%B(=YKRg~C=a8#C<(yu0bJGi8LFPNf02)Pd!zxVp8KJ3y8}=ou>PRWE$QY~)>RItCgeep z5D-Xs?qeB?t`;jb<3+Me$7RdY0CBq?zop6XhWR{i4m)w4UYI_Kg=%uJxIm^3gjW<;~po7Qgu#r(Z7yLx;G z(+Ev=B=4}pEu=5~as8k^WSvhTU)~6{;F%4#XXR-M_fykW+@ za)rydIg)$O^?%Er8(<5yV7Y|UJv8BvF%Tg8LJ%Hl&;iLAnVkhLfPV;RP=SyXH?*Sm zz9?p)TVmy5ut<>|9RvF`SDUGjPe+gnpSWkDIU94ForL!(#Z>kf@?N{hv zp}kKHhT3>$M@PS5C5~SB(8g0v{8`SeKaa{*cijz*_?Ws71?X zhXQEu&2W0*H~JUHgD77(u&x5gH=^*I(|h|0QUC!QADPntN9{J85-hOK!4Zhsf<5pu z4X;%#Yz>H53*}w%j~`D)SfA?iVS(X8QNgHy%=Y|5kg}LgETjH)a@4)A1Y`hyL(k}Y zjpU9&B!nUik{}ZyAZ>t+s`7B=EmS@L2nURNUxrd}i}AZRtxk*lkFV}U2);MK7(R)PcPNo&B? z*xV{Svm2gwv9f;IrBwB2DCsaY-*wn}k!YPuE2Kv4u_1?rCTEZz@+|PgYPQjQFr;}s zTyVp;#rPL8#0R;J^6-%9dmmlDdbQk?I1}L7Ae*G^!ThOh0CLZ|)SS?9)SSLVel72l zb>vxob%F~KG7AGjt_Y?Zfri5`2zFNI&aMcKNB+%zT8q+#hRo2u_WCFasOrPXi!=Si zdAJ+bA*168PQeromy+cJdu=T(2u2}{R-w#oK_qx2(UU4=V0RZR(Q#v@d#CcOls5)MWR2LtB~Fy z%)bb!n%Bx^8~0OGA3XgF886O9H3Xsz!abYC!`DTp(tbfE(AeL1i;=8&Y?tVlzdL=? z$@q5tp$}QbQ(Mc1uS*7{mW2e{Oi{wQHJQa2gcP1I4Qz^3H}Wz%?9@%H##7bTE;7?? zqrKh^%Hu^|xyL6>0cJW}@t})Xd(j)xdpc+1XVo(&%e_zw-{{~V2DR_UW}w)4^@7dl z1?efq_wL%|Rm&2-Evu3Ae+XS-zQpeep`Z)&Eq|b=prDuq;C2HHavNB6MyjFXRPGnu zNd*Hk%qi8L3F@I=NIJ$>A}*um?v`O0L8PQNW*RC`^Dw9^pM&oTd^q($bvsj2aXxS) zhfa_~`_Cqa=RySpkgwk;Tf?==%6bKGCXa{A5+0X-ZZgDTy&r$G_KxAJgnhka`S;QT za@gduhn{{S6Z}58e%caA>dCi=R;f}=cfG?YWKe3R^J$;GiB*@%)$v5f>y6TQ4K)$e z_G1H~m!q~dKDlCEmGB@e2AJWak*M#P{^&CjT@7UrrSB5j37sr-QG%ISUp7gWj#K5O9MfN@))8UUZ5FDr zrJ=)?t;}h2!1+sf&-j>zWo`IGaZ#;MIe=gCc#_7OX{K~nK^BU0J@@77%l0xdGPRI< zbu>5hrAI+K9bO&a!B^7{QX#oem^p>i>R|ta1_nJ$C9;5H7!&-VFt(@Wbe{7Nrhz$< zYdx02(0OnhQ{Q`4aQ-7)F+<1^LLiv4kpxwLKbUFV(+W;j%Z#~7x!+dRd(C=m^fttD zXLMeUQ=Z=~Xc#_VlcK=DT0&!l;u`jQ;UEIKky!yi+%-TpBdrN)88JYh!|25xOW*Uk z_ewJW#(qy<0`qqO{$Z9rE#=svpzv61VQjnGc9`%pT({=Y85s?JP#YPUJi&V$2q-)l zMcGxnfjbLU6qNVr%XY3`X5^P5tG9(+mS;Msm~rm|vAqCtQl2mj+L&rlbZ#ua}FnBt6AQjGiTmXPby)N)f9^oe6oY=9Jt83(=VCY}lm<=Yx%L zi10RRz954>pG!nZgJB&gW1v?eF?>)W@NRQ9MLxfRh5vGDh>HY;1hZiuf^8lqG=YyB zt~n^gB@fy)xW*)N)DB#Q(oGXZTu|>Eo@1X|jp3mA;!J&O2wlXjuIo|bPUzS5YK%CM zYtR#_@el@q73C{K94e|foBP@;nd!fu5*BZX#vIbKOnm*{qc{erIzsz|e8G{BIzZ6D+Abd*{uI-XoDjKlAD>>mH)1%1Tc5uE*J{|Hu4C52Lh5C zk0SjODfC>Cz9iz!0VFX18A1H>5T@^&RGRclcQ1@zYuV_;BozPmh(So}d zqC+|=1Z9k37(YA@Zk%em&~3;kjLdFBdh*qy$U3)|=Z+6PGu8_RhoX@r?Fh?M%1KK3 z1)&|geTa&hcClbaxx&!5D*B6QLZ4cO^Q~ZqZZ>A-r0Qg{G!v(g(C+7_L&=dX7`L94 zL;VEZ%-*4wD8y3??pq|=iOj%2K#0HwhD2!~NET4Nvjg`GRCli4wW3}>EKxb0v0G?VG%m#5~# zU3Z<7ufok0@oxfZ6w6Ki^mhv-1J&RnfBcJ*r0+Sz+h10XW%*lB1u+6G{O_;M~MA!KHuk{(x4I*vQPk&b#_1cvdp}8)nO8&`&tHv*I!Zb3HrhKtc$|;8wCRGgVhBZ4%*U+ z@WrOO$23)(3vss%#r9)8u7!5WFsPa&GWsBN?CZBDfq!kubaP;NS;TQ_rOK&xE+2;> z-uJK@=hV}Y&Cuf}0XFKrQP6boajG74%+b3GhHU)MJ_VN^`}>oTae;)c#*-yCnzs`Jz#Z?m)Y0WLZREn z@p(tP{;u1dkW+(?otY#|wevd{Z+B$zl8TAq2@q`u1NEbDxMz7wpF)y@f{2?B4a`5v zoGSWNlfuhN-5q*4-1XdV>;3_<@s_@vd(T*n9d!iv{;@(qvjeZ^6^p~%E4`9<3Q+=e z!pDshjnpAT_L{qQG1}%{{M6n)dyQK&S}H|RohcF$n3~~sC$FGrs?DfDq-7tjR_EK2 ze7z&SQ3hO9#lf4m{swCbNRhtKclgN5g4)6Y&oy8ScHUK}RxhTffkIy=R1 z*>hcDL3G=*Ip?oZ_*mU!aa}Yb5KJ37L6Oh%HHn!bcW9i9^!?`q^0$+E|D`CIX>vRVVLC78bSzneBBC22@vX$rX2#+dSPMVpS^SUgI)3^ z$+T@xd1>sFep$wtri7wmMGvOtL^aa3dp)rn}7DV8xotk2N 
zmGvkj^j&IMKW2}KY={;(Q?e(^TDwTeEd)FuD894SAe@*>eGh9hZ8E^;qm^_|i}?U~ z_?IAkM;ecfWwP;{!|?+K19hY5LiX#-0DhuLM>=2V3&RE!Qjk8%S%DQ zQbvYLmEBH03Wz*tJ;HEe;pUJpPi#CU;p15 z83UPHcJoty6rvTvqXX(CqEv>;qzcv{g~jM+|%N z)V_|@t@RvdL0*`_pq+7vTL!Nwq`7%#;G=_XRb{JT(6Ei9vJ~E*H%Z*p7u!*icST;} z&~i|mPezTuC@h%{nkI=PqJ~BeD&_{`B4*Ygd68K-gwmDOqG-xlXmq)Y$$ujpg? z^5fzqFtP3TYZACv1%ltkyb7F^$ekOSG173rM2PPszi~fZIJ&^jXv0L=l}pW?+2KfI zTW+1VdJvPR2vB5@MT&6phlplRqAF6ew8~IZxj?~g%F6R!6WZN}ww9;<6JKG1+wApN zXsBGdhrhyDnwXc2P4*^0p%L5nW`TR)Nz-G=?*kXEo2>&kgo@X>*DStapbv;^-uKK%Nj@x0T=8~&hw+n!M!M}~rwFHq4V<^`5YN zwy$ra6`U6lEf+CJ$I-Y}icy)EUu#+AEXzvny@R%5O>V9kn|g_p!bV2p$=!`d2_u#? z+JftM6Zml9b>4*nd?jtM3N+0OXYYjq&)`g41(&65i;Xg9hFHRstpz zq2LQ>SLVAE7g;lWzjpFXrHv$M?}*gr`-Bwg;dpk0)l^qAZ}WD`Ipep&^BQ@KQYE9QX1Wu z=zdAq?-TXw^e=X=r=|6a-R!2(XW-%DV#1@NZh&#P?WqwWt%uuYKh0|39zyx}@z}|# zOJ**3(XzgWv7c>f73ysq9=R(IuYAIfzBaw;FkfC0)8EhEzaKI;pVy9e$iuK-G_r8PyL!s^!fO7@@q8<@RxGojHl$L;`?DFT*MVmzcrdXv0LCE=ipPy=)EqG$e+Rp9 z*|oahMBBxM z$8pyNS*jo+|F^6Vr}=tY`X6v@Ftm-j!(EKl0fW zi`h|DU<34waA#?jcCz&jjDF7-B`H4Kg&8RgX3}|s1rW^yOXo04V)9!#kl8?LHi410 zy6&^-70=sdCZf-AFoe;V{yn$gpH2!K{m*2A`L@Zgh12r(j9J<9^X4U?7fB4?*9`Jc zQ5oa6WLIw@zyk7RvT_l7D1Eghu2ImQrHkIUv`{>uOE`64L`B8>rNY*G5s%|sH5VMM zYsBBQEUUiz?R17+K}#`BGWA#E)4<^LqBgT^Qh#Z(MS(R78X+Ft<~@GBsAT1yI}^;> zx4vho((bH%Th;BfR6D$z)No8oK`}U=^R?Iw{Y}@a-U7omRsc(`Fs@+&VxH{{2v;E( zp0Mx~Saima1sJTXrWK}@^UKh_*y>BiJ*T1@{{CL1O+gqZ<)JhLc)K#GPYZSrN`4-6 z+dFUE`8}>%-!3Of)xEih;;A3>g`8M zlD}$SB+&Bo&+`Nw?xlRK9+CT+b)8J|G+LMd>kzBJ__TvSa}4bUhU^daeO{D@^>*&Y zf=$84)jfCMzNifIq?t7gWFT{jTBKmtXT0~W0-_`D;nMazVuu0;!#_8aXbIg$@cg)7>=kpZ)T?u-@8Tn+U+RQ>)+?uRwq8#-<+yAmHb3aU25KaHz9#9fKe%@ z&A#u1V1~i|!Pc+bMDw4;uG%kD*}O#;sBNh4wrBcSZQ}IWc=+d-1gi3Up`^IlDOR&@ z`JS`Vf|*8fPM}aO<6?|vp4Q!9HXM!X+Ac%KN=nrd4SsuAi1YV=iTGCYBJverx}<*p zj?6!U;OpF7m(c?7!oh5r#`_$3|)86u0?Ml#{5bYE(pW+o*qeD`v-p z>mdtv>Ti9-l^*yJ%C)ID*zNVKT3EUZAX~d&I7>d zV`DnEee0kSL!G~y{o?(SjcXqS-rM=qvl)h z0Q(^0TwgYnIgRpigSUu_TDwx9yw63Pkw~ZPzpp$_8{5AQ8Xa^ePb9`W9(UZnD2t&u zkCNz2{@82w?iv7o!nGXUk+7@Fz}!)?uiuaq*Vt`c^M=*9D*MTu+=i*Xw<>Yq5xHiPH?o-E41p9Y3Fe_-3KUvt#y-(pc zB|(IaoC4*ycqtUY%5Y5%ZuGac-hi_Zn{Xo~i7{3sLlG>bVgN~l8Nv{nQ`XgeY;AqV z#>VFG_&8;7^|O%MJ-Czs?I!}d4f)OMOfZrHnJFP8B;;NwX8BgRYCBgR4py)|wrC_p3Z)*{1U9<8V0_S!u#wgEQDD13ZxGh;-jbPCMqgBX{9_#*otO)+D;VhF z+ONcrjlo8K)%6dY-4G14)UO;3Z)$2Hp`mGmhW@wj-z7eppkKOl85h?OsvdA~%&dB( z8xO!pG+^FNL46FR6nJ6sD2ZLtrlzMMwW<+8{=#}$(TxbgwQ%<|fo1_1V9?RgQCmBf zE#}adwNPRlOgOZ%4nt1(H>mmU1N;-U?d64g>&^f&*L2XI6BH0$}9^r-Oir-)MYxjBtFhTL109jcY?N z^h7h?5EE`dpzRsmrG&Nx3nL@S#l;1PH&NFv$14spCFgw5$mJ2XiNVTZH&~aj znb$SvK(7mA#3&bolU)+N%1s#U+|dyRCngMTLT29p`VTCx2xuP=6tJ-88fk1X_*T({ z`1pqEq^k>liny?@nkc3Hc_M3X%|l#$uDkCNsh^sWRQ&GYBq|82FtJYewaib|#7#TWThs(Q_-(8=VJ5+{|`54|k@3$)%cwFNmQ*YX$)W_ytH97`oun zt_c@jNHV&{O+42uw)#A?_pQ4;wgtw6G9TAQnu{zarqjk_qek1)en065nsp->{GrQQHABh&jBLGgJ9TWF z@(WG;!jLh~&c<}S;#tLeR6$@Ih{%!f%AD*#G8MsVf?fjo%OFLMh0Yt!5I=^g7%n5E zFHY9<*SgB~_Bb3b-MxN2u*h-{-ZwJ zDh3}4>n&Z2L(6Xd8h_K7e`E2T=Xr@wMOEJ3F8f6Lgx|&xad5RPe*b!Ba%t`v|Av6m zBl~b(zsZjq9JN-z9`N!G*gQ@Ya(kH}>|jPx0W$y&4-cQHt1oU*6?^S-?k{@gPw>pp z-GChpof+%jiNHlqPY=P`cb`7J=qjhDrEN#1Ru6dYU7~p~_j2WHG)u6^^k=ip>&`RJ zuPvW)j~7#Pjl^HIcQTt8iSKMP*G5HNd+lR0EH(4V{&e{Z6+z7?j{b#n7m0njRm|v>xw))sml<3Jw^tJvHYBghDSq^bz7`c%5kYf8SX3f?jK!u`-^f_ApOYd^ul48M z+_m0k4>3F};*VVxJ#nCd03YyEb#=46KSY$2u5yX|v8z>g6sw#(R^4>q@f1w(zyL;M z&vV~>;SuW^>_mIzDtWo${~dhfj_`^urNnhSz7Ll{2*)Go1bAO*+4X}2K5q`FK6$zj zm^K`1MQg*%7kE&Zb?3V=ZGDc>dNT=8o zFr|LHHgmi?`#ou&JFSc6YR-^Nw22Lk(69MY-1l{IaifjVH24J8(-Y^vW$~ob?l!d~ zN8w$@F?R`fA)ub^ZS@&UqRiOeh$BRpA$KXa_N?W)%?h6AuTZXpdj{nm$F-euG*k_J 
zH=q@83|U)Svsu+-f_ohKFMp&&)Wy@jVUpU@Ts}0{nBT4Sr87yLM z178KpuGkChh@PPkD{Ep-Ui_V9S#uqi`kTzyg`A!vvFa|ZTVs71PK*k}hU{{lU#b$d z^Qbqv&nH(G?rqMx+$y|3JZdO$vUQCAAaPIY06)T@ZJU{wwoR6$Xl*gpW;7| zNu6+-$=h)O(esK9stQ+UJ%R=IE)M2m56|{DyXo4BN*?3W)LN=fz8v81T;l$uk01Mc zlwWX-hL~J)VMCnj8s3wM=S#VTMe?I7!zG=D1OS>h^)YiSS}fetv39UM5ivhIhnezN z90|`VoOnXV#>Q-x|NRd1|D7xpDMd7vFo^+HF-+PBrMTE9K5-dHQI>o7?zb6(70V6Y zci*@=zV#!xxqWFHNlESsXR@2e_*tq~M~i7iIQ=+UsdT;BR#v0$G-kXnDZ1XQ&_+=* zULS1f!|3_vVM;vQ<=+A&Z{@*vpdzjSZORAzO%=jKkZ=8SSKEWFNo{AHum z4XP}6^8YIU~2z;moj;R7T%$ zn=+f`mUZsljDsaZR`$wE9}NEl-yi>~RtjLtd|FKONdT)1l%D*Nnk@@^*m1|1s#oih zH=tDJwVhadV!Hm_z{_oCJkT@Wf>rY*;wt4*$66Vej?e73orIVl?6-`HwKVg2;t6{X zo)}pql|($K`9j_1mCr??+S^IS;=X6P7k}m3eX4v76Ny5t;8pl${EjPMXEeg(rH@}< zP2XKr(p6zOr%(9yOnoC|U*6t~yepSwy{xCCc#4&k^=;Q%cr_UCxGIw$RQa{q5b z(`e?+Jgi|;Vq4;+h*ML7BWVvV>M+GCE$2~8?C#Ds86BCd6`1-sJ{Ja;^w(O71^HDe zSKJK7Z&=V!yLpa$ed|2V$Z6767^@W)EC0Blf5x$$tN9blL=-(OZ^EJ4UVn6?$!Gr&`IRp;ROH=Pl)`8P;&T^l6p}yDq z2K74fu1mtjqHri}A46&B$J*Nst*#=pdAalD7dfh-Vq1S+4DDCQZf)DC(`u`kJy-EY zL-k?fe~n1A&FEPDoH+1vVN1Lrbn|K_F-c8WY`g7D`jC&!PmLip=EVI9kiF)eh zUQW(0ZZ%fN&#A92&#ucF?(%Iga1~3=_;@To72*3JXKzhb@9!SPXNaCE5m(RUeY10V zyW`e3#h6$XxjVV1Z<{)%276j*n1i#F{_Ymr@~J*U^NolVtfAo!({{)I*~F}rAf%}w zhp8KsTd7yjv3?$Q#qQv#ca1K=7re)wj@B<@PTMj39mM}rRcVNNMGZVEc*ux9I{n>2-a9(J1k+^3swJD8kpEHf&#oxWc`EE^})q-7vT%#=C;JIGDaJ;%oA8l)-u+yF~eWF0V z%EDe#X5WzhQyH5y`Y`#x1`F%gVWRQ(JIzHtQ=Q&3MW5K%F#hmyYt?Bo=|Ryo*Z01H z!O{iB?m}Oj$EHOEN`pmhPg<(cp6F;d==bu%KROawyR2NQ6X(|EkJbe|!Vbn(R6KR< zLP~NUGgd$Aa4P6;)pyR7#X4CCSm*XMQDSqE4s(9vmS=78yn2HKmd~1X(87)L{8d); z^s%wDf&{oz`c1V;;`}R7WQ;f2$|1U$dNi zs;9k8WD!=7G*Y1uOh}hxYToVRte&E^Htt(D zlO5h*-YoW=k&pLfUhU6}+#I~#5#ImD?njD0QO<+e<9&Pc&jf#a|E`Z86mVSNRJ18h z*%ZCYx2Ewp^CCToZ8!bYrf0^`41OB%l_qlGa*2|kiN?y9Bgv5qpiHU&SIsSIARE#92!GrRA}dfP&#!>z@= zhBowkW65V({#F)W9x2k0T-|E)TOV`YsaGFse82UE7|{pYqGPFKkgb-f7_S~Gbqq{Q z`#G@!-MG;oS|$VxLUsE&!DL+sGCgUi8bn|>w|N3#OUk#gFdDh2pJt zOc5=XPiF0}N#-zS_rLp{;jw>PFQdn5p=8pht7tQKF5k9#KKEHxh|i|i-@Ry#MdzQ@ z31{;r#?jGFRm~V}T2*5Joa|p&`H=r5j*$cR056>hqxpqR4Alyudi>Yi%%|I*o_!qF z)SCHQKB^GW`+`fbsI68`MHqY6tor@B=9YH2piizFPsS64&ROH6r2W?RPV1dFJBZY7Og`06ntYse>b=9U?!&)v zEs{{lUX{JGH%W+$5VH5k-j}UX384td%$AkCN%kh&Wv|QL+lAlh?)y8Q@ALd|A4lDF z+;@G(`*WVJ8KC87WL+i|$&{0$O0lyY$>!xNFeJf8tf8?J#8I0vG821~9n~*u=6if; zW24hVNs~#Om~c~ZP^@Zv>@GjS%<(?WpYDWNmtyw93JqSKo|MmSc)zv#d?a)1A#b*8m~Q(-bjEQD~Pg zd8+R`A*rPeg=Yc&BqS6W!@kZ5&--sg>J`fw$?Kz1EYcRt!VWeN8L#lz-qv&&OrhRh zPUk$AAODzHKEkAk9Pa<5K8qPUB7gm0&v<07@Djii(WfT?(#re}8Lcd5n-TIbfrqc@N2GjDVLhkN5% ztXZxvEd{7&(ed#`>L{_4j}$AaI|ntk6>s$_qRK5_QuB}zniUpYw~ArRO`iW)L8gXD zFlkCL^TZU_@6X$4ZXc3~cw!FZue@|DoTkvF=aX3UpG^18uC#;|tA&*jZ5>TrMLqHM zNd6*SSTL_eCrgo)>0ps%XKCcw${BU)OUfN@FD@Q3G{D8f_-+LYuSsG2L6seq$R`a~ zTPZs(A$?>x~Z`tz`d_GXzUiH%CSI?OVkq;EeB4x-tD~W6pLQ%HMNIEx&%1 z`0YR5;C()}6X2rK(wHEI25S#MH?a1Woj-2p4)uK^p(kkR(mvB*l)t@6FvM-}+%A15 z^93`Nn*|xmC^xir#Zv$*-OrC=b_|&nH#+kn zZy{v5D~2CK&(=1(V$U{yV)M!ga~noaMfuj;q3oD&(kbr8FLghxYfcPnIy&h~m+zi> zot8UU@OXQ7>8)Fw^>E(Y$6xUCi@rOg=AdT4K}E(H|0O3Il|#Bn+@NoiC%)otqcd)P zPk2GJwdC)Eo1MX*HpWBk?R@>kG#+y`Ku`ks<^WrS!U*hXri-QV)<3cg$B1BoK{3_< z)fM;dL;P)K8#W<)QO-yW{WA=^ZhL4jZB)R{zxhdph!5jnj5~Vjo?V!_ZCXJA6+S*L zw|U51X>H|J(nMcs;)s!<^4|j<9)6m3gOZ|_0W#w&zUWOEG0%kwyn(rzVfl~OQOJf9 z#+)2U%;V#uirAp9-&~V7qkHLq9`d`~; z;iv>C>)R?md2JFWC!9V;In&~gFn`;+lZ$PV{ zo(*G_vw-jZR+li#Ltc#@)`WYsUXLng#+rf)rJZK8dxE?&5DH{F@{0|=qeyCyS>ywI8Ob26N3>ZSy^Yj!TBn5XVL^sFN(ZZNd1kKpJmgSyvjJ9Ns5i>^+;1Agi7y#(&SC*I7)D927 z1bpnNynIzhVTAzXI3y2Pu5EPSjN9L)V~ERt00?$!XEy2u!Q*?275~T+;x`vGi|yD) zh*knoytp~>-$93k9;2hcU>gU@kkHW5Af}6*cW1(L!)0{r#|gW1^UXy++BYiYwj2ol 
z54&sM_R5kem~-w`|GG+zkI1FI{k}HJJUyP8q80}phm`a*l`@9#lvqjFa;82Nm+ozJ z;*%)5hD~-=EM?b9#wnT~3@_f~?9*LZ!mb;w*gAO9?rtU27dX*Z%>sD;ki@^yj8-d% z<{}w#z;YhEA}I0mBPIVPJ|3P;_*o(3`$NjBLEgMJz<=O~&KkJfb-AW+DL1HBOu}c3 z*X3q{&A7Q;xP@%&eoJbC^qM$pzV_j9C@p_bKx=ME)hCoGmDRqlnlUKn_j5m0*PzoD?wNu{r^ zFLpFXK;=Gn+pOc8Au*SIQ`@LXzI}EYnYfaY$E#}Ld8d`1&O^AFJ3lJLkSIxWtzORx z#CyQTg#O)(xZmsJmG1Kq8o-DIFeyHLdXJYk0!;3B!KG~9cXoCz9fDw9z~v`h{Q=0m zuH#rS-H(WmU%yu-q%A-EPR`288!%5*WMhnZ~wC-e$&90XyR78P54*ts093t z6tiFJQ@7zZ)U;YpALyfhXZRSt!xCtU^o@^h40DO$jO%JxUzx5Gx2*^fq{#y*Cs=I; zfBeP+EbnbE`ndO16w4hwD!aeGc1`AJ2O%!F%ge)8u$V{Cp#j zmq%y=K3^odu->aLgwnK5BO$qRLt;#6NPgowpzIy$EZ*Ka=9yRzwR1$Ge$EEfkL+_f z7*2>~f2@t*tUh53$$Ut5cw}*jXr%4aM~Qa{sjDf^NsxLa3HChj8bC8T5O4BO3Nk)p?Br?3R)e=H99Y-WGCcgI ztWtm2sybbBi2Z{ARz@~IQ%M$WK)A~uo(*-1c~T@TYtG&3n;Eo+w8k9g6?F07bQ63h zg8>~0feAjx*qyI54k3w3C?IQJ#|?Q_xpp9(-p2_cCTAd~Qqk8BZ8ZVLF*1F_zJeo& z)u||rO!w@zEt$}2c7oD=t7AN}DE^di9^>YX>vJ1@W{wIC7>#G=?PKfrx-Kz)+8aAR zjk2yh&^WB{6xj7e_+oOT*_HUF-PxQgYZVc-{W&B3n?caion67a+blubJJ7r|*YxM= zd+lX*;7Os-!TI=;2bo3~OYI%ewNpSEJm~+8%?uBH&~6ZaX^X(`ZfUs(<0Awbs6Y}I zXbnubI9*p?f(jB8nb0r`VWK2rUIINcA9Uc50^fa(I*{m{5YQWYr(@*F=wL9G`_hJ8 z-*C?iA6q7^Z8ZOzX5!A015rJd*8QCF7WHUZRsTD;mpqhxQaL8N-y0fMw=fAWy)YlM zF6K(Iv-2-`>|N|9P->Xpc2`aPN{D(5IH%!jaAC-@EEcj}X6#k&?NuK`o{8Fw@?12X zZg2k?9_!eu&}!TwiwKlVSBzInqy}PSoj1YIl6T|JGBXu=pG9T8EK0eWvK>RqH2R=` z>(vRirfKwOnsiOOc*ommbGgfDaRO~P!u*1}5pT}TY|WoMw==_a?zq%rz*p!zWOgEG zb9(9TL(bN<4wZ~_ttgxBl3oj1@CKD}v9*Zq9e9@iYSU#6^8CNv^d~Eg`oMX@I}P@G z!~B;3{-mYR?{7?orl&K2g#zGIb1Kz2j66=zH*C}J7SUUM)Dw2Ge{ zrb@9_yiM;m$o_@ z5!b%rbJoj;iHkiSj$d+_?aw&9Y0cA#x?f+WnC*VRpBbo6bhFFpM0*9OQ=cqVpsXbk>07_sjO36X&S+20=vE|creUQfQP z#r!|2?4bkXWJl96q8TslsAnaAR?ejLIi&BCNG#P$xzX<494+iYQka~VR8)eOQ%U|t zi=)ve|I6vi@Afnt=(2!{CZcDyiGxAD6JA&9A8Ric@IzN3SM+KOpPrr`XpNNN&j|(} z5Rnq^>1o+@$(MeUMH>;8M{=_-i>s&4hR!P>%V_Yf`$}6*-zs0-{b^olr z^Z!ud^nsg|gfnnJ5G!^R5ctzx z-&*>V^Y$?$AW2@k25(qd?ElPT?>g^5XtAuJ;Sb%EyqEcJ;Qe^$M`*B=;>@ zSX(#1K$LpTPRZj^_xZl62`Q|ujqe#`CtcsIpgz$bJkOl@mSujE=3Yb8SbgeCHk<5E zmV`9YdrlSvxoc6F_`x6QYVJHZ8`L(=AX*(9-aMNS+$j$JH#AU$C&SkSvTaX6co5KLtEci{{@NcL$y|Htmw< z`ua*>tGVFbrKC7*#{|I70RbgfA-0$S-p&sn%;*)WFf}8~x0=WaiQg5l?o3Acf|X*_ z$`+YDBc9j!z-OJrd2+*vg!|!7{%-UhpQ3vI=4`3wD{JeLP#MtK#7>a9bg$bBZ@Js17DoGuE%ONt`rGDyr{<<(^ z$K}A_@ykQalVF^+cKlND-vP%$*#+|A)U~cF*;jT4k2f2>wVI0}lt6LzULl6>OLTPW zOk)s70EP{Rlb6PwwUUE`j4wXxA6eHg*ERnCn+dqWFGw;-)r4{dBwJz*TfZ$Chu*R0 zjhI1d`1}18?ns!BXfF4O+<8#E>qgids?My}ZW%!;SRr2P=7yOiHYV+t89;6qXxMRh z31fGOyd}#H{dVZQFl6<#8Kcbdi9Xedf>&kgU@p&g`)+oBY9aRlZ*Zl*CUKmQAW7kHJ9$n159mFh?kPXO*0 z&sVWz&KcA14X1IFfN97^pds&{aCjJ;3I81`IEibQ&mn+kzLS5_WTMJ_U+f@Ec$WjSM1dNlc)b?Ra&jX? 
zF~7we=dPO-Mnm|Paj_O~T>Aw3L=cqdOu)fI!!Qi`|ga> z#DODi>8JDr^v5(aZuKDfA94?9s$UDQ2hI+K>ibTh))Z4HnO$$8_+WxUItJ?8*p+N% zdD<%QNUb_x@#DigMlw^V`B&oc1*m(L5h-rAl265LDTW`oevgTP)!iwIxXL9s_(@}9 zuSKyd(|xN5Q!Yh{^p5AvYIXN-;w%l=`T~o&BCT2mVf-&hpAi`C17wmqcPv zS?S~8Wu%#Rskb!ceTX)Js@5N!uXCULGc`1J=A=44LPVFsUa$G@J3>hdn*lh-A*BMo zun^J#qpT8CkiZCD(5yLXxnc`-44^%{kh@k^3hU|F@(eH~Ajc*B{QYB8PZk}%R2{X_ zq_^M_O@TdDG3dtrNzhc|jHcfX{z z7A+Dt;?#UgFwr}|kD$i_Y%AG{Yg|*g%EI^as0d+2fwy4*1t*>YF#x z^nZzC@TOr7wN8wUEg?(!JrCAXfl4j3oli2bv|UIb=g${9!7$4rL==Bd_B_aUYjPeN zXiMmR|6E18-W;odF*C>1-ryD@ltPNb&aSaWYCsLWU3qsqt9#C(jQ&B`kon}Nyh?x7 zbLpUH|C;l_qFI^lca%5DUk~VnR+-^yms&5~8AJvt$cWzmdUd}%aKI^G$u<3E?WCut405E2OKM8gN5HrJ}1#f_Sg4pI5yn;$Zq0nY1&g5Q!Y+5chTQd+*ZY)k3(jS|$)* zYtN>gmMs{nU&;dA0n z>ApUzS!iXT-Cukm%?dVkmMu=nxZ^1Gf$hITF^UI*+-ZLeOkTA~M{>wx7+9azWVg4u zkmae%exkmm=!Jg4uwtSy7fn5szut1&GiQrF=slQqQtPbET7%buavlBN@Vl<3l;nSj z$*TyA_QNVg`l>455A}+As}`B>U0}DShzdupo5+8gH49gZbYDc~38-t&_Qb0@ens)0 zHTc@at~j5C79QlE9xDIlT%Gzn)*)f#ROz%L8yjR%^VMO7GQX@8k5eVks`fv%F680E zwhC&xM~@zbVx!8w{rdn;qq(Q9U%gyM`0l^ab86j3dQ&lmuhUi(mf>HeNVymA4GjkH z5G2+RUqoWuEVC0uyzysT7w|*5@q%w8RFqvD)RpU13#Qq2b4o$#h7WlJip?(b?e>xD?Nuc{|AYD+?v#FqXff!?vz|9v z1KV{X0!|{mUknvbhs@`h;+u^kWILG}hKG%AE0OTOedw!ya)j|B4Aj?X59wzC!tmxU zJxJze%FSJ99@D85D7}A0--LR~#JjpZzrDoPv<+#uvsL5m6&_w+ukYLW2{8#bL49@7 zf^TQHf*K&2mnNhuOX9d@+_inDi*v!hK(QvpJAn6?TuNDc`NodIp)%0Y|tWZ~fG7E{vxsBCFz z>EG?n2Ot7OI>Ee&B`z)wkr2%gan$(^o1o%_fIm#D1-Jew>@Z3iwcxak;kUySB8m}& zbR|zB#A~L$+?hT*AIaqplKuT8q?|vgIcKqA5k@8b3HXNDcFvZiTBd_OrBSbYm|e|F ztf_{H7PNA(l8ukUtIxlU7gz6%G0IckWg0$ZhUBqtdYJ1IWpK4Arlxmc1nGF>yva}5xu3*v-OGUy46We@n-@)K_ zEV|#ql6TxoyAcUIVN6}xSL`hA37q>IkBdT&9(!$Szv_4`gufFi=k z59v~%kB1x$fN)=qM+v9n#URT4oe^N&Epc(t zzr=z(7Zj=zcM$Yq@l<2N-mC|T)SiPi;U9KUwc@r;mQk-W#O@!u#|3!f?Hrhnpv%9p zsb?1uB7&Q2bGW1B18c)`pL|m~xsRP2Qd{AY)y|0wp;EI((8b$)8>OHoo6`~~&wR1F zvxBg#<#W6z#jB>Mj==sVcrUO{th!=%Bbhtj{mRktvFl>Iuu5Q;*|;rR4+pKF&I+z_ z%%-BOEJU$i_^6p;-ujrZ@128=gt@=VabLea0X_ViH*W%Q zraUCCY*cM=ulNqjsD{7)*kV9+Ja99&-1Shf+iCOjLn#NmsZ2E|+KpQT3{8TXen}tR zZSQ;H7cR8lEGP`QeQU8ZwwzPGdf`tt3ZwjJUNO?$;VKY8&)9}F`RI3bIH>XzCyFTiL9Nhwi=PiO!|Lj06f6hDhcDp2Kk=XDCQ4#t` z-g5s8+lO(aH0^FYe7&Vff!7WuRvafpg9r2HDUaYuwjGb$t1f<2`xzHMX6TDbhbL6E zfbJ0dA|NJSG?Cc2DBS{T7N7`tm=8}*Xxh0T+zPS-RrU3|x98h4b92oa0}0W9v5IYY zUWh%Mr2gYup5GG_qk>1zQ`K%GVZgD$b3(@rS^kHz>A&;`}@~~xu zCK{>K+R0QLG#r-fakhoFJVXBxN$V8h|3bx27kG9>l2Pzyv!%mb0mLMKG^Q0~H7j#+ zJ0^%-64{@_MLAbP6~dgPw|BB?M;>i4#Y!$HN1q*Z89JYfdgU97tL@QCA8gd;<%Sxs zKAJz?zc>zoX&xRP#v$T}x(s6^%QO)cdYdIMD@)t!kS?IEP6B;EkD|5a`*5%{aOV4ei7xLdv)Qz_Wk<%(?BOLx`Fwdb$c$b)d+ME-Td zkq(&&RgIW!k*xMUG@I~d?H;SEcuUiw&QiyaRaVFFS=+o*Xa&_9ZlUd0#eIxi&PLFe zD5(6m0+<5t04Zh+2-g^w6P@faxcC=YL{Pv3Rud8d`+ML(f*AyCL)WigziADc^ zwE0dF`D(4+)6Q<@yOn>Osz`-a&dmJzc(<<}IwbCu%p*)YYW#KgO|tbe^E+AU5xU!2 z4W*p{)5V13|3NBajgx{IJ$i8$ch88%s!#s zkj{3GmbwhSdcC@84S7iw)?@BS(EPl{7-SG@(Vy^%*{%0qT1lOc!19&g(ASSb9nS79 zdF6f9!J9h!MiMpa!~cr;^z_OFnHe^!mGt(ln~4`iVbTZ2ef;geVa_{iu^CyZRTIAf zthiiVHCkP3uP?3_OVu6JH>+(F+m5qHEjSlQq({unee&K2+3*uQW4ycrKW6)MejkbZT=9q8qoc{)0r0NR^6$3G@)fvXl8UJ3V*s$;(NMF(sV- zXPV5d(tlR-=ZNEGkS-^yM9TcZ-_7Gcnj4(s1lidsIJ~06Kbuu0Ba2xx?VdPvWDzb5 zI@;Jk#kq3SdKJ08O5n5kE-LAwQNJGtS+Ve9UKy5>hd+O9<&I(`o3^xz3*ou@p4x?t zal4XWdA9zfbh68M(kE!zIS$TZen(ngYldePw6bG@555b_JW1#qJ2^h?9U9t*ZTejv zLB7)JOk0C??BfeMKQgKu0!c5>iK_#A)|2xW1WYx#b}2SYaH)?k-uj-k*7o+opnx5P z1!E@(DiOC|<8)OBJMpNkaVo0sPDFn(e9rU&w?;6?_<5m4B{jMK~-+s_RQ2{D5lP+}38)CQ*@c zjJ;DLFY}Gw`+XCj2GiX|Sfsl(~p)(ktBVA!njUWWdcZqF-oI^iR6!Gj0P1qBGaKaINg z$^`yfFDpcHujb{5^6-3}nK61mL(_dSRzg;LVb$JS9!L*&FW>naCVbSVUz_xE)PkDZ 
zdlzG)d%tRjvx*%Mgj%K^>7uhfzC4T7KrY6tDM1GB8>{*eBnj>9%^5q4$b(Nb!1PD~ zNL=cSl<6nItHyuJ)$HSOC~4KPx1N{Rx_=e${fU@mU2q^zMdJ`N@CV7UxFfU)+#*2&hK&qXV2{1!&9A}^1MbYN>2 zr;P$$0?o-Wf3>;QIn-h!=kKsT$LK3hH1T`x8=GJG(JPb;dSH^ZW7P(}Pl5Y`Dq>#y z03okC;0JGrYyeGxmYMPYW~tN4GNOc)Sz42VuIUgdnS%ID_h2DjaC_CC0XtJzpy zv<|9ct=Q0u*fgmb8J~bn|LEvQ?caY-Iqp(O@->=2y!ZzSy)Dm?s~oGL<5S{DwqAO! z#Kw!WgOLaln$!K+h%LTd_i+)I>yfTE^WH+5{!>S;pr%bdT+Uqjlg+liZ^?6DHWshnMH9t5ls z-&uxK%AmkOHs=IrUK7QjVsLxK4X*r5JvZu8IfCGNCp=Ezz(hA>Shwo%I^76Aq zjVW5@+e%2^P_m0|ZG-mUOM)V(;$QMFL7N!1gE;foo<&^yK)^QOJEC6dHi$QQCLaE} z=4(53EYu+^X>a?vFPVoWt>UqfQ~hWtV|zZNZ#;L`*9)3tW!Ej%R+hyKM1PDK>}*5BFVrs_@3jC|=89~Bop z9b?U%W$`D#s1*c0zCk1P>~6{Skcq)S{~i7rBRX!)2nl+ToUpHSC`-Ng?MxmeiH;ca z{e!!!V_vZ2gnhrUb7Se)<=!}etf*1Y*-}1V<_zLKdj6$uux^iYLu$y0e zt)NM=R13D3T&7~|wury$FQft9RXE+apzC>Sp>u>|=CjFhbLV`~k?@1AbS;A$#I<{N z#M#-hvQMAqD_n;Rogy-IpfX~Ba+aeK;fmS^I(m66RH0;>)^KOIxFFw-J7<0Vssg{(0A+1ErSa{XYrIKaDicXz9o zsH)9!n7nbhUazy7mjGl;AgC(svev=DNoRDLid8>U&BQf=Dqc{|eUMXuEyFJPxv-#o zXDPSinh;S>7E$SABuj$9mQ+A1Fyv9tO5S$FA}Yg)WMEQGWfrYheBU=lflV0cp#z<2 zY4UkT5lSt>&P3-fqeGwdG;@vlrqUA5xG6AN3=1EJCFlZnSN)Jnh;gwUufRpQsEk}+ z3agX;`)EV3xXlFK=Cr#rQka3EPemq)v{4NKD#lF31 zZigEw-dFFb@kYr8nc23oRb7~b#pUEo3+9kugk6n1H$@Kl?bULs$$^7jRF49tQN%W5jUKDYA$8oeOP?hQWQ>EDFHgj`CLJvP?SJ+riwl4M zcKqgbjOdj->2A!tSDtPlDk8)IL^7HO^{+Ml4-KXZB(ctHzVOArJ z{C;a1#cT~|q(Ka5A9B zFEjZfZR=abip4?&L)a3l(t@nf6b!AVZ;?zQcB37VxZL2M?|Q8`IJK1Ma60Z7(c$k% zd;p=b0c_;K8h#A`0GpPbW2g}Po+w4&9sO1Wx)aHrtsTm~WF#hTkN33vn*$9Oa4Db| zSKT&8x}|XIztSpd&Fj!UqEMuSrflGuqR>Y~fh)9pf6X@)nXbd>_D^dM9_eHRmvN9S zb=0#z=uF{P-Bh-cc`~;?MHnV-Zrok3+RjE_g#haj-ov#k7j4A1cT}(r2Kn~e<@@jg zsokyI3_DsQ*tT6O$t$uF5`JAs!NU3VqCU>^vtF1rgudYNC=R|W6SJtHawS87U(x518EN6NhpSw0O7*^bU zGTk60JSqNGcRBE|s04Qf87&qeN3Y%jo+F`W+y@?v=MtFjWGt!NHu+h9l-~ByZ8`Cg zya?_J3q!wue+?+8QQwjQ^}C_6qE2$(eQ(6uga2rXj4&^#w@1LQ)Ie}Kb5SiJhfaIg-%-b_9Xjwk*QW7?Fz{=q+|wUj z-vF%EOtTyNnDgt8Ch;)2;~~0b$CslYY?>N~QW=`7zCjA~e!R9sH2u>tS=GyK=K2;2 z?-Rbnl?&{QCbePgwD3^qAuv*^tySq7S0T4PP#|6 zVAXmsR}*$-Z(~s@tci3sb@?wsf)db57ww~unc9o=OG)Zs96qswsll(m{{kYA9S#-7 zkS{!bD=TAQ@%*u9nl&~MyDbokjJj>*3l>M1#>|bha?q8qfF{rW;?xTBMQyBzL#XaL z2v+bT&c?g_EivIjDr!|>G}#~LBDM|7>)_CgOW+fL1-amQtOO(^5vs-1+}w4;&Vu6L z|El(n(6mzh@n~7gGv?ohDAYp5yNHOUFc$n_rf8;D{h>4?45=IDh zzLJy2RrRTkhd3aba&WRbs}r}?J9DwqA|J+|W5dIIf06)D=}gZv1VAeO*)Uu6um`rT z%aT=?Y4iaic<|o7^}UG}HJWI$x}ckc2;IE{kyT2$5)Ew4+?<@*6T6;2eoTFDb`o?d za6Lx5!O9V<=_J4qh%t=lCZ!$%s`YS<6%~=hWZCk*r~Mc*8-_FbF$E6X^RN9x#N4>A zSOi>=Wn-*qrTMFoOeUSu!sr1zufvk-`WNiu-zOxYhlo^L$oRcMKOS}O<6^$U z+R;5Oz2EbOG--{^V|8w`X&BgSdxfA*}!SPopEkr-h$pFT$9e$^r%nY5@mr7>meLO?PPyjc;glhm)%6Uw}|3K zEa4jfp!>R1p1)%(qh@7FOD+4fStLtyaqpo_j{d+i1NclpLcj^w`0GGL1sW~ehZ{R1 z4dcCkgvxa;+}qfV0R;xo?ba}tT1^@Iz~D}hVG}T2Knze4 z0BBu~AUM4)puYxZr6AjcY)qG6Kib*y`t#d1YcwdtIBgz%p=sLN(r%L9$Yu_|(c_PQ zkKk-gG&^ocLy*sU36b1R`!a72lb$Ix53NdE*5oAcvfw|`-HyyH5r8LK$q*YC_b<1D zRhr+oDHivvOhqpzii%3QgJzz9Af=0Py;QRJ@0pz$DI(QDpOn-`7?>&sx37@l>^f5~ zh>N3AE^{pP701^U^3o)(AO#oV3n{3AD5?*ysScV4%n#YMuy z>Lsr{UT%&$>6A^!CMQdaiHSipO`3i*k(*l8ASnf&4Q_1y<4^-9ld&(~NLk;T5EL2x z5NDWz@ZRR%M_*rIgd>w|pV!OB>*>b|%C_~-RLrFI^Hyb8#9CjvEsX40dFo!hGD#Ug zh-l8H3F(&^OSD5dwb}Te zUo5(<7OWM}#7^Uv?t*|34z7C^AXWty+~!u51)b-KiHZK9A@Ha5G3v&~#xc>+?MIzB z@8S_HxfF_97{rvMaU>kr=d%7VrOS#LVwJVkO&06kI+hLe|dATL~thAo&AC^O19K zR4bTg)%6I*3a+q!RWs7Zj`mX`**m_lnBKn;`;Z4{D|<$R}bmBqUQse z=y6wMx?x115M)-VCH|Szr~7j5t=3KHo`sA&t*ib&vWEocszK*SPF%y1CkgaHLB(JyhQA%T;uPZ09u(z)c+kI_B-`*zBbRuVd_GFbD zD0(O06*O~#o{ysNA=}grZhF^|gM$N|kZ_MXG~?bN%i|uj?5d^O(m)=WoSYoqa-wO} zWD}bvQ%J5j=~NaY*1gH0u2xyV1+iD5Q`*DNml!dfC?X+57j0#hmf12P>8sycL~6;52B5JR##&ePx`zq 
zzdEVwqa0-Oun2p{kw*2FC7{@*K0$vHBDtV`=qx1c9>NSEYiAnNM#l->J7Hzgt1nl0 zPvV#ujldc%7joKAMw!8LbS=Gq1*nK$!E~QNqcXrxO!D*$POEvrI36)Hki92sFGytC ze6F^+u6F2n#v_e3^EoYrFBncn#5X29)#QAF8qd)U6?{_>lb<%%mNK{gq8JY)wF4bx zFMyiWul=A`Z}sE_nRcFl2>Oy6URFTM0|)QNgbvRa0S*8_-HHkJDw@^ z9qO@t#>-~K8y}8&t(X~&Z}gcNBl+cP@YD_8e4o$0fO`!<>VY$U-uTHcQollLMzscl z)x%~m87&t<-221-c~j)kPVM2gSe)#v_tb@r+WvE5FRxztjMc*BrlchkZ_tMrP&Qrm zLeRfi=A7n2;L%WpwlhNsb3;*rfBfR+X7xG|p+N~huJ~!1e%WQV*jip51vt5gNJz>=hTU@r&S()VHpT;<9F04|X}w?gD zLSjAMcVB&-l`C^rPFxHYwyBd4a`1IrY)YaRVw< zmY8+^=tH?gu`!(S2M3bb!_i5TwLyPL*PC1?bxSUA%P|cO$&&WSa%+cbPS!zQfhXMe zbseFUgVnmIlg;rmkWP*yy{oYVuF!&mm6bOdc0vL(OZlvw+v9d#OOJA0inwks_gI~U zGa09?K1XN2@dL$a0bJQeJMD6>QEir@RG)M59}#LF_Z^wIfdrPAQLYWcs@Z5QuVyNQ zDbHpx(0+)FSs;@w#UQH`Po4oN;MJ-njF;cbCFb@)Pb+uvG=6eXZOYYzU(ar3`?Vet z49?a%vBi*N$ruZb<(m{|wi*{$=@@l^lY*geT@lemCMqiBjsgs#xRoj|-?ve_O6=fR zouqfxp`%MC+&%oOYQ`k_;f!v~!iv*W-j(Wb-qV=fhS&!h7O@wfRS{RE=bLCEhWzw$ z;x<}~^cNMzKN@zZxU--3*LK+0j6K+wGiC>K0-iQ=wrwbO*9~C>>JPD|_xEV9+8&=A zYU9Y($*S<~509&IgFRch*%S#&H*2e;|Bb+IUcMo{J3+E^?o|e@v7NV;XPTOQ?fIDa zVQ>L>6Gj>|7-un|;F5Qn6XV4i%jQy)&TNtn}Qboay>vrTk3}G z^5z)>SL*V050kXu74J@8&SLL9G4)@~(H^G3-?I&6@_ycpwHa+CgR8~R;xk6cyJB#a zo~wa!gc}WGMisO5_j(rn!Wu_s_%{aE6s=6}*3$12*#t&Btn+D*u3H}{Q1Xo-hmAUc zU6Mhz`wOSK+gGzL_e$@nEK6eg`q@0|>Dt6zlaRA%4=Ui+EjlY2{5-elfR>KI7htIr zTYoubBtyf)4{T?YoSdAPM@-F`eeHSvCBp!tZ? z^RF*C05;&>QkdNxH8XwWYIn-vk$bRT3#z2Ph96RnfJKJq$5|AD!Mu9 z6w=ykKevybBLkv7a}p}0Ia%1qr#B!pG31_{Dyo~XoySgEjByiIqzgJvhfXX!WnOEX zhvkJ!n`)%pnCoG9D8#uD+f)rEF{-NW)}GgC0UWbWXX%H%2zlLkaIrtyTw@qAeBF)| zfH)L$COvHV7Fn^}%}#KZHacP+d&UKK8#uU;@NCyrlh`qXSzG!h;D+q%)4|_ie8@tA z*?tACEYPj1d95DNT=FFX6&*-q8>les?Hd|00veQUOUnY^*%}KxziCoY>cJb$tRZxu&j%Y`KM1caC!)}kQ6ulp%MZid!`RAnfD z65`*#&p>nwpSh~F!GB7<#NUz4cptSU;(V|0Ryylr4~<7cph}kh$G$)3G}78CL~F!Z zvqwJ9e_3N)uN8%Q_bjwK)aj(V%*FbolBWz;-;+pxPl`bgo;=byQ+O@W({qEp4Q1hi zYnz`vPWfh@X%O55&m;ZcHsu*}JSyvQCUvu;qzI1qJ%1K=^#oL&L8i|W+k8HKi+|-yUh^Y!*`5N)7;-dT z0rUMFH@dr}w;tGA?%ff;dk6e?3crl-K^+2wD$d2uZVI{6`#>M|=0xjxMq^~qpaQHa z?LKX9d)80yU#1~2UC@D`EW+QMLXhXy_o)#$WxSZmuk^k=1e-;xm9u;$h{MH`lBPy-@%uLem}G+(u3jTHR#DDZX1|VIFO&Y@8%$Q{5I&#DGE)j~!@wC1*y&J- zP`Cez5{*l>D6GI<J7=OB!H~DZA7fZE1_<(no(U?f2yhHwvnbO5xJO(ljdtt*jw% z^4|XTVdtx*Gk+oqQTHO&M97qB|G-ndWV1fsnNGRsf|Tyatc00W%OECdWZk}+!{her z8LzeK(O{ia9pm0pWr)=tS4>qQwC6DqDcPn<8gzigq~&sDvUrKiUmaEt;%A*iNhh6L z3sw=n{UGn{FM^rV`e!$y&2@Lys5tS4pe%Ma`p}d&2lLu5i?)2$((__m@E{jnu;HKzoLoJw2>3UlNj6szjn2`rKZCh0+FEo2*{@M4KKOl`1|!cF`#k3?C7R6E+0LKaRh@`JMCgtkJtjK4(T(TG`w15n^xM`m`1 z!D=;M$%o63GnQJZJ=!{~1F78-g?qlYR0PCvoJ+!DRKPQgf44rWzTd^x7Gnq34yb&l)^HLedqNUK1L6 zQth-pU{>{SoTg*uD4ow4a-T2oFP|JP85j`+(e|9|r*5%1IV$t3 ziU85k$+yR-faTcrMInddKnqQ_EZ{r(7Z@zcEjN;?rY%|FyiqFW$=~O6W2u%BQ)xGp z)cO7Oc2kbKYW3@v z2zX#_jLFO#1^#S+sCZd)fmzTgNLX*!+Q;ViT8%lDb!5b^O5)}srL=U0mN0*Xk1OCu zk990s^3NT_3zb}IBoZ4T<1Q0?b{J`vFYM1|Sb#jPT?`_RewECO`fkwyU zzz+Q9EWbUn;w@=I6^YpTn5#_?3s3QaUTM%4~v0wM=&y zz`E|VVR)X(ye;?O&R1jk_NN|J0NHx_f(O5y#v>N zHYN<=GYrP>vrGX-(b%E!Af3LFy%^bdQh)D0Vb;_c`>T2MvSPwIr6v*GV`cV|upX?O z|5f`{#~0HQSd>#QaH=a2?8K~9t&&6(ApxwGz8S+0WIzk*?(UumR5x(fd+^(TJ3AXp zH-~Vbo|Y4*!sOF6PS`j&PY<}4U)RS>@~)3%E}Wo*NkFI+cYP>!mcopqGT1P6+A&hB zxVFNHJE|$5y)=JQOB+o6q8K9N{3+((V4^R_18t}&6!u6r8U)7qX%+y>^%DF7a?h@3 zbTu?)%Q}c*TY7J!OC-r}Fsi_`hLDKJ4aBL8?_V%zUlVlwsdS~f@YgUe z*Q}iSA#U3}STr;Qw{@cH_6H#KUL?Ug>8cdC4h;{B7ff7AEpKC*IKco-C2bfIn8cS3 zARkRv7nB}f)5@Aw-k!bo)2vGVUWqtup^-5A#Xbd(%W8Aerp{qQ4hO2gVtm149GSG< zk!vED=90+8-k~bq<{WZ1c|`e{$i22JMTC5Z++O(M_Eot-H{wy-v$xRA3N%=CyrZLA z^+yw00p{-&pu5a#ZC$#gy!rZ;Vdu4|un90#ST47w|M?^0$VVcZ$aLtiA0v@#N3A!p 
ze+lMhTf+`rb^wo;J8pXC=p94#5=9FnPy9xC7viQO*mWo#376lBdA>Pzt)VKSZT-E; zjQw>QIiElk`f>Q(fShD=W!`*6^iPs)hcxZS0+VNZVF`&3YxhM8_0D5ry1m#$R~4-X z@}E*6Z?pA1Z;(r+hR7Mn2PS(=twe-3(Fg2I-mM}-^`ahZ9gRcS-Z`5Fm-#a*uj+vmMQ7RUOyjtqxU_NLQZt6i(i`_H>^ApH z@%o4l7@k1VhJ?rDs1b0Fd9Chh3ts~kjDJ=?EV(~7xB`VDj>gQLlyNH)2IV90oOYwWi#Nf5*9Ra~3i zqaV^P+pg=Py24jKi+u?A;r41(3>em)lzb#h92p{w(mj*09?r6W%v;g!Dhl?bSlyLEvc{j%YOtNK?y(Ck6=>WE& zmp#wr^%O+MdS4o=3&S*~dhq>xREEhbC@|z8)cHR|ePvjbUAQiYf*_4_N;iUnbP7m= zAUzZxaMgoX z4|M2`E?+K2ez)!+U+g=KW||kgRPs{2qQ~DbjDL@S*G_1sGEdHk-;Ad%iQKJfAiR+P zvh2l-KQ?OIl(H;7IV>BSD+jRonh2@7gX4}6w&8k~LG5&T>?4wZv-odHIFu7E1P3*| zl#=ddg&>K?4$rfYe5Px`GW)^gfZwze)99_aJk%QiHYbo7`+#_6c104I&$BINGP{+!p7{d#V+fWZuPgjwj_pS+@@Mne24lx99AK+?>0<%gQXU{W~;J4YsK1 zn(jQ)jN3KK?-EtI{LOb3h~$0BEiQ7xm&t{+ZN5{!!{4`a1WY8Ce5=wf-oTdK+_@!I zbLx&_`@%^#?&|TLJ{I1xwOI$m-$Ltl4CCbywIR5ceZoD%Eqq&rDAWOcke-g=W@>%# z3_N&5+!pXQ82c{R`D8UI4u1qx>NapG@-AAs+ou&D=|DOM?QBk%`x-5CX2oRhhot^; z^<8}Vh#57%aade;N909beYI3 zIa5qIB9Q4_k+GF-B$_Q{fA4l}4w>3&78xK{YU;S&N0|XH+u0nJLyvyNC39)nT_Bhu zvyv`>&-P&4xv8WC)~G{9=!5|uWqiWNe@#u#ltI>)JHSvK=+xGzoZODkrAEGe#F1`F`a zNp!46nxVCpWm3c|P#WUz5{5ELN(@p9HBapP__G(>??$%v0u%zP`!-!r!#w2XzB?<7 zQrvsG<@fM>UFf#0@gB-fm5tiZM|YmTUWbO?EG@~n?>>RuT^sGB^3#8rGu+%BZ*=8x z&tdJuj46pNf5)@9r2lu6$Js76CtjQ7d2DAD?L#i&0+aA6^fU{S$hTvrZwU_bQH$!e z_4TVeJz@&Vpb04}Z-Z@O3f;Rf7QJyw1p*eOmMi>THk&&hXgDO%{knzJ+~wpk_^<{z z_JIh>KG1aqhyfStIk4W~(aZk@FNpUW&Ne=Yx;il85W;L){dR5jOY(~s=57oliwW7m z%?sZWKfZ+LwSy$Zz5%ZY9Un(T=&7w?)wYVPe>j>>=fEG&r^;^AA_vd%B-sqTE`!J{ z|9lM}IEEq_!){%u%&WXteo-+9tMFr?3ws(7{4G!3{6sgMRx)^5jz><^@U3RZHNx-#zP|HH z(m+bD4yo%Xt&X?O-98tJ(uazNw@;WP?l{!;g&b0ma}Fevue<5b@zYJO|3r2z0ws+) z?aa}*{+5#CHs;Owz7a6U$!7?rfY7Oo_IC06qbT3Qt(>Ui9+CU<|0srM4Wc8%b{yqS zCHAgJQ`=5(0WXLLKyK1I_`t%?&p(?GU7Q1Y&yOxwqg2%-_(-hifW-TE=- zh_q?+k5|X*a2dQOtUd&=;`ZCa!4j_u}&tB4DahqXr{d`Ym~l1(S3}{b%w3i{D>Bt&!qCONS^YtZa$O3 z++4_9c&ldDBfK=Nq$KwKogOs=*(Ot{TEg3J)pH$Gd98t8v>Y76yFy>1(AFpQA<|Bo zcHi#OrlzawTbhP_M6NBsgD=9>-qE22R5kEEM_>(JrF^(V*_|q01)JyF0;{E0N_O{f zVsT{HoP5)py{lmygO{5dKWFqE&^-b79sI*qgr5#l*UU1#yF0K%=3k8N%WrHvze@*G zA{g>T9UWgG2ezI4h4~Do86biZl~oR3?^wJsGrhasKYZ5nlDSuY!}i8AiV5}6vg2Y_ z4i65_ADAB1S#KTGSzhYbQSG6v49ov5xy&b?f1Ls_v}tbC_ZIa=e>wz(&*@+k_W~5; zCs=^GB+^dsJ$jJHt7M10LPn=?9Af+NmS4AV5)FSEBW2)$y)j)>at`YqlZw4Cq??Ym zTe7=B@1U6ZWUl88owSgiEx&s>)-&>pLP#Q{J|%KW{p5GazFv<$m*HonqtbX$+px`TgvLwSbA;{0Ql8@>7`KYyBFGh(9k;e2>hlD-4& z{HXH}QO%idr)rH~^KZPH%N$W>uQNEZ4tb&=t4Pb+J9mOW^%k$UGltx8AI?)Q{>@F& zgA1Yrzzk~wIJJ3DO0owMVoLMrc%`2bx`LHz*^5X)LBRw_*@@62KT8WG5Or=-XMk`n zJ-v*Oo1d!l%^(T}c(bB`Sqn(d1UDv0;`-hi8Rf79dgvG%8y^n58pb9f+LC>V!X~pZ zoBHjBZ2qG5jB3fe;76a{ff)~O!bV`{r*2Oxwod~_j#m$(ADuhVP?%_v@op@oK0_up z>eJRC$hsjKUZ0ApNDupd8W_DYe*wr zC+N(&dyMH$o2SZ5n@7*U>F{w*>pXA*@pdDh@F@SGNV*`pY{n!W4Hf80-iS0kb>^6` z1^OAgP*zw|yGas63YZLRo3H4n^?iltR7d*xvkoTbYiHe3M zXQ4bUF3$aY#tgEE2t-T3%T%S!2HtV-xhc1v;{oMjp;D`iGV9GRm-W6dgHTsL085_3 z7UbTA)|zM*qf3e}3+(c7k(hk$NleYdD`Ih^HE>!gu{HG%Bc zOSttHE}j)xaik&Zg1rAz2U^mYXVcI%-r|fzb~SRpS`9hencsD@*wl#U;U@^C zf6H)q`XmVlZ-QErp}VuP^0;bPc!Lh(!p*vT?pD~7=cf0ZMFw}6O}jJgg@h*5SH#)$ zRV6ak2B^rELEm+gcQZp(D@Ydb(5$^V`asyst;E{&xRq{J#N5_Hn6fU?9jP~?UC z=uR3gED;mwxIrjPVYtk!%+}PJVkBXBrT+M1woVenQ$Ad&tL4#u3iC0ZD0UJH?W~EWh1&9mV zcN1FS@{wfn+5<12zwgOuC%~XkxXw~Zv-&AktQ>>A$Mz+&(JwZ4yxCh07ZLW{i2QLWO-w$z%1W6<#wy6D25aZ_f#@_(ej&{L#>m3aXY% z|3Be>sya`p>{+oR-ZM?4-&S*sx29hlOJ#JnwAx z_oWkEPN9S5ZSz(Pr>^n*`WES7o3!r*Ie*SmPwc7!01f@5y96C6FTeN4cib#M>B*vJ zU~A2TD@&V|b^a>0Dz1)x`;*$2#LGW-)ufvG=YoGVb^m;b8$AEjHvf9f*8Brb$u8AS z&UN1ZUt;-BXGYT(hmD}Zt;}=E@6fC&(wS5O-OmDaP}K|OgcifZV~I#e)?FW>%ic9> 
z9XnFo*a$&~(ttcP2r1-vU{6wyc)pa3Gx!02&)^`%AxeHX+uy>6+gd7Q^HhSCUNAZN zu%v$M+2a5kD&2X{AfT3V*e$LF?%dik&kLK>R5E-9bYvvZPWKuZ9}Osnl}G0OW9FPR zuy5KSKLw2H1eou=6ErOgZ?!DvSLJ}j2#mcYb;p4D;jvwm7ZXFIqoV^^>TX)y>2*=t z2dU<2F|5Sbwu;;v?@icgHSs$RN3I?-H&Jd$AmK*K-!#5$_#9A%aPUJVVMC%s(^<(_ zzJ}y?#0?SAok~sr{zOe0chmAkXp_iuAdx5xYfgCspV^ z=#f}#-a+QN*k8$^L3ydg#!4iZ=9%{+<~hk?cDX!3NKyzrRaSZ;I0>k>Sm2Qw_gBNd z4s+JE<-oN(eGW#KBT(;hzC7@F6YU>xi;z#nlOj(P(e1L(Ebyh$Dy8k?N05Fdd~-_I zn4TLw|A~Bc?wa>vx_Lf=hK5FHrGKFb`L>?U^>vBn3npS*uDBO3Ud=U9tkRS&+uiY( z?sK`kr%rOSuKm5ZUd-R{pnNAKa&*LnvddHV_>E#|}$X#PicKU#|?j@s+%9Fmx2tAAj6v6%({y_Ugvix?A&yolOad z7mp5XdSa@m>nJ(szJA6KE{?|tv-KA}TwMmLkb!i8X-iMg;#FN;z3C$!;28OjIP+7* zHZw7a`Sxwjn&moA;hScY2RB%C%-`TZq$SIsTT{Dd@anrXP+SRtx*UNi+~aI@j|K3G z)^jy60Qm;#o^!g7%|Tz+N6`@?f=-d{TKr*XXZ0g|LUhdy)%hOe2r;@RR|G3APmWSi zeR?YX47&A;q71s^?Alf~Ef+;!xHQ!ob(z*?si7f`699DG{B-Z9gL>N(XZ-;iswo%3kQ!fEGU3HGWbzij;w9{)AIj(& zbI$x&Rkil6R|wLML0Po?HFLf@`Y=?F*!LvHsPS@8{XFRdF2RqD-~y)kH@>Fdrf$-E zWG}mRXOdId81d1HTm(3L>Jz39n*Fi+rVOyif#S|4+PFu_H(O*dR{$hBE32t>j$}&+ z)!2egAy6WIBmC|#P<%p+@aYf@zfS)QCFp9+D=&`$UPP`>W3*D?hv~+@my(-zDg_`L zKs!3R?F6_TBd!!sZ{aGWd6Vi13Q&NGkHUHIE@G7Ag46Sh%JQ>%wksL}`uhYqyv((! zwWwqw=bYghL^QTfNIsanh8>7$SIy5E7_!xaJ*r$q@@e;JGreUQiHp)VJko#29Q86E zEOPPNJ@uEhW*9!~2|7vT02(l`>Qw7kQ_&e_X^in8poZ3MV?;i<`EVb zpUsG8yjW!)mqJAZ{*w1BOt3!!S)I^pxDv`!zC3|rC2W!Gio6-{=aqg*hk3c%aKGt# z^@70Metx&{_jjeP^*yeiBSOAhbqUOnF*<(so%r_6&3ld=)^|-b)6#emP78dCAb4Kfm^zN@+ zm5w)@tT`ANvwr>horA8cvpEq*EhL(4V!oAjPWlLsy0pjUedXQh?w^V3*OQMw3JQjr zObahhPTe)I)gO_#d?@bvSZK#~Bgfd9_wmOu*EFX@Kz{U(bkPEP->(9V=yuLsZRb+A1wdm=XBWuT#a2J`ibagJ#Xxw#dx=n3P zEiB_qjun$G_X~%tHZB@UHTB&d1AznT)4xO8<8{9013bHJ{S^bhBbNB@UFK|(Oll|v zx-NR;k~yuSr|svyC@L?QAA`ON(2oK-AUYv$4IsJ$c1%DCH8)v;$xOmVY>c_)h>GK_AO2Dq;e+ z!NQ+O*hCFX2Gh#sV?SYA@}MkGQiECSh|l$6m*D5b_eD>f+%IRJqijTpsj`a{|Ndn6 z+s{FJDyQTWR(#cl73=LqxADiYC^-z1sJrpA05Qeb`|jde0+u0HeiGzmpSye8Q*Oa> zXABf1(y~66*eaEIk#k%E`n=XHVz6!jLB90Tf$s+~I*diJXI#A-^i!q!!(cEk(=IN7 z06%~l5k}+BU)_=DW?g)(ZK<;NrwjaJzX!>El)Qstyw0|z$%>{t>-Sh_!_13q51wfy z*57nkou{!olABd%x|z(mh;I1vRo)G?jZAco{vye|iAM_~XYhJ<%5OAxMMmo!{S)ZU zPv5*HxX@cI^}S0@VB5@Hbbs&!m`mp6yJA6Tb=$!(#wng(pL6HoDn|4EGUMusU`(#*KBC#+Z6(uC^x*Yr__a6FB}20)GR8n^3(vF}LjSNyw(NRRZY=0*GZT-`*T^uk14IXAy z9=G>jb-6B{eWH`0?)sjql~}G_3=mAW`BKus5A61^NQK0!(+Pt;C=0^y#l+oxVeB5+ zFJTFBJE9~#52n9Y?sfLMX!?{F>^SC0CN?CS0@CfUag#k(q5WzIvffa$}=p3O1#0W+#%l|N#5=-F*#`RrbAg}k5a_60AP4tO)c5CrnS zn@aOvY$3s4*=x=wm52EyOJ5M5UFmb@fdKDz0|9p6{Rhvg5f`8Cd!38%nKf(Ra|62N z*yQAQV00ph=;oI8-JHkyTF%_JhmH_$#=IQN``WsKBwA=%basdn-z_^KJr>ms@>=}Q0$%3)3i2CgUL$QcXGi(OD zJ=Bi`2v&Vn+TWE>@`qtX9teF~hji2T%SXtp46hO%{cb}8Q=oS76n7MeuW(p{o-uwo zcUwP3DSM>)KHoEe>orNlo^V;_!|Qle`LB{b=I?lIb$yUVmt)vAUkauOFet573kj%X zKpvO%an&j&cP7wC-v}m2m~rmKsvj;lU%Y`WsO()P;3mlN0%5(PuoHFSIHe(<(95&$O2 z|F4%gob07sWsmfCsZ6bvRH_#h=n~&`({|-2WywrS8vsorx{g+bwhZLuV@W(d9h@CK z2ozc){R%ovB{zOCIBY#LCZzb?`x88rVmV*vm@cW+(3Xx`nmcuOHLqd1MrP@5ASR>F z*y4?)uXNK3pYThl5+M4(?u<~fyW8ASsrNLNw%qVKL6_4_a90j<%&L-;!{G5YQV({P zvd26rY=Agc-t>s#;NEZT(A>vmkWFtB982v5M;&|?C*d=~Gs-=~Q>E*>qz6u2VZhRX zM>w<@Y|pe6N${edD+!%+)HJNRj%S{6&LP6}c&pugkE3ucxfwY@+~2FlYT--aco@dn z(nV$o^s|4AL1VQumb26=RGjSjCaMV1F|x$_71dl;?=z$Uo|$!zOby+3>(b^PPptX& z!#^2JW(+_>9o)MDm?F&BJ&xR8e^F$>;k&%NJTW;5;zp*X$8oCsE7fLzW()zI{0sa& z=1EVIE7P>zy?YAW7bsiN$$K&^KCb3MScf9M`6Ein)e+3QJe1YlFSzVjlh8e$A;eXG z8O%S(DXAwyrQ_mxgl{mQ;1#o#b+jqiJU1;KguMt-zCAZ7YJhY1`n0)bW?vTy^L@V6 zPhJA6qRrJ2df+F(v?=F>!oIpY$~;i_n=$eF)v~wv^C5+JkWY`^^cq%j9_T-aEt#K| z1pt8ORE5TUdxvFiKWD~by+2TLkBx~&ws3hT%t+=7y6pw8om=y3E$g2jWTQp`JTo8O z@a1FZEhESVb$(FoIi65>CNR+=Y9~CBnK_;2Nb0$S7QYk8P_buirC^HLeCyWIXbLQ? 
z*aUEi!LnC!Nl6L7HPXZa$xsu3egNpfU={d6K)-l<>lNi-N*;!H@gb_Q3FSTmw+ePw zWp?Bw(oGyCWafnW1!y&Yu{s3q9yI>W|7k0XS*1DqZ6Cu+=NwxD<8CjFA;5X#D0^0x`P)vp+<7HxBy9lF zBX^e#6N;7jJ6+5zmb)*Ge(xokdUmHSY&|oF5prXv6LoviDZSpA7|9`D%AleOCiuvN z46!KZuKYqZsb zn0Rn7vUET}{BwjP-hD=x`X8`sgQ4@Cyu4F_kmFcF^7{E>OC{9Vsg%_Sshr(|nn?R@ zBZeNxfXaz)cES^gxJ;TU^53$IB;wkY^!rK0xZ+fY)v3zWz8~!Ge*9P_rJgiG#SA?- zc1Imz6L5tYgE0*5KS3wwe-0$M2}Q*$@x;4zW;b)sr!*RBBBI{?og@}95LGsZ_IxWQ zuz9$KSfnbvL$fotA&zTGN&i5n%jr}!yc#RsfvU#%sb_P6fs>8 zsxAC~=ReNS+D9=lG2rH)rl!_~&BIL6`Ndsac)*DPHx7dGt0aK~U{TV=T2}0LGGkj% zEVp`10;CjreYGCq2-UEjB)S%7&&`0(j*;0o%)b@A{b$tIE5hX%RhX*6BkM5y=ylkv z?=+^T(+z!LdR2s0AI&8AUh{eo6L{c{8&OKU2rC`E9|BFP01$+~R=QqCUF^H=u8ji5 zS{u{#2k)(Z9{=(AS(3KyPIjOdPkDeFncB26Wu(D@q@yve zt*^DZU{$EbT3C5t&D3`OmHsP2tWAKyLBw{($^K#HSRa6omq z2p@(R0MQAUI=bmb79=ZA+UCt)dqhkVfa@PbCj)I{6K--n6w6f_fsc>~?!(9>8Bt_c zyykc^S;+Y55{;M?6n%$BT|a3<#KghFjs7eqYj=oWJvS!$KEsnj_hk88k4mB9{EDDd_VQg~XX`kv2{V{0?!W*}c`Oa*AA9 zcM|~=^9rqkPh2>qXVzi9djVIPCIVV07X}IB6x4+D7$)nD{d3h0wG4l5kYMU}Wo0jc zczJ}VP$oh>F5gyMTneZIOj7u0aLdV;qI{fiWffjd-|Z_5P1M$Hv{c!!C_NbCBq{AT zLYshg2T=JW|9I2@#r%$sFM%;>mPV!!`2M5;t_5O#sQLJ25{Os?1m-4NJg@RV^j5f( zQHas}ozTR^WJhz0J@+rfAKmx<6MsZ51r>9j8d|OGuW6JX{{i%#qCnu*w#@U_+S;4B zkBlQHnx$nY7TUio4#Qn04*==VE%l_160I*N8N4&eYjAnu;oueySE& z#kVm{_RITOkjl17-eA}Vn@&Sp(c5~F{^KqBO62lM z3O3)L!u`+@M4z5mzvcZU46kM9N9h>b;OSuTh=07`>UF)Xp0wE< z|1MD^pdG4E<2V|P!%EB@N%cV#dUwuUEQeB}S^mgx>HfU-u{P_^LXxWv%6m}*gY3fD zwt@N+t)p9#%)7o;mff0TC>>#Xp(=P<01+?uPw{~SG;azjDmZ_!r+~JQMnoi2A_UW* zTH77;BtRZD4DW-la&B`jCRXQ-)t42!xY=nfu?VlcmjyZI#uPG+VJVSIDT8aisj(eIaII57{(f&orKUbu!QiKuT*f_y$}PLfkpe%yh9| zjpgh|N1Ljyzt}%XmB>s6+q{eFuj#>kHYa$mn?!KRT~d0n9Vq~_W7Agr z#uM>rJP$F_0tH2ud3Fz4S5z^02Z!viPf`Az#E9T0Uk4S~;G&rLfLUBqlL&NdAcm)I zH8SzDD*0eE?0H>5K|v!BBY_wSdZi4|OzsIc`-9_~UBeX^ldeBuZ6cliRLOz=sYEck zz1C_QjzBX|7Q*)&03kF$dZ5Lhotih+g%i8fdqx}HFk0$Hg-7u2(}h1B7A-!mve{99 z{?Wg~vPeDuPUYwSB8iiIXZmr$cVZ&0%+}wGsr!?d<3CArXndIG=Z2B^W9!O#m0>T2 z+HjQE5q(82pJ+M4F23yhc+6G{)^9ora}4&d_cPKx`HG^n;)=&{a5#@!Wnc_(ct(d& zt(FoI{xpjIwdlgOqt#%KB+87}4$)t&j*ky1vWvY-gB5Q?F}c0g)V*S6|k^@?-RqbUQ>JwRT_#KB<(tOlS05GV*u zD^--0Tk(_xmmL@`Ce19DcfI@;flV5GHz26o>XU$FTt!b$(bP1r-+D2Hb8$OzmUvW8 zS65y_B2Yt|-&I@WkIsIC6$>;0x!*nwcm3_%T>KlKzCHhgY<2$b5}t!~XhkNp!eFjk zdF+sHazki9$s_4ip02T$FVIZ)w>)HnH-P8(9CJ{Aq}GYne!1qAv@(^uW=ETWJG;%# ztcH4j^J=s9nvV&oX8HO(whXL42Gn{)tsmp~->-JhQ8Up7{D=-^C*S?b@#R^};RiwG zjq&e+`)H>nYq2l2Sm%$=J@+_Dog$zYPQ;XiWX6NVZ?5pSOC(#iKfR4eZ{;m49@hgc z@sUCTuGsTrQaSu-Q!+Hqac_^NECss-rWXZOJd~W8K`+hR_Garcx=7p#E)xcD>&B;8!Z|2h*b?%>MQj@B_>6T z%~7ll|NJQIdgCti?D`}yGE|}Z{iLZ+HbT&YZ<5N+$L|wukaWG$kL7dFs^PHGgOt6q zU8XSN);!hxX#akxh_%ge)vz&sLEEXlp$qKIzn24K1!E`YB~7f~Qg$4REMuraIk+u8m&sMW%AApRC*CcQSP}s?Of08S+%=~sk zL~q3&%}qnP>?*yBFR2nhzfSTFB;R{HBS59gCv9oBu(dmTAw!j}pkrA^*7cE3R`cW1 zbQpnC=9T@bru*Ml95E&Fw0Tk(46LoLm(tdiF@Omf`S~b?gq!FC3NAGL89NDA8XH|t zQw>;{llO$Bpnr^M>U?38t3SjC$_NWFW zq2EEUSi0h7wK5~?-Xk#-9Ig2$0eWJi(|FiVefD@Qzr5k$`95huEa+ColA;5CUVvUm z+=1Ks{xo0Y=>F#5WZ3r}4;)LN&$X~L)V$8EkC@<|HRPL*cc|o>ce*c)m=S;$?*%n= z0EmjKyLPmJo=xe1Y9aWaj3|eEwFMX<7{SRizMTjru|s5muN#o;|Ma9{Wq|nxUhgoy zQU&zr3ij)?zAelb@S$EKZ+=(Oxv;cS z=EaI=dW4X>b~*i1l=^M%a*Vx(dv%gY%7>6Q<$wEI?Db&+^Yv#K>@#Jt^S_Lm9C6dv z@;I5oBfp3(*NErF(;5a!)LD*IDPCx#Cx>!UwXV{b#F@zM?p`frG+9%!_P>7318f(7 zC^j}R0q$)?&6Zj})iaFVV)Zd&;;Fqqaz?6-`I~6iq!jJkpH`bX>m31@y2)j`D43~- z-waEdI{62cWYzv#S}Fg^qvxwJrwF=N6gDP%>efMQ^5zdkm`Q~@>EB@Eybkd<=+`}3 zmCA_$QLex_RNqM9UO1iZewquT(tK`2-2Y{^kc$XVB-erlR>$XD=gr&X!cZKsjl; z%2l-j4FQ?hcV8pUpE@H-hxP|sF9}T4)fPTeP$vazWpH6mgFCmjod?+;h7!6t}Z%?H;7n@)|gs%jv;O^aNbM1Q5-N}0!R5-i7~fgZ%3!2I#1 
zapELB9UVDv(OUWp8S4yj>U-{Ux4v4OUqUKW%r^qnMC@{nH}wK>D|`?|sE(4i+N}Z+ zbMtCA6N+w7o-y?V9x?9J*ul?G^k0LXKe;xkvpd=6VrWw~@Sa`X7rq)mmy=Am6smh~ z!}r)z_Ow)`#>jy16e~@S84RT$%%n~sD*OmXCOqn$MeXpLueBa@29-2M<#U&ZYwR#$ zp{0ZJQi{vSgsX4B-A4QOtX+2u8J*q{HfZiqCglzcZ78Z{Cm(nAFsmO{^CB7(S_p$^ z4U$aGKg3s>J_rlf88G?FE|uKi)}1d1nmo$f6WPKEVV8NyOdtFp@Qt>l`m$*&KyAwV zx_Y=7DIU#GV%VElLVrmJb^;_EK)-}gl~VVf>N1; z|HeIh|M_pfBs-TOrhf|vYd8Qy#HzuIF}P+sV#sD|Zhjbg=cJ#tRY0 zGiPIXZO5BKb3n!ECKffIYG(ug2(ETVfPp{Lz9&7mQ*(9vJF5&XUBskM)Lg#E;O|=n zggjk)A?OgP0*60l+r4vqNn}}B4dmsMyAZ>>g90lzLgK<{;h^W8;cY0l6otT!IjtYSHS*V}H1^M5OCq7a z4+vm-G6attC-yJ7oc>A>agQSE9PHQd{4 zN0*HEwBc*1Csbg*>=S;-E{pE^RT^V-o$Y|+t=EKZX3w4bbW$Uvs$U{aAZ+^c@xtVV z>nGt!Q<_|iGjw!R8s?uTxr-Td4MCFvjzVuM@PcS#TK0rtzweXx^A+nY2zy1VW(Hap zpk>@|@W4Rvw=ED9=x(dI{qEb*_Nm{%i&W!Zfzti0h#xJgqfgKBi=4v&#?8X!*`aSb zE~9^pUnmzC%V?uv>2ppOQf^)hwE814F*7^3KLMyp(EV-WM%vt^evxPa=FaYRT^V=E z%)@Zc%{n*UQ~tm*V6NCijQw$p`>4dbSh#y|ifdrX=;wz(mBQFipo@vF`aFL2fMiu1 zS%ZT&Z;0sNx_ZhmDY zzBA9rZ}>y{ES4|kcn%FleXJ_N#bfJSI(aiv-I-?nB6Tv!mpGc4^92mYk<^$MH66$^ zDfkC8x+JMO!Y|!^tWZ-kD6E0@nGNtLwtal*bbZ3l%N(Ett4|0R$5$gEAtoo3cyH_$ z66Jb);jH5(WR}0{Tshbcox4ymF<%SP5yHFUYcXRbcm~`Okebh5!w&)|xv9TGMMqfv z)4{y8Bvi4vER7P9H4G?y+-hpOrg=s)LDbTErO&ME$k~yTX=ZoXq(<7(W+jB$+gtIO zwh#OjoZR}KCTZ%h{9-)*wi%CJGdx=7$LWe8{}OU0XzFo!Wy#Y4_Y7ARM|7)nTY%@V z2*kL98Rn9#MdSs=&@iR;(wwEDSHE0PX7*cu41p_WhFeVnCx=O*6gOxPeI=ykqqUy4xMK+Hkb0O`;netXB%u<}X2A0lP0I z&|kKP1>ktKjH}3(1JMXNH|G)g`_Gvn_Qb?AWG^XB_};>C5^kk?MSo%Ea5=5G$pK`61^aK3)0`f1(`f(3Quqc zr>}N#BG4cJhmozaii$t*W1+f%>GL<^0n0ZB-|sCPR8PsO^xhR%{Mt-tA4V)o9~`lb z+Xvz~M-&liN2#9q0b8Yv!83{c%9iujBqj)}G`K2NNA8d6EDqi7z0Cy}NPaUzFO3c67Z!2p?Kd}0GHK@ZNmdQ!M-Flsz!jns_NO!IsXYJdqY=xSOPdXd z^V-U}jOqGIG5~luL?wKD`G(WPj@sTJqi;1*UEQeMJp_Z7Pg#5W#R$-9YLy#G)^4t6 zBpCtO?Zng+@PRJT7(9pXiI?`&@X$vJu#O-*IO3($9ujmb7Pgt%QVE*V+rM~|^FDao zSV$q_6ZzsjglA`P$*|&iHtIWirrS+(WJm}sEArX+cns}QN=iCVRM%T13KYx#Z@k^H zLrYKfkVDa=MNlCS&+%mX@N$OBgMPhFX~a$9~;Svhq^ydc)W8S2Qdv&jQoiN zw9TBMH#e4dd1SyBGz9TSNaYj)ic(lQ5rw1?^LH&)sjLLkJW$q?^9xTRBI0iH%6!C^ zjQj!H9mo|oou91E6EBnsqC>H;=$tiga!(opp>zu=N)rDD$&!&8jtsz7oAF9!D&{*{ z18X8!Cd%DUOq1y52>pN6Pzq1}YNNQ@KD)*o>|;vS;UKEB1pp10X| zS756EFPr6IGYr>8b_b~|idgN2pT>UJ=@DKQ{}LHMY* z@;Gxv_far%5AL+s?$!P5c%#P_sXF;$=Q95T1s%eAo`;@LT@Ztd*U)azlIon5cs~ELo3^%nyvYF0Fnv$?d6Wv6&q}d2-|FRHSrH z)@fC4cu{V8-0cb#)D!VMF+q=v1W_tN-vg=}k1H*86VDghuhNs#()LaSRR6s&k^h-E zMN6lFISyRGN7&wqqd-+UmZyyI9)n)cT-bTT_GnMBw@dtYuukt=5X+E=Ck2p{+V znqzRDHU$0?I8zA-d;`P+oN38+Fl{EHxO*h|;lj{-c4u}<^II#l5D#^ zAT5bV6FbMUPl)hmeaJ31>8`x&+Es@7n=-B*B5lmHPnFpfiFRkFj4O{@Yp}}ySoc=d z4U-lr2&Ua|X6jv^xDY;`rB)>Uy*WKU6#4soM#1*W$&s5+;1`yEmdHPPQwfB1 z0SLMNv!j`0hAeSga-Hi-9$;h=l8v@_sYP02WYzn^WL_B{8$fF5=;#RPn+3Q3Qt4fLlOe!qrmG=SE~ez5DNi``6NEgyi;$z{E^%Eg zN=HY3)&Y}vjr7*?wGjOfVo54*a1xJA&Z4o4NfSzsVe9Z_-q{~sp&-pP;BNe%yLbx) z?)C;@X6VR)EH}0qki2UDoKfUR;W^vJq#HNaiXR}g@T!=6o?n%R7q`P2P`tqKZ90_J z2joBi`2x2^dTsk8@$jB)Oox75K$fi?I~hJ9;VZ+=rvMI{sdr=mtQwhtK_sn3dJ|#c zH_|4!MZb0W^&1Z>%d*$iGa5P7`*(LSU8~(kx{8=CP;{7~ZX)r|(jCqNGQVglZd+sB zVcSK0X#+`ogj7ESSc)&AT%OB(Li3P$Uf$?iLt{q&UyJUifHf`Bt;Y>)LkR8SYl}ii zxlib75*d`F+Cu*4A@&0$#EBgGFy;l2uQy4fZmQ2Y4~f|QAGs;~Hcd(C1KQG=#2egI zdRUht=O;(JP_maX+i@IcWxqfn%=pNNpHz+kh@D#y&c7Gc`AYU*rVfZ&{QRVt@-OR^ z84&TCzlO_l=9cagyQM;+VZWPrg*+I0NPY><&n;Uy)NH-Tur@-zsj5Nx{Bt54Giv|z z(py2kEYX)$e$$_0YJ87=BqTNbF(nf%_{0euG*lj0LfoOAffs^n$_=b1`%bzv7_ z?v0dlN6=^J-r$oy`%>P|l`UrB<6}?aI`C~O_ay>wylbg_vC()jsLt}L2XpzmOy9)x zRKre|^xL;-7fbf=iD66Nzcm(i=5&+eS*4gy^YUeMXy{X5X8lus^GPbNQKC{oJk~fX z)gb5O1r??n(V?!}&yo^TfC7OQ=!>$^dK2Zi8hpm@SnE?$uARt zb@1sGL8D6i?kxTC%TvK* zu1qQu(c!c$K2rl(H2t3LN9hRh+H)j_FJC=O1VK&=@pQd10u*98(;~8F 
zHLEZF3u@G(*{Rb>+aU1De zr+7DqFEi76o)I9*5$HT1Hn$?QT(4dM#LF0n83sFG&Q%F zEw?gWWea5sGxC0UvpL`Ah|+9)lalkUbpWyN*7R0D^x%PZ?F>|~=dEL~$vQiSkL_ISNhL$jy|BHz)=ldsi!i(9e!4%Ky>%{Ea>_|OpKghC#Pfe>SAE-Q-v0Sa;*WX z#yi~FDEZa%m^+Sw6uipG_KIPV7n^JL&tpGB60bToA{wiFD~Ft1wesJ=+k{W-(43naggW)juYH_?{wcZr3YH!n}3hC@R#NcKT)bfIQh9d zT?&9S=%5Ra&XbM2V$EWP25vnMkA}j>I^e5t2`tJXxY>Rc78d;a^&Jp{#X7E4GfAIk zEp`5HeMro!3!Wd^C&`M1LAsrcEHW&#Jky6zqMP4{y(lt@ze2G40W2=?fD)TQ1{Y zNl7<0JK_wxL$q69%skNa)oT)Fbc}_aeD3n)7s6e9ejspEcXylIF0aI&TjxBwo-+z5 zpSdxTlUwKZ+{~(??*80)7!|I`8BzKC1iY`(K#22L;7RD^hEIS45D*<`8jk)-!_iYR_j_QHA-{~rQuM*n7Jc^niS|Y$ z(L^@qwdW_cxUK+NvFP6y;sL<-oc}R1=`!Aa!u{d$Pz;vJwXoaZoKO8ZZWs;o$XDNc z7k6_UneKQZAqkhfD7V|3nzM0NYnnq*hF(4=uWf$M-UehJ{VFgTMXu3}s-8P>-?@wU zV?vA?MkqYj{+%gD)4t#2XFR5k)a*|&uj-r7X>w*A2bxbz76q3KYxB-HaUSy;@h12MT>ea8d_Kz_v4r0r=Qu>8y2Gd4 z6fWSa+B;dR=HMyy5E_Vo%9`FJopuod4Rz8UNH@Ihh^xo6hXpIdy2G~kBe~TYy1myE zG%hDgeY!y8$kuW;JooB9r~ICV`gp)EG@JW1NO$u&R`=~D~paGVIShc6X<)oy}&v)`hg>U; zKSn;Z=r$DR#*CscdpmTxjC8d>$+lDo5^~*+7fOL)(YU@V@NfS@(5>wWy9TsytecaS zeJp_rlWawoJZq&Sh;8{oZeq%#M3Fraa%;3K$pW&F)T`16}f)GHsWCU6Z-U|u2i;E<63j2G;*!tDeYs&O$MHgE3 z>n6N8-IU3N7}~HXIhVuM-lb^NL`B`$>i_zX|P?4&J6jW|49>5I|IU$-b+OUg}{^fFD2F7o11 zLzDn}DyE+ldFw9LVrL78vLJLv`BXDoXLtKD5h5IzZLgA^@eN$@o{QcW5O^1jp7+@Y z(BCo$iXZCtD*E9{@IVx-bI*C;Btd;1jL)3PlJcy@&8?mM>tVAmy8&pz$2HKw%L2 zQ_wMJX*GvhpIXz?J8CHZ4a6;d(~{>KDw;!Q5NxJ01Z3H^(0$ci--=IQy_iugJZ6ke`U8wq@G>(f46>l7NbtG1MfMB$xK=(#(A96^B7h6)P20;DpEE5vzfiMU7x%xafzT z$`aT3o+a%bLMY}O;-@4sNJGjgp-zey1B`{IZtm|Buc#1GS*Sw zJ!NI8zpqRNqwS2mvfKav2ex%9Ae-9QFog$N163Bd z^pUkc*No4Jf=D*AOthfz2=QYVpP&Lx>|%8mibP)or%q};U_OF(r+@rNHF7T`z|ZQx zFxgYSYAr^NpZb^NB8(y9@W%V^z;bk49MVYf&7fiJ(ak>pBFk^r!B4uztc4pKrti#+ zL#}&SIL?^rtSjX8+zWbYF3vY^_N7OymZ6mt&xG!XNw9GT3Q`*qF)CJ5Z?uC!2(rp2 zb1J4W5S8IdEW(>H>`HMwTM|wC9@%x#GpkhA1nu!76!20}lulZ3TU5Iq&w?Mz1vcLY zFq)lmxbPNarPc6tgJw*ysDzWgi3E!|c&Q2M&f(HA&=5b8bJrg9%zZhCkl&p?fRcq8 z8A4?qnDP7lUZm=w4fq~V>IsIocW`P<(2Ug*3=E+CK+uet;{Q?gmQhuT)_ISm0%AK0TV^g#H29r?Gxmh=K0B4 zFTqVx!3&7?t%E*17FO2UyH>6TH74!CICgNJ%7FZ;p29L*GpsaJqY=xfJOsFyK*0kk zyg0A|ZX`>O?l0l)r=Yl&lRv?GISScv*g3Tr=Z)aNE9L2=YjgzTG~PZ5r)Wq=K(_{w zn9y72?LhYFeVv(xsTj5($2~Cq+ z$Ki$lW5~Ql`e1#%!68yJzJ#C9DB`Ox><4`Ol!nr4i`D8%pCN z@B-Ke;J=CIu+25af4zrRnz5q${l<;#hO268(I=m+Cg{&=(H4%RUCE;sOfcEVRqN~O zylDrgR12lWsXL%J4Kt43C%-q;ayxqil1`5RbMXpUusL#E!7!}CQi;`|4+m#a-jh{u zzk)8@cvF_jjq@bu^)U?jL_A%4b!F7HBrCe#`zUF)U=UynuDwX>DA`TIhLb6{+Y6NU zaPA#_Crt^4;O6)%EnWmxtAQz9vQ~C$QidV*P{mKXRa#ZU14#i6&UZZk^&8d0$r^*d zHSv{Xu|<&$xRGWiDgG1haeBX>&arYcP2@3z`w7{+YJQiK4!sBPb-6#b`>=Qj4ymk= zcCBoCC)q@)#MBd*enk>mzE_q)DU`WbeX!K+UMRaeUbEKMHZmg}EK8LzvFT;LD+Ako z@bDuNLj|&$u8j0)XI`42HJ|OTK?gcQ2{^8$&t)F!p6bTKz95{3vS{<_&1+==}9ea z>)5&B&H5bP^uer^XKHO}R->o~L zZLWK@PWLAXft&dWduFfopVlzNX9;oSs#7)tVeSSAP-LTYe^@s#71&Q#kp#HX8&2*& zk}hm)?9VaJ2gIEY{%ZdD+4j+_a%hy|$Ie6s3HQPZiv?7X!?tdvGIwf?f&Ovof~&=X zxW7-EracuFbKcq0nH;e51R}y2-aTkZ>oGO?-T(U<#BF`&bKQ7=E;hd2>~d3s`TYlr zT8WUq5!aQ*{v}IrM8r=Zkr40r^N$7i`TN^b!t#)l!{*5D8O)a!7hhI-)pY}3w-bn* ztCwwVZZ<9Ft@VZhksPIP8aPfEF72AL6H9Y5lT)EF{m7My zBuN#C!u{d~4fPEbWvo?rn4r?ESNcIse~>HtM_eSSG$oSN<3G1wWLgUuyvZ|JAdY}; zq~)^`=fwcpBZ-NjLb?UOo;T^N+hKmlN8(ood~r_Y(s$r80&5N6Or#FpVWK#4pvGD0 z(JdlW=J7A%RkVE+g|yv4`hYy(!EQ|ZFHBF0q^Z9WRgdw7`WYHI%)`6f>p3Q2VQ*R% zAeEn=9$wXO?rSbS!DD#m>Zk+$79|Y7q1K~ zFKCaUBj_I~A(1@#?F%j;_%U~=?8S&h2uQBCO) z4y)N(qG{=#6E$$$OT-34%qvju}!W%yn605%P^3KdFRvg9UU1sG5{N~^WlXOxz*l84j7-2-*eh&e8(#&%kDp3 z9H4`CEs|iQe99(yTU?pKy!|Z=ibwR*-;%7n^Al)i{F4ybDABQB6@0($)Hm^0NKqd1ck}3vQ`R#A z@Hb}EEDi22_W*M<`qet}>MqoJ+`xd~zMj(qg^}mmNtRHj9q$=F+wDpNnyeJq##y#p zp@YU$1E4^a^1l8fW>77g>VB8DvtnbAQnngTYn~?z< 
zO{LYX;Q;$CGxyAVz~tqyf{tY$lkZ^~>zB}@Z+v{Lg6Yjc3n0`e@!TCA9y*lWMf{e_ zB|hXu_troBlNg@yl6y2Nti;E*oWa9@;aRQu?qI}?WO~8jMg;agT;uIK*}}ltU-!!| zr+*rzz9c!X(uF+q1!6vPoTx&f@n{($;GP(O@MRQfkjdMW?5y=4K-_rz#oYcnh=2>Qz3HTt#w7hcXDG_94*`!$3qC+q0dQy@{=I=pZo_q4U+qVTQqCo! zM3aFK%WrrW6#-s*2$0^k@1(T?bN0AY{i;yKd`|QE>>Ar@Bh9C8E2n7Cn5-)xfo1N_ z4hwel?WCF~cEq#|R?H`ZpQA)Efq^^f$2ZSQvx_#Sz-c&ekI_;qfeKTIlW*=3jHqI( zzP10qx*1rj?u)p%INRMZA|2u0#SOgN;12DAv+^k@N@)~yl>SExKetPCJgTahMFhkk zL*0M;*R!eDlm@x{#N?EI)$ths7$`&)ILZ34dH~5|u}{8DYy$$|Gg4FRv5+NGJKOig zVT}YL^>0_(m#5g+eq+aVO?UR9Wbbplcl4x3H2lmqu5Ce1PYdIp`9h|$Z{!pWCe*Yg z5V3##@ZPk54LyDtf%Cr)_|E@+J3KT$-ybx9kE^bW4*?O-@I5Yp(HH=9MI|H-{&+=~ zy!}o0&|E7KmC{!(tJro1qoi4dYvgQXdZc@REEe@H!UZ~O-C(GP_V8L23M%g-Zeucp zy=uVD+y|&B3~gq7Xw^7NS(NOQcKg|UVO&sxN z)<2#*J7@C8Y#<3hc%iAoTXb!{^F-qz5K3MzGC~2w$msrmY*X0`?0QE37}6~M9r%iX z_e+Hi5w}%8ExHum>a5%aAtz_zqNl^#`}=P!i7Wjq3@Dc(|L+bET+13Yxc#-ktT#zv z6e{3^|Jv`ypQndLMisbL9sq~rS})gYkN%^NX($)@t3?mhgn^2;0DQLvkEq}#HNQ9Q zBGwmNgz3fG?IbN3C58rcR>RQ+QCnM6HfJ0MSHzwkM>x1A;ka#bv9FZb+J`n-X-`a@ z-}WjJwq>QI*9Kru&@4c@2ZX-_1Oz0gVaZ2jJbyu2rfHk+oSZ3Pno8%{#{muL*6 zc*ni5q1yPm&D&ao=l&1~TOQC}KP>LtQu52T=9_V!S#K)wInr#` z5znSGh@bB?=>209y8M4xsV$&ch@~7>X}SX9#HglgTJ&_y+^7qc{0!UaZ2%{f0yTZw zoNfO>xPttJ%F0(0oFCsYqyA1hL<}xu4&khARQxYoOL(+;{tB_ zT5{O|Y-~^-%Yl35G^^K`W1-pEHYsRKmE=umPm^vLw92o|Jb9MH0U%OA@StYR;mioe zn817(Cm2_L!Jr*pg+%rKdLdoJ(4l-{msi)Me(PH^oey2`&ZXE>XnA&aHCZiu6|>Za{Opt`nz0)m2^AvUR<6g}+_S!vS4K z4l3LFKk%g78-n!iv7fExg@J&APrIQU<0!@f=d^WTo`SyX;`{r2!3CA1GYG+0g;b^K zOIlm=!?en*a}aXv_QiryL$k+ASeIYTNy_PQZp-6pqNRFSi369`Km5%4O9UkZrAn*y z4uU1eh}Z0$3kDcF_*{~aCxF%M|02-1=gU%ll!2k^v3SOGv^I$&`#uvQ)}jpy0R+a4 zlDZXQTG~)Xy&k(kfoZjwPOrakLS4-PW^CT1L}FhtO+5UoO6$4c{fGVpeKP7$+kaG) z@6xNnznZU;oo{JOgZm&QyBHr9+TW{57DszacU1#lcE2Z?!FjWhA-{mX)7MW&Ns0*V zQ)VFDRVGcfQ1*KvZq5Kl_}2;hc>Tv1w7$Ob1{kc5bLBd|5~2-ir9Wz6C0X&;7E9FT z+Ad(I^e7t9r58xPrSOLWRH_5Sx1tXk{^3je>Oj9Y{yka0aZmH^y_NbX(7HW9LuwOx z3s7NQXsh`FP;qLiEB&i406{*aZE z&=vXl54>OLLVT1DGy;qphnrdUzVYq4tL|E$YU^VpmiU zjP4HROn(x=)RSYDYHj;%--N6e2RL~_MR^gMX9x6|x$S*&C+0%1z~W*zm2=OjXt;BF zX>RUHp=|E({50=ScI41B)Gnt0Bd-{k?dn6o%I8IU4E5Dh=$##i=I;1b11my(J*IE_ z*t*l!-@v?zsQ_)}Qb=0m64WMS(rEULuhk1J*?D&m`0YPR2~hRxAbQr!PnJLAhgp=rEoZbbH_z0FPnB7P39 zU8Ck=Pp~J37MY3ubBzb|wd}}>LGl5FGP$2CJ>V*&%znmVFsVqoJN>7Z%z7ju5Cn5r zaECdZP=PyQuPm`lSG`!stE1PQq-^jW!MCp+GFwQi{f!vUL8)17f{L}#8_*@B5wtZE zS4L6C8OIm;0G$Du@!Hk(D&kHTDSad$h!{6I{0-v{ZavVX1B5`}%JUd}N;xP#Zy7H3 zBYLFa8#Qebh>GVV949GyD7fduUoGBTlnd{Qv0l*juxW_zr#{0^71Kts? zR|?OZ&`ILy2&>jH?@*ej4V5?bON5w$eb^MsEiZ#VX-4>WZJ=s`N|o!RdJr@N;XTjS z9ko$KGg!yo4k;cZi^+b(d?vNowahQz9SV7Ii`!2m7Z6yni6I;8>(d6D5(F%!>_y~O zFgOBrjmcz+65sRjOf%%-%{zI9^!dvQ;Ms)oMRzuXLu!RC+WP!$bFsSqsVFGW4PSx( zW(Om;A^u$KGb?X^m`16mB@lHeito}DK5pYx*uxNr z*igG@dw}S$&zC-A2LXYVWSY>$vPKw9uPS<9Y&Y96v3{@K%6*sEd*+bie^pb2iOrl? 
zpH9TXT9(Yrfxm%^3%6C6RCITH+e_(4`ARV6;7DkDI<_A zT=|>%Z52I`r&@~StPq@El1eSuI`Gj2cb3X_$Z|m7gb8j27w-(DnL~nMQOOT){6Ym# zzziBpR`ilN75#E6LjMm<{MhX^;7thes@tSL#IlKf5SJga^RR#UY{!`*qEeZdhz$h= zrCe86&rlv){R z-{7#{M`L7U)Z9W*^K|f}jJ2fz%1W63?8SNNQL|G0sEi>UZ_wMkAw$(Aex)X<2;AJ_ z_BH472x<3_;SuIl_YhP#m0w{ZA$O}QEwl=6Dm{CV{=PiS&Mk$OmIoGsqN}rtAx!Rf z1<+?FzQiP=Vu4%QcUTe1E^uS8u%FqS<7XM61$>=#_F}lJx8ElgD=Hn{4}I`$c!^Ru z3Wt-H&$wxliX~!Y>dLE+CB;lY5fqhP#AgK+8MNQ=hVPp@=a%n*bu7wE(11OR`m)?6(*CA2usK|o)0(EDt+AQvBw^=?4 z9abdt%$w7sJ&bekxi^=So~AZb z>*=&P-Z&&r(r`H9Q&EyQ%V#v-roDaEhZ~&n_%3K$B|$4C9!PP~V+x0C!l!mOsl@Pa zhHW)(#O#~3gSuGM?c&DZ*nLGg117W2aN83H4icKoPbOx2E8=KaGCLy~2tr1t>fxu0 z9^{{k-XH(rb8M}r5RzOR6m8oXg^F0H{_P)7g%6KuDh&Zkxn?0U|1Z>FOANM_Buq?| zS^;dYTlw@C!v0ESi+e^HEiEZ0Cw3qM`}tp;kgD*Zxl|p>dx`}cRHX{U9>^4aBNR(v zpd%d|{-dZVS+#{iC${&;%cpBj>K#0Q)jGR3-cbl4t%R3FE`oCzN@40kj)!Z~RnDkQ zzQ@N(C3Ba0%4ZL9Va+|4r?ySc4Wmv?0(84K7~r*$>DT09rpfWs#_8$yKy#6dG?tWZ zJ=l-N#f;W*>BKg#o12cVWL0hO$(=W1M!~i+BX(U!1_3E-_jiTnN=3@fMXPDhV(iz- zIwJj}R488YES=uvn_IJmxuv0GrI#OiRox`?>OdCeaA2V++gC2AQ;klDxIWG+uMGAp zmo81>UZJ^erU3eNT61J~`-C{MM1w35qxmhrX44Jq(6Hij)w=79#c_90Lot z(HD?Ff=u0|fT> zw24E7@L?iJ)BWdf9bR=!EN$<5_>P{&9sF-43KJ%O^T>~7kezVc5Y z%Lq||%A%Bk?{NF-PeXVXMuJtoTX!BS@`!>O_sJ}m+;6p}#k(qQT2DN0_mS(nQbLEM zkEH2?TC!8JxJE`&xSa2{;;{HUxL?<%l%7t!Dnn{btF!SxmeMZZR%7uTYZ~;WnLN{M z$uhi`C6t|(cdeVt^=K-WHdYBq9%?fm{_0+6BG_co&~}VX9o8(954t&=e)g1Xi$}57 zZqgGn@~PMBsYV*kqlhF8YFoAb)b{Ae>TjS>dZY*2bADJ72aST3ztKu#XuJBUs_Bd5 zLVSC6nDK|Jh4k2eSv*_9#3+|sLg;|@tn^xZj$#KlZ;_d?53B!xRTl<3bmjR?)-U~7 z=-R<{QV~~6q-1&A{O65**jzl(AJ6mmwQ+5OYchy^wfyq2?;Ex&w=|BcD2zrgh{So~ zb2>Hk<@VY*43>n`sRJuMU@((xD?)KI9d!mq_Nz0WD!Hk|2FhB_-txSH++MxyW}zhaoeBFR;8*?sFt7xPZ+oP&Qiy;)X!=`w9}y zc~WT(F?`%ww7*L*K6*58B%RPmI#=K2X2)--@$P70Xv~&WpbB<6aXeAu&z*7@CVdjK z;pM7B-TO(Py&0#4HAL7we_^hom*o`#c3ux zezcDPU5+M@6{0_~? 
za&0~W&vXN@1)nX}VKF|TAl9D9hT>JZxmI?~^V}?T4l=ZTS6>iy zcq6Z!$`ObdM$yoGjHcK5*1qV(Ht0cD;P{NVHa6|jL@(NaHJMXDOb}0uXu0VH9g#rn zc=Y(I+2PTsP9D6(nLztXPuMZDKWhT)7m@ z5tT9ijaN_OGfx}OgvS!Q>^FJY*|*>Ow&q@V43Ds6cXCj$;zctJ?E;G3LGSM`n23|} zFZ=Y%);yLec{&T##_A7oTBnEY$TI2e;omnQf|_i#*879%$_Xt5q@>!EK#Xdmz_Sfz z6V?4V&imu9M2q)!UY~0c4|=ofH9fB;x!7gYI$H**bKePw7bS@bqGU`;g=MSXy%S4# z#~T$7DJlyy=5VCvIOY8@2;qV5%hl+_d5yj>-sj(avGtk}*5XP0WLbR>)Ud zddbbpzEP@K^z*&FD~PVQVgG)$dy)!$nl3pQTfDX5afIjf0!?JjT~SR8c0hJ<`Gfd9 zfi)a{HYWC;{P88!(_D!X9Xz+eHtT_u15@I6F%z1H@ zovf$$g6IWM<1;67-u?QS0Q?LV*e-PVVo_bDY_!`9IBaKo-kqS6IkFg8IK07RUdY0uf#%#3^u`1{)bA^R3!Q94$fC zW(XDzR>}=RT{2m1)y3X<>UI~jKX9UjM9WF_u5C8V@qB_73rJDuVzy+mYnK*R#+D1= zIh@c&uxMV0N$UhKLT$dp+O!amiGz%@FY`NR@Cv>ABuyW&t|1oEC5%2Bk_;lk6#V>H ztl~#%ufE%~i56g3HLs9bl$-kt(90;%Rt66%X=P;U<|Wee^74E;1g72`%v*1x1XBj> zibiIS-jz0o&!5p~5HEwdC@+tMg!w7+L$c9rDkNClhNZ1c)x(F>&giN6H^d)C{k+{9 zdpVe}-<9}3R5~;a3_uwa?*fHCuLCXUHe0s%IS&GYZ=;fu2HEm%bW(N~(PT}H|ADg{ z)dzFWU%~q{&Lz8C4tr84IY@^6kp=2(PVn&kY6uS9AwACDfgPc@!?*1D7ZTn8@PCU| zQd7Q<j z?#Ts>8DXNJE>R6SgHgZEpmo%HGDehL=%vv|V4|1o;_-*lqLx?I@ z2rxXzFnX3xn0X&V@7T$5p;sQzjIO@18rykrOd*{ z#ti0Kkfm2{UHV-wl{fj}GjHtEr$+jSrm|eWG;?R@BC}rdMXE0|m1Vhqmha^r?e}%d zt|ia(MV>vcN4R(ksh>JWTbQF)D8l$fy-)<+rTDE7QKsu+EK=udFZPrk+%IZq%-Y*) zD9-k5P&?=eRuBovxV-1d@b^O(Ak-I4t4?tB^h99RXL2NmBZITT=eyG$;n_)LW(M_t zt}8cBtxJp-*XYAK1fU`5XwC_^Cddm5&qq(%NUJgf+dN%EzcOkha2T@yb47MI-f&vg zY(eunUAo2Hr2B*!)p1OTKtc&}?cgA!Ti5#0f{pwkBCSs_ z&3*VRSiJYPpg)M)6C9PXc29~|ds>N;(zAn0Sg`oFQ@K6nqI(snf-p96?3IP$o7O6Y z@k&GF7;0qtA=E3 zOURpkh~kBGD-(=b20kr?GH=U}>yG-+Z8s&`R1;WRek&CapI;OZQU z3sb|C_>z4rjFVjOK&_A$&juLdR#W0=!RD*os!Iw&Im{=cJJ9wH*h za(7lhUzS%;c&&q=fOZ7C+;}>BT1pkMlIfairFbO>v9B8H3pf`7lH`cNfc|cjO@Le9GMu%VDI$7_i@>V29 zKDJ29C@b0ded$HMnAkNH4crs>HuN2s5&C#F|5o75FYy{AcFr+W&s;>;b>Yh~LZm!G zJKOwoD&6@v0_uZ*y7gt~ z7|(~-cn^i2H+}tv(Y@8wMzgH~q-kqX7QAvqeYng(x*_9_u&?DlWH{KhomeEQC29ex zoESzB`trrnSMZp8)mR6rQ?%pkY+gt;*Ne8yW1o`pzJIgfhFCPANh>HTUI;(96!Xf| z_^ARfFd_V&20 zVzaA7e(FR>hBWB|In_VQkS%=;o)iIfg47Zd-=Nh;pMs}XJUef@2=^cSE&tZ+Vt%9iqyd$o!11{0(d6xpb}g3 zN}UW_Mwf2RFQ;Wh>lYXLNJhyh#PqtXt}(tWnHBt8G*+syEBB8sqvu`IPk*s|fqiap zB)cB0udfZ>aV$w6Qy+=;Ej_=*KDwDNbrla zLKWyBawgPfzNSH=VVI2v>YXdkCk3T*?d@58byu2BUHW7fchIN|u1oM0%v4;zI`t|m znZVFLR5i*g0W4iF_Zu{lNv7&OG=&2ci;ibaXC!A6b%%_ZD?d`~6%-1p>dV?SWmSfk z%RD+5yIfk(L{8qo?EN3=wVP7Lg_22|D8xEoLG`^ljA=GRN_^pI>JlO|^ z%naU{KYiowk?O}@-=7hvmLI`WMuPm=u{P+@Jzs7UVoqNJRWl@)?~?K$Sa{%TFA;eu z*7w5#F^ZH(f+^~sf3_4X{|*nfN}3-}bt~fvq^(-<+Hp|gz4??H(gyXnpE;#EZ1#w* zYIdpMVDriu7#I*>cwo+-d`lWINOS@o8n4ikao&a58Pl0ka(~oV6Zh549Z_AMz~7$^()1?L*!~?U29GTqsPtzrUbQHJ z%>M*3J$XJKK(=I5+{`{Gh7J3M`z-(cAu6g3Zg~2h{_1$5N4Bf4Jt9|RROQTanQzZ$ z#ze62_EZWqV_E7#Ar`gDT4j(L=VeD}nr4lzAArwG*l>2V|09khuDx1#arVqmtLzaa z9N2H$>#~^?qsN_rTTlC8Td_p&J?Q!Y=2G(L{=Jj-kz$pq-I)@c6fQ>xGu>@m&W&rL zjNJB(4V9a&Tg~NhW3$|Xzq?&db(@-Z9z4AgG$Vf9z6upBd?6ky9mh~dOc8h~z-~P; zVU|b+DEEdy<}G>QMSaH!;qd7u{E`Gk9qAo%<67`$vMprEFgJn=qNDkweMc>F?$#FU z<X{Z@cu?XbpGt z0I9n}qo6ql2yFqXCrAS&fR|oSyQ5Y0xvNPDxtcySYbVDTid0=X7X5`kCMVbZmu|v` z^^*4!+^j7ZmMRX9pRVPL1%!nKh&h~SmF!*Qt2{NQHL>MqKU;ZGOT2jl%8(wk8XH>N z1vmC^P*)2Xc{!o8NL(wA`{n%V7EIg#=nBVwfq{EaD&*#Yt5=wwh&1@P=*PVIB^O=aq8<;gYSQ#ck zc27-X@HyHbaR=f2jC@K;aHt*eCQNecAz6K(&=fZ*4110q+6to3ox> zW!HI1!)1{D^waL{99!NYju+svgCzcU^61md|8TLGyFP;$j?Nw>aHftOV}qSvQw-S~ zNCPtNzXvQ>TC+~rrys!nl3y${iP>pb##0hsMmQC?l5{)}yJU|cLl(6b1Zq*h`;p7he zIK$<)xKjr%%}Rz@2j|eB*as?0O7b}r|2E#PqXU!U3;Pp^+KfVKR~lQYVveubj_mHf zuBL^_S|*PfU}vr69y;(#6;0kQ%1=QdA&LFhvG-3o1}MZ8{L#s!v)s_VB}A8;uqM0RI*?<54Kum1$~oF9pQo& 
zU~e+c;-IcslEHcpQ0b*SJa_?->EpN8XQKN0J7^6n61~;X)6#VtW?8%+s4lOn?k0b=*MAYp;U8;)_b7Ly-C{rSiWxtxGt<--)aPM)jvqRC zx>G*iZy)GVz+ARVTxV!nBYr`e3K;7iZ3)y=apxF4aZTvlh7wWx;|LRR^ZaR?_qRoF zLE%7nrTVozF0u4UZ+zfaKc_b=O*1{4YShOHmNlma?TWDkqwSfhSU53Vbk@c&*lyh6 zA;|A`ir|pX`q$|W*){m{;|OLQ@%jS$@Fky6eLBS8!~gMjQ&B+6ec(f1^IE^f0w#!n;2v|S+OQ*doaOLgNKJao!F~)+$G!&6}5j* zUHX8B+n1Ibu_HCvn^j1?7=in9`MHzSAvoPtKYlxYJxD{P?j=yd&RbXpbN-+srpQc6 zR!$B?*WM@=O@F8Xk5SZjpB%l{G6@mv+6{u9GFU!;9y|N&LiEQrXKYqx@CCu%LBW#Y zwB-|&$EHW&%P_pls*eST)rq|n?RI0k8@#7Liy}= zc#~S$34+y>v*Y#i!1GP&B*&=#!o3-Muw{FpA|cT{*?uIMXQ;O=9P<4%M;aHf;encJ z0KDjY%{>{hC42{u5MaL^X~LDwS5mSF(dsH_R5zhD5W2nWk}5OzOQu=jB(T_L3W*4B zJw^$tI(WN;>dMC%W@F)?IqVvewJnfWD`!vsTyye8bNA&>E~9bn)VcnMfDJVgaoPv- zht+m6?pQmvEE3rIl%nR=#FuS@%4Q=j`x#hRfBT=39cZ!wOL1YQ(zHX%m-K2<1P*2z zN7Ot$N^cf#wg2JzRGVylA?~1v6p89_JtPrBeZhC&aV&OAtK?+n6#FKl;AZEdiDeB9 zi>zyRTwklk6h!pFEx9RLZ+hzzK4|T_3(^j0Y~^oNG%kMs`^~bAplLpKm}%0r=V!tC zf;ijkz{0}j3WN7$W($E0?b{_M7@xXBl%#Uh@~bjdrf4iM{7tDC+rPTm-O%CE(QQ^Y z{KD_~4CSthp)&S|gv@~Z{8$N>mRD^{+s(!?we85{u_dL3#>4qawDLS+T)T!-V3`bV z!ljFen(DcEW?TlGOgctKBfPvIWPbg4jmL@>?tdHqW~Cwvupd@dp8;hO=8dz5b~9Q9 z&N_hVy~f8u2@(gsC^JY7H#;)}YR=A4hI9$}RO1)z5r{zt{m;I=D(s z!*4rE7p=b8BHeW%d^03`-lCXQ)}4E+*|u;rN0<6=xFIXuGgj^*IcWC~ry z+S(yw3#T}Jtuint^cEv`n2bYNEyYc&oi%moE=Qs%wCgZc&nbJBwC=!?E(X!w&6YHv zIT)4m;Vm;sDZ@8{*(v|p^ybpov~b*icETL?4;S@Ug$49D!2}?dlIE$pK9w6y8tUQk z=ypQuhB!jBiC0h@ryvu5-NA*+P-Hbg7e^L!0DH5Ykd#*+${{V^@uFyED7PZ>I>7v1 z2x#g>=vcIJzQuf<{;Q3CPu|4vkNr-YJv7C!TU;G?b`Cc(ktn{ZF?+au;r!=Hj#B+= zaC7kpH3zI!`5xDi-Cmw&AJ$#gz^0IbSWNATv^j zsI**BK~w$n=T9#AV4PV^i^nnwc?n39Vcl|k!HFA~)uc%5067{m=`)IvZDX|zi#H~n z4)0#j#VmNa8>phPpva(kzKi>kXrOE@>ub$3Hr`pJG&;$V;)pKZFy1V>rH ze~W^(5pzfTqt?USl%mBUQ~TCj@JB8Vd5yy*f=}}PY!{yYR(-3pm^?TuM40ykzitI= zHhn>~3&}h+|BAGVahM}n@tKv=TdfhfOY5W|lRv#ZY4PLIs^emmwk1KS)OT_c65nLH z#lAmRl1ajag_VSZ3e(Glg=k7j7aA7yY*%}r140+jnIHf)fOEj*OR2;vz8SmKAD_uB z`lH~&^Wj?eM~?+%pQmJkTUQ~h1_xKQZ z4HNuZ!WvO^C-*Mic3tz&Hg;Q&`$*#lDkqP@(do$d_I$qC&?jO@HBMqogw7ESga_ty ziru!(G{SOctylSl!3rh96wV}KmA$52nN!x;CSMFC;B;2tx^59?hI-r30?)!6{JKoX zr^~7&l7uUy^NeVE2BUXV|$&!lm8WIzO>CD6&5ONq2u9ZddJHb_i{AXv3@SQAt>OS z{(@4vzpkmXx@;)ZGCgoMAU@3KqO$GPuQ>m@xdvu}EJ}Ew!iS^MDk<-qBrjG_!sk&J zCL`DtWu68&#+`SLK=0~fNvkvy1z1xpCwQKG*|m?`UQJH{JkVP6MG!~fN#;EWN4t7` z7W&i*W9}YxU5xTcnVHdoRBLS(9WU?7W;}?4x7i&n#DIbsIIoo!*D8ZS^k>j11qc0v z0CGM56I(WHWjht7Ij7E7b(mXmc*Y71rz(31AGrYI)E&LFrU4Rd>fU@OJ9VYeUop!# z8G1KUH9H&3B_`dg>D$odN?fIn6ooIHecn5%tL{XnXZrzuY=wp?j(EJ-!FMHJ6%@kd z>B;LZJ*SkVGUK54@a_h4E>l-ho)I8EZ91ul7I1>4}DoL-`Y_HjMFKj*JHnBBORS4e#RU^0$eaUNa6ZL3+ zY@5(4d}TP@|3rVpgc{A&P%}*!={+ z0~KPi=Lzr{@pL_s*cOS&=yU9FUKe@Ed~w$^7i2?#G(Q|kBv^m9ofH%rs=xNv`w>{a zFW3!1OoDy|WDIl5!EcU@U2S?dGd&ID@AXStj_A}>U?KC$`ehIF9VzZJzfH(|mHioS z)gVP}x*6{BWR3`2ABW#G%L#fISY_izFLwiLJdFlB`A^%S)=0IYUd+gC(`EDL2a6w4 zaD|k|7)BqrA9(}bjC?J7&sTP_6W7)|67@5mgCzT>40S)9YlwNbr}dtx!v5X*t=<$3 zD{@7P*}`5U_e_dgdx2$1O|X65%FfZ_TPLm;$e9$E2-Tw6x~i5t;l~nnJf07o2~n1u zVup5IR`_C81W@iuOY(7T2v`YDFOmq%xklj$6UONzH%rdRUDYGUodbD}$D>|jrJ~6# zQ2L~t5B@evH^1qhm?MrvW^R+@R*K}NBJ;bAiHU^>V8uJ_7(t1@;Tuc2(Ll zSA8MUB1J4Xh@XP14NA;ME#TVrL(qf8An$NKtXL)1NgOBW6T?J+-PqUwZ`BW=o(TkX zUL)wh!$IF*htvM)*8-#`jL#s1PI}$Px5)VV?LKcV59gx+vi3D*;c+*pb-v^ETICt$2t|tlvGsXxEB$gkG=r9-795QTuV5)mU zV!MRk_0RR=!YYg+>FR-6&9?2=^S1Z@-lbdP=OXEoM6E8lkK?T<&xo7`w#`rUo_ z4jWV2j+_J2B zy+uNBvf3nmd#2tdvP9i;gLK@H>_-*;+{kPUNkyd5ke6gS(ul{9lFIgLx$`7CZ-Rj( zmbaaed(IS!AS_WLFwWF3P*{OQaP_Dc+qOf!KUmM1zNV$5aKQT+7zrM$y*9T)R*i1s5 zN-s!T>~G=O5NuS!_eC!jdDZw0-xwxmE4e(g8memJax0IAi;FUeh z%afa>t;e7WUBu&IvC2M)hMKzc9mz62Dm$1312`>0PO*K4ji&pCV|@Nb>OXk4EoOMo 
zOu3J$kj%qBM-Vz|Ih`@Z<@D)jKO)rZf63QC%3ouCL$&rHJJuyG29-sNVurbQ0Rx02 z;Oavny_!@NxSBYqagg+6ik9&iN*2Y^m6|tiu#55nbYfIZId~gU9R7Gn7`uB%|IW{p zR`g89%US(QyJQ=`UprCbW_S5XM6BT)_M?Q9zEx(e*WxC=`QD)EmsU;!-6vIg2zFK* z6g^^CS!2e{tcwl_haYf813b&U%+#4|e5rF{VKw3HIy{(IhR_ybj!%ZtUP}J(M@}KP$3MUCiwp*ulNi z75X{*+0!y*r%Xkv7ZeA7MWBiLG=pl_YK z;fD-uLj@H0rAt za;)babW6(aRKY7iUeX+Nbs7Q=fJHALM!h~c@@fHx3-GViP6F%d*j6xh*AHY#fg%U? z>c2sUijk2~l_seehW|bvPT@=Me3wd6JN=vI5t#_}s7sV{RFb^jTpRY_VCT~y z+*u&*6mY)q)KgPC#~kor?*gW9n9+es+N|iZhs1UBFj6rXELEjnv_zj1 z{r>KTR1??Nn!=~yFO7@hUPPk(~-l&iI!%O<-+ETll_G1(+GSsmo38)@VVpJR^29#&Ps`h?954NIYAy6 zkZf98TZOVOcYNu%0+A5(%>PBk6rP@R5u{^M0_!w zk=D{01qbm3R}Ls|=!|4~6m=&*?-j{VT#l%xhm<#wpnWJP#P-FD#qHJ#gc@-2YJuVP z7Xp?~`&cu^fq1v33!*A{EyLKdkSUb5-&+G9Ev%?KmR<-HsmyyUL-E zk9nDK42L_UclQ=6v>ojc1}4rY24jU1RniO zZS_G@_4N3oCqcX#nYU(DruA*KUu8qDb46_Z-rSz8TmG!np8w{t^Zppp-9MA4C6Qs)l3GM`AbW#fcW2^?w`8JB%J=iD~p zLw=V=lQzByXuxCRWKI3Ve6p3r)5I>TDeh)PS$DH1VkRy6UQ1>0uFaBp-nK*0aiw)2 z%7ppmr(3^N#K?A%hAS1VE&6+fithI;`Fvfa>aM zn%3B26Aqgm_JAs#BX~mJRzBIeMbYz^_21`gc82LMi;vAmfG_|@$U>M(4=HyxVI0ur zy7!2M_XN{P?D-?521={tgGRKC*^@x2p;ote`0U|&@o~L4#VYB&|M!dfRDQwFaFFio z$~Ch&tPPq>k*M5V2BDeMZdR8c<-`8orv)q7+F$DHx}^vX&;`fD*)S{1B-=Y!v=F?+ zafdt;@$i(M*>tMq?IdIA3z1eHSCV8%pQ zIB=~eT%xKa_t?_n8J^5R+FwRJ{N~!@iq&3(c@!bKX~k| z{}Ko7gtfho_H5eeWYf>EbiauG2Pwf+$48YNVer8BnpSXDG_~70LMcUv%7^4WA)#Pq z#^{IKW8ko-rX`ID_?^6VLhECaR~1Ml2F)Y7ne!Z&$PX7TEVKU@C*JtZZpvn@M0&b#(ZRnGs64 z&oVD98(5c}(#L0^#q4leY&*)IfC$vXIy|xF6lqE$Vi}HIYVp!XbQk?4tle$%DoCyiWxH9eiRcmXp#~?tQj;qq{NILvL^r9 zHjF2aTla>giPuVRp&(}|=h)Qj{5#Pp&fz0^u9pFgupoBhLm_$i((nT%01RcdXX_g% zC73CV$ES}bi+)qn>CQ4n)VG&y! zDJfviu-soquD*S?lVf{+>Fs$KSsB2pw@*$-t$;7JTK{#Vr+WH0@fX4QEK(e_r^_2b z)VpY1R-3=2|2e_f4V$=uZH8998J>{)1Xp$3m$QC`HHdltn0SK6E4M!a9m^f z3D37Sl(KyT&&`j}6S;s2YpmK_3(~o~S#!6z?LFyajST2G7fx&wR@Lr3Rand~YM#jY z+T6H=f~3V+|HX@3SdZyl9yPa%)qWn*e)Op3p-^inJo*hoUt`lpGCmxf4YTl%Jxv~4 zTNV4r-Cs8(vlV20Xa7t}?2SBXv>v+e&QZU5j?N|<-1OgpAI>?9kthQ4LbEb6@AFuJ z5J5^qW5Rryt0qV1NQLD#SOLrHhPl81>p^Vwxt{9Ms?kQQ&M^~Xek&(d&mASLZ|vg+ zD~XihPL5FjQ~%J6$VrC|pYKnluvTBm{vB6tP@k~XAN6FKtlJ8TQY_GX3Dyq%;VL%a z=9Gv~okTancsxA2mF0!iVLm1dO)GYUQLbAZ_vd&S&$;v^y@>-VT}@TLuGg64cg+>{ zECOe8S*3?7;|49nE;u-D-ii$J9q1n)lL%dQbGJ|m1PaoqF~JHtHW0GEOA93y1!=!b z(*(n>Ro9&&p56!SwrSssi&#N&@Xw#LAmWI~Q%f>x< z3CI3zR|c{A$BD8yAGGvC4WMNO?`5#FMpVP5Rh2W_rX9YEdN@F$S2bjo!vfKgt9o3a z9pyQ7iKzkUM6)63%WrX$m4utvwV@jbMD(~G4(UU8H6NUZ^ zT&AI;Nqy`ulm&N}wX{>%R#m%TV`VivqPPhhZF+|9WWJ04*pA^jp7ErF$4^kKzF8wJ zf`y!ark1{{R4lOZ-qcBW-Yg$Io^ZUI|M*`hJ93o6>>R^W6cIry-@hRe^DFJGs*#xd z=Y?3tj0rMFwk+8X!FiyPa(y?pWx-YIWfw=%0=3Ex2g}=!M?dKl@Ci&(721BKo+b&% zNVdbQhWy3Gv{`*J+&|mGV+mGP|3oeHxU_uEEj5#0DNZRxEOWF_Uv%##~T9>@wqdyLbL+oAUN#)5%O;QbpZwCT+;uy83W%E(JhLB9n>#GMn4l z#l4c{s)MPWGtMQdOm0XP(dSlN8fK~ur`#l?O6`t|iS zdjlq6&oEWhI!`nWzoS5ar8>m;{a<@Q?Kq4AIU%eVHrWKRoLUR=Z_Pn`a9ie!uc!G7 zGVMg6O=bUly)dE?uq60bI%Me3^45)y&xv{d);|ktIx0%fZy!qKOm32u84AiKRLy_U zk3EetXg;X<$T9sU7G;<7{Bh?`C3)#9bVpdpNar6>VPjgSI3RiOCliTp(C~ZHWYPwM zZP1bw!Vbdx2*OWnlQlVb`16FVBXH~bja3NgI}N6+NaLpa`-jBo_D?>Zlt%%|vf7hi zNHMjezd8Mm$eN6`vBI3p&|*KUzByfGekDm*B>J3Fb5)F<@hit(de6d&T53U2wq@-z z@xsnB|a=xNL(cp;Qjr7C*6z=47mF0ovYA0tlfBkD3Sj1|RI)us2B4`;{T>LZqj#0gdvx-z1eu}O*z@7okA6zVlL)xh&thc}ti{jWcEO8! 
zM1!+o1+%UjstXyM;&7B73Frpr=~AhsJz>1Q=Dt>fOq*2xxx8q z&hf!pF%l$77_$Onp};5)BNRpI_+nHaP4c4UL7XnHtg02dLTMdftOiZ~gwq6jTsbL7 z8rp6Bt(is|TdPo0?GLVdKblDyNmFV zccdcyjz4R^XUXe}mPj}dQ9lL6@aTyg?QdSVVBu1Uf%;9fFgz+ZK?{Er5cKS-VxHrnl|)&b3CO9PjrHuNGCcVsg!KiJLh6m)CuLGAJjtv zsKc-M*2Eh5q!*t!1)^dfwaiQb3DV%|7!L(wxQd$fvX_Kj_V zw=XdE>h#`y0a2C#t%Trc+IR{eQiPd9X1%)HVbn!@Qyk#=+Gm1?hk zuKpgcKBBk#pm07$Slw6JSqY_LTv5Ks`P@O}J@zruGW8cpNp|WpvsrDo&v>z{lOjXM z-U$b#O@_`t>yzFoumRc2zi({zWn2I3VoNujayo3*H~GZTERTzeVeg8^+X=+)G6lP2 z?OzJMk~}6TXMFUhzIJF6Ia0Uy82ocb>St=3(6^qF4Fe?I@$>|F{@Dj{!}o+f+EA>+ zQ&-`zGBW_mLNTngO=92qkzh=+<6W+<``iyZ8 zyY*NPzCMkl+&1Ct4k-pWuHhhsNy5Ouz?%b1BZVsq-~9fEv3V?nj*X4|gp~Ko^|cRB zX3lyGQao9v5T}|bnHu=!I**bPz-Aojikx)M_CM#~gP-91h=^sM>S##{1d9jQZKG8or&b$6q0-K5QEq|Rl39A+tCWt&+V z@vDU;mByGqu*@*F$!qHMKl4X$9+aZ{iTON@3>DB*ft)3zt|X=S9KqklJWTCt>H%mQ zf<0=76yerf2=)5J+`CO1zS$#h2j699o&Y6+ka%APWx=We;_pS%nktYIsyX;Bd9>$F za+!$lu^H6l71k-zTx+PL(e>`V;@-B`Jm5@C^PC1)G)m)oIGhHG?{40&{@@+H(3=+~ z@x1v)Hr%5*u_~uFPY5)U;Er1xR(FJh)ZebX`3BEjKHHlo_w=XU<(H@K5@8bm#_HO< zd8@_8^9)?DUCJ`I7jPr%UG77%c?d!uo@J^V$Tq1e)hhe1pq5HQc&Wp7HPYr#oh!fA1^OSwRipHFnb+VhDeZ|X z$&bq466XS3!+m71Z*v{An%bW}3ix;a za*ucn)UfVKGsxpg)hozx#>R{gwV-{$eUzD3Kk(g#u<-M!u<0{O4N+{oRRk#$%?YmWC07i4BU*H%m3DgnT6$nZanlpE}_%_XQ`UB2PiuPea<;yFq1MX z8OXhB99)IOpq~~~7W(Jq0oPcscw@hFAD?P1bLahJyw?!THAe4=PL5#rw~I1GcOiuj7Br$_q9^By zzJ8v{G^($E7J)`QUByDvP_5Lj2&GL=6-ftE!-WM>47R;ZLiC~!$M@>#(k_h92oY2H z;Cd~EN`Hn>W-U)*@NFfn{PjgL_8eyc9?RK!P60E47c$cx5ow9C$mRC3)Td!^)Au{= z0T*#n!p!V$E&DRH3pQRIqjmjr0Qvod!6I5)-{ePsteC#f%LN&#Am$2MeG1@Qld73Y zE-#=GU*9IeN`CdLIwM9>Rwwd}gBXA-hK;R0&L6Wp1KZ>b4^%EzQMzw3(dE zey;x6hB@-#SV*3w5uO3bxpH;FUS?%hsz-|!GziLzQ^fTQ?R-OyUWSiecCcY!L%oh= zk9qR;ThF!gkfxTp<)JW1xF5A6aTA(;*Jn2A%PgD>r}}2n*W3E+FeZdR%8TQR^DADs zt*~np9-d8pCM)YJCHK%A`@?`k=`jWwIVp`{N1h-dkFju}ksb;6))OMr7ho^YB zl)c-!0@N9YRzN{Yc)mRq{-QH8o;I9oewAcYw7JvNj`DkZzzV+DROKIIbni`<>)jc7 z+WKF%3;E8^eT8Jx?gDjNMdLypu3a!91yb2Eh*tLduC+(8ajfP;C*p(WiS45RFMYN% z9Rs5?ItFl7K8UsM1?k9#M@Nz%_a|53#UTaL{UpEz^b>mE{gV*HN=PoZItAK(ti4WeuQlRR zNg2=^`rGS1p6b{}fz!7iYZun$T0Zi7h_DJRT7{pR+2iK-kDtUwtq*>`mA({4STsmM zY4ebZCSF4cYnC+(APH`clxXF=DaY2LD;VSBdg|jj=YQSLKu^kRr+;768g9IY&A0_c zHo#)Us#p6J6nxn=>{jC*{&WAaiO{uJ<*_M^x zKB7UTg@)+{!5In~wDUk0DVU^WE0kAptU-bIV~)n*1cm4KZc^b_Ph2Fzq(pokp;Lsh z-%{(5&Bm5KF4ymEou8(O{$MYjU%lR2Ri`G0obbLCI{HgSerKqq-6lR!D(2i=v2eC^ z>2+J~g^{7RU-x_=w|(X&%=4tFv`mZ8OujvvvGMlLTZ4L6?xp6HkWScdw|(kIifbLR zq1@S;b23b(7Le!sJ7Vo^tMo!7LBz8y!Uhiywf>SnCSuL3tb7it>42)qn?v?qY)0|q z@ICW}!xtxOR0;UXkG4splS78pC8AG7IOrQqbqvOgc&C)BBkpD{0w>TN)m}x^SH5T) zggaZJm~xR@fq&t#qg599wkRawjCUxPVY0$A+<5CB)BnI=HbQ<;r2%ZC&tDEIQMh*t$u5%2$Jpj*U$7{Qwbd32F;`-$|}Wk zBU%ja4-+UY@ID-V%VJISMoW|A&?EM8{78t`RGTCvlmE>eTSkTJ;nm?xsePv9i`^kD zqt!M{s(bZ}$oW#xZqxICrN*bsd#G)l%kB2RKo@DaXr*#+&Epv~2t-h?DK-}_ZnSxb z_gXcruXAEVM8(=<^YGLxeM;I>YFs95Tkins3QQJe{K|Ktg{8MRWO;WnvT^XNIK39h zC^5flyH+g}7fYuBS7=ml$LKG$IWMOs7a;?3VVtHSDF%5txo&8%ME+`S*=TB)_VZ5D z65pVe>BttFy9XA2Crm8Icq&>d5));Tlm$50Ei%XbeP zZ?E<+K{a{4DZo|>Yns>1v~yS8KLD@lT5=e;c;(f)u{%NE80Jwl-Y>C~z1Jcb#maf5eGI1;3=(U$e{2 zghl-uX0$*mnHqUm#;$g`ZFVUO46d)v>64c*=6X92^LU@;JAbY~xZ6y6+&25g_j!

AmD+}OEX?- zxxEBcb+jkJ{$-zT)zNW6S(;$I0tRCbp22ixxC9g!3Nqe+xy!;ArRpP-s`iJ56%FW1 z0;Ef##~g;Vr=*&c3s`V$etga$X#4B!S}HGpRVzr?YGY+(rMH$FJ^Lo<`}A}*8Pn!V z%K?_6d`G$}3AGD$Pfl$|78%>^*e}!klk93!XE+kl24S11{^CsLW)|l|kba$aK1l@R z(zS>B*j26k@X6tXr>>N~nz*Lg+=gpqNQw)!>bJUcaHmAtfV2 zH-F2ODv78-Wmi-MFm+*rUb#a^cVvkxlX@M|$_t?n8|g{>%FPB|&1?3j8rTPyyMqrz z!$k)&hf^gyarJz81~}FgW$&e$5MyjL$85F)Xpo7ctYc<&thX-FZSW~mjK!ZW^QNS7 z-iG-uziB`eF`)VYnA)(H4_C3FnYG#?;s{tK2tv&Pcx-0-B*=*5kf}V^0v9Hg-HIv{ z`}jQ>r0(3?nQ{~b@Dr4o15|1%tF%ug5^%H(srveq$Gi%3hl!!?K-yg_=xdQsKPG0z z-?sAfAazV;pqSWB7b#<|LZoXo|XTST`#-B SUaCPhfxXRv-K-CffBrA??q&Y~ literal 0 HcmV?d00001 diff --git a/docs/auto_examples/1eof/images/thumb/sphx_glr_plot_gwpca_thumb.png b/docs/auto_examples/1eof/images/thumb/sphx_glr_plot_gwpca_thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..b7a9a0fda32ee338ab65c4a01b494eb3335c87b6 GIT binary patch literal 50794 zcmc#*Wm8pOxCaEJ8|g0T2I+1Dq?8aOr5mJMy1P52rCYj7q`ONRghSrNf9@B!ukM*~ z2ARz{d#`w&U#%0Wq9lWgM2G|h1%)anE2#(U z&HU!7v$GIvdu*=r(|E+gRv^lV&LxB7ryq47s!7s&`ZCcGrZSwjtr6aZ-+$g9aE3Ge z9MW?kJ{2<2iL0o%e9R?Bpr$}XMBHAUD`bqDnDSV4B+lMzo46Ii62o}c^=?DylceM} zo`gd=-I)TV&@46vCU`-dDGC4kA+|@Hi=6W175v>yRPyD8hCJmP^70~nlL!5BF(x0A zePaT@)+dXc?ysFB| z!jqET{3t6szkdvNQcNC^($JXE6CuLK&#A0rxq^u9X{xF9{QC7P+Iw|n<-MBPG{xA? z{q3W7x#p39k3JZ$^;oha#Ee0~hqE5V6gI!hae?UJVfhB@C21`!0<_=&@E-?9M{^5{ z?gJ;)PoF|2_KcPqtRu)ovUYb(!3Pfw4z4d+R_**KtK7^BXUWUW^=mz834ear6B$kC zjjpTX3JVWU<+UZXs81^|XPT}2tl;e}Y&ln12(Dad{pR>@;j0>W-!W4vN=pCjZDY+! z-OxV6+^Q;c-=?N_42+D>gWw}pzOC&XmR0Q@Rdq_2s^@8zxow8hHQgPS(Z7AW9;d*v z0Y2NXKZ2lKt2T&2A_#+oWNfo9-0can=k{x^hxgRoy>5Ee10~qmYy{W3kZU%&6~q z4hLm9Ti&%{2i~#itQ#2-7N)nq|Ac$D{bp4`QWDm-141higp%9Pkd$uQk&4Tx2`*wK zM}XPO_7PSs#Yt5Rnz7h#ile!z zuOms!bqx)1@$nMs>NsGj%+6|>j%S4GvJ6=9Ja>1>Va8&kPknbri5=N#yIJ*U1GDuq ztYB7kS@5tz1I?$4zGvlF^AHh61_cJf$}1=ka2O-(?CjJvHcl@uE1H_pT3cH~$x>rc zVMJ1=C1fSE{`JDXZ92SycCkrd~Zr1U(kk3Y`sj%=n z*td1s%u`cS;KRTVNZ(;1r2|w;w`X;13yX?^WXQY{5)yF04g+)4Wz#r4Hz%W{gy!Yt z<>c)AK}E%P>A0x9J+rr0@?}O86;asP+0`{PzH2goF?)-H1ghBC{bQ3Zi#h{t&(nd92}fLT|qDoorFpyBHHJ%=lGP#BMa3A zufRr!Lw*yJk}{aYtRt(aNG%{Bfb#|KrT&v*2J8=DWGV$q=Nn8Hy}!D?-nhA+I&cDu zP+MC&>iX)xz7f&{!zb_b?c4O!l$fUG8>H|4i;LO?s`TKM>jnS%HMsNby*+FalJ{Vg zw6wIql*Oi{x&NhQ1-IjX5yBpm-z#w1_Oef3_BU;OXtU!U4BOXnV(=!dF2{NG^@*VA zsPI>SVPr_8jFlBPuc4$IKnA-8jCgM-_J_APBF)c1)=QA0BDBz}Uk1<=7+;)RXeAsR zqyMN!(-@*iZtrbfLfG(8=B;>ekl?_~fU+-0iq4Q278QjC;|B!?2L}t|XC(9)Gd5e2 zGPc@yC@x@p_TK7J^CVs}XBzYebO|iPS3E?~HVaQW?BsMfdC+|F+uPfdi;H1O#Z(vw z7@%%QQg(e|BTtkk;CFg^eGQgDkdcrEW^4ehpx{H!jmF!={j2_LF7iR+L7|oVRTc+b zD#Dn2NjW)08XB7139z<3tZyKmo5K>@9|tH}HQ+W{=luR*LNq8%4MP79t@*#Ji9a^2 z*uaX3(#C%;cxczN-G5wPC+A2-NtFLij_{5c4U`eE%@XCE%Zq))U%Gj0K<|!2u4Y+j zRaJ3kXExXmJT}X+US2|Y`kwYB{H?XM@t~+Ny&2KVq?Gy>t$vruxeYo|-+>brXfWW- zzkmO3wbFu%(4eYU{RO%>+ne9b%?*rm63ge|la*G!%TZ3B1Qu2YCrkh$2E{J=iEJ_O z@PcP73hV1}D>TEwCI%HBRK)%$lJNGm@k8!1a|;_vPEPE0CFOr-Ru*@b_?Vn9g^KE* zo+vm{BS>@_YPo4*DWs%8KLTBlo`Jz|Z#?twYJ2*hKk9~thM#H-;jrnHj6sjy-rWW3 z9aJPZZ(U0ohbv4Y0DoSKV!*l_FZRXJD5P*(zIpN`OH`GTmWMyCICi>mQq$Cwwy|MZ z^SnfDZFPx{&F%ty;@2N~wGTt;g0&k2Y7z6NBV*tmwYCx2`PB z+gP*ZS`uLE^#r3Ag94^erpfNKuL4TFL$lx?RvF4z&%)SA;5j%s7uwu7S(1?p73FF>2|S82#T6*npYSPBFwJJP75QjcK0DB38`9bNJ;LbP z;Av+j5hM|y+`_ZDN4A~c5gD6&J)V(NgaxH#Sm-=_T1fykRk43GmGj)a9qd@US*@=x z``Dr$Z0OwSgHLU)?0QwnHAVv*2Hk;19x!xoH<17sdVT9DygQMtY+^z)#ysZK@GVcn z0udd3Kxkh&zHH;v@>wal23aE`OjOtRW^|5ylI>D^ z<=o5tv)2zVkYP%OY{Q{B>bjJa*HfQTiZ&j)%bO2A06J9CU%n8jf&&Hq6{sE;7Y;8A zKsq1MOJ+M+O4lzWO9S`PG^SFc_L9`wX$7o_}F1f`WcemEtTBVcc5B0hI~-4@p; ze`vN>fl6Q847W+%1GlF(=yGD%=QrNIk<0{|%gu?Y_AnPmEvKBKqVZ$UHRxv@4;Kmm 
[GIT binary patch data for the generated sphinx-gallery example images (PNG thumbnails and plots) omitted]

+.. raw:: html
+

+
+
+    .. raw:: html
+
@@ -128,4 +145,5 @@
    /auto_examples/1eof/plot_mreof
    /auto_examples/1eof/plot_rotated_eof
    /auto_examples/1eof/plot_weighted-eof
+   /auto_examples/1eof/plot_gwpca
diff --git a/docs/auto_examples/1eof/plot_gwpca.ipynb b/docs/auto_examples/1eof/plot_gwpca.ipynb
new file mode 100644
index 0000000..de4c22a
--- /dev/null
+++ b/docs/auto_examples/1eof/plot_gwpca.ipynb
@@ -0,0 +1,169 @@
{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "\n# Geographically weighted PCA\nGeographically Weighted Principal Component Analysis (GWPCA) is a spatial analysis method that identifies and visualizes local spatial patterns and relationships in multivariate datasets across various geographic areas. It operates by applying PCA within a moving window over a geographical region, which enables the extraction of local principal components that can differ across locations.\n\nIn this demonstration, we'll apply GWPCA to a dataset detailing the chemical compositions of soils from countries around the Baltic Sea [1]_. This example is inspired by a tutorial originally crafted and published by Chris Brunsdon [2]_.\nThe dataset comprises 10 variables (chemical elements) and spans 768 samples.\nHere, each sample refers to a pair of latitude and longitude coordinates, representing specific sampling stations.\n\n.. [1] Reimann, C. et al. Baltic soil survey: total concentrations of major and selected trace elements in arable soils from 10 countries around the Baltic Sea. Science of The Total Environment 257, 155\u2013170 (2000).\n.. [2] https://rpubs.com/chrisbrunsdon/99675\n\n\n\n

<div class=\"alert alert-info\"><h4>Note</h4><p>The dataset we're using is found in the R package\n    [mvoutlier](https://cran.r-project.org/web/packages/mvoutlier/mvoutlier.pdf).\n    To access it, we'll employ the Python package\n    [rpy2](https://rpy2.github.io/doc/latest/html/index.html) which facilitates\n    interaction with R packages from within Python.</p></div>\n\n<div class=\"alert alert-info\"><h4>Note</h4><p>Presently, there's no support for ``xarray.Dataset`` lacking an explicit feature dimension.\n    As a workaround, ``xarray.DataArray.to_array`` can be used to convert the ``Dataset`` to a ``DataArray``.</p></div>\n\n<div class=\"alert alert-danger\"><h4>Warning</h4><p>Bear in mind that GWPCA requires significant computational power.\n    The ``xeofs`` implementation is optimized for CPU efficiency and is best suited\n    for small to medium-sized data sets. For more extensive datasets where parallel processing becomes essential,\n    it's advisable to turn to the R package [GWmodel](https://cran.r-project.org/web/packages/GWmodel/GWmodel.pdf).\n    This package harnesses CUDA to enable GPU-accelerated GWPCA for optimized performance.</p></div>
\n\n\nLet's import the necessary packages.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# For the analysis\nimport numpy as np\nimport xarray as xr\nimport xeofs as xe\n\n# For visualization\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# For accessing R packages\nimport rpy2.robjects as ro\nfrom rpy2.robjects.packages import importr\nfrom rpy2.robjects import pandas2ri" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we'll install the R package [mvoutlier](https://cran.r-project.org/web/packages/mvoutlier/mvoutlier.pdf)\nusing the [rpy2](https://rpy2.github.io/doc/latest/html/index.html) package.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "xr.set_options(display_expand_data=False)\nutils = importr(\"utils\")\nutils.chooseCRANmirror(ind=1)\nutils.install_packages(\"mvoutlier\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's load the dataset and convert it into a ``pandas.DataFrame``.\nAlongside, we'll also load the background data that outlines the borders of countries\nin the Baltic Sea region. This will help us visually represent the GWPCA results.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "ro.r(\n \"\"\"\n require(\"mvoutlier\")\n data(bsstop)\n Data <- bsstop[,1:14]\n background <- bss.background\n \"\"\"\n)\nwith (ro.default_converter + pandas2ri.converter).context():\n data_df = ro.conversion.get_conversion().rpy2py(ro.r[\"Data\"])\n background_df = ro.conversion.get_conversion().rpy2py(ro.r[\"background\"])\ndata_df.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Since ``xeofs`` uses ``xarray``, we convert the data into an ``xarray.DataArray``.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "data_df = data_df.rename(columns={\"ID\": \"station\"}).set_index(\"station\")\ndata = data_df.to_xarray()\ndata = data.rename({\"XCOO\": \"x\", \"YCOO\": \"y\"})\ndata = data.set_index(station=(\"x\", \"y\"))\ndata = data.drop_vars(\"CNo\")\nda = data.to_array(dim=\"element\")\nda" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's dive into the GWPCA. First, initialize a ``GWPCA`` instance and fit it to the data.\nThe ``station`` dimension serves as our sample dimension, along which the local PCAs will be applied.\nSince these PCAs need to gauge distances to adjacent stations, we must specify\na distance metric. Our station data includes coordinates in meters, so we'll\nchoose the ``euclidean`` metric. If you have coordinates in degrees (like\nlatitude and longitude), choose the ``haversine`` metric instead.\nWe're also using a ``bisquare`` kernel with a bandwidth of 1000 km. Note that the\nbandwidth unit always follows input data (which is in meters here),\nexcept when using the ``haversine`` metric, which always gives distances in\nkilometers. 
Lastly, we'll standardize the input to ensure consistent scales\nfor the chemical elements.\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "gwpca = xe.models.GWPCA(\n    n_modes=5,\n    standardize=True,\n    metric=\"euclidean\",\n    kernel=\"bisquare\",\n    bandwidth=1000000.0,\n)\ngwpca.fit(da, \"station\")\ngwpca.components()"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "The ``components`` method returns the local principal components for each station. Note that the\ndimensionality of the returned array is ``[station, element, mode]``, so in practice we haven't really\nreduced the dimensionality of the data set. However, we can\nextract the largest locally weighted components for each station, which tells us which chemical elements\ndominate the local PCAs.\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "llwc = gwpca.largest_locally_weighted_components()\nllwc"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "Let's visualize the spatial patterns of the chemical elements.\nAs the stations are positioned on an irregular grid, we'll transform the\n``llwc`` ``DataArray`` into a ``pandas.DataFrame``. After that, we can easily visualize\nit using the ``scatter`` method.\nFor demonstration, we'll concentrate on the first mode:\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "llwc1_df = llwc.sel(mode=1).to_dataframe()\n\nelements = da.element.values\nn_elements = len(elements)\ncolors = np.arange(n_elements)\ncol_dict = {el: col for el, col in zip(elements, colors)}\n\nllwc1_df[\"colors\"] = llwc1_df[\"largest_locally_weighted_components\"].map(col_dict)\ncmap = sns.color_palette(\"tab10\", n_colors=n_elements, as_cmap=True)\n\n\nfig = plt.figure(figsize=(10, 10))\nax = fig.add_subplot(111)\nbackground_df.plot.scatter(ax=ax, x=\"V1\", y=\"V2\", color=\".3\", marker=\".\", s=1)\ns = ax.scatter(\n    x=llwc1_df[\"x\"],\n    y=llwc1_df[\"y\"],\n    c=llwc1_df[\"colors\"],\n    ec=\"w\",\n    s=40,\n    cmap=cmap,\n    vmin=-0.5,\n    vmax=n_elements - 0.5,\n)\ncbar = fig.colorbar(mappable=s, ax=ax, label=\"Largest locally weighted component\")\ncbar.set_ticks(colors)\ncbar.set_ticklabels(elements)\nax.set_title(\"Largest locally weighted element\", loc=\"left\", weight=800)\nplt.show()"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "In the final step, let's examine the explained variance. Like standard PCA,\nthis gives us insight into the variance explained by each mode. But with a\nlocal PCA for every station, the explained variance varies spatially. Notably,\nthe first mode's explained variance differs across countries, ranging from\nroughly 40% to 70%.\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "exp_var_ratio = gwpca.explained_variance_ratio()\nevr1_df = exp_var_ratio.sel(mode=1).to_dataframe()\n\nfig = plt.figure(figsize=(10, 10))\nax = fig.add_subplot(111)\nbackground_df.plot.scatter(ax=ax, x=\"V1\", y=\"V2\", color=\".3\", marker=\".\", s=1)\nevr1_df.plot.scatter(\n    ax=ax, x=\"x\", y=\"y\", c=\"explained_variance_ratio\", vmin=0.4, vmax=0.7\n)\nax.set_title(\"Fraction of locally explained variance\", loc=\"left\", weight=800)\nplt.show()"
      ]
    }
  ],
  "metadata": {
    "kernelspec": {
      "display_name": "Python 3",
      "language": "python",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.11.4"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
\ No newline at end of file
diff --git a/docs/auto_examples/1eof/plot_gwpca.py b/docs/auto_examples/1eof/plot_gwpca.py
new file mode 100644
index 0000000..a084283
--- /dev/null
+++ b/docs/auto_examples/1eof/plot_gwpca.py
@@ -0,0 +1,174 @@
+"""
+Geographically weighted PCA
+===========================
+Geographically Weighted Principal Component Analysis (GWPCA) is a spatial analysis method that identifies and visualizes local spatial patterns and relationships in multivariate datasets across various geographic areas. It operates by applying PCA within a moving window over a geographical region, which enables the extraction of local principal components that can differ across locations.
+
+In this demonstration, we'll apply GWPCA to a dataset detailing the chemical compositions of soils from countries around the Baltic Sea [1]_. This example is inspired by a tutorial originally crafted and published by Chris Brunsdon [2]_.
+The dataset comprises 10 variables (chemical elements) and spans 768 samples.
+Here, each sample refers to a pair of latitude and longitude coordinates, representing specific sampling stations.
+
+.. [1] Reimann, C. et al. Baltic soil survey: total concentrations of major and selected trace elements in arable soils from 10 countries around the Baltic Sea. Science of The Total Environment 257, 155–170 (2000).
+.. [2] https://rpubs.com/chrisbrunsdon/99675
+
+
+
+.. note:: The dataset we're using is found in the R package
+    `mvoutlier <https://cran.r-project.org/web/packages/mvoutlier/mvoutlier.pdf>`_.
+    To access it, we'll employ the Python package
+    `rpy2 <https://rpy2.github.io/doc/latest/html/index.html>`_ which facilitates
+    interaction with R packages from within Python.
+
+.. note:: Presently, there's no support for ``xarray.Dataset`` lacking an explicit feature dimension.
+    As a workaround, ``xarray.DataArray.to_array`` can be used to convert the ``Dataset`` to a ``DataArray``.
+
+.. warning:: Bear in mind that GWPCA requires significant computational power.
+    The ``xeofs`` implementation is optimized for CPU efficiency and is best suited
+    for small to medium-sized data sets. For more extensive datasets where parallel processing becomes essential,
+    it's advisable to turn to the R package `GWmodel <https://cran.r-project.org/web/packages/GWmodel/GWmodel.pdf>`_.
+    This package harnesses CUDA to enable GPU-accelerated GWPCA for optimized performance.
+
+
+Let's import the necessary packages.
+"""
+# For the analysis
+import numpy as np
+import xarray as xr
+import xeofs as xe
+
+# For visualization
+import matplotlib.pyplot as plt
+import seaborn as sns
+
+# For accessing R packages
+import rpy2.robjects as ro
+from rpy2.robjects.packages import importr
+from rpy2.robjects import pandas2ri
+
+# %%
+# Next, we'll install the R package `mvoutlier <https://cran.r-project.org/web/packages/mvoutlier/mvoutlier.pdf>`_
+# using the `rpy2 <https://rpy2.github.io/doc/latest/html/index.html>`_ package.
+
+xr.set_options(display_expand_data=False)
+utils = importr("utils")
+utils.chooseCRANmirror(ind=1)
+utils.install_packages("mvoutlier")
+
+# %%
+# Let's load the dataset and convert it into a ``pandas.DataFrame``.
+# Alongside, we'll also load the background data that outlines the borders of countries
+# in the Baltic Sea region. This will help us visually represent the GWPCA results.
+
+ro.r(
+    """
+    require("mvoutlier")
+    data(bsstop)
+    Data <- bsstop[,1:14]
+    background <- bss.background
+    """
+)
+with (ro.default_converter + pandas2ri.converter).context():
+    data_df = ro.conversion.get_conversion().rpy2py(ro.r["Data"])
+    background_df = ro.conversion.get_conversion().rpy2py(ro.r["background"])
+data_df.head()
+
+# %%
+# Since ``xeofs`` uses ``xarray``, we convert the data into an ``xarray.DataArray``.
+
+data_df = data_df.rename(columns={"ID": "station"}).set_index("station")
+data = data_df.to_xarray()
+data = data.rename({"XCOO": "x", "YCOO": "y"})
+data = data.set_index(station=("x", "y"))
+data = data.drop_vars("CNo")
+da = data.to_array(dim="element")
+da
+
+# %%
+# Let's dive into the GWPCA. First, initialize a ``GWPCA`` instance and fit it to the data.
+# The ``station`` dimension serves as our sample dimension, along which the local PCAs will be applied.
+# Since these PCAs need to gauge distances to adjacent stations, we must specify
+# a distance metric. Our station data includes coordinates in meters, so we'll
+# choose the ``euclidean`` metric. If you have coordinates in degrees (like
+# latitude and longitude), choose the ``haversine`` metric instead.
+# We're also using a ``bisquare`` kernel with a bandwidth of 1000 km. Note that the
+# bandwidth unit always follows input data (which is in meters here),
+# except when using the ``haversine`` metric, which always gives distances in
+# kilometers. Lastly, we'll standardize the input to ensure consistent scales
+# for the chemical elements.
+
+gwpca = xe.models.GWPCA(
+    n_modes=5,
+    standardize=True,
+    metric="euclidean",
+    kernel="bisquare",
+    bandwidth=1000000.0,
+)
+gwpca.fit(da, "station")
+gwpca.components()
+
+
+# %%
+# The ``components`` method returns the local principal components for each station. Note that the
+# dimensionality of the returned array is ``[station, element, mode]``, so in practice we haven't really
+# reduced the dimensionality of the data set. However, we can
+# extract the largest locally weighted components for each station, which tells us which chemical elements
+# dominate the local PCAs.
+
+llwc = gwpca.largest_locally_weighted_components()
+llwc
+
+# %%
+# Let's visualize the spatial patterns of the chemical elements.
+# As the stations are positioned on an irregular grid, we'll transform the
+# ``llwc`` ``DataArray`` into a ``pandas.DataFrame``. After that, we can easily visualize
+# it using the ``scatter`` method.
+# For demonstration, we'll concentrate on the first mode:
+
+llwc1_df = llwc.sel(mode=1).to_dataframe()
+
+elements = da.element.values
+n_elements = len(elements)
+colors = np.arange(n_elements)
+col_dict = {el: col for el, col in zip(elements, colors)}
+
+llwc1_df["colors"] = llwc1_df["largest_locally_weighted_components"].map(col_dict)
+cmap = sns.color_palette("tab10", n_colors=n_elements, as_cmap=True)
+
+
+fig = plt.figure(figsize=(10, 10))
+ax = fig.add_subplot(111)
+background_df.plot.scatter(ax=ax, x="V1", y="V2", color=".3", marker=".", s=1)
+s = ax.scatter(
+    x=llwc1_df["x"],
+    y=llwc1_df["y"],
+    c=llwc1_df["colors"],
+    ec="w",
+    s=40,
+    cmap=cmap,
+    vmin=-0.5,
+    vmax=n_elements - 0.5,
+)
+cbar = fig.colorbar(mappable=s, ax=ax, label="Largest locally weighted component")
+cbar.set_ticks(colors)
+cbar.set_ticklabels(elements)
+ax.set_title("Largest locally weighted element", loc="left", weight=800)
+plt.show()
+
+# %%
+# In the final step, let's examine the explained variance. Like standard PCA,
+# this gives us insight into the variance explained by each mode. But with a
+# local PCA for every station, the explained variance varies spatially. Notably,
+# the first mode's explained variance differs across countries, ranging from
+# roughly 40% to 70%.
+
+
+exp_var_ratio = gwpca.explained_variance_ratio()
+evr1_df = exp_var_ratio.sel(mode=1).to_dataframe()
+
+fig = plt.figure(figsize=(10, 10))
+ax = fig.add_subplot(111)
+background_df.plot.scatter(ax=ax, x="V1", y="V2", color=".3", marker=".", s=1)
+evr1_df.plot.scatter(
+    ax=ax, x="x", y="y", c="explained_variance_ratio", vmin=0.4, vmax=0.7
+)
+ax.set_title("Fraction of locally explained variance", loc="left", weight=800)
+plt.show()
diff --git a/docs/auto_examples/1eof/plot_gwpca.py.md5 b/docs/auto_examples/1eof/plot_gwpca.py.md5
new file mode 100644
index 0000000..6e37a98
--- /dev/null
+++ b/docs/auto_examples/1eof/plot_gwpca.py.md5
@@ -0,0 +1 @@
+958e12c0ca3bfc03fe27e2e22362e165
\ No newline at end of file
diff --git a/docs/auto_examples/1eof/plot_gwpca.rst b/docs/auto_examples/1eof/plot_gwpca.rst
new file mode 100644
index 0000000..3f6b009
--- /dev/null
+++ b/docs/auto_examples/1eof/plot_gwpca.rst
@@ -0,0 +1,1812 @@
+
+.. DO NOT EDIT.
+.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY.
+.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE:
+.. "auto_examples/1eof/plot_gwpca.py"
+.. LINE NUMBERS ARE GIVEN BELOW.
+
+.. only:: html
+
+    .. note::
+        :class: sphx-glr-download-link-note
+
+        :ref:`Go to the end <sphx_glr_download_auto_examples_1eof_plot_gwpca.py>`
+        to download the full example code
+
+.. rst-class:: sphx-glr-example-title
+
+.. _sphx_glr_auto_examples_1eof_plot_gwpca.py:
+
+
+Geographically weighted PCA
+===========================
+Geographically Weighted Principal Component Analysis (GWPCA) is a spatial analysis method that identifies and visualizes local spatial patterns and relationships in multivariate datasets across various geographic areas. It operates by applying PCA within a moving window over a geographical region, which enables the extraction of local principal components that can differ across locations.
+
+In this demonstration, we'll apply GWPCA to a dataset detailing the chemical compositions of soils from countries around the Baltic Sea [1]_. This example is inspired by a tutorial originally crafted and published by Chris Brunsdon [2]_.
+The dataset comprises 10 variables (chemical elements) and spans 768 samples.
+Here, each sample refers to a pair of latitude and longitude coordinates, representing specific sampling stations.
+
+.. [1] Reimann, C. et al. Baltic soil survey: total concentrations of major and selected trace elements in arable soils from 10 countries around the Baltic Sea. Science of The Total Environment 257, 155–170 (2000).
+.. [2] https://rpubs.com/chrisbrunsdon/99675
+
+
+
+.. note:: The dataset we're using is found in the R package
+    `mvoutlier <https://cran.r-project.org/web/packages/mvoutlier/mvoutlier.pdf>`_.
+    To access it, we'll employ the Python package
+    `rpy2 <https://rpy2.github.io/doc/latest/html/index.html>`_ which facilitates
+    interaction with R packages from within Python.
+
+.. note:: Presently, there's no support for ``xarray.Dataset`` lacking an explicit feature dimension.
+    As a workaround, ``xarray.DataArray.to_array`` can be used to convert the ``Dataset`` to a ``DataArray``.
+
+.. warning:: Bear in mind that GWPCA requires significant computational power.
+    The ``xeofs`` implementation is optimized for CPU efficiency and is best suited
+    for small to medium-sized data sets. For more extensive datasets where parallel processing becomes essential,
+    it's advisable to turn to the R package `GWmodel <https://cran.r-project.org/web/packages/GWmodel/GWmodel.pdf>`_.
+    This package harnesses CUDA to enable GPU-accelerated GWPCA for optimized performance.
+
+
+Let's import the necessary packages.
+
+.. GENERATED FROM PYTHON SOURCE LINES 33-47
+
+.. code-block:: default
+
+    # For the analysis
+    import numpy as np
+    import xarray as xr
+    import xeofs as xe
+
+    # For visualization
+    import matplotlib.pyplot as plt
+    import seaborn as sns
+
+    # For accessing R packages
+    import rpy2.robjects as ro
+    from rpy2.robjects.packages import importr
+    from rpy2.robjects import pandas2ri
+
+
+
+
+
+
+
+
+.. GENERATED FROM PYTHON SOURCE LINES 48-50
+
+Next, we'll install the R package `mvoutlier <https://cran.r-project.org/web/packages/mvoutlier/mvoutlier.pdf>`_
+using the `rpy2 <https://rpy2.github.io/doc/latest/html/index.html>`_ package.
+
+.. GENERATED FROM PYTHON SOURCE LINES 50-56
+
+.. code-block:: default
+
+
+    xr.set_options(display_expand_data=False)
+    utils = importr("utils")
+    utils.chooseCRANmirror(ind=1)
+    utils.install_packages("mvoutlier")
+
+
+
+
+
+.. rst-class:: sphx-glr-script-out
+
+ .. code-block:: none
+
+    R[write to console]: trying URL 'https://cloud.r-project.org/src/contrib/mvoutlier_2.1.1.tar.gz'
+
+    R[write to console]: Content type 'application/x-gzip'
+    R[write to console]: length 476636 bytes (465 KB)
+
+    R[write to console]: ==================================================
+    R[write to console]:
+
+    R[write to console]: downloaded 465 KB
+
+
+    R[write to console]:
+
+    R[write to console]:
+    R[write to console]: The downloaded source packages are in
+    	‘/tmp/RtmpZTx0wl/downloaded_packages’
+    R[write to console]:
+    R[write to console]:
+
+    R[write to console]: Updating HTML index of packages in '.Library'
+
+    R[write to console]: Making 'packages.html' ...
+    R[write to console]: done
+
+
+    [0]
+
+
+
+.. GENERATED FROM PYTHON SOURCE LINES 57-60
+
+Let's load the dataset and convert it into a ``pandas.DataFrame``.
+Alongside, we'll also load the background data that outlines the borders of countries
+in the Baltic Sea region. This will help us visually represent the GWPCA results.
+
+.. GENERATED FROM PYTHON SOURCE LINES 60-74
+
+.. code-block:: default
+
+
+    ro.r(
+        """
+        require("mvoutlier")
+        data(bsstop)
+        Data <- bsstop[,1:14]
+        background <- bss.background
+        """
+    )
+    with (ro.default_converter + pandas2ri.converter).context():
+        data_df = ro.conversion.get_conversion().rpy2py(ro.r["Data"])
+        background_df = ro.conversion.get_conversion().rpy2py(ro.r["background"])
+    data_df.head()
+
+
+
+
+.. rst-class:: sphx-glr-script-out
+
+ .. code-block:: none
+
+    R[write to console]: Loading required package: mvoutlier
+
+    R[write to console]: Loading required package: sgeostat
+
+
+
+.. code-block:: none
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+           ID    CNo       XCOO       YCOO  SiO2_T  TiO2_T  Al2O3_T  Fe2O3_T  MnO_T  MgO_T  CaO_T  Na2O_T  K2O_T  P2O5_T
+    1  5001.0   60.0  -619656.5  6805304.1   43.61   1.290    13.07    12.25  0.167   3.22   2.48    1.14   2.01   0.481
+    2  5002.0  120.0   214714.1  7745546.6   58.73   0.913    14.78     6.48  0.105   2.47   3.08    2.19   1.78   0.298
+    3  5003.0   33.0  -368415.5  7065039.2   58.14   0.902    11.89     5.70  0.126   2.44   3.17    2.13   1.16   0.408
+    4  5004.0   39.0   226609.0  6922431.0   43.98   0.524    10.00     4.08  0.052   1.00   1.37    1.60   1.82   0.395
+    5  5005.0  103.0   544050.0  7808760.0   60.90   0.702    13.20     6.37  0.079   2.59   3.13    2.97   1.35   0.139
+
+
+
+
+ +.. GENERATED FROM PYTHON SOURCE LINES 75-76 + +Since ``xeofs`` uses ``xarray``, we convert the data into an ``xarray.DataArray``. + +.. GENERATED FROM PYTHON SOURCE LINES 76-85 + +.. code-block:: default + + + data_df = data_df.rename(columns={"ID": "station"}).set_index("station") + data = data_df.to_xarray() + data = data.rename({"XCOO": "x", "YCOO": "y"}) + data = data.set_index(station=("x", "y")) + data = data.drop_vars("CNo") + da = data.to_array(dim="element") + da + + + + + + +.. raw:: html + +
+
+ + + + + + + + + + + + + + +
+    <xarray.DataArray (element: 10, station: 768)>
+    43.61 58.73 58.14 43.98 60.9 54.0 82.72 ... 0.196 0.202 0.207 0.109 0.141 0.185
+    Coordinates:
+      * station  (station) object MultiIndex
+      * x        (station) float64 -6.197e+05 2.147e+05 ... -2.82e+05 -1.273e+05
+      * y        (station) float64 6.805e+06 7.746e+06 ... 5.796e+06 6.523e+06
+      * element  (element) object 'SiO2_T' 'TiO2_T' 'Al2O3_T' ... 'K2O_T' 'P2O5_T'
+
+
+
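+
+Because ``station`` carries an (x, y) ``MultiIndex``, a single sampling station can
+be pulled out directly by its projected coordinates. A minimal sketch, assuming only
+standard ``xarray`` MultiIndex selection, the ``da`` built above, and the exact
+coordinate values of the first station in the table:
+
+.. code-block:: python
+
+    # Selecting both MultiIndex levels drops the station dimension,
+    # leaving a 1-D array over the 10 chemical elements.
+    sample = da.sel(x=-619656.5, y=6805304.1)
+    print(sample.sizes)
+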
+ +.. GENERATED FROM PYTHON SOURCE LINES 86-97 + +Let's dive into the GWPCA. First, initialize a ``GWPCA`` instance and fit it to the data. +The ``station`` dimension serves as our sample dimension, along which the local PCAs will be applied. +Since these PCAs need to gauge distances to adjacent stations, we must specify +a distance metric. Our station data includes coordinates in meters, so we'll +choose the ``euclidean`` metric. If you have coordinates in degrees (like +latitude and longitude), choose the ``haversine`` metric instead. +We're also using a ``bisquare`` kernel with a bandwidth of 1000 km. Note that the +bandwidth unit always follows input data (which is in meters here), +except when using the ``haversine`` metric, which always gives distances in +kilometers. Lastly, we'll standardize the input to ensure consistent scales +for the chemical elements. + +.. GENERATED FROM PYTHON SOURCE LINES 97-109 + +.. code-block:: default + + + gwpca = xe.models.GWPCA( + n_modes=5, + standardize=True, + metric="euclidean", + kernel="bisquare", + bandwidth=1000000.0, + ) + gwpca.fit(da, "station") + gwpca.components() + + + + + + + +.. raw:: html + +
+
+ + + + + + + + + + + + + + +
+    <xarray.DataArray 'components' (mode: 5, element: 10, station: 768)>
+    0.1813 -0.3584 0.1243 0.2 -0.3812 ... -0.1229 0.2865 -0.4732 -0.4197 -0.4249
+    Coordinates:
+      * element  (element) object 'SiO2_T' 'TiO2_T' 'Al2O3_T' ... 'K2O_T' 'P2O5_T'
+      * mode     (mode) int64 1 2 3 4 5
+      * station  (station) object MultiIndex
+      * x        (station) float64 -6.197e+05 2.147e+05 ... -2.82e+05 -1.273e+05
+      * y        (station) float64 6.805e+06 7.746e+06 ... 5.796e+06 6.523e+06
+    Attributes:
+        model:        GWPCA
+        n_modes:      5
+        center:       True
+        standardize:  True
+        use_coslat:   False
+        solver:       auto
+        software:     xeofs
+        version:      1.0.3
+        date:         2023-10-21 13:16:29
+
+
+
+
+
+.. GENERATED FROM PYTHON SOURCE LINES 110-115
+
+The ``components`` method returns the local principal components for each station. Note that the
+dimensionality of the returned array is ``[station, element, mode]``, so in practice we haven't really
+reduced the dimensionality of the data set. However, we can
+extract the largest locally weighted components for each station, which tells us which chemical elements
+dominate the local PCAs.
+
+.. GENERATED FROM PYTHON SOURCE LINES 115-119
+
+.. code-block:: default
+
+
+    llwc = gwpca.largest_locally_weighted_components()
+    llwc
+
+
+
+
+
+
+.. raw:: html
+
+ + + + + + + + + + + + + + +
+    <xarray.DataArray 'largest_locally_weighted_components' (mode: 5, station: 768)>
+    'MgO_T' 'Al2O3_T' 'MgO_T' 'TiO2_T' ... 'K2O_T' 'Fe2O3_T' 'Fe2O3_T' 'CaO_T'
+    Coordinates:
+      * mode     (mode) int64 1 2 3 4 5
+      * station  (station) object MultiIndex
+      * x        (station) float64 -6.197e+05 2.147e+05 ... -2.82e+05 -1.273e+05
+      * y        (station) float64 6.805e+06 7.746e+06 ... 5.796e+06 6.523e+06
+
+
+
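The same information can, in principle, be recovered from the components
themselves. Here is a short sketch under the assumption that "largest" means the
element with the maximum absolute loading per station and mode (an assumption
about the definition, not a statement about the ``xeofs`` implementation):

.. code-block:: default

    comps = gwpca.components()

    # Position of the element with the largest absolute loading for
    # every (mode, station) pair
    idx = abs(comps).argmax(dim="element")

    # Translate the integer positions back into element labels
    llwc_manual = comps["element"].isel(element=idx)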
+
+.. GENERATED FROM PYTHON SOURCE LINES 120-125

Let's visualize the spatial patterns of the chemical elements.
As the stations are positioned on an irregular grid, we'll transform the
``llwc`` ``DataArray`` into a ``pandas.DataFrame``. After that, we can easily visualize
it using the ``scatter`` method.
For demonstration, we'll concentrate on the first mode:

.. GENERATED FROM PYTHON SOURCE LINES 125-156

.. code-block:: default


    llwc1_df = llwc.sel(mode=1).to_dataframe()

    elements = da.element.values
    n_elements = len(elements)
    colors = np.arange(n_elements)
    col_dict = {el: col for el, col in zip(elements, colors)}

    llwc1_df["colors"] = llwc1_df["largest_locally_weighted_components"].map(col_dict)
    cmap = sns.color_palette("tab10", n_colors=n_elements, as_cmap=True)


    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111)
    background_df.plot.scatter(ax=ax, x="V1", y="V2", color=".3", marker=".", s=1)
    s = ax.scatter(
        x=llwc1_df["x"],
        y=llwc1_df["y"],
        c=llwc1_df["colors"],
        ec="w",
        s=40,
        cmap=cmap,
        vmin=-0.5,
        vmax=n_elements - 0.5,
    )
    cbar = fig.colorbar(mappable=s, ax=ax, label="Largest locally weighted component")
    cbar.set_ticks(colors)
    cbar.set_ticklabels(elements)
    ax.set_title("Largest locally weighted element", loc="left", weight=800)
    plt.show()




.. image-sg:: /auto_examples/1eof/images/sphx_glr_plot_gwpca_001.png
   :alt: Largest locally weighted element
   :srcset: /auto_examples/1eof/images/sphx_glr_plot_gwpca_001.png
   :class: sphx-glr-single-img




.. GENERATED FROM PYTHON SOURCE LINES 157-162

In the final step, let's examine the explained variance. Like standard PCA,
this gives us insight into the variance explained by each mode. But with a
local PCA for every station, the explained variance varies spatially. Notably,
the first mode's explained variance differs across countries, ranging from
roughly 40% to 70%.

.. GENERATED FROM PYTHON SOURCE LINES 162-175

.. code-block:: default



    exp_var_ratio = gwpca.explained_variance_ratio()
    evr1_df = exp_var_ratio.sel(mode=1).to_dataframe()

    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111)
    background_df.plot.scatter(ax=ax, x="V1", y="V2", color=".3", marker=".", s=1)
    evr1_df.plot.scatter(
        ax=ax, x="x", y="y", c="explained_variance_ratio", vmin=0.4, vmax=0.7
    )
    ax.set_title("Fraction of locally explained variance", loc="left", weight=800)
    plt.show()



.. image-sg:: /auto_examples/1eof/images/sphx_glr_plot_gwpca_002.png
   :alt: Fraction of locally explained variance
   :srcset: /auto_examples/1eof/images/sphx_glr_plot_gwpca_002.png
   :class: sphx-glr-single-img





.. rst-class:: sphx-glr-timing

   **Total running time of the script:** (0 minutes 33.235 seconds)


.. _sphx_glr_download_auto_examples_1eof_plot_gwpca.py:

.. only:: html

  .. container:: sphx-glr-footer sphx-glr-footer-example

    .. container:: sphx-glr-download sphx-glr-download-python

      :download:`Download Python source code: plot_gwpca.py <plot_gwpca.py>`

    .. container:: sphx-glr-download sphx-glr-download-jupyter

      :download:`Download Jupyter notebook: plot_gwpca.ipynb <plot_gwpca.ipynb>`


.. only:: html

 .. rst-class:: sphx-glr-signature

    `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_

diff --git a/docs/auto_examples/1eof/plot_gwpca_codeobj.pickle b/docs/auto_examples/1eof/plot_gwpca_codeobj.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..29323210b4ba1830445f9058c5603c98d6221def
GIT binary patch
literal 28006
(binary pickle payload omitted)
literal 0
HcmV?d00001

diff --git a/docs/auto_examples/1eof/sg_execution_times.rst b/docs/auto_examples/1eof/sg_execution_times.rst
index 931745b..36a3329 100644
--- a/docs/auto_examples/1eof/sg_execution_times.rst
+++ b/docs/auto_examples/1eof/sg_execution_times.rst
@@ -3,12 +3,13 @@
 
 .. _sphx_glr_auto_examples_1eof_sg_execution_times:
 
+
 Computation times
 =================
-**00:17.020** total execution time for **auto_examples_1eof** files:
+**00:33.235** total execution time for **auto_examples_1eof** files:
 
 +--------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_auto_examples_1eof_plot_rotated_eof.py` (``plot_rotated_eof.py``)           | 00:17.020 | 0.0 MB |
+| :ref:`sphx_glr_auto_examples_1eof_plot_gwpca.py` (``plot_gwpca.py``)                       | 00:33.235 | 0.0 MB |
 +--------------------------------------------------------------------------------------------+-----------+--------+
 | :ref:`sphx_glr_auto_examples_1eof_plot_eof-smode.py` (``plot_eof-smode.py``)               | 00:00.000 | 0.0 MB |
 +--------------------------------------------------------------------------------------------+-----------+--------+
 | :ref:`sphx_glr_auto_examples_1eof_plot_eof-tmode.py` (``plot_eof-tmode.py``)               | 00:00.000 | 0.0 MB |
 +--------------------------------------------------------------------------------------------+-----------+--------+
@@ -18,5 +19,7 @@ Computation times
 | :ref:`sphx_glr_auto_examples_1eof_plot_multivariate-eof.py` (``plot_multivariate-eof.py``) | 00:00.000 | 0.0 MB |
 +--------------------------------------------------------------------------------------------+-----------+--------+
+| :ref:`sphx_glr_auto_examples_1eof_plot_rotated_eof.py` (``plot_rotated_eof.py``)           | 00:00.000 | 0.0 MB |
++--------------------------------------------------------------------------------------------+-----------+--------+
 | :ref:`sphx_glr_auto_examples_1eof_plot_weighted-eof.py` (``plot_weighted-eof.py``)         | 00:00.000 | 0.0 MB |
 +--------------------------------------------------------------------------------------------+-----------+--------+
diff --git a/docs/auto_examples/auto_examples_jupyter.zip b/docs/auto_examples/auto_examples_jupyter.zip
index 3030f858ac7346057719197d74128562f630d058..bb6eaf3b621331898c6f6a46251f9d610e2b12b1 100644
GIT binary patch
delta 7563
(binary zip payload omitted)

diff --git a/docs/auto_examples/auto_examples_python.zip b/docs/auto_examples/auto_examples_python.zip
index 578c6814077ff8cf55a3f45e632c15e99178aa03..41853d3d284f9d5d20998d3beb3445854f4fdf1c 100644
GIT binary patch
delta 7261
(binary zip payload omitted)
delta 61
(binary zip payload omitted)

+.. raw:: html
+
+ +.. only:: html + + .. image:: /auto_examples/1eof/images/thumb/sphx_glr_plot_gwpca_thumb.png + :alt: + + :ref:`sphx_glr_auto_examples_1eof_plot_gwpca.py` + +.. raw:: html + +
Geographically weighted PCA
+
+
+
+    .. raw:: html
+
diff --git a/examples/1eof/plot_gwpca.py b/examples/1eof/plot_gwpca.py
new file mode 100644
index 0000000..a084283
--- /dev/null
+++ b/examples/1eof/plot_gwpca.py
@@ -0,0 +1,174 @@
+"""
+Geographically weighted PCA
+===========================
+Geographically Weighted Principal Component Analysis (GWPCA) is a spatial analysis method that identifies and visualizes local spatial patterns and relationships in multivariate datasets across various geographic areas. It operates by applying PCA within a moving window over a geographical region, which enables the extraction of local principal components that can differ across locations.
+
+In this demonstration, we'll apply GWPCA to a dataset detailing the chemical compositions of soils from countries around the Baltic Sea [1]_. This example is inspired by a tutorial originally crafted and published by Chris Brunsdon [2]_.
+The dataset comprises 10 variables (chemical elements) and spans 768 samples.
+Here, each sample refers to a pair of latitude and longitude coordinates, representing specific sampling stations.
+
+.. [1] Reimann, C. et al. Baltic soil survey: total concentrations of major and selected trace elements in arable soils from 10 countries around the Baltic Sea. Science of The Total Environment 257, 155–170 (2000).
+.. [2] https://rpubs.com/chrisbrunsdon/99675
+
+
+
+.. note:: The dataset we're using is found in the R package
+    `mvoutlier `_.
+    To access it, we'll employ the Python package
+    `rpy2 `_, which facilitates
+    interaction with R packages from within Python.
+
+.. note:: Presently, there's no support for an ``xarray.Dataset`` lacking an explicit feature dimension.
+    As a workaround, ``xarray.Dataset.to_array`` can be used to convert the ``Dataset`` to a ``DataArray``.
+
+.. warning:: Bear in mind that GWPCA requires significant computational power.
+    The ``xeofs`` implementation is optimized for CPU efficiency and is best suited
+    for smaller to medium data sets. For more extensive datasets where parallel processing becomes essential,
+    it's advisable to turn to the R package `GWmodel `_.
+    This package harnesses CUDA to enable GPU-accelerated GWPCA for optimized performance.
+
+
+Let's import the necessary packages.
+"""
+# For the analysis
+import numpy as np
+import xarray as xr
+import xeofs as xe
+
+# For visualization
+import matplotlib.pyplot as plt
+import seaborn as sns
+
+# For accessing R packages
+import rpy2.robjects as ro
+from rpy2.robjects.packages import importr
+from rpy2.robjects import pandas2ri
+
+# %%
+# Next, we'll install the R package `mvoutlier `_
+# using the `rpy2 `_ package.
+
+xr.set_options(display_expand_data=False)
+utils = importr("utils")
+utils.chooseCRANmirror(ind=1)
+utils.install_packages("mvoutlier")
+
+# %%
+# Let's load the dataset and convert it into a ``pandas.DataFrame``.
+# Alongside, we'll also load the background data that outlines the borders of countries
+# in the Baltic Sea region. This will help us visually represent the GWPCA results.
+
+ro.r(
+    """
+    require("mvoutlier")
+    data(bsstop)
+    Data <- bsstop[,1:14]
+    background <- bss.background
+    """
+)
+with (ro.default_converter + pandas2ri.converter).context():
+    data_df = ro.conversion.get_conversion().rpy2py(ro.r["Data"])
+    background_df = ro.conversion.get_conversion().rpy2py(ro.r["background"])
+data_df.head()
+
+# %%
+# Since ``xeofs`` uses ``xarray``, we convert the data into an ``xarray.DataArray``.
+
+data_df = data_df.rename(columns={"ID": "station"}).set_index("station")
+data = data_df.to_xarray()
+data = data.rename({"XCOO": "x", "YCOO": "y"})
+data = data.set_index(station=("x", "y"))
+data = data.drop_vars("CNo")
+da = data.to_array(dim="element")
+da
+
+# %%
+# Let's dive into the GWPCA. First, initialize a ``GWPCA`` instance and fit it to the data.
+# The ``station`` dimension serves as our sample dimension, along which the local PCAs will be applied.
+# Since these PCAs need to gauge distances to adjacent stations, we must specify
+# a distance metric. Our station data includes coordinates in meters, so we'll
+# choose the ``euclidean`` metric. If you have coordinates in degrees (like
+# latitude and longitude), choose the ``haversine`` metric instead.
+# We're also using a ``bisquare`` kernel with a bandwidth of 1000 km. Note that the
+# bandwidth unit always follows that of the input data (which is in meters here),
+# except when using the ``haversine`` metric, which always gives distances in
+# kilometers. Lastly, we'll standardize the input to ensure consistent scales
+# for the chemical elements.
+
+gwpca = xe.models.GWPCA(
+    n_modes=5,
+    standardize=True,
+    metric="euclidean",
+    kernel="bisquare",
+    bandwidth=1000000.0,
+)
+gwpca.fit(da, "station")
+gwpca.components()
+
+
+# %%
+# The ``components`` method returns the local principal components for each station. Note that the
+# dimensionality of the returned array is ``[station, element, mode]``, so in practice we haven't really
+# reduced the dimensionality of the data set. However, we can
+# extract the largest locally weighted components for each station, which tells us which chemical elements
+# dominate the local PCAs.
+
+llwc = gwpca.largest_locally_weighted_components()
+llwc
+
+# %%
+# Let's visualize the spatial patterns of the chemical elements.
+# As the stations are positioned on an irregular grid, we'll transform the
+# ``llwc`` ``DataArray`` into a ``pandas.DataFrame``. After that, we can easily visualize
+# it using the ``scatter`` method.
+# For demonstration, we'll concentrate on the first mode:
+
+llwc1_df = llwc.sel(mode=1).to_dataframe()
+
+elements = da.element.values
+n_elements = len(elements)
+colors = np.arange(n_elements)
+col_dict = {el: col for el, col in zip(elements, colors)}
+
+llwc1_df["colors"] = llwc1_df["largest_locally_weighted_components"].map(col_dict)
+cmap = sns.color_palette("tab10", n_colors=n_elements, as_cmap=True)
+
+
+fig = plt.figure(figsize=(10, 10))
+ax = fig.add_subplot(111)
+background_df.plot.scatter(ax=ax, x="V1", y="V2", color=".3", marker=".", s=1)
+s = ax.scatter(
+    x=llwc1_df["x"],
+    y=llwc1_df["y"],
+    c=llwc1_df["colors"],
+    ec="w",
+    s=40,
+    cmap=cmap,
+    vmin=-0.5,
+    vmax=n_elements - 0.5,
+)
+cbar = fig.colorbar(mappable=s, ax=ax, label="Largest locally weighted component")
+cbar.set_ticks(colors)
+cbar.set_ticklabels(elements)
+ax.set_title("Largest locally weighted element", loc="left", weight=800)
+plt.show()
+
+# %%
+# In the final step, let's examine the explained variance. Like standard PCA,
+# this gives us insight into the variance explained by each mode. But with a
+# local PCA for every station, the explained variance varies spatially. Notably,
+# the first mode's explained variance differs across countries, ranging from
+# roughly 40% to 70%.
+ + +exp_var_ratio = gwpca.explained_variance_ratio() +evr1_df = exp_var_ratio.sel(mode=1).to_dataframe() + +fig = plt.figure(figsize=(10, 10)) +ax = fig.add_subplot(111) +background_df.plot.scatter(ax=ax, x="V1", y="V2", color=".3", marker=".", s=1) +evr1_df.plot.scatter( + ax=ax, x="x", y="y", c="explained_variance_ratio", vmin=0.4, vmax=0.7 +) +ax.set_title("Fraction of locally explained variance", loc="left", weight=800) +plt.show() diff --git a/poetry.lock b/poetry.lock index 6f4649c..973645c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -4,7 +4,7 @@ name = "accessible-pygments" version = "0.0.4" description = "A collection of accessible pygments styles" -optional = false +optional = true python-versions = "*" files = [ {file = "accessible-pygments-0.0.4.tar.gz", hash = "sha256:e7b57a9b15958e9601c7e9eb07a440c813283545a20973f2574a5f453d0e953e"}, @@ -18,7 +18,7 @@ pygments = ">=1.5" name = "alabaster" version = "0.7.13" description = "A configurable sidebar-enabled Sphinx theme" -optional = false +optional = true python-versions = ">=3.6" files = [ {file = "alabaster-0.7.13-py3-none-any.whl", hash = "sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3"}, @@ -29,7 +29,7 @@ files = [ name = "attrs" version = "23.1.0" description = "Classes Without Boilerplate" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"}, @@ -47,7 +47,7 @@ tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pyte name = "babel" version = "2.12.1" description = "Internationalization utilities" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "Babel-2.12.1-py3-none-any.whl", hash = "sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610"}, @@ -58,7 +58,7 @@ files = [ name = "beautifulsoup4" version = "4.12.2" description = "Screen-scraping library" -optional = false +optional = true python-versions = ">=3.6.0" files = [ {file = "beautifulsoup4-4.12.2-py3-none-any.whl", hash = "sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a"}, @@ -121,7 +121,7 @@ uvloop = ["uvloop (>=0.15.2)"] name = "bleach" version = "6.0.0" description = "An easy safelist-based HTML-sanitizing tool." -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "bleach-6.0.0-py3-none-any.whl", hash = "sha256:33c16e3353dbd13028ab4799a0f89a83f113405c766e9c122df8a06f5b85b3f4"}, @@ -150,7 +150,7 @@ files = [ name = "cffi" version = "1.15.1" description = "Foreign Function Interface for Python calling C code." 
-optional = false +optional = true python-versions = "*" files = [ {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, @@ -470,7 +470,7 @@ test = ["pandas[test]", "pre-commit", "pytest", "pytest-cov", "pytest-rerunfailu name = "defusedxml" version = "0.7.1" description = "XML bomb protection for Python stdlib modules" -optional = false +optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, @@ -481,7 +481,7 @@ files = [ name = "docutils" version = "0.20.1" description = "Docutils -- Python Documentation Utilities" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "docutils-0.20.1-py3-none-any.whl", hash = "sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6"}, @@ -506,7 +506,7 @@ test = ["pytest (>=6)"] name = "fastjsonschema" version = "2.18.0" description = "Fastest Python implementation of JSON schema" -optional = false +optional = true python-versions = "*" files = [ {file = "fastjsonschema-2.18.0-py3-none-any.whl", hash = "sha256:128039912a11a807068a7c87d0da36660afbfd7202780db26c4aa7153cfdc799"}, @@ -582,7 +582,7 @@ files = [ name = "imagesize" version = "1.4.1" description = "Getting image size from png/jpeg/jpeg2000/gif file" -optional = false +optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ {file = "imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b"}, @@ -623,7 +623,7 @@ files = [ name = "jinja2" version = "3.1.2" description = "A very fast and expressive template engine." -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, @@ -651,7 +651,7 @@ files = [ name = "jsonschema" version = "4.18.4" description = "An implementation of JSON Schema validation for Python" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "jsonschema-4.18.4-py3-none-any.whl", hash = "sha256:971be834317c22daaa9132340a51c01b50910724082c2c1a2ac87eeec153a3fe"}, @@ -672,7 +672,7 @@ format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339- name = "jsonschema-specifications" version = "2023.7.1" description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "jsonschema_specifications-2023.7.1-py3-none-any.whl", hash = "sha256:05adf340b659828a004220a9613be00fa3f223f2b82002e273dee62fd50524b1"}, @@ -686,7 +686,7 @@ referencing = ">=0.28.0" name = "jupyter-client" version = "8.3.0" description = "Jupyter protocol implementation and client libraries" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "jupyter_client-8.3.0-py3-none-any.whl", hash = "sha256:7441af0c0672edc5d28035e92ba5e32fadcfa8a4e608a434c228836a89df6158"}, @@ -708,7 +708,7 @@ test = ["coverage", "ipykernel (>=6.14)", "mypy", "paramiko", "pre-commit", "pyt name = "jupyter-core" version = "5.3.1" description = "Jupyter core package. A base package on which Jupyter projects rely." 
-optional = false +optional = true python-versions = ">=3.8" files = [ {file = "jupyter_core-5.3.1-py3-none-any.whl", hash = "sha256:ae9036db959a71ec1cac33081eeb040a79e681f08ab68b0883e9a676c7a90dce"}, @@ -728,13 +728,46 @@ test = ["ipykernel", "pre-commit", "pytest", "pytest-cov", "pytest-timeout"] name = "jupyterlab-pygments" version = "0.2.2" description = "Pygments theme using JupyterLab CSS variables" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "jupyterlab_pygments-0.2.2-py2.py3-none-any.whl", hash = "sha256:2405800db07c9f770863bcf8049a529c3dd4d3e28536638bd7c1c01d2748309f"}, {file = "jupyterlab_pygments-0.2.2.tar.gz", hash = "sha256:7405d7fde60819d905a9fa8ce89e4cd830e318cdad22a0030f7a901da705585d"}, ] +[[package]] +name = "llvmlite" +version = "0.40.1" +description = "lightweight wrapper around basic LLVM functionality" +optional = false +python-versions = ">=3.8" +files = [ + {file = "llvmlite-0.40.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:84ce9b1c7a59936382ffde7871978cddcda14098e5a76d961e204523e5c372fb"}, + {file = "llvmlite-0.40.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3673c53cb21c65d2ff3704962b5958e967c6fc0bd0cff772998face199e8d87b"}, + {file = "llvmlite-0.40.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bba2747cf5b4954e945c287fe310b3fcc484e2a9d1b0c273e99eb17d103bb0e6"}, + {file = "llvmlite-0.40.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbd5e82cc990e5a3e343a3bf855c26fdfe3bfae55225f00efd01c05bbda79918"}, + {file = "llvmlite-0.40.1-cp310-cp310-win32.whl", hash = "sha256:09f83ea7a54509c285f905d968184bba00fc31ebf12f2b6b1494d677bb7dde9b"}, + {file = "llvmlite-0.40.1-cp310-cp310-win_amd64.whl", hash = "sha256:7b37297f3cbd68d14a97223a30620589d98ad1890e5040c9e5fc181063f4ed49"}, + {file = "llvmlite-0.40.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a66a5bd580951751b4268f4c3bddcef92682814d6bc72f3cd3bb67f335dd7097"}, + {file = "llvmlite-0.40.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:467b43836b388eaedc5a106d76761e388dbc4674b2f2237bc477c6895b15a634"}, + {file = "llvmlite-0.40.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c23edd196bd797dc3a7860799054ea3488d2824ecabc03f9135110c2e39fcbc"}, + {file = "llvmlite-0.40.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a36d9f244b6680cb90bbca66b146dabb2972f4180c64415c96f7c8a2d8b60a36"}, + {file = "llvmlite-0.40.1-cp311-cp311-win_amd64.whl", hash = "sha256:5b3076dc4e9c107d16dc15ecb7f2faf94f7736cd2d5e9f4dc06287fd672452c1"}, + {file = "llvmlite-0.40.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4a7525db121f2e699809b539b5308228854ccab6693ecb01b52c44a2f5647e20"}, + {file = "llvmlite-0.40.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:84747289775d0874e506f907a4513db889471607db19b04de97d144047fec885"}, + {file = "llvmlite-0.40.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e35766e42acef0fe7d1c43169a8ffc327a47808fae6a067b049fe0e9bbf84dd5"}, + {file = "llvmlite-0.40.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cda71de10a1f48416309e408ea83dab5bf36058f83e13b86a2961defed265568"}, + {file = "llvmlite-0.40.1-cp38-cp38-win32.whl", hash = "sha256:96707ebad8b051bbb4fc40c65ef93b7eeee16643bd4d579a14d11578e4b7a647"}, + {file = "llvmlite-0.40.1-cp38-cp38-win_amd64.whl", hash = "sha256:e44f854dc11559795bcdeaf12303759e56213d42dabbf91a5897aa2d8b033810"}, + {file = 
"llvmlite-0.40.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f643d15aacd0b0b0dc8b74b693822ba3f9a53fa63bc6a178c2dba7cc88f42144"}, + {file = "llvmlite-0.40.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:39a0b4d0088c01a469a5860d2e2d7a9b4e6a93c0f07eb26e71a9a872a8cadf8d"}, + {file = "llvmlite-0.40.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9329b930d699699846623054121ed105fd0823ed2180906d3b3235d361645490"}, + {file = "llvmlite-0.40.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2dbbb8424037ca287983b115a29adf37d806baf7e1bf4a67bd2cffb74e085ed"}, + {file = "llvmlite-0.40.1-cp39-cp39-win32.whl", hash = "sha256:e74e7bec3235a1e1c9ad97d897a620c5007d0ed80c32c84c1d787e7daa17e4ec"}, + {file = "llvmlite-0.40.1-cp39-cp39-win_amd64.whl", hash = "sha256:ff8f31111bb99d135ff296757dc81ab36c2dee54ed4bd429158a96da9807c316"}, + {file = "llvmlite-0.40.1.tar.gz", hash = "sha256:5cdb0d45df602099d833d50bd9e81353a5e036242d3c003c5b294fc61d1986b4"}, +] + [[package]] name = "locket" version = "1.0.0" @@ -750,7 +783,7 @@ files = [ name = "markupsafe" version = "2.1.3" description = "Safely add untrusted strings to HTML/XML markup." -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, @@ -820,7 +853,7 @@ files = [ name = "mistune" version = "3.0.1" description = "A sane and fast Markdown parser with useful plugins and renderers" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "mistune-3.0.1-py3-none-any.whl", hash = "sha256:b9b3e438efbb57c62b5beb5e134dab664800bdf1284a7ee09e8b12b13eb1aac6"}, @@ -842,7 +875,7 @@ files = [ name = "nbclient" version = "0.8.0" description = "A client library for executing notebooks. Formerly nbconvert's ExecutePreprocessor." 
-optional = false +optional = true python-versions = ">=3.8.0" files = [ {file = "nbclient-0.8.0-py3-none-any.whl", hash = "sha256:25e861299e5303a0477568557c4045eccc7a34c17fc08e7959558707b9ebe548"}, @@ -864,7 +897,7 @@ test = ["flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "nbconvert (>= name = "nbconvert" version = "7.7.3" description = "Converting Jupyter Notebooks" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "nbconvert-7.7.3-py3-none-any.whl", hash = "sha256:3022adadff3f86578a47fab7c2228bb3ca9c56a24345642a22f917f6168b48fc"}, @@ -901,7 +934,7 @@ webpdf = ["playwright"] name = "nbformat" version = "5.9.1" description = "The Jupyter Notebook format" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "nbformat-5.9.1-py3-none-any.whl", hash = "sha256:b7968ebf4811178a4108ee837eae1442e3f054132100f0359219e9ed1ce3ca45"}, @@ -920,13 +953,13 @@ test = ["pep440", "pre-commit", "pytest", "testpath"] [[package]] name = "nbsphinx" -version = "0.9.2" +version = "0.9.3" description = "Jupyter Notebook Tools for Sphinx" -optional = false +optional = true python-versions = ">=3.6" files = [ - {file = "nbsphinx-0.9.2-py3-none-any.whl", hash = "sha256:2746680ece5ad3b0e980639d717a5041a1c1aafb416846b72dfaeecc306bc351"}, - {file = "nbsphinx-0.9.2.tar.gz", hash = "sha256:540db7f4066347f23d0650c4ae8e7d85334c69adf749e030af64c12e996ff88e"}, + {file = "nbsphinx-0.9.3-py3-none-any.whl", hash = "sha256:6e805e9627f4a358bd5720d5cbf8bf48853989c79af557afd91a5f22e163029f"}, + {file = "nbsphinx-0.9.3.tar.gz", hash = "sha256:ec339c8691b688f8676104a367a4b8cf3ea01fd089dc28d24dec22d563b11562"}, ] [package.dependencies] @@ -978,38 +1011,78 @@ certifi = "*" cftime = "*" numpy = "*" +[[package]] +name = "numba" +version = "0.57.1" +description = "compiling Python code using LLVM" +optional = false +python-versions = ">=3.8" +files = [ + {file = "numba-0.57.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:db8268eb5093cae2288942a8cbd69c9352f6fe6e0bfa0a9a27679436f92e4248"}, + {file = "numba-0.57.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:643cb09a9ba9e1bd8b060e910aeca455e9442361e80fce97690795ff9840e681"}, + {file = "numba-0.57.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:53e9fab973d9e82c9f8449f75994a898daaaf821d84f06fbb0b9de2293dd9306"}, + {file = "numba-0.57.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c0602e4f896e6a6d844517c3ab434bc978e7698a22a733cc8124465898c28fa8"}, + {file = "numba-0.57.1-cp310-cp310-win32.whl", hash = "sha256:3d6483c27520d16cf5d122868b79cad79e48056ecb721b52d70c126bed65431e"}, + {file = "numba-0.57.1-cp310-cp310-win_amd64.whl", hash = "sha256:a32ee263649aa3c3587b833d6311305379529570e6c20deb0c6f4fb5bc7020db"}, + {file = "numba-0.57.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c078f84b5529a7fdb8413bb33d5100f11ec7b44aa705857d9eb4e54a54ff505"}, + {file = "numba-0.57.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e447c4634d1cc99ab50d4faa68f680f1d88b06a2a05acf134aa6fcc0342adeca"}, + {file = "numba-0.57.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4838edef2df5f056cb8974670f3d66562e751040c448eb0b67c7e2fec1726649"}, + {file = "numba-0.57.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9b17fbe4a69dcd9a7cd49916b6463cd9a82af5f84911feeb40793b8bce00dfa7"}, + {file = "numba-0.57.1-cp311-cp311-win_amd64.whl", hash = "sha256:93df62304ada9b351818ba19b1cfbddaf72cd89348e81474326ca0b23bf0bae1"}, + 
{file = "numba-0.57.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8e00ca63c5d0ad2beeb78d77f087b3a88c45ea9b97e7622ab2ec411a868420ee"}, + {file = "numba-0.57.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ff66d5b022af6c7d81ddbefa87768e78ed4f834ab2da6ca2fd0d60a9e69b94f5"}, + {file = "numba-0.57.1-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:60ec56386076e9eed106a87c96626d5686fbb16293b9834f0849cf78c9491779"}, + {file = "numba-0.57.1-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6c057ccedca95df23802b6ccad86bb318be624af45b5a38bb8412882be57a681"}, + {file = "numba-0.57.1-cp38-cp38-win32.whl", hash = "sha256:5a82bf37444039c732485c072fda21a361790ed990f88db57fd6941cd5e5d307"}, + {file = "numba-0.57.1-cp38-cp38-win_amd64.whl", hash = "sha256:9bcc36478773ce838f38afd9a4dfafc328d4ffb1915381353d657da7f6473282"}, + {file = "numba-0.57.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ae50c8c90c2ce8057f9618b589223e13faa8cbc037d8f15b4aad95a2c33a0582"}, + {file = "numba-0.57.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9a1b2b69448e510d672ff9a6b18d2db9355241d93c6a77677baa14bec67dc2a0"}, + {file = "numba-0.57.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3cf78d74ad9d289fbc1e5b1c9f2680fca7a788311eb620581893ab347ec37a7e"}, + {file = "numba-0.57.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f47dd214adc5dcd040fe9ad2adbd2192133c9075d2189ce1b3d5f9d72863ef05"}, + {file = "numba-0.57.1-cp39-cp39-win32.whl", hash = "sha256:a3eac19529956185677acb7f01864919761bfffbb9ae04bbbe5e84bbc06cfc2b"}, + {file = "numba-0.57.1-cp39-cp39-win_amd64.whl", hash = "sha256:9587ba1bf5f3035575e45562ada17737535c6d612df751e811d702693a72d95e"}, + {file = "numba-0.57.1.tar.gz", hash = "sha256:33c0500170d213e66d90558ad6aca57d3e03e97bb11da82e6d87ab793648cb17"}, +] + +[package.dependencies] +llvmlite = "==0.40.*" +numpy = ">=1.21,<1.25" + [[package]] name = "numpy" -version = "1.25.1" +version = "1.24.4" description = "Fundamental package for array computing in Python" optional = false -python-versions = ">=3.9" +python-versions = ">=3.8" files = [ - {file = "numpy-1.25.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:77d339465dff3eb33c701430bcb9c325b60354698340229e1dff97745e6b3efa"}, - {file = "numpy-1.25.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d736b75c3f2cb96843a5c7f8d8ccc414768d34b0a75f466c05f3a739b406f10b"}, - {file = "numpy-1.25.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a90725800caeaa160732d6b31f3f843ebd45d6b5f3eec9e8cc287e30f2805bf"}, - {file = "numpy-1.25.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c6c9261d21e617c6dc5eacba35cb68ec36bb72adcff0dee63f8fbc899362588"}, - {file = "numpy-1.25.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0def91f8af6ec4bb94c370e38c575855bf1d0be8a8fbfba42ef9c073faf2cf19"}, - {file = "numpy-1.25.1-cp310-cp310-win32.whl", hash = "sha256:fd67b306320dcadea700a8f79b9e671e607f8696e98ec255915c0c6d6b818503"}, - {file = "numpy-1.25.1-cp310-cp310-win_amd64.whl", hash = "sha256:c1516db588987450b85595586605742879e50dcce923e8973f79529651545b57"}, - {file = "numpy-1.25.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6b82655dd8efeea69dbf85d00fca40013d7f503212bc5259056244961268b66e"}, - {file = "numpy-1.25.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e8f6049c4878cb16960fbbfb22105e49d13d752d4d8371b55110941fb3b17800"}, - {file = 
"numpy-1.25.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41a56b70e8139884eccb2f733c2f7378af06c82304959e174f8e7370af112e09"}, - {file = "numpy-1.25.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5154b1a25ec796b1aee12ac1b22f414f94752c5f94832f14d8d6c9ac40bcca6"}, - {file = "numpy-1.25.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:38eb6548bb91c421261b4805dc44def9ca1a6eef6444ce35ad1669c0f1a3fc5d"}, - {file = "numpy-1.25.1-cp311-cp311-win32.whl", hash = "sha256:791f409064d0a69dd20579345d852c59822c6aa087f23b07b1b4e28ff5880fcb"}, - {file = "numpy-1.25.1-cp311-cp311-win_amd64.whl", hash = "sha256:c40571fe966393b212689aa17e32ed905924120737194b5d5c1b20b9ed0fb171"}, - {file = "numpy-1.25.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3d7abcdd85aea3e6cdddb59af2350c7ab1ed764397f8eec97a038ad244d2d105"}, - {file = "numpy-1.25.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1a180429394f81c7933634ae49b37b472d343cccb5bb0c4a575ac8bbc433722f"}, - {file = "numpy-1.25.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d412c1697c3853c6fc3cb9751b4915859c7afe6a277c2bf00acf287d56c4e625"}, - {file = "numpy-1.25.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20e1266411120a4f16fad8efa8e0454d21d00b8c7cee5b5ccad7565d95eb42dd"}, - {file = "numpy-1.25.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f76aebc3358ade9eacf9bc2bb8ae589863a4f911611694103af05346637df1b7"}, - {file = "numpy-1.25.1-cp39-cp39-win32.whl", hash = "sha256:247d3ffdd7775bdf191f848be8d49100495114c82c2bd134e8d5d075fb386a1c"}, - {file = "numpy-1.25.1-cp39-cp39-win_amd64.whl", hash = "sha256:1d5d3c68e443c90b38fdf8ef40e60e2538a27548b39b12b73132456847f4b631"}, - {file = "numpy-1.25.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:35a9527c977b924042170a0887de727cd84ff179e478481404c5dc66b4170009"}, - {file = "numpy-1.25.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d3fe3dd0506a28493d82dc3cf254be8cd0d26f4008a417385cbf1ae95b54004"}, - {file = "numpy-1.25.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:012097b5b0d00a11070e8f2e261128c44157a8689f7dedcf35576e525893f4fe"}, - {file = "numpy-1.25.1.tar.gz", hash = "sha256:9a3a9f3a61480cc086117b426a8bd86869c213fc4072e606f01c4e4b66eb92bf"}, + {file = "numpy-1.24.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bfb52d2169d58c1cdb8cc1f16989101639b34c7d3ce60ed70b19c63eba0b64"}, + {file = "numpy-1.24.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ed094d4f0c177b1b8e7aa9cba7d6ceed51c0e569a5318ac0ca9a090680a6a1b1"}, + {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79fc682a374c4a8ed08b331bef9c5f582585d1048fa6d80bc6c35bc384eee9b4"}, + {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ffe43c74893dbf38c2b0a1f5428760a1a9c98285553c89e12d70a96a7f3a4d6"}, + {file = "numpy-1.24.4-cp310-cp310-win32.whl", hash = "sha256:4c21decb6ea94057331e111a5bed9a79d335658c27ce2adb580fb4d54f2ad9bc"}, + {file = "numpy-1.24.4-cp310-cp310-win_amd64.whl", hash = "sha256:b4bea75e47d9586d31e892a7401f76e909712a0fd510f58f5337bea9572c571e"}, + {file = "numpy-1.24.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f136bab9c2cfd8da131132c2cf6cc27331dd6fae65f95f69dcd4ae3c3639c810"}, + {file = "numpy-1.24.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2926dac25b313635e4d6cf4dc4e51c8c0ebfed60b801c799ffc4c32bf3d1254"}, + {file = 
"numpy-1.24.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:222e40d0e2548690405b0b3c7b21d1169117391c2e82c378467ef9ab4c8f0da7"}, + {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7215847ce88a85ce39baf9e89070cb860c98fdddacbaa6c0da3ffb31b3350bd5"}, + {file = "numpy-1.24.4-cp311-cp311-win32.whl", hash = "sha256:4979217d7de511a8d57f4b4b5b2b965f707768440c17cb70fbf254c4b225238d"}, + {file = "numpy-1.24.4-cp311-cp311-win_amd64.whl", hash = "sha256:b7b1fc9864d7d39e28f41d089bfd6353cb5f27ecd9905348c24187a768c79694"}, + {file = "numpy-1.24.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1452241c290f3e2a312c137a9999cdbf63f78864d63c79039bda65ee86943f61"}, + {file = "numpy-1.24.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04640dab83f7c6c85abf9cd729c5b65f1ebd0ccf9de90b270cd61935eef0197f"}, + {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5425b114831d1e77e4b5d812b69d11d962e104095a5b9c3b641a218abcc050e"}, + {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd80e219fd4c71fc3699fc1dadac5dcf4fd882bfc6f7ec53d30fa197b8ee22dc"}, + {file = "numpy-1.24.4-cp38-cp38-win32.whl", hash = "sha256:4602244f345453db537be5314d3983dbf5834a9701b7723ec28923e2889e0bb2"}, + {file = "numpy-1.24.4-cp38-cp38-win_amd64.whl", hash = "sha256:692f2e0f55794943c5bfff12b3f56f99af76f902fc47487bdfe97856de51a706"}, + {file = "numpy-1.24.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2541312fbf09977f3b3ad449c4e5f4bb55d0dbf79226d7724211acc905049400"}, + {file = "numpy-1.24.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9667575fb6d13c95f1b36aca12c5ee3356bf001b714fc354eb5465ce1609e62f"}, + {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3a86ed21e4f87050382c7bc96571755193c4c1392490744ac73d660e8f564a9"}, + {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d11efb4dbecbdf22508d55e48d9c8384db795e1b7b51ea735289ff96613ff74d"}, + {file = "numpy-1.24.4-cp39-cp39-win32.whl", hash = "sha256:6620c0acd41dbcb368610bb2f4d83145674040025e5536954782467100aa8835"}, + {file = "numpy-1.24.4-cp39-cp39-win_amd64.whl", hash = "sha256:befe2bf740fd8373cf56149a5c23a0f601e82869598d41f8e188a0e9869926f8"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:31f13e25b4e304632a4619d0e0777662c2ffea99fcae2029556b17d8ff958aef"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95f7ac6540e95bc440ad77f56e520da5bf877f87dca58bd095288dce8940532a"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e98f220aa76ca2a977fe435f5b04d7b3470c0a2e6312907b37ba6068f26787f2"}, + {file = "numpy-1.24.4.tar.gz", hash = "sha256:80f5e3a4e498641401868df4208b74581206afbee7cf7b8329daae82676d9463"}, ] [[package]] @@ -1093,7 +1166,7 @@ xml = ["lxml (>=4.6.3)"] name = "pandocfilters" version = "1.5.0" description = "Utilities for writing pandoc filters in python" -optional = false +optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ {file = "pandocfilters-1.5.0-py2.py3-none-any.whl", hash = "sha256:33aae3f25fd1a026079f5d27bdd52496f0e0803b3469282162bafdcbdf6ef14f"}, @@ -1213,7 +1286,7 @@ files = [ name = "pycparser" version = "2.21" description = "C parser in Python" -optional = false +optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ {file = 
"pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, @@ -1222,13 +1295,13 @@ files = [ [[package]] name = "pydata-sphinx-theme" -version = "0.13.3" +version = "0.14.1" description = "Bootstrap-based Sphinx theme from the PyData community" -optional = false -python-versions = ">=3.7" +optional = true +python-versions = ">=3.8" files = [ - {file = "pydata_sphinx_theme-0.13.3-py3-none-any.whl", hash = "sha256:bf41ca6c1c6216e929e28834e404bfc90e080b51915bbe7563b5e6fda70354f0"}, - {file = "pydata_sphinx_theme-0.13.3.tar.gz", hash = "sha256:827f16b065c4fd97e847c11c108bf632b7f2ff53a3bca3272f63f3f3ff782ecc"}, + {file = "pydata_sphinx_theme-0.14.1-py3-none-any.whl", hash = "sha256:c436027bc76ae023df4e70517e3baf90cdda5a88ee46b818b5ef0cc3884aba04"}, + {file = "pydata_sphinx_theme-0.14.1.tar.gz", hash = "sha256:d8d4ac81252c16a002e835d21f0fea6d04cf3608e95045c816e8cc823e79b053"}, ] [package.dependencies] @@ -1238,13 +1311,14 @@ beautifulsoup4 = "*" docutils = "!=0.17.0" packaging = "*" pygments = ">=2.7" -sphinx = ">=4.2" +sphinx = ">=5.0" typing-extensions = "*" [package.extras] +a11y = ["pytest-playwright"] dev = ["nox", "pre-commit", "pydata-sphinx-theme[doc,test]", "pyyaml"] -doc = ["ablog (>=0.11.0rc2)", "colorama", "ipyleaflet", "jupyter_sphinx", "linkify-it-py", "matplotlib", "myst-nb", "nbsphinx", "numpy", "numpydoc", "pandas", "plotly", "rich", "sphinx-copybutton", "sphinx-design", "sphinx-favicon (>=1.0.1)", "sphinx-sitemap", "sphinx-togglebutton", "sphinxcontrib-youtube", "sphinxext-rediraffe", "xarray"] -test = ["codecov", "pytest", "pytest-cov", "pytest-regressions"] +doc = ["ablog (>=0.11.0rc2)", "colorama", "ipyleaflet", "jupyter_sphinx", "jupyterlite-sphinx", "linkify-it-py", "matplotlib", "myst-nb", "nbsphinx", "numpy", "numpydoc", "pandas", "plotly", "rich", "sphinx-autoapi", "sphinx-copybutton", "sphinx-design", "sphinx-favicon (>=1.0.1)", "sphinx-sitemap", "sphinx-togglebutton", "sphinxcontrib-youtube (<1.4)", "sphinxext-rediraffe", "xarray"] +test = ["pytest", "pytest-cov", "pytest-regressions"] [[package]] name = "pyflakes" @@ -1261,7 +1335,7 @@ files = [ name = "pygments" version = "2.15.1" description = "Pygments is a syntax highlighting package written in Python." 
-optional = false +optional = true python-versions = ">=3.7" files = [ {file = "Pygments-2.15.1-py3-none-any.whl", hash = "sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1"}, @@ -1322,7 +1396,7 @@ files = [ name = "pywin32" version = "306" description = "Python for Window Extensions" -optional = false +optional = true python-versions = "*" files = [ {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, @@ -1394,7 +1468,7 @@ files = [ name = "pyzmq" version = "25.1.0" description = "Python bindings for 0MQ" -optional = false +optional = true python-versions = ">=3.6" files = [ {file = "pyzmq-25.1.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:1a6169e69034eaa06823da6a93a7739ff38716142b3596c180363dee729d713d"}, @@ -1483,7 +1557,7 @@ cffi = {version = "*", markers = "implementation_name == \"pypy\""} name = "referencing" version = "0.30.0" description = "JSON Referencing + Python" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "referencing-0.30.0-py3-none-any.whl", hash = "sha256:c257b08a399b6c2f5a3510a50d28ab5dbc7bbde049bcaf954d43c446f83ab548"}, @@ -1519,7 +1593,7 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] name = "rpds-py" version = "0.9.2" description = "Python bindings to Rust's persistent data structures (rpds)" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "rpds_py-0.9.2-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:ab6919a09c055c9b092798ce18c6c4adf49d24d4d9e43a92b257e3f2548231e7"}, @@ -1621,6 +1695,33 @@ files = [ {file = "rpds_py-0.9.2.tar.gz", hash = "sha256:8d70e8f14900f2657c249ea4def963bed86a29b81f81f5b76b5a9215680de945"}, ] +[[package]] +name = "rpy2" +version = "3.5.14" +description = "Python interface to the R language (embedded R)" +optional = true +python-versions = ">=3.7" +files = [ + {file = "rpy2-3.5.14-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:5cb7398adfcb6ca4faefbe856fb7af95eb11722ad18fbfcb4a79dbea1cf71c7c"}, + {file = "rpy2-3.5.14-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f35910208e5945b5108b7668bcb58127742f47fc0e8df8b2f4889c86be6f6519"}, + {file = "rpy2-3.5.14-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:adbd8e08f67f807fcca8e47473340e233a55c25fffd418081e6719316e03dbd7"}, + {file = "rpy2-3.5.14-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:ca95dee528d0a8032913de5fa85b8252b925f389aa2e2219d5314dcf43beeb1e"}, + {file = "rpy2-3.5.14.tar.gz", hash = "sha256:5f46ae31d36e117be366ad4ae02493c015ac6ba59ebe3b4cd7200075332fc481"}, +] + +[package.dependencies] +cffi = ">=1.10.0" +jinja2 = "*" +packaging = {version = "*", markers = "platform_system == \"Windows\""} +tzlocal = "*" + +[package.extras] +all = ["ipython", "numpy", "pandas (>=1.3.5)", "pytest"] +pandas = ["numpy", "pandas (>=1.3.5)"] +test = ["ipython", "numpy", "pandas (>=1.3.5)", "pytest"] +test-minimal = ["coverage", "pytest", "pytest-cov"] +types = ["mypy", "types-tzlocal"] + [[package]] name = "scikit-learn" version = "1.3.0" @@ -1716,7 +1817,7 @@ files = [ name = "snowballstemmer" version = "2.2.0" description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms." 
-optional = false +optional = true python-versions = "*" files = [ {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"}, @@ -1727,7 +1828,7 @@ files = [ name = "soupsieve" version = "2.4.1" description = "A modern CSS selector implementation for Beautiful Soup." -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "soupsieve-2.4.1-py3-none-any.whl", hash = "sha256:1c1bfee6819544a3447586c889157365a27e10d88cde3ad3da0cf0ddf646feb8"}, @@ -1738,7 +1839,7 @@ files = [ name = "sphinx" version = "7.1.1" description = "Python documentation generator" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "sphinx-7.1.1-py3-none-any.whl", hash = "sha256:4e6c5ea477afa0fb90815210fd1312012e1d7542589ab251ac9b53b7c0751bce"}, @@ -1772,7 +1873,7 @@ test = ["cython", "filelock", "html5lib", "pytest (>=4.6)"] name = "sphinx-copybutton" version = "0.5.2" description = "Add a copy button to each of your code cells." -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "sphinx-copybutton-0.5.2.tar.gz", hash = "sha256:4cf17c82fb9646d1bc9ca92ac280813a3b605d8c421225fd9913154103ee1fbd"}, @@ -1790,7 +1891,7 @@ rtd = ["ipython", "myst-nb", "sphinx", "sphinx-book-theme", "sphinx-examples"] name = "sphinx-design" version = "0.5.0" description = "A sphinx extension for designing beautiful, view size responsive web components." -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "sphinx_design-0.5.0-py3-none-any.whl", hash = "sha256:1af1267b4cea2eedd6724614f19dcc88fe2e15aff65d06b2f6252cee9c4f4c1e"}, @@ -1811,13 +1912,13 @@ theme-sbt = ["sphinx-book-theme (>=1.0,<2.0)"] [[package]] name = "sphinx-gallery" -version = "0.13.0" +version = "0.14.0" description = "A `Sphinx `_ extension that builds an HTML gallery of examples from any set of Python scripts." -optional = false +optional = true python-versions = ">=3.7" files = [ - {file = "sphinx-gallery-0.13.0.tar.gz", hash = "sha256:4756f92e079128b08cbc7a57922cc904b3d442b1abfa73ec6471ad24f3c5b4b2"}, - {file = "sphinx_gallery-0.13.0-py3-none-any.whl", hash = "sha256:5bedfa4998b4158d5affc7d1df6796e4b1e834b16680001dac992af1304d8ed9"}, + {file = "sphinx-gallery-0.14.0.tar.gz", hash = "sha256:2a4a0aaf032955508e1d0f3495199a3c7819ce420e71096bff0bca551a4043c2"}, + {file = "sphinx_gallery-0.14.0-py3-none-any.whl", hash = "sha256:55b3ad1f378abd126232c166192270ac0a3ef615dec10b66c961ed2967be1df6"}, ] [package.dependencies] @@ -1827,7 +1928,7 @@ sphinx = ">=4" name = "sphinxcontrib-applehelp" version = "1.0.4" description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "sphinxcontrib-applehelp-1.0.4.tar.gz", hash = "sha256:828f867945bbe39817c210a1abfd1bc4895c8b73fcaade56d45357a348a07d7e"}, @@ -1842,7 +1943,7 @@ test = ["pytest"] name = "sphinxcontrib-devhelp" version = "1.0.2" description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp document." 
-optional = false +optional = true python-versions = ">=3.5" files = [ {file = "sphinxcontrib-devhelp-1.0.2.tar.gz", hash = "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4"}, @@ -1857,7 +1958,7 @@ test = ["pytest"] name = "sphinxcontrib-htmlhelp" version = "2.0.1" description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "sphinxcontrib-htmlhelp-2.0.1.tar.gz", hash = "sha256:0cbdd302815330058422b98a113195c9249825d681e18f11e8b1f78a2f11efff"}, @@ -1872,7 +1973,7 @@ test = ["html5lib", "pytest"] name = "sphinxcontrib-jsmath" version = "1.0.1" description = "A sphinx extension which renders display math in HTML via JavaScript" -optional = false +optional = true python-versions = ">=3.5" files = [ {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"}, @@ -1886,7 +1987,7 @@ test = ["flake8", "mypy", "pytest"] name = "sphinxcontrib-qthelp" version = "1.0.3" description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp document." -optional = false +optional = true python-versions = ">=3.5" files = [ {file = "sphinxcontrib-qthelp-1.0.3.tar.gz", hash = "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72"}, @@ -1901,7 +2002,7 @@ test = ["pytest"] name = "sphinxcontrib-serializinghtml" version = "1.1.5" description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)." -optional = false +optional = true python-versions = ">=3.5" files = [ {file = "sphinxcontrib-serializinghtml-1.1.5.tar.gz", hash = "sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"}, @@ -1972,7 +2073,7 @@ files = [ name = "tinycss2" version = "1.2.1" description = "A tiny CSS parser" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "tinycss2-1.2.1-py3-none-any.whl", hash = "sha256:2b80a96d41e7c3914b8cda8bc7f705a4d9c49275616e886103dd839dfc847847"}, @@ -2012,7 +2113,7 @@ files = [ name = "tornado" version = "6.3.2" description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
-optional = false +optional = true python-versions = ">= 3.8" files = [ {file = "tornado-6.3.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:c367ab6c0393d71171123ca5515c61ff62fe09024fa6bf299cd1339dc9456829"}, @@ -2052,7 +2153,7 @@ telegram = ["requests"] name = "traitlets" version = "5.9.0" description = "Traitlets Python configuration system" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "traitlets-5.9.0-py3-none-any.whl", hash = "sha256:9e6ec080259b9a5940c797d58b613b5e31441c2257b87c2e795c5228ae80d2d8"}, @@ -2067,7 +2168,7 @@ test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"] name = "typing-extensions" version = "4.7.1" description = "Backported and Experimental Type Hints for Python 3.7+" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, @@ -2085,6 +2186,23 @@ files = [ {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"}, ] +[[package]] +name = "tzlocal" +version = "5.1" +description = "tzinfo object for the local timezone" +optional = true +python-versions = ">=3.7" +files = [ + {file = "tzlocal-5.1-py3-none-any.whl", hash = "sha256:2938498395d5f6a898ab8009555cb37a4d360913ad375d4747ef16826b03ef23"}, + {file = "tzlocal-5.1.tar.gz", hash = "sha256:a5ccb2365b295ed964e0a98ad076fe10c495591e75505d34f154d60a7f1ed722"}, +] + +[package.dependencies] +tzdata = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +devenv = ["black", "check-manifest", "flake8", "pyroma", "pytest (>=4.3)", "pytest-cov", "pytest-mock (>=3.3)", "zest.releaser"] + [[package]] name = "urllib3" version = "2.0.4" @@ -2106,7 +2224,7 @@ zstd = ["zstandard (>=0.18.0)"] name = "webencodings" version = "0.5.1" description = "Character encoding aliases for legacy web content" -optional = false +optional = true python-versions = "*" files = [ {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, @@ -2155,4 +2273,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "a52384add5c7f9d836bb6992f977d83826388c587ec0f1fd4992bed704de743c" +content-hash = "71e4691a62ab430759f19da70b074878428cdaeb31de7dc1dc438d92dc5f4ac2" diff --git a/pyproject.toml b/pyproject.toml index a5c0cd8..cfeca38 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,7 +11,7 @@ documentation = "https://xeofs.readthedocs.io/en/latest/" [tool.poetry.dependencies] python = "^3.10" -numpy = ">=1.19.2" +numpy = "~1.24" pandas = ">=1.4.1" xarray = ">=0.21.1" scikit-learn = ">=1.0.2" @@ -19,22 +19,26 @@ pooch = "^1.6.0" tqdm = "^4.64.0" dask = ">=2023.0.1" statsmodels = ">=0.14.0" +netCDF4 = "^1.5.7" +numba = "^0.57" [tool.poetry.dev-dependencies] flake8 = "^4.0.1" pytest = "^7.0.1" coverage = "^6.3.1" -netCDF4 = "^1.5.7" -sphinx-gallery = "^0" -sphinx-design = "^0" -nbsphinx = "^0" -sphinx-copybutton = "^0" -pydata-sphinx-theme = "^0" [tool.poetry.group.dev.dependencies] black = "~23.7.0" +[tool.poetry.group.docs.dependencies] +rpy2 = {version = ">=3.5", optional = true} +sphinx-gallery = {version = "^0", optional = true} +sphinx-design = {version = "^0", optional = true} +sphinx-copybutton = {version = "^0", optional = true} +nbsphinx = {version = "^0", optional = true} 
+pydata-sphinx-theme = {version = "^0", optional = true} + [build-system] requires = ["setuptools", "poetry-core>=1.0.0"] build-backend = "poetry.core.masonry.api" diff --git a/tests/models/test_gwpca.py b/tests/models/test_gwpca.py new file mode 100644 index 0000000..c3e87d5 --- /dev/null +++ b/tests/models/test_gwpca.py @@ -0,0 +1,55 @@ +import pytest +import xeofs as xe + +from ..utilities import assert_expected_dims, data_is_dask, data_has_multiindex + +# ============================================================================= +# GENERALLY VALID TEST CASES +# ============================================================================= +N_ARRAYS = [1, 2] +N_SAMPLE_DIMS = [1, 2] +N_FEATURE_DIMS = [1, 2] +INDEX_POLICY = ["index"] +NAN_POLICY = ["no_nan", "fulldim"] +DASK_POLICY = ["no_dask"] +SEED = [0] + +VALID_TEST_DATA = [ + (na, ns, nf, index, nan, dask) + for na in N_ARRAYS + for ns in N_SAMPLE_DIMS + for nf in N_FEATURE_DIMS + for index in INDEX_POLICY + for nan in NAN_POLICY + for dask in DASK_POLICY +] + + +# TESTS +# ============================================================================= +@pytest.mark.parametrize( + "kernel", + [("bisquare"), ("gaussian"), ("exponential")], +) +def test_fit(mock_data_array, kernel): + gwpca = xe.models.GWPCA( + n_modes=2, metric="haversine", kernel=kernel, bandwidth=5000 + ) + gwpca.fit(mock_data_array, dim=("lat", "lon")) + comps = gwpca.components() + llwc = gwpca.largest_locally_weighted_components() + + +@pytest.mark.parametrize( + "metric, kernel, bandwidth", + [ + ("haversine", "invalid_kernel", 5000), + ("invalid_metric", "gaussian", 5000), + ("haversine", "exponential", 0), + ], +) +def test_fit_invalid(mock_data_array, metric, kernel, bandwidth): + with pytest.raises(ValueError): + gwpca = xe.models.GWPCA( + n_modes=2, metric=metric, kernel=kernel, bandwidth=bandwidth + ) diff --git a/xeofs/models/__init__.py b/xeofs/models/__init__.py index c1a39ab..a45ac7d 100644 --- a/xeofs/models/__init__.py +++ b/xeofs/models/__init__.py @@ -1,6 +1,7 @@ from .eof import EOF, ComplexEOF from .mca import MCA, ComplexMCA from .opa import OPA +from .gwpca import GWPCA from .rotator_factory import RotatorFactory from .eof_rotator import EOFRotator, ComplexEOFRotator from .mca_rotator import MCARotator, ComplexMCARotator @@ -10,13 +11,14 @@ __all__ = [ "EOF", "ComplexEOF", - "MCA", - "ComplexMCA", - "OPA", - "RotatorFactory", "EOFRotator", "ComplexEOFRotator", + "OPA", + "GWPCA", + "MCA", + "ComplexMCA", "MCARotator", "ComplexMCARotator", "CCA", + "RotatorFactory", ] diff --git a/xeofs/models/_base_model.py b/xeofs/models/_base_model.py index be266ee..22ae5d4 100644 --- a/xeofs/models/_base_model.py +++ b/xeofs/models/_base_model.py @@ -60,6 +60,7 @@ def __init__( ): self.sample_name = sample_name self.feature_name = feature_name + self.n_modes = n_modes # Define model parameters self._params = { "n_modes": n_modes, diff --git a/xeofs/models/gwpca.py b/xeofs/models/gwpca.py new file mode 100644 index 0000000..4b1e2ac --- /dev/null +++ b/xeofs/models/gwpca.py @@ -0,0 +1,432 @@ +from typing import Self, List, Sequence, Hashable, Optional, Callable + +from sklearn.utils.extmath import randomized_svd + +from xeofs.utils.data_types import DataArray +from xeofs.utils.data_types import Data +from ._base_model import _BaseModel +from ..utils.sanity_checks import validate_input_type +from ..utils.xarray_utils import convert_to_dim_type +from ..utils.constants import ( + VALID_CARTESIAN_X_NAMES, + VALID_CARTESIAN_Y_NAMES, + VALID_LATITUDE_NAMES, + 
VALID_LONGITUDE_NAMES,
+)
+from .eof import EOF
+import numpy as np
+import xarray as xr
+
+from tqdm import trange
+import numba
+from numba import prange
+from ..utils.distance_metrics import distance_nb, VALID_METRICS
+from ..utils.kernels import kernel_weights_nb, VALID_KERNELS
+
+
+class GWPCA(_BaseModel):
+    """Geographically weighted PCA (GWPCA).
+
+    GWPCA [1]_ uses a geographically weighted approach to perform PCA for
+    each observation in the dataset based on its local neighbors.
+
+    The neighbors for each observation are determined based on the provided
+    bandwidth and metric. Each neighbor is weighted based on its distance from
+    the observation using the provided kernel function.
+
+    Parameters
+    ----------
+    n_modes: int
+        Number of modes to calculate.
+    bandwidth: float
+        Bandwidth of the kernel function. Must be > 0.
+    metric: str, default="haversine"
+        Distance metric to use. Great circle distance (`haversine`) is always expressed in kilometers.
+        All other distance metrics are reported in the unit of the input data.
+        See scipy.spatial.distance.cdist for a list of available metrics.
+    kernel: str, default="bisquare"
+        Kernel function to use. Must be one of ['bisquare', 'gaussian', 'exponential'].
+    center: bool, default=True
+        If True, the data is centered by subtracting the mean (feature-wise).
+    standardize: bool, default=False
+        If True, the data is divided by the standard deviation (feature-wise).
+    use_coslat: bool, default=False
+        If True, the data is weighted by the square root of cosine of latitudes.
+    sample_name: str, default="sample"
+        Name of the sample dimension.
+    feature_name: str, default="feature"
+        Name of the feature dimension.
+
+    Attributes
+    ----------
+    bandwidth: float
+        Bandwidth of the kernel function.
+    metric: str
+        Distance metric to use.
+    kernel: str
+        Kernel function to use.
+
+    Methods
+    -------
+    fit(X) : Fit the model with input data.
+
+    explained_variance() : Return the explained variance of the local components.
+
+    explained_variance_ratio() : Return the explained variance ratio of the local components.
+
+    largest_locally_weighted_components() : Return the largest locally weighted components.
+
+
+    Notes
+    -----
+    GWPCA is computationally expensive since it performs PCA for each sample. This implementation leverages
+    `numba` to speed up the computation on CPUs. However, for moderate to large datasets, this won't be sufficient.
+    Currently, GPU support is not implemented. If your dataset is too large to be processed on a CPU, consider
+    using the R package `GWmodel` [2]_, which provides a GPU implementation of GWPCA.
+
+    References
+    ----------
+    .. [1] Harris, P., Brunsdon, C. & Charlton, M. Geographically weighted principal components analysis. International Journal of Geographical Information Science 25, 1717–1736 (2011).
+    .. [2] https://cran.r-project.org/web/packages/GWmodel/index.html
+
+
+    """
+
+    def __init__(
+        self,
+        n_modes: int,
+        bandwidth: float,
+        metric: str = "haversine",
+        kernel: str = "bisquare",
+        center: bool = True,
+        standardize: bool = False,
+        use_coslat: bool = False,
+        sample_name: str = "sample",
+        feature_name: str = "feature",
+    ):
+        super().__init__(
+            n_modes,
+            center=center,
+            standardize=standardize,
+            use_coslat=use_coslat,
+            sample_name=sample_name,
+            feature_name=feature_name,
+        )
+
+        self.attrs.update({"model": "GWPCA"})
+
+        if kernel not in VALID_KERNELS:
+            raise ValueError(
+                f"Invalid kernel: {kernel}. Must be one of {VALID_KERNELS}."
+            )
+
+        if metric not in VALID_METRICS:
+            raise ValueError(
+                f"Invalid metric: {metric}. Must be one of {VALID_METRICS}."
+            )
+
+        if bandwidth <= 0:
+            raise ValueError(f"Invalid bandwidth: {bandwidth}. Must be > 0.")
+
+        self.bandwidth = bandwidth
+        self.metric = metric
+        self.kernel = kernel
+
+    def _fit_algorithm(self, X: DataArray) -> Self:
+        # Load Dask-backed data into memory; GWPCA does not support lazy evaluation yet
+        if not isinstance(X.data, np.ndarray):
+            print(
+                "Warning: GWPCA currently does not support Dask arrays. Data is being loaded into memory."
+            )
+            X = X.compute()
+        # 1. Get sample coordinates
+        valid_x_names = VALID_CARTESIAN_X_NAMES + VALID_LONGITUDE_NAMES
+        valid_y_names = VALID_CARTESIAN_Y_NAMES + VALID_LATITUDE_NAMES
+        n_sample_dims = len(self.sample_dims)
+        if n_sample_dims == 1:
+            indexes = self.preprocessor.preconverter.transformers[0].original_indexes
+            sample_dims = self.preprocessor.renamer.transformers[0].sample_dims_after
+            xy = None
+            for dim in sample_dims:
+                keys = [k for k in indexes[dim].coords.keys()]
+                x_found = any([k.lower() in valid_x_names for k in keys])
+                y_found = any([k.lower() in valid_y_names for k in keys])
+                if x_found and y_found:
+                    xy = np.asarray([*indexes[dim].values])
+                    break
+            if xy is None:
+                raise ValueError("Cannot find sample coordinates.")
+        elif n_sample_dims == 2:
+            indexes = self.preprocessor.postconverter.transformers[0].original_indexes
+            xy = np.asarray([*indexes[self.sample_name].values])
+
+        else:
+            raise ValueError(
+                f"GWPCA requires number of sample dimensions to be <= 2, but got {n_sample_dims}."
+            )
+
+        # 2. Remove NaN samples from sample indexes
+        is_no_nan_sample = self.preprocessor.sanitizer.transformers[0].is_valid_sample
+        xy = xr.DataArray(
+            xy,
+            dims=[self.sample_name, "xy"],
+            coords={
+                self.sample_name: is_no_nan_sample[self.sample_name],
+                "xy": ["x", "y"],
+            },
+            name="index",
+        )
+
+        xy = xy[is_no_nan_sample]
+
+        # Iterate over all samples
+        kwargs = {
+            "n_modes": self.n_modes,
+            "metric": self.metric,
+            "kernel": self.kernel,
+            "bandwidth": self.bandwidth,
+        }
+        components, exp_var, tot_var = xr.apply_ufunc(
+            _local_pcas,
+            X,
+            xy,
+            input_core_dims=[
+                [self.sample_name, self.feature_name],
+                [self.sample_name, "xy"],
+            ],
+            output_core_dims=[
+                [self.sample_name, self.feature_name, "mode"],
+                [self.sample_name, "mode"],
+                [self.sample_name],
+            ],
+            kwargs=kwargs,
+            dask="forbidden",
+        )
+        components = components.assign_coords(
+            {
+                self.sample_name: X[self.sample_name],
+                self.feature_name: X[self.feature_name],
+                "mode": np.arange(1, self.n_modes + 1),
+            }
+        )
+        exp_var = exp_var.assign_coords(
+            {
+                self.sample_name: X[self.sample_name],
+                "mode": np.arange(1, self.n_modes + 1),
+            }
+        )
+        tot_var = tot_var.assign_coords({self.sample_name: X[self.sample_name]})
+
+        exp_var_ratio = exp_var / tot_var
+
+        # self.data.add(X, "input_data")
+        self.data.add(components, "components")
+        self.data.add(exp_var, "explained_variance")
+        self.data.add(exp_var_ratio, "explained_variance_ratio")
+
+        self.data.set_attrs(self.attrs)
+
+        return self
+
+    def explained_variance(self):
+        expvar = self.data["explained_variance"]
+        return self.preprocessor.inverse_transform_scores(expvar)
+
+    def explained_variance_ratio(self):
+        expvar = self.data["explained_variance_ratio"]
+        return self.preprocessor.inverse_transform_scores(expvar)
+
+    def largest_locally_weighted_components(self):
+        comps = self.data["components"]
+        idx_max = abs(comps).argmax(self.feature_name)
+        input_features = self.preprocessor.stacker.transformers[0].coords_out["feature"]
+        llwc = 
input_features[idx_max].drop_vars(self.feature_name) + llwc.name = "largest_locally_weighted_components" + return self.preprocessor.inverse_transform_scores(llwc) + + def _transform_algorithm(self, data: DataArray) -> DataArray: + raise NotImplementedError("GWPCA does not support transform() yet.") + + def _inverse_transform_algorithm(self, data: DataArray) -> DataArray: + raise NotImplementedError("GWPCA does not support inverse_transform() yet.") + + +# Additional utility functions for local PCA +# ============================================================================= + + +@numba.njit(fastmath=True, parallel=True) +def _local_pcas(X, xy, n_modes, metric, kernel, bandwidth): + """Perform local PCA on each sample. + + Parameters + ---------- + X: ndarray + Input data with shape (n_samples, n_features) + xy: ndarray + Sample coordinates with shape (n_samples, 2) + n_modes: int + Number of modes to calculate. + metric: str + Distance metric to use. Great circle distance (`haversine`) is always expressed in kilometers. + All other distance metrics are reported in the unit of the input data. + See scipy.spatial.distance.cdist for a list of available metrics. + kernel: str + Kernel function to use. Must be one of ['bisquare', 'gaussian', 'exponential']. + bandwidth: float + Bandwidth of the kernel function. + + Returns + ------- + ndarray + Array of local components with shape (n_samples, n_features, n_modes) + ndarray + Array of local explained variance with shape (n_samples, n_modes) + ndarray + Array of total variance with shape (n_samples,) + + """ + n_samples = X.shape[0] + n_features = X.shape[1] + Vs = np.empty((n_samples, n_features, n_modes)) + exp_var = np.empty((n_samples, n_modes)) + tot_var = np.empty(n_samples) + for i in prange(n_samples): + dist = distance_nb(xy, xy[i], metric=metric) + weights = kernel_weights_nb(dist, bandwidth, kernel) + valid_data = weights > 0 + + weights = weights[valid_data] + x = X[valid_data] + + wmean = _wmean_axis0(x, weights) + x -= wmean + + sqrt_weights = np.sqrt(weights) + x = _weigh_columns(x, sqrt_weights) + + Ui, si, ViT = np.linalg.svd(x, full_matrices=False) + # Renormalize singular values + si = si**2 / weights.sum() + ti = si.sum() + + si = si[:n_modes] + ViT = ViT[:n_modes] + Vi = ViT.T + + Vs[i] = Vi + exp_var[i, : len(si)] = si + tot_var[i] = ti + + return Vs, exp_var, tot_var + + +@numba.njit(fastmath=True) +def _wmean_axis0(X, weights): + """Compute weighted mean along axis 0. + + Numba version of np.average. Note that np.average is supported by Numba, + but is restricted to `X` and `weights` having the same shape. + """ + wmean = np.empty(X.shape[1]) + for i in prange(X.shape[1]): + wmean[i] = np.average(X[:, i], weights=weights) + return wmean + + +@numba.njit(fastmath=True) +def _weigh_columns(x, weights): + """Weigh columns of x by weights. + + Numba version of broadcasting. 
+
+    Parameters
+    ----------
+    x: ndarray
+        Input data with shape (n_samples, n_features)
+    weights: ndarray
+        Weights with shape (n_samples,)
+
+    Returns
+    -------
+    x_weighted: ndarray
+        Weighted data with shape (n_samples, n_features)
+    """
+    x_weighted = np.zeros_like(x)
+    for i in range(x.shape[1]):
+        x_weighted[:, i] = x[:, i] * weights
+    return x_weighted
+
+
+@numba.guvectorize(
+    [
+        (
+            numba.float32[:, :],
+            numba.float32[:, :],
+            numba.float32[:],
+            numba.int32[:],
+            numba.int32,
+            numba.float32,
+            numba.int32,
+            numba.float32[:, :],
+            numba.float32[:],
+            numba.float32[:],  # totvar: guvectorize outputs must be arrays, not scalars
+        )
+    ],
+    # In order to specify the output dimension which has not been defined in the input dimensions
+    # one has to use a dummy variable (see Numba #2797 https://github.com/numba/numba/issues/2797)
+    "(n,m),(n,o),(o),(n_out),(),(),()->(m,n_out),(n_out),()",
+)
+def local_pca_vectorized(
+    data, XY, xy, n_out, metric, bandwidth, kernel, comps, expvar, totvar
+):
+    """Perform local PCA.
+
+    Numba vectorized version of `_local_pcas`.
+
+    Parameters
+    ----------
+    data: ndarray
+        Input data with shape (n_samples, n_features)
+    XY: ndarray
+        Sample coordinates with shape (n_samples, 2)
+    xy: ndarray
+        Coordinates of the sample to perform PCA on with shape (2,)
+    n_out: ndarray
+        Number of modes to calculate. (see comment above; workaround for Numba #2797)
+    metric: int
+        Numba only accepts int/floats; so metric str has to be converted first e.g. by a simple dictionary (not implemented yet)
+        see Numba #4404 (https://github.com/numba/numba/issues/4404)
+    bandwidth: float
+        Bandwidth of the kernel function.
+    kernel: int
+        Numba only accepts int/floats; so kernel str has to be converted first e.g. by a simple dictionary (not implemented yet)
+        see Numba #4404 (https://github.com/numba/numba/issues/4404)
+    comps: ndarray
+        Array of local components with shape (n_features, n_modes)
+    expvar: ndarray
+        Array of local explained variance with shape (n_modes)
+    totvar: ndarray
+        Array of total variance with shape (1)
+
+
+    """
+    distance = distance_nb(XY, xy, metric=metric)
+    weights = kernel_weights_nb(distance, bandwidth, kernel)
+    is_positive_weight = weights > 0
+    X = data[is_positive_weight]
+    weights = weights[is_positive_weight]
+
+    wmean = _wmean_axis0(X, weights)
+    X -= wmean
+
+    sqrt_weights = np.sqrt(weights)
+    X = _weigh_columns(X, sqrt_weights)
+
+    U, s, Vt = np.linalg.svd(X, full_matrices=False)
+    Vt = Vt[: n_out.shape[0], :]
+    lbda = s**2 / weights.sum()
+    for i in range(n_out.shape[0]):
+        expvar[i] = lbda[i]
+        comps[:, i] = Vt[i, :]
+    # Write into the output buffer; a plain `totvar = ...` would only rebind the
+    # local name and leave the output uninitialized
+    totvar[0] = lbda.sum()
diff --git a/xeofs/preprocessing/stacker.py b/xeofs/preprocessing/stacker.py
index e0ef1e9..975d450 100644
--- a/xeofs/preprocessing/stacker.py
+++ b/xeofs/preprocessing/stacker.py
@@ -386,6 +386,10 @@ def _validate_dimension_names(self, sample_dims, feature_dims):
             raise ValueError(
                 f"Name of feature dimension ({self.feature_name}) is already present in data. Please use another name."
             )
+        else:
+            raise ValueError(
+                "Datasets without feature dimension are currently not supported. Please convert your Dataset to a DataArray first, e.g. by using `to_array()`."
+            )
 
     def _stack(self, data: DataSet, sample_dims, feature_dims) -> DataArray:
         """Reshape a Dataset to 2D.
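The conversion suggested by the new error message above is a one-liner. A minimal
sketch, using a hypothetical two-variable Dataset that is not part of this patch:

    import numpy as np
    import xarray as xr

    # Hypothetical Dataset whose data variables share only a sample dimension
    ds = xr.Dataset(
        {
            "t2m": ("time", np.random.rand(10)),
            "slp": ("time", np.random.rand(10)),
        }
    )
    # Stack the data variables into a new "variable" dimension
    da = ds.to_array(dim="variable")  # DataArray with dims ("variable", "time")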
diff --git a/xeofs/utils/constants.py b/xeofs/utils/constants.py
index a09a3c3..aab85e4 100644
--- a/xeofs/utils/constants.py
+++ b/xeofs/utils/constants.py
@@ -10,6 +10,9 @@
     "LAT",
 ]
 
+VALID_LONGITUDE_NAMES = ["lon", "lons", "longitude", "longitudes"]
+VALID_CARTESIAN_X_NAMES = ["x", "x_coord"]
+VALID_CARTESIAN_Y_NAMES = ["y", "y_coord"]
 
 MULTIPLE_TESTS = [
     "bonferroni",
@@ -23,3 +26,6 @@
     "fdr_tsbh",
     "fdr_tsbky",
 ]
+
+
+AVG_EARTH_RADIUS = 6371.0  # in km
diff --git a/xeofs/utils/distance_metrics.py b/xeofs/utils/distance_metrics.py
new file mode 100644
index 0000000..1b80009
--- /dev/null
+++ b/xeofs/utils/distance_metrics.py
@@ -0,0 +1,134 @@
+import numpy as np
+import numba
+from numba import prange
+from scipy.spatial.distance import cdist
+
+from .constants import AVG_EARTH_RADIUS
+
+VALID_METRICS = ["euclidean", "haversine"]
+
+
+def distance_matrix_bc(A, B, metric="haversine"):
+    """Compute a distance matrix between two arrays using broadcasting.
+
+    Parameters
+    ----------
+    A: 2D ndarray
+        Array of longitudes and latitudes with shape (N, 2)
+    B: 2D ndarray
+        Array of longitudes and latitudes with shape (M, 2)
+    metric: str
+        Distance metric to use. Great circle distance (`haversine`) is always expressed in kilometers.
+        All other distance metrics are reported in the unit of the input data.
+        See scipy.spatial.distance.cdist for a list of available metrics.
+
+    Returns
+    -------
+    distance: 2D ndarray
+        Distance matrix with shape (N, M)
+
+
+    """
+    if metric == "haversine":
+        return _haversine_distance_bc(A, B)
+    else:
+        return cdist(XA=A, XB=B, metric=metric)
+
+
+def _haversine_distance_bc(lonlats1, lonlats2):
+    """Compute the great circle distance matrix between two arrays.
+
+    This implementation uses numpy broadcasting.
+
+    Parameters
+    ----------
+    lonlats1: 2D ndarray
+        Array of longitudes and latitudes with shape (N, 2)
+    lonlats2: 2D ndarray
+        Array of longitudes and latitudes with shape (M, 2)
+
+    Returns
+    -------
+    distance: 2D ndarray
+        Great circle distance matrix with shape (N, M) in kilometers
+
+    """
+    # Convert to radians
+    lonlats1 = np.radians(lonlats1)
+    lonlats2 = np.radians(lonlats2)
+
+    # Extract longitudes and latitudes
+    lon1, lat1 = lonlats1[:, 0], lonlats1[:, 1]
+    lon2, lat2 = lonlats2[:, 0], lonlats2[:, 1]
+
+    # Compute differences in longitudes and latitudes
+    dlon = lon2 - lon1[:, np.newaxis]
+    dlat = lat2 - lat1[:, np.newaxis]
+
+    # Haversine formula
+    a = (
+        np.sin(dlat / 2) ** 2
+        + np.cos(lat1)[..., None] * np.cos(lat2) * np.sin(dlon / 2) ** 2
+    )
+    c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
+    distance = AVG_EARTH_RADIUS * c
+
+    return distance
+
+
+@numba.njit(fastmath=True)
+def distance_nb(A, b, metric="euclidean"):
+    if metric == "euclidean":
+        return _euclidian_distance_nb(A, b)
+    elif metric == "haversine":
+        return _haversine_distance_nb(A, b)
+    else:
+        raise ValueError(
+            f"Invalid metric: {metric}. Must be one of ['euclidean', 'haversine']."
+        )
+
+
+@numba.njit(fastmath=True)
+def _euclidian_distance_nb(A, b):
+    """Compute the Euclidean distance between two arrays.
+
+    This implementation uses numba.
+
+    Parameters
+    ----------
+    A: 2D array
+        Array of shape (N, P)
+    b: 1D array
+        Array of shape (P,)
+
+    Returns
+    -------
+    distance: 1D array
+        Distances with shape (N,)
+
+    """
+    dist = np.zeros(A.shape[0])
+    for r in prange(A.shape[0]):
+        d = 0
+        for c in range(A.shape[1]):
+            d += (b[c] - A[r, c]) ** 2
+        dist[r] = d
+    return np.sqrt(dist)
+
+
+@numba.njit(fastmath=True)
+def _haversine_distance_nb(A, b):
+    # Convert to radians
+    A = np.radians(A)
+    b = np.radians(b)
+
+    # Compute differences in longitudes and latitudes
+    dlon = b[0] - A[:, 0]
+    dlat = b[1] - A[:, 1]
+
+    # Haversine formula
+    a = np.sin(dlat / 2) ** 2 + np.cos(A[:, 1]) * np.cos(b[1]) * np.sin(dlon / 2) ** 2
+    c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
+    distance = AVG_EARTH_RADIUS * c
+
+    return distance
diff --git a/xeofs/utils/kernels.py b/xeofs/utils/kernels.py
new file mode 100644
index 0000000..a01b5a3
--- /dev/null
+++ b/xeofs/utils/kernels.py
@@ -0,0 +1,34 @@
+import numpy as np
+import numba
+
+VALID_KERNELS = ["bisquare", "gaussian", "exponential"]
+
+
+@numba.njit(fastmath=True)
+def kernel_weights_nb(distance, bandwidth, kernel):
+    if kernel == "bisquare":
+        return _bisquare_nb(distance, bandwidth)
+    elif kernel == "gaussian":
+        return _gaussian_nb(distance, bandwidth)
+    elif kernel == "exponential":
+        return _exponential_nb(distance, bandwidth)
+    else:
+        raise ValueError(
+            f"Invalid kernel: {kernel}. Must be one of ['bisquare', 'gaussian', 'exponential']."
+        )
+
+
+@numba.njit(fastmath=True)
+def _bisquare_nb(distance, bandwidth):
+    weights = (1 - (distance / bandwidth) ** 2) ** 2
+    return np.where(distance <= bandwidth, weights, 0)
+
+
+@numba.njit(fastmath=True)
+def _gaussian_nb(distance, bandwidth):
+    return np.exp(-0.5 * (distance / bandwidth) ** 2)
+
+
+@numba.njit(fastmath=True)
+def _exponential_nb(distance, bandwidth):
+    return np.exp(-0.5 * (distance / bandwidth))

From f0dcb3411f36d8aac29a7b781556afd31a36a6b6 Mon Sep 17 00:00:00 2001
From: Niclas Rieger
Date: Sun, 22 Oct 2023 14:00:28 +0200
Subject: [PATCH 32/43] feat: parameter normalized scores

Returned scores were always (L2) normalized, which can lead to confusion
when compared to the scores of other packages like sklearn or eofs. Now
users can decide whether to return normalized or "raw" scores.
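A usage sketch of the new parameter (the synthetic data is for illustration
only; the method signatures follow the diff below):

    import numpy as np
    import xarray as xr
    import xeofs as xe

    data = xr.DataArray(np.random.rand(100, 20, 30), dims=("time", "lat", "lon"))

    model = xe.models.EOF(n_modes=5)
    model.fit(data, dim="time")

    scores_norm = model.scores()                 # default: scores normalized to unit L2 norm
    scores_raw = model.scores(normalized=False)  # raw scores, scaled by the singular values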
--- tests/models/test_eof.py | 4 ++- tests/models/test_orthogonality.py | 8 +++--- xeofs/models/_base_model.py | 33 +++++++++++++++++---- xeofs/models/eof.py | 46 ++++++++++++++++++------------ xeofs/models/eof_rotator.py | 22 ++++++++++++-- xeofs/models/opa.py | 18 +++++++++--- xeofs/validation/bootstrapper.py | 6 ++-- 7 files changed, 98 insertions(+), 39 deletions(-) diff --git a/tests/models/test_eof.py b/tests/models/test_eof.py index 1630fa6..16d4ef2 100644 --- a/tests/models/test_eof.py +++ b/tests/models/test_eof.py @@ -381,7 +381,9 @@ def test_transform(dim, mock_data_array): assert isinstance(projections, xr.DataArray), "Projection is not a DataArray" # Check that the projection has the right name - assert projections.name == "scores", "Projection has wrong name" + assert projections.name == "scores", "Projection has wrong name: {}".format( + projections.name + ) # Check that the projection's data is the same as the scores np.testing.assert_allclose( diff --git a/tests/models/test_orthogonality.py b/tests/models/test_orthogonality.py index 93d44e6..0fe3296 100644 --- a/tests/models/test_orthogonality.py +++ b/tests/models/test_orthogonality.py @@ -41,7 +41,7 @@ def test_eof_scores(dim, use_coslat, mock_data_array): """Scores are orthogonal""" model = EOF(n_modes=5, standardize=True, use_coslat=use_coslat) model.fit(mock_data_array, dim=dim) - U = model.data["scores"].values + U = model.data["scores"].values / model.data["norms"].values assert np.allclose( U.T @ U, np.eye(U.shape[1]), atol=1e-5 ), "Scores are not orthogonal" @@ -78,7 +78,7 @@ def test_ceof_scores(dim, use_coslat, mock_data_array): """Scores are unitary""" model = ComplexEOF(n_modes=5, standardize=True, use_coslat=use_coslat) model.fit(mock_data_array, dim=dim) - U = model.data["scores"].values + U = model.data["scores"].values / model.data["norms"].values assert np.allclose( U.conj().T @ U, np.eye(U.shape[1]), atol=1e-5 ), "Scores are not unitary" @@ -128,7 +128,7 @@ def test_reof_scores(dim, use_coslat, power, mock_data_array): model.fit(mock_data_array, dim=dim) rot = EOFRotator(n_modes=5, power=power) rot.fit(model) - U = rot.data["scores"].values + U = rot.data["scores"].values / rot.data["norms"].values K = U.conj().T @ U if power == 1: # Varimax rotation does guarantee orthogonality @@ -183,7 +183,7 @@ def test_creof_scores(dim, use_coslat, power, mock_data_array): model.fit(mock_data_array, dim=dim) rot = ComplexEOFRotator(n_modes=5, power=power) rot.fit(model) - U = rot.data["scores"].values + U = rot.data["scores"].values / rot.data["norms"].values K = U.conj().T @ U if power == 1: # Varimax rotation does guarantee unitarity diff --git a/xeofs/models/_base_model.py b/xeofs/models/_base_model.py index 22ae5d4..cef12a9 100644 --- a/xeofs/models/_base_model.py +++ b/xeofs/models/_base_model.py @@ -23,6 +23,8 @@ # Ignore warnings from numpy casting with additional coordinates warnings.filterwarnings("ignore", message=r"^invalid value encountered in cast*") +xr.set_options(keep_attrs=True) + class _BaseModel(ABC): """ @@ -144,13 +146,15 @@ def _fit_algorithm(self, data: DataArray) -> Self: """ raise NotImplementedError - def transform(self, data: List[Data] | Data) -> DataArray: + def transform(self, data: List[Data] | Data, normalized=True) -> DataArray: """Project data onto the components. Parameters ---------- - data: DataObject + data: DataArray | Dataset | List[DataArray] Data to be transformed. + normalized: bool, default=True + Whether to normalize the scores by the L2 norm. 
Returns ------- @@ -162,6 +166,9 @@ def transform(self, data: List[Data] | Data) -> DataArray: data2D = self.preprocessor.transform(data) data2D = self._transform_algorithm(data2D) + if normalized: + data2D = data2D / self.data["norms"] + data2D.name = "scores" return self.preprocessor.inverse_transform_scores(data2D) @abstractmethod @@ -186,6 +193,7 @@ def fit_transform( data: List[Data] | Data, dim: Sequence[Hashable] | Hashable, weights: Optional[List[Data] | Data] = None, + **kwargs ) -> DataArray: """Fit the model to the input data and project the data onto the components. @@ -198,6 +206,8 @@ def fit_transform( will be treated as feature dimensions. weights: Optional[DataObject] Weighting factors for the input data. + **kwargs + Additional keyword arguments to pass to the transform method. Returns ------- @@ -205,7 +215,7 @@ def fit_transform( Projections of the data onto the components. """ - return self.fit(data, dim, weights).transform(data) + return self.fit(data, dim, weights).transform(data, **kwargs) def inverse_transform(self, mode) -> DataObject: """Reconstruct the original data from transformed data. @@ -254,9 +264,20 @@ def components(self) -> DataObject: components = self.data["components"] return self.preprocessor.inverse_transform_components(components) - def scores(self) -> DataArray: - """Get the scores.""" - scores = self.data["scores"] + def scores(self, normalized=True) -> DataArray: + """Get the scores. + + Parameters + ---------- + normalized: bool, default=True + Whether to normalize the scores by the L2 norm. + """ + scores = self.data["scores"].copy() + if normalized: + attrs = scores.attrs.copy() + scores = scores / self.data["norms"] + scores.attrs.update(attrs) + scores.name = "scores" return self.preprocessor.inverse_transform_scores(scores) def compute(self, verbose: bool = False): diff --git a/xeofs/models/eof.py b/xeofs/models/eof.py index 72cc899..6e4a20b 100644 --- a/xeofs/models/eof.py +++ b/xeofs/models/eof.py @@ -71,7 +71,8 @@ def _fit_algorithm(self, data: DataArray) -> Self: singular_values = decomposer.s_ components = decomposer.V_ - scores = decomposer.U_ + scores = decomposer.U_ * decomposer.s_ + scores.name = "scores" # Compute the explained variance per mode n_samples = data.coords[self.sample_name].size @@ -82,6 +83,7 @@ def _fit_algorithm(self, data: DataArray) -> Self: self.data.add(data, "input_data", allow_compute=False) self.data.add(components, "components") self.data.add(scores, "scores") + self.data.add(singular_values, "norms") self.data.add(exp_var, "explained_variance") self.data.add(total_variance, "total_variance") @@ -92,10 +94,9 @@ def _transform_algorithm(self, data: DataObject) -> DataArray: feature_name = self.preprocessor.feature_name components = self.data["components"] - singular_values = self.singular_values() # Project the data - projections = xr.dot(data, components, dims=feature_name) / singular_values + projections = xr.dot(data, components, dims=feature_name) projections.name = "scores" return projections @@ -119,9 +120,8 @@ def _inverse_transform_algorithm(self, mode) -> DataArray: """ # Reconstruct the data - svals = self.singular_values().sel(mode=mode) comps = self.data["components"].sel(mode=mode) - scores = self.data["scores"].sel(mode=mode) * svals + scores = self.data["scores"].sel(mode=mode) reconstructed_data = xr.dot(comps.conj(), scores) reconstructed_data.name = "reconstructed_data" @@ -142,24 +142,27 @@ def components(self) -> DataObject: Components of the fitted model. 
""" - components = self.data["components"] - return self.preprocessor.inverse_transform_components(components) + return super().components() - def scores(self) -> DataArray: + def scores(self, normalized=True) -> DataArray: """Return the (PC) scores. The scores in EOF anaylsis are the projection of the data matrix onto the eigenvectors of the covariance matrix (or correlation) matrix. Other names include the principal component (PC) scores or just PCs. + Parameters + ---------- + normalized : bool, default=True + Whether to normalize the scores by the L2 norm (singular values). + Returns ------- components: DataArray | Dataset | List[DataArray] Scores of the fitted model. """ - scores = self.data["scores"] - return self.preprocessor.inverse_transform_scores(scores) + return super().scores(normalized=normalized) def singular_values(self) -> DataArray: """Return the singular values of the Singular Value Decomposition. @@ -170,12 +173,7 @@ def singular_values(self) -> DataArray: Singular values obtained from the SVD. """ - n_samples = self.data["input_data"].coords[self.sample_name].size - exp_var = self.explained_variance() - svals = (exp_var * (n_samples - 1)) ** 0.5 - svals.attrs.update(exp_var.attrs) - svals.name = "singular_values" - return svals + return self.data["norms"] def explained_variance(self) -> DataArray: """Return explained variance. @@ -296,7 +294,7 @@ def _fit_algorithm(self, data: DataArray) -> Self: singular_values = decomposer.s_ components = decomposer.V_ - scores = decomposer.U_ + scores = decomposer.U_ * decomposer.s_ # Compute the explained variance per mode n_samples = data.coords[self.sample_name].size @@ -307,6 +305,7 @@ def _fit_algorithm(self, data: DataArray) -> Self: self.data.add(data, "input_data", allow_compute=False) self.data.add(components, "components") self.data.add(scores, "scores") + self.data.add(singular_values, "norms") self.data.add(exp_var, "explained_variance") self.data.add(total_variance, "total_variance") @@ -360,7 +359,7 @@ def components_phase(self) -> DataObject: comp_phase.name = "components_phase" return self.preprocessor.inverse_transform_components(comp_phase) - def scores_amplitude(self) -> DataArray: + def scores_amplitude(self, normalized=True) -> DataArray: """Return the amplitude of the (PC) scores. The amplitude of the scores are defined as @@ -371,13 +370,22 @@ def scores_amplitude(self) -> DataArray: where :math:`S_{ij}` is the :math:`i`-th entry of the :math:`j`-th score and :math:`|\\cdot|` denotes the absolute value. + Parameters + ---------- + normalized : bool, default=True + Whether to normalize the scores by the singular values. + Returns ------- scores_amplitude: DataArray | Dataset | List[DataArray] Amplitude of the scores of the fitted model. 
""" - amplitudes = abs(self.data["scores"]) + scores = self.data["scores"].copy() + if normalized: + scores = scores / self.data["norms"] + + amplitudes = abs(scores) amplitudes.name = "scores_amplitude" return self.preprocessor.inverse_transform_scores(amplitudes) diff --git a/xeofs/models/eof_rotator.py b/xeofs/models/eof_rotator.py index fbb2d52..5c7e7fe 100644 --- a/xeofs/models/eof_rotator.py +++ b/xeofs/models/eof_rotator.py @@ -134,8 +134,16 @@ def _fit_algorithm(self, model) -> Self: # Normalize loadings rot_components = rot_loadings / np.sqrt(expvar) - # Rotate scores + # Compute "pseudo" norms + n_samples = model.data["input_data"].coords[self.sample_name].size + norms = (expvar * (n_samples - 1)) ** 0.5 + norms.name = "singular_values" + + # Get unrotated, normalized scores + svals = model.data["norms"].sel(mode=slice(1, n_modes)) scores = model.data["scores"].sel(mode=slice(1, n_modes)) + scores = scores / svals + # Rotate scores RinvT = self._compute_rot_mat_inv_trans( rot_matrix, input_dims=("mode_m", "mode_n") ) @@ -147,7 +155,10 @@ def _fit_algorithm(self, model) -> Self: # Reorder according to variance scores = scores.isel(mode=idx_sort.values).assign_coords(mode=scores.mode) - # Ensure consitent signs for deterministic output + # Scale scores by "pseudo" norms + scores = scores * norms + + # Ensure consistent signs for deterministic output idx_max_value = abs(rot_loadings).argmax(self.feature_name).compute() modes_sign = xr.apply_ufunc( np.sign, rot_loadings.isel(feature=idx_max_value), dask="allowed" @@ -163,6 +174,7 @@ def _fit_algorithm(self, model) -> Self: self.data.add(model.data["input_data"], "input_data", allow_compute=False) self.data.add(rot_components, "components") self.data.add(scores, "scores") + self.data.add(norms, "norms") self.data.add(expvar, "explained_variance") self.data.add(model.data["total_variance"], "total_variance") self.data.add(idx_sort, "idx_modes_sorted") @@ -178,10 +190,11 @@ def _transform_algorithm(self, data: DataArray) -> DataArray: n_modes = self._params["n_modes"] svals = self.model.singular_values().sel(mode=slice(1, self._params["n_modes"])) + pseudo_norms = self.data["norms"] # Select the (non-rotated) singular vectors of the first dataset components = self.model.data["components"].sel(mode=slice(1, n_modes)) - # Compute non-rotated scores by project the data onto non-rotated components + # Compute non-rotated scores by projecting the data onto non-rotated components projections = xr.dot(data, components) / svals projections.name = "scores" @@ -197,6 +210,9 @@ def _transform_algorithm(self, data: DataArray) -> DataArray: mode=self.data["idx_modes_sorted"].values ).assign_coords(mode=projections.mode) + # Scale scores by "pseudo" norms + projections = projections * pseudo_norms + # Adapt the sign of the scores projections = projections * self.data["modes_sign"] diff --git a/xeofs/models/opa.py b/xeofs/models/opa.py index 382924d..f0fe69d 100644 --- a/xeofs/models/opa.py +++ b/xeofs/models/opa.py @@ -88,11 +88,10 @@ def _fit_algorithm(self, data: DataArray) -> Self: # Perform PCA as a pre-processing step pca = EOF(n_modes=self._params["n_pca_modes"], use_coslat=False) pca.fit(data, dim=sample_name) - svals = pca.singular_values() - expvar = pca.data["explained_variance"] - comps = pca.data["components"] * svals / np.sqrt(expvar) + n_samples = data.coords[sample_name].size + comps = pca.data["components"] * np.sqrt(n_samples - 1) # -> comps (feature x mode) - scores = pca.data["scores"] * np.sqrt(expvar) + scores = 
pca.data["scores"] / np.sqrt(n_samples - 1)
         # -> scores (sample x mode)
 
         # Compute the covariance matrix with zero time lag
@@ -183,11 +182,22 @@
         P = P.rename({"mode2": "mode"})  # -> (sample x mode)
         scores = scores.rename({"mode": feature_name})  # -> (sample x feature)
 
+        # Compute the norms of the scores
+        norms = xr.apply_ufunc(
+            np.linalg.norm,
+            P,
+            input_core_dims=[["sample"]],
+            vectorize=False,
+            dask="allowed",
+            kwargs={"axis": -1},
+        )
+
         # Store the results
         # NOTE: not sure if "scores" should be taken as input data here, "data" may be more correct -> to be verified
         self.data.add(name="input_data", data=scores, allow_compute=False)
         self.data.add(name="components", data=W, allow_compute=True)
         self.data.add(name="scores", data=P, allow_compute=True)
+        self.data.add(name="norms", data=norms, allow_compute=True)
         self.data.add(name="filter_patterns", data=V, allow_compute=True)
         self.data.add(name="decorrelation_time", data=lbda, allow_compute=True)
 
diff --git a/xeofs/validation/bootstrapper.py b/xeofs/validation/bootstrapper.py
index 710c126..8006793 100644
--- a/xeofs/validation/bootstrapper.py
+++ b/xeofs/validation/bootstrapper.py
@@ -79,8 +79,9 @@ def fit(self, model: EOF):
             idx_rnd = rng.choice(n_samples, n_samples, replace=True)
             bst_data = input_data.isel({sample_name: idx_rnd})
             # We need to assign the sample coordinates of the real data
-            # otherwise the transform() method will try to align the sample coordinates
-            # with the of the bootstrap data
+            # otherwise the transform() method will raise an error as it
+            # tries to align the sample coordinates
+            # with the coordinates of the bootstrapped (permuted) data
             bst_data = bst_data.assign_coords({sample_name: input_data[sample_name]})
             # Perform EOF analysis with the subsampled data
             # No scaling because we use the pre-scaled data from the model
@@ -127,6 +128,7 @@ def fit(self, model: EOF):
             )
             self.data.add(name="components", data=bst_components)
             self.data.add(name="scores", data=bst_scores)
+            self.data.add(name="norms", data=model.data["norms"])
             self.data.add(name="explained_variance", data=bst_expvar)
             self.data.add(name="total_variance", data=bst_total_variance)
 
From 2db736cdc5b860c36dbc23c43ffa04251e813533 Mon Sep 17 00:00:00 2001
From: Niclas Rieger
Date: Mon, 14 Aug 2023 23:02:00 +0200
Subject: [PATCH 33/43] feat: add Extended EOF Analysis

---
 .../1eof/images/sphx_glr_plot_eeof_001.png    | Bin 0 -> 18216 bytes
 .../1eof/images/sphx_glr_plot_eeof_002.png    | Bin 0 -> 30624 bytes
 .../1eof/images/sphx_glr_plot_eeof_003.png    | Bin 0 -> 32909 bytes
 .../images/thumb/sphx_glr_plot_eeof_thumb.png | Bin 0 -> 12289 bytes
 docs/auto_examples/1eof/index.rst             |  18 +
 docs/auto_examples/1eof/plot_eeof.ipynb       | 151 ++++
 docs/auto_examples/1eof/plot_eeof.py          |  83 +++
 docs/auto_examples/1eof/plot_eeof.py.md5      |   1 +
 docs/auto_examples/1eof/plot_eeof.rst         | 693 ++++++++++++++++++
 .../1eof/plot_eeof_codeobj.pickle             | Bin 0 -> 10010 bytes
 .../auto_examples/1eof/sg_execution_times.rst |   6 +-
 docs/auto_examples/auto_examples_jupyter.zip  | Bin 49273 -> 55009 bytes
 docs/auto_examples/auto_examples_python.zip   | Bin 29508 -> 32792 bytes
 docs/auto_examples/index.rst                  |  17 +
 examples/1eof/plot_eeof.png                   | Bin 0 -> 32860 bytes
 examples/1eof/plot_eeof.py                    |  83 +++
 tests/models/test_eeof.py                     | 439 +++++++++++
 xeofs/models/__init__.py                      |   2 +
 xeofs/models/_base_model.py                   |   2 +
 xeofs/models/eeof.py                          | 146 ++++
 xeofs/models/eof.py                           |  14 +-
 21 files changed, 1650 insertions(+), 5 deletions(-)
 create mode 100644 docs/auto_examples/1eof/images/sphx_glr_plot_eeof_001.png
 create mode 100644 docs/auto_examples/1eof/images/sphx_glr_plot_eeof_002.png
 create mode 100644 docs/auto_examples/1eof/images/sphx_glr_plot_eeof_003.png
 create mode 100644 docs/auto_examples/1eof/images/thumb/sphx_glr_plot_eeof_thumb.png
 create mode 100644 docs/auto_examples/1eof/plot_eeof.ipynb
 create mode 100644 docs/auto_examples/1eof/plot_eeof.py
 create mode 100644 docs/auto_examples/1eof/plot_eeof.py.md5
 create mode 100644 docs/auto_examples/1eof/plot_eeof.rst
 create mode 100644 docs/auto_examples/1eof/plot_eeof_codeobj.pickle
 create mode 100644 examples/1eof/plot_eeof.png
 create mode 100644 examples/1eof/plot_eeof.py
 create mode 100644 tests/models/test_eeof.py
 create mode 100644 xeofs/models/eeof.py

diff --git a/docs/auto_examples/1eof/images/sphx_glr_plot_eeof_001.png b/docs/auto_examples/1eof/images/sphx_glr_plot_eeof_001.png
new file mode 100644
index 0000000000000000000000000000000000000000..a6f4eb745cfc0f44ad79a483077ce5752c1288f4
GIT binary patch
literal 18216
[base85-encoded PNG data omitted]

literal 0
HcmV?d00001

diff --git a/docs/auto_examples/1eof/images/sphx_glr_plot_eeof_002.png b/docs/auto_examples/1eof/images/sphx_glr_plot_eeof_002.png
new file mode 100644
index 0000000000000000000000000000000000000000..e4d103707d796721e3ea89e5b79bfa43040c037e
GIT binary patch
literal 30624
[base85-encoded PNG data omitted]
zr5e0EfX`(Q2=>isZl8sOn#rps)P2Rxte1M?k;rQLJx4(ttTH8FwUeTdYe9Z$tjkWFhL{qwDC)`#d(kZ!e)t00CS1ga=Lwxl9x}VAWwlFDJES((tX|7$1}>^>>#9V5S>>? z47P|N7l;m;iKcnq4Ht{v5+oz!Z@DU5-21z*89Mbk*b4O8%%ofp^7nBdMq);~EJ51mTkT0;cT`+r4uG@80PKszU|Et8;DMTRUO zM^)z_j;K)fkk~7*7W~$H8iJ#TYJP$yBW_Q! zw-CZ28uub4(_x3$D7&hr*Ef(&cNfXw(?h0O4${B+JivJXi}Z4s|B(kYPNdZwfT#|O z%@iFF+0&iZ#S%Z4japv0*z6E&e7r^)?qOaG{dOId7V*Kjr+CHSyamvXVaqlK{=P)E zmS<({CVnN4s;Sb0b5*9x;tIPC{Pe3$UrWZiau`^tuALaBMxhASo5AV}Nh2k2{bjO-UheHhZ3W6}6AA?y!YHI`ZcD2I18e62Q5P#;u{oiKk?Cle>1jwzW~k*e+z-PSpf zSKXtC?{lIjm`rU{a7H(&Nt$fE5nn7#&*2_+wNFML%#;+z`EIVUVso&pcXYB_@1%qn zUFCHog3(l;s&JVqt#Ixx`YGoN29O)z!0s>9p|#wdzR`AGeIAO(i3(@ehvRe09 zFWGv)4r-iN{V^Fm@0|DNs@{Mm9e6v2&F#5K04xneYvAgA0lgGuX4CZW*cq;nzv;CS zYlZjn-o zl2mlf7n$#4r3dKlfFD?lX*9fd33Mww1wljg{8F8rk@?()GxG^Jm^TMrM@?6{fJNIg zFkoL7QDOH);D6;JV+YBj78g)y`QCU*rdbAgKaSyaN+0@gxK4Kbd^8IP{QE7f>N9e0 zItU|1txs{@-n!$?HC8O;PEUQllJ1sYYEs3>!zk*$%Fmrsxw`&J6(Wk4a3)3?QJOD` z)_C1sIQ^PjgpvlLS#M7T^g=U~i=F+0Prvgo4MXJPMP1Iq$O~1E%{~mj0L%w+&>yk+T*Z{?lMk_Pe=Q63g>g(L`1GyJRTpI66P2HO<%Q^^fVf*-9tY*~tO2h%c%GFSlzobMa}{J3XBUFp6jI$9-`&Hx|FDI2?%lG^NRKTTmkyb}9-2fM9zD zM}4|ym-Pf4p~^=h)+j~Q80q+wblXgA)hE{onNMIu2g@u$!6`1ezPrRFc&bdTY1^Bc zi#^siKQuWROy9b3f4yAb*!2+qP3pyuqfDQn#yuc4)vyM7Yfjce7LSgZZ(V4D*jo*K#$#f+`ZaMct_fsgBI5dQ&y8PGKR>i3XkNJ z=ukQ~(lGiK=+aq&(&B(fQ~7> z(M(~`|3D9@3x8B_nOx6}2a-6DK_ic-rY0Vo3l0(qz3byuh$U~L$9`G&=_Mhb`zK&p z{_ArDl}+$yw^5qMPtZZOac~CjLIU2#8JK39*@>8J#N&yv(~FuY)l7?TlEZ|vG)!}gj`h%x%G2o|mm&{4UuP*2QPU~Q z9~3N?8iTqCz3_~NQ@cRFW0}z~0S|1yf}SvmIzy+$6HZQ`kWTaiu*3>*(4Bw_A)nJ% zoU{Q5@{XJcg@D}w=n88Q{0{=4fs4`QRNwu$?d3KQ$P!;!S=seglGQQDPS=OHk2(R8 zaePp-oau75Rd6XxXNp0>5z-P-5IyybacEt*&^@F%LEia$Pz`x`Xvr7+VU^7KX#UmG z304LLO{po%dVvy*;l;3}u$W$}J*ZKK_f{wh zp|nS_{W}q@Z#vv^-IFrq?}urwOI%50 ziY95dsKix|vByI~V+~pEM?Gu0a*nS*K!y+R+XJmLkZ-+F!M`w440}gkmFVkRO)i59{uc#Q!>L+7Cf=z zI%^rKYT^@hx5bIlAL>$4$cd^+`|7Mt@Jllq1M=t39}?E{sh3M|0s!c8y}r505DxhO z`ZQyxk;n{?wEs%)QCu(lUSE3IH2-g2wb3Qp#eg-8epBDd<8;GLJ)g$|He6SNRKIu5 zCJZ!@3Q*J4yL=ndI)sPdbj7L(Vs)1$C_aEB0Dwud-WchLx z4?!tlpOumS;hJ}UOXq)j+Ig)iwQ_xK6ra?1O= z^i5n|#NM6$qM7Je@HiAxlfRo?msYB)ro2oUFPV#NNkj5@Nb*gvIAGH(h`b0oo--?j z6Aj?eA?O4eEjN$^yo9H??B;nAPlFn(X8Y@_4IcY@H=#yf^qm>+-m3rp$k>YP(AN*k z+$ubJlN6pGm%R`g@sXG5;Ike%IpO_cOVlMy^askronhfupMxCw!$Ny31Nj6N2^qHD zd)e<@Ki=s4x`?}6@kL$(l%JVqj|MHl?RWTGE1-o{vC$@*hh|As6tz(j)y)ZM$A9JG zF#I+1Q-Rny`H{A3^vR*(0InX_6=_ZuIx(%f((|jbDB>S|sf=m=qcAhL$!8GamikN` zN&SyppB7@fa_jD|`|$VJ*g?RuwGSU%W(+2>eUS>~+e20t&-NSp%;BNrHYqiLE?|6O zNblhJ6*R2;%5Seo-Ft`nC7MYr|2X6PU^X+c-pzMQQ4T$W0lA$1(5O>l5*{LjqPWR~%d!4&AGGZn ztvAG0qQ}Ub;oJeFZ3>bL4;o_O6p};3b0u5{Q%y5+I>jZ#qz`shjR_NvD|i4Z_ovmbOsO{VF2s0h{7-+g-M@AebS5Rh5>QV^$Ah zOcK@dO2e4--q^>GkqUL#xwIN0@|Hi4hHM*G*Lc7!6Z1 zCCv@XULMWXs5foJnHbz&`MSG<1~G-YugsK)nQ`FDb$B)+ctUuXc2T3+jqA`=%2Iln zSLlF7mJc|!Vp7Vv&4sZS7HfP+K}(@Go)N<(oBb${$G=G8qZm?@N?%~0d)@JTWLFw1AamBN(lV8}&JZ3R807oS2?L#qUG(n>8&l7bW==J&WfNEQ+COn9^u2j-{ag-pG zmUDV?;Zuq*NTUZw=r;}voJ)0{b`3vzUAgL@Y-p}Y_rkbfYzfBrDfx^LR+M~>gec+p z!GVui28L4`VO3FEMI{Sv2>45jE5}tJ#8j+T`=dxZqO(U)OGb%!qJ$kbR{9nI@$fB4 z9IMfOY~5ohr=(y0M#Ubj3*>p9sPemV`DaI*l4a0IihE%LMReYvNKjG3 zEeX$>U<@x6sFe9ttqOk2Q{ac9bIPm<)$Qc=a9}~AOz7qvhO*YFV_BVs@^_GLO2}qZ zACoU{@u+sx==lYv2qlhhaa5u-<`}0>jns)Hpe|Ee7CafoKC3>>ge6jA`c2eFM4O>F z$+1V#NuI&(z6eAKQ*~ykgSf7FWp#EkRmI9GQxC3R!y2y4qsFHd#^P^p1IH9FVb zKh)=RlUeV4q&v~Vd3PE%oM!wp+RC9@k1p%1nvSXU?5rv&AE zAA8y>FuZ#b<-!DE#m-W4m7AY?ipk4GUoC>|uaPN>tu_YT*#2JLsw0}r@52SE0s(YP zHHOMzWmuWZl#d^6Qwc>X6*hQ8(l0+QAamn;twuL&hq?nsQm+_(8#4^OHRH`9fB9eL zES|&ui?$=t}(-*McahLmQikdsX@y?Cf=Nn{cNpD_hFXW}&% z@9z0h8i1UyNc2mdJxoipy%VLU$nEF*S!s0UJe6xhOE9N-$@|UT)06!f=E=S2L)hCb 
z$OH|rGo%-TFhCGgV2Kxq%dSf>8Q?gnah(Rj%>JU1u%`ja?x1OjAAOjT#;^l2qQ60Q z#&0)SA%X3B8HTxI@kBZmGDq_wh55a1#*MjziCi53hvk%u5%&gQG{(W0I)R3dHcD;3 ze-Rv`4eJ+aUwiH&yYcc>J7om}vclXsGxGSHldsW0Got?X;v}HT7`FT545Cn)eS;V? z+loN^L77AB>M*Hllycgp=2O?&asZDCJ1dY#-qz>n>D#IHX1gIXhiFguQ37&~h~}&0 z-w9#3JK<1zu6dboc#U$yLsly`&TLq6$;QAGuM4!>k`u$wc zn#@|)^w2)P9=XA6R&|@W*mlk40~gG&QkWOm zmG9T}dDLYUP1?l5eRa>qRq+3p+P*v*%lGT|F-0gdA(;uG2qjaMJFz^UCEa#%b1a)r}t{m2{y8@zZN{n&cU3qCs1J(}e zR}))5%qdIg{!G8X68Bh@*v2iH$TsCN>fh!ayW(q^yaSLXE?N=nF`tf2F$sAhvDTAf z&h=CNt*K2Wta}2N@QrIDrng+wm+|rnv_DA!2~AY9YwqCOyxD*!HpGC)M6!mc>&Jt* zb>VD90*WVYvjx#c=a%32sHQ_Px+APm5Bu~|?3CS8N|F2!mv<-J^H6z$T~`!Md>?!6 zNZ_d$J*{N^JKS|dXn=!~dp2Qh&~%sLhIq-NkNL;j_;2NW{uE?Ngk*5GjIVHvtl*L8 zo%LuMXEiC&E8BQQ&fLm)=h(+BA8;g8>i8?S>d{uIb}2{hW1U0cx^R1aKMSu#qQ{HU ziP!M?+7iA$JMcI&P+~uysH`Tat0q%RK&-LGYX-+tXeB9p?af!_TIMHaVfs>KU8MjUm2tbBa%lk>eq+eKdq zO?h(@O>v^L0;WpzEDnyi;m%*b8$<%4M`{djeKGS2{rz!yS*>lkWLM4eYgHs28%gR) zo%EY{`U@c$L_`Z08;+ooJ;^ytYyILN{b4JrZ)Pv?$2ZogX6;_;^qtc=MzIq8iLu-A z%R1V^LN6k^==wCa>Z}rRpQAHQfPIR$D0i^<1X+Oe9&=}R5v4vKQ%7Y z$?e3Tg^wfE*iGzWQ{Iuf%W|G4jJ=(fp>q0z&(4c_^(iA3#-@Z3oO3)#pXSZcQu3kC zpQ^;{q4kwHIvlBY9*bXZv9BjY7J74ED467YOzrcLkcV3wWmIx%o@$45WG$a6yq;K| zO^^?;*{}Ds1!T?LgW3ATeCxOo`xPA1&D(gKTfk?0x2(Y_eb^(ae|Bp-c{|=@vTB-< zl-Z)cuiEwxPucHEx=vYL9}N`^KSvtP(mZ1|q=*|JQ;yxR&VJ>vqig8vYoePisJ;yB zqY$bAuiD+WS;OTUC??bg8`|FGm*)*DjHhE1o=vFEY+!Zg?Ps&g4n38X*82UJHaegU z4Mq`F<8J(`7i%y+nh^`9!Gd=lUm1!V5-FJ2`itYi%6k<*-`%dggiA9m|JG#JM&oPV zY~~=Ur_G$#UVL`k+PvCy!N$dSpJ$d}rfF--10_Ib2_Jx?|TF zA&y$d)#P+@NXa?wL#qr2HRooa-XWhak(+nDl70H&o~_^BwO>8;30tr{McLg6-%7L= zPrhV7JmMBW3GDRuEU|eIP3y+AdlgmcH_$RCUgbFVsL`c5WtQ)9?1@F|vP@7?uow;Xm4fU0kUuU9 zH;3q8_@sSZm3qHkU^=Dc#J`D>9dwzVtE+zQwl>REyF(kKG>wCK=^!qdd z948$ul?I1QR?UCn6HqFuP4!NuhhcdnKfZqo#rUnIwnlHA_$q|E-o&+FIUpE_#;kAI zvtOm9$B`n+*7|j(KT%V!p;CZ?I>XL@)-acTX)8)YoJyI}rCNR0 zsRbx$8h(;>?w|2cA5=D_npKx)TnNdzSejlO%nyWE3vkOt9Kaa?)u1gxbc=AoOH1Iy zg5jQbwvp0dV$u(M!dU*DScMR`-Kp)O5)|?fJ?3 zk$Pqy?>ZsTjvRe93JXc}i{K08jYRHYSWH+ba)*vQ{2Gl^`9@AYTS@f{VMYop6gE!R zw(Hg0ySxPnW(E&}@GHmVm6j?*=bNssZ*u70stO_R=FYIK-O5djZM)L79jb0ZCXljp zQoNGLGOXJDB&A2juD7%()rG=2-T&>k1`leJ!HdUzc}na_l4l@gCc3wntI0=s*OMou zXuXB6j7st&tvICq+>6nInf|39X>dLBHJOyenM&R^{B%F9h46^FF&P7^Z*X{BK6q>U!6BuP9KD{{ zv+|imw#Acm6AXXMZ1s=beIum>YCBRL>B@Ka8kpi-lG`uBE%`^e*Evo4LACAEx>2On zxffr`ET8J|-W@wu%OI4LOqgL6+T{-5@sMzGSlK^c!I^b&fyCxTN2WN*^5`IUx`O8m z?4$Q6gRHY?_e&M;9Iv4=#6o8zpJADVk%)MdD$IFQ?Jb$2yjAqeKLcY3T=kY#WuI9W z;~*uv*-d1f?@N^c7E}8-zY&0?!p)41_OnivIj-?YsO&4Aq(eYjwP|1Y_#jCOn`1X0 z(9&&|=6T!7coCE!#ZqKk9q=3npG8dU;`H?Ng&R+pK%>j}603g$6!vnCV|0%B*)2>LjfoyVT+NB%i@ zsm?dXZ0joo6{<+>PYpKla4G#W!XZJ2ZeqFU_CGLXWBvc_~EG^kYU6%c$q6~SAK{qT6 zipds-J4-*SpYc;)(f8BI)4kso&Rq99l7ppjEY^YCM0;Pgvn!(P^f^)cB~xWCO}<=O z{gKY`Njob~?Xj1{3 z^l0QWyTtL%-F*E|*ssqsIJvzg#YS*ifPX`3g@bu|B&J@Qc)$kxw_oLny&&?<%(M7Ua;9eCpG%3!e?(uZY z53KiaAPPNs7kh;|&~EmYhqpcZc3Xt7Zs_9z7m6#kQcve|Kh^l0GJs1SOkY7gftnmB z<;ZU@lvA{RdDM>$Z}n$syQ9+#1*aMl5P_g?)-y^?N$C%~8By^wsL04jQfle|Kh*X@ z7@kw*?6Ss zltS{DiSpfL$@#$2GwF2c!2utOs!)+G9lhyRU4Vmbs6_HsxNSVHA3Q7wHam;gqHU9m>(+vz@RTj>(?y2>92TILV@V)$`GZR#v2@TYQTHa3Nqba%N^s zK8H)3xcJrT@?>B>#l=^vedW^jSikJPCLdedUXl0~HtpT5$?RFB8WJ7Vf}QzsY}1li z=^?eGbJr9AUI65TRZ)G0;%P!sVaoF#ph;z1<@^KT8%KrDk$+_owfHsp4_@7Su6j0L|VR!V|Y@u=7 zUQm5|;Tkr;2V`LDeS!v_f53#n{Vx{Z&I2p}52cYF;BEd3*?gj+1}&GYNTTN6;r#)CjTSfCh8g(Om()e@O*&Vb@K(jTDO^-3#k zi*Cou5T5&^?Y5T~3?nNqoV$(2GRx7*-1wnqS~>R7jEWFMMc+_xn$I-9IgfT2E=>B< zL8bc%2xMazQ;({7>K(YKr2!b{=rIeeQuVW zYf%5JQI2Kc9}5Y$3g!Uc`8pew6uD?p@@!1GU}Uz@BU17)uT*C?jsgAePCI{6TQ%LR zmq7O9Huu0TtJzj<8NNM#RUEYD{>+Z57>3QCCVzty{_J{s`9SR-$rS&GcYVHj+LG`L 
zsi~Ldx6-9*>2Nbk>fe=#^%!sxO0MuSo{tIV=k@&(4J0@*NvdS*@H$A*ZcT&+XFVMA z>AH1LkNS+kr>_;u6PQ?^lv?4q(7IE%#W-wZy|^R`2VcS<%aBJ+hi|@C--qj7^RbQZ z9A3nqy-WG%jZ?>_%pHMe2Zw=!%>p5@Z3;N{t1qg>nNQ8Ii}Dcs|&M8kDsI1*(B z)n6TP-&F#&6On}T*IwmGRSG~+IVnJ2{~b3wh}h)-k)nkdkV^9_%C3ocQ=0vxuh)D$ zYo)EWdh*1V;cTWHOiZG+d!(gD_|B2oVezmw_k&*5ux1YXkWkLYXSTN^E%df|&e2bg^s$bYl_uFT0TNIKE(qkJ` zxmsRi=%q5v3-!eS_s4=(=ce1#+P*kIET)bmjZRg_kk=)K3kpR|=JqW1r{gwpKUpSQ-hj z)tF*65X5ccDbiK4`a#Y)?ZY_$oOtTH%8ig$-1R}PXOU2FN^yHq>KE(dQ+OT$lKn?taq7NfDT_`4wU_*z!d zsQ0p>io=~P|JRfeWxg@7Xwd^|94Ud?X}dgtP0JAWDNey@@>jb`{XtX!t(=VHXP2EW@o3r1@&4-6sWJMhiqg2=$Cx59)NDW6)SY(SCd37| zQ=hGrdn1Qx7~chrqjh_YVSpJ01fRp69ET9ANhI3_T= zb~aD8{i5g)S8S|SHgI~1cBg2>%Gwp1n)z*8$v?Q@w4sCWmQIIFDzynN+3V6Kgc@)D z-lq{Al6BKO<2#Y9t;L9C_NaKJemIlW+RPQ0X69)=U% zz*eZb%aeBFu`qqrF#1T-C2nR}H<@X2NrSU-e#PTnfr_Qv9esgKD{m|H$vm02=QEe1zN=13)(-kp&WlO-4!xQP>uucjDWlz6;j*#e z-E&c&oI(<}UDbL!#|PCLy9X&`e;ICGWbCFtC&{k(EH~OE`DYnmZAq_)%n43}q7G*^ zH6IFtj6=b=CbP;4N4N?YYp#sS&4L7`k$#bCggvo|IsJu=60M4(5MBh$$wx8l&N(zo zKQ{X_Yn8EB@ffivti#TtL!dZ)rz~&vK)VF)7_ zalZA%n)h6q5&$3ZY@5gfigDY1LGXY?J%;E|!e5!8c_-{h&P}J9tsu zVkFe@>M)x?ka@9c8MyVcKtnxMK3?!A=T>x|*`jHWd}-cV zQ%txPY;Swn0KCp$zDwUfahLgT5pBexjl{cJ@L!1##ze1Ze7n)!_2zA%VsxaM>0PI< zGzTCYMC4f!SEt+caK0@;VXUHoKC(occ`gYRm!msK*wq;@kz6i1|NQW%k9mvPisw1! zZcWoSUgk`A#MykiaJVC-Rjj>b*j5O7<9QO7fY67 zutjn0=ggIsNBS3qnaB61rUM|jB!(ZdpVs=E)3(DWvu^5FFwLV=X|c3~HzCsTng+7x zx#r;}gNku0ry`Gv^cSVqC1fqdCdI5zCBoxc2$|$sK^k=|Qp+)2g!3guoI!fWUK|a_ zn5kZROgK3rBgT@U>&VPv!Tp{vfc2ymsYJ?6+?843K%vZ8?tN`)6Kyl%Qju-lQ9>JL zGQYIUdd+e?t>>Eey`B7I@zx+Xzd1as%!9x-qcbgX=_;E{*9#1kRZ?7x(CCK01~oVnLnWD%d> z1>Hyz!;(|61nMA@2==kd);DQcSrh0OI||2A{pFvR0e%UP=MtvRG(Ec5W#Btr411ExmSST zLnK#a1+Kxb)uX@=5%L?zw#0;!c?o5>r`kE41Ox^uFz)xLMwaCj92pgH-aNFr{A~D& z1$rCsr2SBNl%0d1#ui7HbI2WSc}SXmurVEg@*|x7s&XnVYpIcsD;-&ut4W8tT4+n0 zb>g8B@$}5rX6p19-2PxF@g2(>izj4cv{X1!kw`v-^3xUSs2I6hWpXasR7YhwkLv5M zZjkbAou-NG>Rs3EY4D(GA~UQlWmzQVn<;8@2T+Rw@QvppTAB>pyhF*EjdWCGWi#;3 z`1u3EXSLt4Q?A*=Wqduw2K*Z+r_RyBci=NX8RvUR0im6(er1NI`nM6)OlPRR@Gz{%x;YyGOO;;FE}K{Pr4O5kwS{ z2MBSbwv-e;Smm(U2`4o2BD&5wu(jE4V;$Xv{|FHIV^Pj5X4*~e7|5x0#yUT(`U|K4BXgw;{W=!#Z<#{1r-%% zcQ|xIRxSIAkV>QUk!Gpw@Ogl*KdXYQ*Z4kA>Sx0wxhJt+US>2IT(Vu$`hpZ8oI^~` z@uWFMYkmH82Ue-$|Eb^*C5-_*&O0telo-%6mu$68Fdz^9d&@=lgbCJ;1OzM{9UYMZ z2BP8-!)urZ=N3f9_LF%F;YCsBw&`oq1c*HP?6)xa^@&=|A`@{hI7uj|sa%MmQOpO-K_m3uO+$X+J z^BOQg6(>R1&dIF{4gsWa3IgWXq8$+-p){0CRznrxH^3}{xA#gOG7D(^`mOyhIyTPu z6?qTixkVZsZd@XK5g8yGZwv>oZrX%ZxfazAW`3dTW`&nQ@?U5AS9jk=@#vl3+}u

@X-?mw0UOGr_d4CQm)oDcb2YtLsmj@`|_SgIR z#$sgU)L}m3D(y%9{CaD%WBdDVQT)E1l-$hB%HzjUgZla3j@<8o@W3J6M2^J^Zke8RxP* zh*S`+KK^;t>uB>E%8xU}i(K4eM{4y~ISo~Fb29=kcAI*R4`VAp&7I#2MC}pc9inj$ zf;&uXY`8dQh*3{$qx$+A!w_{RkoB!wO2h(~0wk>9T4lNWTtJH$p~ZkI)VA*K1XSnV z3)#kuOF$J&T1!i7qL;sJp9KL=lCjE7{VKBs%e<*uWD?TXr;+kyJn*rw6hT9;tL+1R zu$H(mRf6dj6@FV~H8nXfl79+OSuRg0Bi^wVA4-X>1#wS*79{{%*NofTHsl)*Lq$3q z9E|j3Ca(-Q!2MW0uIj--W{${q{Qr`12SNMyAQ5e5DNTTqa&r3I`j0bok}Q8J`H!+Z zJT?jiy7YAA{@~!|l%SB~=2kJ(-RG~WfAeTpOHJ)OGWY^k09bl=dXo|p-vU1YCOez~ z23%0Gp?dy6uhE~P%6mEWfAZY#rPfRTcjf!XH$qV!Ut#bC%n{JqrGrbdYO%IUlq8>i zs$Y1RdGfhv;lbXCfy*3!tWc8)*xr9vw0GgYe3^iPLq$qMBYa3zGg{{`F8EkTgl9L_ zTZlhzcR06#0J-Njb2tSc%lUtw+~U>99o${+Fd14}dL2&t85elQs%sqDgD#*Bey`3xFpv|?7rNrWU%slZ zuji6f@FzuPlYk+gh4KeP2}*twqm_p_HUM4E*2$xu@J1Vh1ozC+$COt1JO~ad2lvf0 zG1SGawVjxuBQ$TN)!H+GB#0`_;mk6{uB!&xg+{ceN82P&BR$hlGg&8W_sbs{NdoH$ zh}iE{H*@#FEokQJUqh$};Bp%Q>l+9K=_w#9;hFVJu7ck$_BwF7a_=)PjPCdb;@t>2 z72%DbU;|#aPj$+7o5WtIf95_JkEvr{x`4`MJ55jkOtogF5*5O(U9ZFNf&YFD(Sun~ z@cYJO9pM*oko`hw)Ooq@tPGPNNaD5Xb%5RCOG--0ps9-2*I{jtV$gj+lOuZ%*@}Q* zorS-!Ttf2+%gB8~Z44Pt@%8Ihx1E0Z%^^dF``{YgfINY^fWaB$M)R5#c*dd&D%TCS zK$VjUp@D*9a+kzEb7lsL*7At%`4>CGQisV)z9$EqP%uot&r_>coFYwti;Igw!LkK_ zI{x%?{7X_$P+aIqLZ-t2Q&=i*!K1*;_ciFe0a6$T1w!-`@c|cApA7W^rz`Q+v-2=pL&n+pdc3e* z5L`?G)N;8ePp(2~)C&FZF(n=*AuhQVpG$AP|1V~u3Ae|RqHbRfIPM{=y z?P1pi(DEbsS>rB(Y1sLKW^LhoWQ#usJZ+Purg?DsLFy1U0_RA2m zU&he9T(aRT=P%x1f-T1d?mem8ZniZf?NNW4`L9xIpz_KSFw45Sej$|(1V7PVU?_r+ zq!1AOCfMd1kqI}McV9ZE3&C7Aq1O`(zbx2eclixDyHX%Xodtu9>R*4JKwG07sGfYZigXqOB zaWF?wZDCku!w$-VGvD_-I=RsWq(fEz*>n%CqO{w9E(M(e3cW%jGU>n?k@o;B4+6!- zpr+R(i>nEkg)K0fAo#F1u!_MA4N}Mig-H=34!z?mkR{ykIjR7*5hWxJRO%8388;kf zVE9iUbfbdnyfH!GH;^SLi04m8oXjJYX5--a`?b^KbzcdGJP{`hgl(=**55gpn+qgB zR?)}{V!Si~vw)!B_8#L^GrvcDAD$z|(E@Z`gux~Yl-*_^V$Wh)J;AvC=hFaRwVC1B z7_-FDh75D_D!m{q2$1)L)GTJXkeV%#)?#4UKc{E3d7|vrsgg&qlDOcLb)dB4XPjmFOa`RwH`X~ z#yvbdCTxpp_u@CuXqpgu5o)NMQ^^DnUo%Q6aGE5%|F2KcI)fwi3c2R*^(({bFxka~ zz!B3u596t*V0u8YXa0uoDcUy;gcQxeGsTw<-(5Sr2i+3sFm*zo_ZQwIPTJasBlv$u z;MLpF<%f?=g8rR1N4$swAMDHNC-pGA<_#5(ZeT^l4O3zDYv3Nu$algdl-5n~fVO;k+S_k~XSw9@n;FJF{hbQ}DP12Z z9~xD=s_OYR;jNC8;KOq@`eETbfkd)qweoo)?b@i35?ZG>e^5`#z_;6ZGBV6s95+V~ z81=Q#=&X7)`lUd>+&*c4?wz_UT-*%gl0SQ%Fq7;KG%aM|&AuX){xeZ)D7MwcitKGM z7|1o#&Y41)nwn}3$-OkevAEW!zsXJ^X7rUXnXuNW&bq^+7t8=Oo9 zNG&>pTDQ-BDzF)-A+i0OaO>T>cdXECVgc|!2h3*SKn9Vdd@(+swiR$2#zAJFq?C;s0zc-H+y35+FOa_NkXHE~@Mr(qqm* z4_|?I8KX*oW4XJN3UPQJgF)08;Y9=n1!1ESMO_GMJ<+YO%6&krfUJcDBL33S(q%B8 ziA`w&VE1Y?mQYvCYA3bRJ0 z;3#_8p!Ctii5%v5;agpNNs-^t$%4SLw{l%eI|>C8Jz#!R!ksn$skXMZ8j@}ZAf!k; zP(gvf=j6Z&^fhphL{e8*H=J4HBlBKOcXo z`Gy_3;D1HGSPD_^a4+IDC{+Lef_9>q`@qN5-Kf7AY?;09)6;U_(-UMW9r#LkHqIbw zUT%nsEHPK^rKzTiHzQ9dtzT51AsOS1TjMw@hcw(_G}jq$p#jRVeD@&`x?%`8!${6w z5(|XjgRso+z{nl1AC_VNJe)0B1Vkx(e0-38BS^}ETeuDPK@OX^8HR-g>%pusn9DL$ zj1PV4g+DxIwI`8k*0kO>%;bJTZAgTH#4dPn zQ2WQ|3fEu5>O5K46A=>95!-0MNdvvznK-LtGT63M5F#NzgTyvQZ$_o)r{2>&9fFnJ z*qv?xtSKZMn471Ke;Fg*01KM&J;kITgYLdN$-|S7Lq!mHy2i;`et2 zANX#gMbKqHdk#9rphp>EV`4Z!@pVD?>JTBurl*eZK0M2{_Yx?h&z5hpwR0ko-JESy z`Ppr?Y`7V6dw5HZ_(e^*jhM+P67!w|cwZpH`EJ`3i)Bc^)Cs_=Cf3)tpq-1}ea z^!8g6mf2dj0Eh}`=4i1;L`6lRe*yDS7B+h`%nu}Dcm}_TjGmsyWA6_akl~miiDyI9 zd&VccbeF97_F|8t2PWZlt<=^3S>b)Sl1DnU!cQB^wT)URbQ6OCPLLZ!>)~ER0U6LA znG*$u)B>RsYmD!MajrHOF}*M`3>u_BQcNJF?yQX)mRDx{J@oNocrxoDX9HqjSRtmf zZo>Ra9X9gm?}x~;$sw`gP4-;7iA->+*18E(k!T=epMjzPfvyC6T|Jld#Jo{|2Jv_6l2rCKGKXT zon|8BFOfkSKm(N|caiDt1d%%slH9i1IbV_PSziM&XgfWlTM`l`VsIit+{}g@CtLAf zr~Ier-eKpzTg_4;;2g?ftb!TIet{%kU!zzeP0(LzefQdaU+JrUu&%U)(3rNDb01zWsGGe;x1c)!v)ul##fSU$)f`bCb 
ziDbx7Y2pkx&kzI?8l?yzQF6O@(+M|BY%4IvIK27q(|@jixWtbbHL|jCQLdjpqJgUj z*upiX9LN*3;r(>kk0MVE!}Da})FBI0!#hw_{A(hRj(Sf%BKgN7?CB9a-GQ^SmFcTu z6W?|(6+NN~7>RX3%g~}U1Db1J)*~)KWr)>(bR+0rG6w5 zSP9+?PPi8#!#abi0W}x5lY_;G@bC{0wka+CkLwy>qiqvBsSiGx=ja%N`V#=8UV^~! z6ogn=8yjxK%%O~@HcQRj^zT}I{Tmy=OgPfUr_w*e0g}sRFq;{IEde1RxkK%4%#f`#yZxLYX1T%Bo~%uVkh|$zEk=W^W25iBKdnlnN;`TSW-jdxny|_xhdB zdVfBj@AtRv`*Ht!_jpu~7q8d#x~}Iq&*MCf<9t5QxT1WRgnc zMu`dGC+FG*C*VJES4CadYmSz#9ygpVu*)}Go$MT4?QCwcxLY{8*f=@}@rv>a@~~LD zx;nW?@bTII`whH~&Q^S|J=e2C64Tc45t16t=@=Tl?^6;eT z+9q0DnB^=}4aApkpdzMtGF-z#ABHof68A7HE-ldM{i1|E${ z4b+;tijPmGcqzP~HQFxE$3MU+Up8(?oI0;BxXE9(MJUqpySR{*rAT9fkkEo=Fa@px z{Fx9U3mL*;gFmNr$*>2Zp`pHHcyj19^B3$fysWR|$JpWJvcUiUe!WWB7<)D-ja$s+ zSCdik&<8W3vGMWSYtvo63px(=14W0EB;4fRTzyGFPak@#>+O%HI>xu(6bBsm7ua+V z);^?DZiwb+%ePYB-QQU#8E*Odh>`NdiASlaEKg2dBg7*lle=~6tz}trg6IWJnz}rX z1>IXf46a4$m+YCCQbR)uY~6VDk>^YrR#Kj#;fw`73!Ka zHFb4#?xMnnSQqP0M#8(u7)7$*mtD{@7r@vrOS;cicfPqsdKNSMaFgKX&6^ZdRCmc` zcPVDNbH04MJJK;u|zNYKqBeY0nmo~vYT z&i?$I?LBgtP3&N+B|vz`pv>J~YIpIAhf62{ue9_D92^`CnwRg(u39?`m4>9Iruz29 z*515Rd-K!w+RTcDPsV&xywEfL8@MvNix*aB`|{EFW!%PZz0bkPnPyPz@LKl+8!j%c z)4|?`S^Z%Car$$+PPd?0l_> zXPJMj@K)F7`^^5oi;}x;)n?^ZhQTv_wi8K*+3@@oZ)K>&FC-)+;Js?C%R`G#K=30< zndwud-;T!1i$}@m`KxWJ!qQB?+&k>K-e;v*WTyvj5myq{$?4>?@0)AQcQE@!`XY=}M0lr!rH1 zZA<+eym+wMp)O$4&K`XK{z)-0di(x@PY>e z+NJTjwj2Y|TZ?Xvr3Q|x| zjJnGzM;yQ1>DJ>9(^ewE?dep2gLb2o3<=gACdY9q_3ChCtZo_9tr1NW^`{AeSk*Xd=pf1eyI_oCk4-+w0KC&8(n9FD#@hD-e(tWHM9@;kI= z^-VYChazmtZ8|tzf4AaXelGkeP4OWLI8V>$j6L8=u7(Lej$>tLcpP1>e0w^}y7xV+ z!*Kb7lgbgJ?Jw0l{JwKuNeqRN_<^=kp=}R8Y$eyJc7nQy6G3g(_*m030X%B1D>c{D zr9+MhS)vbxMn)>Tx(ct)_60!z5-+kU)Nd=a)ge13h(BHzsqF49a#}k(6a|`0^%U*H zi<2!WkuWpk+4a| z$7&xw6SN3h)wL5zXf$J(rza!K9Kk!cP<0^VzA*gABw+7)J}>3ZyDTa1U8k<%$gy*9 zl$?_;Jzo0UZb0v*&JgQouG&Q+Pmib9;5_CM=1Y z-Pw9<{QMfiw%wO{=iFI?DF%ui>9Cs26#wbGM^hb{q}W)C?19!$3j}mFEHp{dqx1>d z9iPnwRd-5Yu5sk-`U@ItDw-{+fBg6{_KAqub@e(iD{POGuqPCMetUXiMrLpI8jf7H zaYerkqtE&*B~}9gKbS)PxU{zxMlt^Nk;~h-j>>?8{RlVlo-~sx|0`}SNs?T@|X-~12?i&nC`D_ifHuwAc)xrK#z*W6%9KDS9_`TIUQd1BV-?wm8F9t*tU zYW>dN%`&efC3Xj(IMy@y?dchK%+{B~B`%Y}eRi~d+pD*;XGW^3Fcy^X;`XH1d(E!& zuqa=>8jZ0)Fy_k3&p$Eau`ryoEuH}LfU!U_Tqu*UWZgfI81j-vUxS500FcZH7`;PO z6GSfad(*2eO%XS6b2h&vak@EC{2E5VC}Kw;PrNkbF>(kSgFxs0cB9`m_YijT+mllf zZgT_0bVtu<)7aS9TyeX9n2{5LZ_UrfxR-S_)YLIq3xNUt#$Av=nAzCb70P$!%Oc#c znw1vW+`ib*!SS=E%(2g(hrPIPAK)3TzrX+D#)dMCH%|3Wk^Z6I?Jr4TQ^aH`J{)pT zR#GBFTk8-tXR1YGtmjfK9fWl`T0UUCy~;oDwH_dklYncSU`5 zs-w<^5jL+9ER>{af5>LsdU@=nu2Xj*xz#OKO!`Kx!Nw?|Xi3#(%oIXHD z<7Eq#UpAM{N_kPiWP9k3d@xfEKQ7)Lwf+*~8aBG_J`VH!f}7-GEYGQ-$ZS7rYwP6glzi!j zwPRN;Kj5>v z$o_)9{_|_`kTklobTqSd1vYnfZ08?_?la@bF}qFFQ$mR3(=D<)w!N{ipqs)xbu^aG zBm!~`r;c0U{+-t*Rh0u#_Y&R34?G7wM*N7$$Z8;18cj3F_|ikr&y zyk4BBp`n4-B4B?jGkrvSb^oi&RQt;%*s2$Zf4FA9dGiMHhFXp)>6Csu>$UK{1$P+YsCJn`hoOD}{j(ia0RcP)MVAV9>!VJh&+fSI`7Bi2-x&5G zx9xeS2pQ$vhN8kjBu;ANw=HEhD}|GFuz6;#Za64QJ3HV^-mha0IR221?<?T;SeIk z9CqgV3rV@IBv!+XtaKhQoV%6kyS2OtIEIprZc=Q)swKHL;)Emx6Vv?bff5(1Y{L>N zk%YsL{xyewos5*i9bIQE2W*~dwl|p)5HUMDYq*vL552gu@&)qC%VH(l&ewNP^%Q5~ z{|RD_OQHSf`tONA9K6LwzA6@0R#uaacbFXHcw9WZB?yM&CI{PbW_!#5yLaGk+fVJ+ z4EcStg>B_`J4@Yc5pdlEXr4IyxY=u<+-;^CbI4SOj87}GH9<|rEHkL-2z%nn04WKH ze3o{O698alFpN)BXftw1+2XZHu zEyyrGt>)+FZjbnF+}Pb*qL6117N&tkQP`LxguT|uWx_@w)+-(C@BLU4xtH3 zBBgji3w{9mnRDA)TdbU%U#Ijw#0y!aK@qyyQ?(z`+pC-L>eWrSnb%OSyi*olFz2UV z{B|bS6hWG{bRt4>rtmDCTs9e*;h#kVG3qi8)Md8tAsBxSr99dC@wORkL5`j9`)uLoZ=gO1#ta0Jl67v1j1C{5*Ih;NZjw30FBN zhZlQH0&XBM*d{f)E<1s0M}VvXrqvi)U2%LSUjYrhhcbD0qw0VF6Sir`hnTGd;7RkX z)~Vx??qtku!S67n4U}2XWzPYSc<-f-dHR$TX2(#7q#Pg7+-rEm| 
zTuPBSfok{!5qoTK-_Ls@N}1n%Ru@6tM-1maug?uEwJ9@~*sY-HT=~@u`JHrkQug3H zl;}=-+ipFZE`bscj#t@V1eYvp2wZOAqW$x(U_N1TK0nOrno z2z{k#y}3GuRuo2wHx0g&9B{lVUo6YG3u~>; z^r)j8^4w3$hg7T%Vlp2T)SQq`UK>}GZjJ_#E5JfE8kTGR_KnK1a_beU+=z*ZDWFcG zJ7?QX0|h!ImwK{mXJdUm#qHa-(+o?T*;rWQP>FpLmMH=!;SA@#Y4S~$F7)Ns*;iF= zj*+$jET%kuoTte$|FvOB9?HD{p3|>Y04!BpogXrQ9C8H~9qGebHYUGS%@5XXY|2Sc z7d=#Z#PHzi%ZrEd`*_}J=bVO@9K6Qmuf6u?r6LdZ76OvGC(ECf0qBie(XaCNyCE2_ zprR7`yE`XA%zO3qwWR&MorjQ!ONK@uwJPQsS13Sz7?Q>vA==@xzVVgO@nga5jUCqD zo#H^{NT#ziij+`Z#4r{2yIu9k?;R2_Ds@#QJ0YR~ zDQajuhVn2+%zyXe!1%?B7pYogcCue(S5e}4yt)zQ{MK&awtM}f!X2KOjC&R^@$1%C5fQG?a4J!_Vo08XF1!D1zSKjM?13EOfWvf<@L3; zuUIP{q+Kq!5J?%Mf`V3XqHF*Ypz76cMF8V8B;JcToKJQ?CCWof3Y8u`{xl7q9Qj03 zh|lhv*b`#mvL_=KE7|Q++|FDmY|@g_=r7U@;qMYOi+8dys2wpWyk!1%;}A9*U%Wg> z>t5zPV7FzGRn)|@Rm8CycJ$g=q1dR&rWHODy}~Br^6Y|~Aw$E$OPeMB#)(3CIIkYH;IWq*@FwdoMWATwbS0@DZzqj5DBM0CsCI~fT7NtkJ1=|p&s2m?v z!|t|4jot|fZdc~me-`mPD(Z{YVvf!mILdA#B-|f%Y3<4J&u^nHXgFWW6U6GT=%}lw zDGI!moic6xoxkd9{WAXj8|IjskC}5<`)nWaxhck`$!o_ZIO|=W&Mk2?hjVK%n*W5c+dXa7+7uJ?)qRY~j8|C0X7i2Qd| zx?^=x>i9v2ENo>MtS<)D86mKPDWkqGa!|B?w)#gQ`1|Hx>%^&CaE%2t9yf|F^cr=< z-{KIB@c8-*d>`Pl#S7V9w+v?@*8nq8d3T#&UIK@r% zqOGy_YDH(&er5M@HTYZX5nyH7MOJ)E=Ed?$-cKZvUG>wFjvo>}^6=FUL>)hUXRy$>#=M`8+0;kn^khW%p?A+xPol z&n;KnDYIsHN75j89WtOkm&xyDVZWL+V3L_ zT7kYh>PWOExf043+yv-iQu>Qn4biD$PO+n*g$5WE(;&cinSQ$Ihn-m3U<3maR) zof^xoXNn5Wm-L1Ftea_~d-g~g?07I1_Q?xN-ItiL(T2EOqI@qED|LSBCVaN7UW>ko z$97Xm6_ZUSTQi1$^-#7or#2AgfEN@$kBj0# z!)b4CKi;^tGATXB3t@}5DOs8ksN9;=^FbO}+Gp9>*;#-rBcJPYHo!BCqz=zoMix6w#O z*1AytgX4%yrg9dR3ewBlNNwOUJ6==iuTRhZ5bozrf~(TT*B!x5-40P@sp*Msj@olzRXO zhQZC2F2@w2r1hUhM~@Zx16c#G`dQX^Q=|V>5!)8+BGmm()19a4T2xh4;mcU$FI>=k zbCu%>uTlS-%Bm_^Kv1_jU*llJhYo!LT@Q&*@H++&FwhnU{%g|n3`E@eo}sPm9gsLF zn3;3+55CvEE?~-W))^V{KI>6taykuX6nI=z9@Mo!HGu^JUPp6S(*HG4+y#d9KB9T4 z&hr@*ktwAwz1uNE0oTdis)zn_EJs-PMYgkQ_1RCGj=8I*nLd&V&BO1^6O{A6q;H=u zGbOpHtEk#Z@si8SUf;jB zZ(h3i?#0-zjpdXK)wqghsk-j(%>AM{ID2koJx%QY^>VyxFneOey3gxdR}pCt&u{MP zKm`H%RL{ko$U;jyUuhs+b4i@=exJob7e-vt~U_sqE90uvO0Tz=kFLw0c{l=yu3#sI3S&l z0<>nR;l^Qm(Mz~;2M)vwqY#ATUyR@Nw*~|>a+m;mDNla)`N0P$5&atN3wXD#Hvbny#3vSetqdBwI~vzgt4rMhI_U5-=3lP>C6op+F(PFIb{!J9Lmxc& z^s70+rO?DV@-OfhJc3Dk5>j~id=(cFq5bgTgG1w>bUHINbPJAG1#{R79a=@Wkp4oL#^h|eWUJ->KJpu!A$ zMsBAk(i{HnLHL}um$O;fb?G-*+Eg}WJU8+!^K@064Ic$pz*^I9NVJ%|aAv8nyJ)3f z+xmF!tl>p!UfZ^dZ+TSLd4!`IPBRSflIe+TT^Q`sUURX@e`Q;6ov-I2zI~5ML}E{o zzG~b{dyXnI{RgQPPs{aVJ(#fB`W^p}32n0<8TwqyhyP-;uj9h}cZ&npBK{5ncreax z3|F9}!nS<+AGl(I%la3|t$qcdHi{B=s5*+fW@5O1uqvVAolijut%Q$UuW7CRGY+-Z zfn%S90>hF-iQ(d=GVcxYZ-{=}&$H$bEwXsS<@S`rkWe_9u`yZ!Ps3ct=*)ux9?Alx z6bse3!U&x#sa*TQXg6Dquj>q|f>t?(Re9{~&sEbbelplwltdO%(KJRg59DXJ7v)XV z>pzGIiKF7$Vd%5zWKdaVp(|c>u~=kbEw#*ZlaW@Gs77U=iWQj>0U^0Dt^ibn{{kLN zTz4(8*FOKC)D|q7*7yc`a^^4ppc(ofKZF9edXJTI{p+bx<(62;Q@QNlSi?B{_xF;) zEK`Mk`ntplv;1dVm@@AyrJg+sL%niGT9R>{r>81*R(p#FUqrQpOUT6fyYY`tO^4hc z>4CSPF5{ffgKnOaUc=k}><*`iJ_@`N7ee?YDy~37*@2%7< zaT{V=@Yxbh)%@aeG|&y^>M)@Zcpj5Oqn-F8XM}*XT^bTe>cRi=O!5oJvPnscNJ@rF;(INFn5d8`CriPsc4-| zKx**n1waglTLT@V!-f&?n&*!YIt>VC$Rz-o zsTNR4ROWtfUfy-u1XoIt7p1-S?o({+4@D1^8Tp;6@&9@KdtAo!P2sn^@&YP!^xHsDGkf5RlSZchkwsX?8MyPW~_WXC=Q3Lctc>B<*`H(kX)|UqXIS zn0O@9fmHkh_!GIV8BbKOy1C&l%fz7op`{&J;pbX`%CqOd4=1cgv;P*1g zBz^#pfHg1*njmOpF28=w4eXs{1y6KhVmJu+Dt}#L(%b-fMI8pKfyX{B3!v`D=%*lL z9D!8^RBZ^v?C}K0$~%BXq8&PdlQjTTu`L_YOz!gs7Hw>`4$r zky8X#-%7qMOjQVkdYd0d|JGxQ&ZuS$?K~Dv7|E2hl<_B6{VC7|xRWdJud1|sxcceF ze~G|%f+o1pvW?2R-$=>GFomUwouz%qVG^kpxq4s&HTSZB ze!$~N%Mt^SqCNwDM|P^?Voumu{Q^0t8WF`>ycQE3-TLMlSAlgKF42+W)in>O1$;N1 zke@=+3J7>laM!dJJDMQv^!Gq9pZCW6qm+RCJ{tp2aLzkB=b4H7ZY~;ajP(37ggI?v 
zgFGI#U=N@db7CPp%n+dgt+geUkQ#9 z(@;aDf{=j>bGN#)kuH%6Wj*pG_Q$5AF!RtpG&^NbH2d%GV~Yg3aTaP+6dCnL!2LDb z`{9dy0`ih{f(1@`eSYX+b~cYql4P;h@`NgKlL2M~eTv=xV9!IHTyY~p~OMCCh6Pn^O6;;*S2C|Zpj8OI{0O5Uo zX4fSFIg&u-4~0TyJhy89GcwhI+BF997^uRo^O1dZf0-gjGQt)*9>TbS=tsscO4zN^^o+0q?ru%^w%zM&P97bti8%gL(rm;0dag6ee=;<4JyYkO); zP)O+ZY_9+kF#Mx^kN>?GXG$_oIU@jmv_wFY42Eig$GDsx7SI={E^#nw&P&yR_!mJt z!o}_#IS~fdHdZJnf{4h!f=2woyn)Jhdnz-+K7w8VH&SYu7#X3!A@~DS1(X*)fS{(F zghDse1E{dRdhOcMbXG3PeZNPl0)`_;i2g1>#-LAMu*G>t3Yn!E>W{8dR4@(qzy;Eo zV?YO`!Ubev!9Y;IB93$NK&5=m> zaiMYtS=m5OtkIlAS|$=NK?6@ip2^K0_dnS7@YeY^ND~HbRXgN=amX*z<5R!xSfz8V zPp$qHZToZgdQ%-fy8Y$Iw6^B0<~9Gz(*xSOmjTQ=m$jMnYtwfPSXwpa`V~#&r~7j{ zti&`=r!KvmClxxn8)H{`CPz{T~%6I zJSJBgRHVhuUxW0Hf6;ac@(>=w{uNBEfTqmlM@S*^2Qi{3b|H7fxEP{L=4Eq)RaZP$nF6oWQneZrY2;<3Pef}^3e1b zo`J!Slv=Axemdt)DN^QoN?1JLSgCqRYCh630`v9XMd*df;7>9IpWpSX?8X%wS)m4ddN`J&f)tTjxv zI_cQjQ`nAS!=T0IBbTq8(_mcd!HoGH^$80Yp06|v+&R}p@<0Ab!S=t8|LuM0zUfH@Ia6$Dv|9^*q zJGhdxY;0-1R{WmsjB$X^)jf2?PMtagIPMP>m(*vcbmQ~PhldQ5J-1GQzi3b`DRF%sFeIChP{+~= z$au?H1Mq1D0ws@=f#OdjnG9*=KW3N3Xea$w?oG`EmdtX@O@42LHhHlET^?*yFgot} z^J)5{@Z!CG&PV5;VYg0ra9(;A2;R{;XJ9jY=cOqrHDxr&4i1i+8q<=G%x3ze%)fI2 z#t_&aU?fh=WNV*+<$D*lEG`D|Xmnv=L5mAG;6Luv+jVv#Um%{nzT75eBAg0`(0s?>J9?{SdnrvUhuBVa5 zvgGfI!Qp38?n>vj!aLHf`F`xl3fkD;&mdliz)UU!6*(G5K8VhU!VL!i=00 zrGUfHG7aCPELt}&(TeAhUpYUl{qqt_#tRj~Kyc?i`Az|(D|pN)5rf9{AEa2(dI82Y z%||EdPYtPZ_d%*E-rqWK$yGx13PEPTe?wWTUpuA-H)-F`-~JjAt5e?s3AxMH^3;0%u&EPU9Ycb{6{$W z_7Z~fIQaaCFkkQ#g2+#o>@zP8Zc;LM44%R7Lj(|PS!OIn@^+1StTL@8y%OZ373i_x zp@$FBR_bG_=nJD28mY`|3(pb0=5o<)S~mZ_=;CtY1Y_(IocESlCKr&@Q~%xPorC$Y z&SLwRi57gUO8(-_Giu@4ecH<<9TyXMF8gPLNOI;wUihq*cRN>lig$_j*sb*M%L2B( zoonh#iz4qn8&uy4qvMN!%=7}h1-QKGuEWR$HyOq zM^Xms&3Sj&L@EI^wfVCBQ1m2l3`BZ5(g6erIVxw9f>eD zJ6s`2qj)>M_M}x(8n^Xuxj6DeJUM;+ElZga?Ar_**l5qByaW+K02^F7ass02@yDCY zWu={ZmV76)Bg zegFgl{8pXcB4hA)2v%78(HHbAQp96YHd!8EOGCt7iSy6IZWFM#!|V5bQl6n@*wNod zW&E~k)5_9545*8z-T_h&<)v!&Jxkvc+H4L*L_IA z+aP%N?%m@SU@-&2LWs$8^p5=W&=Aa@6BskV1j2~y-l3ty++la2ym|q~vQ2=+ID-+X zCf-vo@6OUfMy%uxvvOQsK5I$`a2j*+!CwSDgFk*>w)PoNui*0x>y#uTV8D>^50Ks$ zc(6#=GKb}fJG1qu0sDsh2q-BB3$XEHBN#2Vc5rKOV%0(|QbAz(9+*36uc&&5>#jBp z0XU+Zyj||W4J}7~a`IWqkS-~N>Ha(~U&+0QEdm^^20k{hvs0^g;aT$-xQMxb&rdwe zHqd*&T_QtstHS1KkWdBR^8CQIw@f9uvBF)^wr7YlYJ|V9%{fmvi!Ew~x^s!a(GH zgoj0F6VT;w`cw)$nhJpl?$Yz~L4-G;odeLNV^o9%>;uCBYf50fwKJ$VfSRWKxjA39 zun1<9G`)PwkfEcIG}ai1AW6}3pt45MIpMe30+kyArrFQ)_v%PiNyuTJK?@Xjy%BtT zi|Go%jua3%ES*hbE94NZ(j_WXj=HgR56O&zxD@x#mswYZ(Pq1qIh!%CNB? 
z2GzTwdXJ2kkJ4FV&teq7g%VmG+JbpEgQ!7LM!I$PPjUL2*RaC-C^F=u4B8mShxgHM zX5(`RX~nAG;gL= zL(BN#EN?2CpszXcasg{qGs305>YM>x7k?S*hBKL;isdl+qr{t=i)_D%vt83!FKORy z8hUu``z?DN6@NO{+hByo@o5JCc7heQ>+>)mp~Zm}oKPJ=!&-H=UcN?}!lAltQ1j9tgNO9ymyNGsizzO@ zrY!cQV23PUhB^6!9IAPh@*yNppPdS3`g!?I3&ACbh{LrdffXptA+b^~V65iQ6G$Hv z-cLl0$uY3dKbxW9Q{?b~8EuokEPxRXuhLFTaDvSTS@IZU0D^VKX2c zH$gD{0C*leOj6*hgTnCg4Uf5ZG{1!ck#7yjdU0z9`x{kluzh@?M~a~D1bp0Yj5xJN zdi!@eva^$xF?wmI;+O&iN0KsXX~`KWvS-A_o!IJyQRj}M$Gp2x%z&xEfnxIgK+;Mn z?bA=$xQ8)-U=I$!pNP;V6U-b|oPx6p2ERp!fRi(o(9!^aA`%%t0i#0OgcKneeIPVI zeOas`G7F)2CgFA(iT7u8-leL>a6Xd_keTj$7Mgv zUgTbm{-K@TsJN_V63eZRl;)b5S5C}=+qOk&g-l*vo*!}{hE^^=^qip&Kqjt3lmHAn^!-p@X*dD7KaA=H zH`A*-r=Fj`d|vl&Ji7;SEUmka)YVZh46K+*3!joDq0>>J*kNd~*DMkTd#m;4uDa}A z=Mq^O1@56khn@-BM4N?`f*J87AbW^t)sI;(ln*Pz)aQa&k^4bE=Z|XZ@x`pP;xo7n z>d^F}e1rWl?7_-HZAuu+e`<^LNB^!ZG`anNGEDOs0EPRwhrmO@+(-iA45 z6CHO>h&mhxQCuV6@-hgNy>=qy>;1N#({D1dY5^lPUEOEb%An+d&E7pH0xgA6AQx}; z8F4znMddTOlcmdnG=B?yPVYUJZh{Yy5)9J!4^Fg1Lv$;9><#fu2KO1zM9A?0d@I5= z7_tnazAQi={l$*tnCH~XxMjC0L~$WsIhgZI0jTR%`idj59RTUzo8hESZz$x?nF4=6 z2(xos4Z=d@IyeYH#~#F8H*W|6qWhmd#L%#?&p_tBTq}k;1bAowY^ktOV1A<1k`SPB zpX=vOXnt)}CIWK6%#y$K?cc4eQj67i{PbjE&J)C7Fc{T6!X)*(aPOBWjMw_Y$jH(G zgbp!8M1USC-hG%^<~SIBzW}cx0HU|=0CIkWf<1M-fhz?cL)r;)z5@bl1*<-kp_o%S zYvRds=ejJk`0IiXQB#0Besc=~4;e13BPX9x3-H4r?+aMb?|~_w4(iVuprKIjS4pS( z#f$RLuY~$bklxT+=_iRA7@={q?#V6GM~h@uh&u$Rp%O0I2IY$#G#~?~)zINDT?l4! z-x13j+^tqv)xo|GG~j)*fJvBxwD{F`s0R!b$ES>)Vg8jXlk4kM;C_*5;9Jh0S}twd zDHGO{lhgYS`M)Xf)A&z^=Kqh*iQr&-RFVUmBnQ51WNa>Q7-E2Z;5F>%zYVgl(~r*t zxeIlS01{t=8%ycR-Q(QvxU}MtwSJ(*PO{MAB(2hHr7tHF-2zbJ9tDlVNpP9 zDxs4k^M2(VQ5$c~LYaq;&UD~f4kO8zY;eTT9sti_887l zaV6|R&D61JN6*pI%FaPoTP|Y^8`I>i zN-Xk_`6X=e4Ki{pdZ)GEQNFE z56W}Y?%u(_lmz8dc{r>^h3`-!Ev$Zw>9dRNu1cUg(=|O(A|pCbKCtI~6L?)u=*ULh zo^_=_Vy6PIfQsJ&d;JQs*XBfwI*E5NTtntU=)gm+Ih=`YuMJ?as;z}f!%G{Cvw;M0 znAzFd5@#a4mKBrVTt3@#4Vn{_VAUFR_hjn{nRN7d8Qh6{p90;T(CVrL|37~IoCMRY z@|T822>n(gU^&`3$|#ulJieFJL?AYCXSJ^)S)>8`(4 zvKEUyps;{f$IG4VU?g$HcS6{?_rpmP9#Z_b4}p99G?1J30i*uuh6SgD@#7)*!Zcqh zI=cIigwizAl~9*7pkE{tfMuPgdQR&W9j{?E)D^@SGJSxR;yw@~sC61ek!TJz)V(cH zBo5h6YPtU8GjSKrQn#6VP*+ibeXRzPpL4~?u83_n59+8x?XGF*>Cm5sOzKeUn}Mil z+?Oc|*d5I8o~Vg8^x;Dn?>3~!dG5>=ljfa)Aur|FFZOKTcaZ+S^Sh}1A>ALiC5Yzt z;c0F_VGR6Zo#aCKcs&j=_J1IFGdBne))Pj6Jw-j-=tFQXV!;%@p>-D#tk8<|)X)0@ zlq>hincXD=jDc*u4a(x(Sp@kU`fBHDW7fTZhkXcbGsR%nKkK4L_t#INR@|4R!?!x! 
zb*5!!?cUmioo!HEISL+nWuV4BIQ@{Pe^gBUdB9Gj=Ihrn_;DRL@JLGTmt0H5Nd;3E>WaFQ zAaWQg;Xr5V4>e-t$igfk*5LD$788vp_7PMKD0_%N+s6OHU4WF0%z`AP zf7Wr0LG`c@Xl>yjc|d;t{DoYW4C^hlrTYbK8W8_*E}D-Lynf=4ol z&e#*Dt*s3Ugp6|&I_uyF5i(=HwXQ#PX%w&5HXD%%uI5JPHFX^9Ys86}qDycDh+&OL z!tU*q&swQCqfkdK4f`yJUtz*r=lYKV>Z$`8{*PxSblwIWdLct0zM3$H&6Wrg8+I!z zDzZvSma&vU+{`doKRBCQz_n3+?u*KKL~e~7?2FR!8ixMqST-s56iD{phCbdu8{$yZ z!{YsO{V}Cri!=q$04XeFFQSR`%YAab4CA2_xkP~=8$}%G&=8<%7tCj)tD_T8|HKnN zhmvI83s|*UkdUAgM+12S>^Q^H_^~E*v_xh~dRiJb_&ptOFq!Bhs2-?Arx@Mh1U2xI z8-H5jfvrL?fnM0TceoJ>HwS1aA1LE?of46x`SHAke~9zou;8)I#tqT7~jMmXyBBH9>cGracH(ox-?P?5XNMQ`p+>_d|;TVsdFhA${ z*tsu8E4PU{!NZrd*9R)j5a@!>Qy~hF!}(jdYCSm&#syT|LEXl5rZr9ufHJ?wydmm| zp9_lF*;wGKpQiwCHq{93dNT0y$RLdJ2Z)eF2a@yhLtAMx6>ouyKu0YA*@q}p22qEm zAAYcx`6w0_&yM=eJlID}Ids@nn4SJ8X@Yy|G+-xmNDGq2fNafX@j!U_37=(4@*{K( z3KZqsv+q1J;JVP?nhGsv1SM*4!4xw(`E7Y!hyEBJ%X~dLJUxg`_2}#ffQdfd~_kh1?A+5*RipbU)+ z(VxG35pWun`^$R8*}_=4!wDNGp@Vfi%=zax~j<~lIEu4=?Bbk2Oh z(HIojeS%>12uulMa3hEi{b12Z3yof-@5|tN-lU#TS`(B=qRFoTV#f*&*xB4&X=#JB zQ&$SD)U5pc6i`urg{VZD>*Dc3P8l=1c@w%V7a>NW9@viu`#Xp@f#v;&lXu%JpQPVzh zpu7oZ6g>jX z!6)GEH7mT`c=Yn{!C;R$aMeP$>HnT%Q7t|scRP+)KY6v|mX5l}t;T2$T*6@Ufwafb z3c&fr7CmKuK**xLo*hN3um3J;@wy7NVq;4_k7fan-pX82bj2-Sp~m%@X7zhKUhg!= zr5Y^>qB(JhOc#aTEa;32O-<%j+Q+2$R<)adXHai#j8W?^Jk%Z$DJFphn*N6htJMbAUJ`Ai>R`B{ifozwBCS3w8t!SUIT_j+!CJ zz*q?IbPOD6pP*Baf{`&Hcc&Bp3}9J8@TdE340&uqX|U97;P?Xee1rtl2%jO`2sjQ) z!GA>Hn+H-IwO+3j^1;F(Xy6S74Qc}j<22}u!W5+z9Yu8jHLBO(!9uB|aG1)lh@(ZzwVwd}tTcVU%@xkEF54M`RM1LS@A^y;7 zXY1Z!y>M^BHUPE>Nm#T0A7VZ(I{Gf~L}l6FJm-myi6MQV>~#$yAGQcbcKiU8-CN-! z3e-XfRM?EbL|?32m;}N|9vz948?Z+TYy1Mhr$1Ty8(4OrAL1^&UA#NK*|Mgl#uV;) zYnU0f*y(Nf2ORTG0P<#q&U$FHoRK>;XbqRS5*8HReA$HvC^Z;vT}ro;&8 z_&${B02<-SxEODV@8{2-o#0HL;sH2^%QfR2PW9AGXz1_ci79)-VO(udIFAUIUZrI(a3++e5~eH(0}u#Sg59zzDwG zkN_a{gnU+B#e}%=vX8SP)U^K`uymtP9|warV|t*NfCJY6VXy+Xioj3&mvz$u}jbRap2gKh@aq~H~uXSp>HUQM)4Csu_d(7fC%1% zws)tkWeWrmLWT;<(grB2QJXB{=4SkN=0uUl9HTgP?9jv7yI_%rGhrw&Q|PLI$kvo7 z-Z3ZAY$*iY#~zXpizZ$S0M{*u`z$i4?F~%jIJDTGo^_arcfhNL!s7+-C+J`!zaRiwD3Y~6s{R586j0L81Ow!zab7?Qdjx748zKyZ12`y<`4sF> zroc->tF{6-3jk%=C?|o?$_D3<0Xq<(&2QUf@Y^vcfOj)sK z!tQQvdP3>#Q;D!EXfjndJ-pk+%Mb430x+E- zx0<2|IRKge*b%=TX_+Tnq?Ft1uxFr$Rn6uc`A?f=K54HMJ?`p%-m2ueNfioodug3P zHG1-YH)K*HQ&&voKThsnIP73*UpZfTm?CV3@8NYJ$^!3_*3A{Q-Y%2&4Q{=BI@KMX zQ+Ne24Hs3ZOl+6fw?2e-$7S&l>BN4uy0E@jBwE#Bq3UI!dJm`C^a+PjEEj*#0N%== zc9BU;2H9gSZIN5%{6~b}1gw4Aw(Hrv61Aop>&t+JN0Yvesx#Q$dGi$C@VSD1dJfx; zobBHH>FLRfGoF6Z4Z>6Z2<@N_{kta;j5YA`zdp?u)HwK8k^gbl^j|i}S@#!P-5<5V z+6~=$@b-T@^xer*a3-+SeF3bAs861y@v^0cI@S4sLaOs>(bBdYch>>v48?9_e7onf zH2&%-kM`AfoBdenSE2KH`K)S!WSEj_^vxh`T(bo4k~Z~9lNk2f&W}M1#N{}YP#Dci zCz^G@i%;-6&Sog~46DtV89o;EZu?f1T3)sF?0AL#be@F`mOhWLII707FJK&c7`aD^ zB>n10Z01C%6~}H3Nffe@wXikHWP_INuzkOBSsSWorb_x-uAVPAXiYclkV0MG9c z_YEAU}}o6SSSuHx5CZwzJ(_U~b_yes-<8{e3rp`lSyyNbF z#=>jE>CD(Z!69XBs|kLIF>})f6b!fS{8`uc(YS;_E@0tKv-?poZ_)kk)T$mol8)7? 
zVs6|}t-<}pwZnNLXQ!~@qD5q|u)ecms2yLjk0kw+S8LP{1@=1B2Ekp^y5K@t>?A0l zP$y9Ggtm=``H7MiYzpJ?FC`=;x82_eE~O7ZSx#nz(*$nNZ%)%$WE8kmOk>Us)7!U# z|8jdk1;`JxWK__jqokav86_<3DTIlv)s5^~`iJ@Q0;eA=HwlRE-T&~(v1=LPQui;` z`*sC?9dmBhYUQbIpKp(0AGOrh@v!=rcu$(Rxj58lxj82wiKNg_dFOz}Ly`o&hI zhF{SI_R5w>xHOXloz)1aJY6lE40~$sH@p~%Bt}QZz)BHKr+l2`r^#2>3y%VSv3&74 zE>2GB!3)FHUhVz?N`();cl|!ko{`gB_d?RkWzUG5UpalpvasitvDdi5#(1L|%p2hV z@A!jpPHg%VFC_ba$m?&-S8=cE{VRf(*@0V4TGeMh0$|WEd)0DFVNP=&vogK4`G82G z(LA|k4rCNV6l9m{5SSvOdQeAyzU??&{enfc2eh619?0;WD~gR=6nZi?Zke<0=pV_~ z)i1r#O16m8cwxiYwXkiV&#s7jt@eW2llDPS?&-w-bnZay!eD52 z7cr(G)sYKBOeG4Hc!>+{oAVyQWXIsL(ik|gu}Is3D2a}gk0=_?#KGZkpW80YZ#c1h z@s`nE=nB-u4}P4V%PLiY>jnze=1V8}QtyI=b+U@{nCT|4TGIJe`blF+qfF)swN2%yG; z_>SyD_CT8qAGYLMgSDOJdHGLeOk4?N>`^+wl3r*=m)qIR0gq9%!(MqBFtJ%Ai$rq> zT96bHLASpuF}9Ofq)3 zEY+KKr_vW}hDel{$6q@mOqEnG_4swxo#`>eFQB0ZZf1BDhN9yDDeR909YyMfVT3;> znpeTSr?M1`r&73AX1`m2lxB3K%$m70jXXUM98d+`Ep{q$G4!B<(kttFlI^v#czz|V z6P`Wwl$FTAWV;ZfuHfXJV5@`QDhvvC88_b5ZQiw0+hV7Fo{+_^a5?sZt#Qxt60Q}H ztnp`2cXAHb%lK{c+1fS?Z@^bMeMzHq`Fs~g=Z42Y$E0d2(|8g}?#!|(S- z3fT>Gi-0LVEMRmwLE4vp7w+93qh`PVM`-D$PIlu+;ZP5mqXBoE=ZJvLW|ATZ(E(eT zjjY)Ltn7pZMV)q1Q>w(+Q9I(CJpGJCX0RRP<+!{IG}+n~q~3Ij)?KhQ&!OM8PB371 zjYZ4CDYERek8ENx90_m&q!`yN@AtPEd|FsueGf}l7R%`?#$EKNYg(plQyr9~;gsNH z8o^|3{?RKUvrK0Ch=4Jn6N*Y%bLHtAFF`HG$r`fF$jSZGOIJHg<#&Pm$nUC=&N}QV z@WFq|Km+bozdam^=Blsc!gcoLGWYnWNV8XupPGMo>D3gg5^c*z`sXiRHxG(4=V4Zw zoUjX-ExWPCBjpmRRfnE1hicC`%qiew{dpDv)5DEiGII6Ok2E5=4g=ABjpz* z7bWtgL}l5x$K?gi9lg`4FGwuVq}wu4sx0NBne9Nd{H{9}JyVW=HNdgI{|TC(~u`9H~)w z(@&N#AnT4Oho_KT1!C);q1JogTQeKA@yN{^s-WSsu}(N8@xWw}AM?)HoAAU{s|x4K zb0;KZOGzbewk>*lHLoBsu3JpMK&NljkHd)z*S{#7x=`+t^rX~OtLHpfi6L4Bhd;C( z-ekPa>Q>3OtFK!Z9{C{U)+S;0Wh+8ab;*Zl0t!ZVAkGmi|XGFw|!$Ghv`WpxQWws#y}S8x6tG z7NbiYuO~K>o957@>PCTXTYhZ}%^?7{)>S?_qY>ds&1e*Ws) zg)y?mRD(RTudne&7>Q?jd>mIF=yH(H4HGuEUq9OKgM9_sfdq6pkODm|3LVt^`zhwY z#K57b3yK&_xtD?O;)sqw`y-(O=Uw0L=LSaDvs!cBmNa4Hho@*phP=-IOi0`e(VDky z+XjikD9qm1POb+lrh9TbWOr#|92v$S&@$xG$DF%x!5i!@@BxaAh^#In!vmM}z3K2W z|05sqrnmFG4>jb5H5+_>uYv##U%k1^jNc^vb&F9^8|{zzgK_Vc0&go8%e+a5qv_tZ zr+Yy;A-T+4>jE*u#e6f(s@g~~_1m5zv)lr4(eQO=cFXavt9m(_NMnJS?^syTX`}HF z>rj};$E2S5cKybUv@>Jhg3rSH*I)aGojg=_ZdEtUz`dXzExyHJQ{$sUu%%!Bql@`X=b*NV9RJet40OQIqUFM!%+ zxGa9R5kAUgwFx+!Yj@F-T+w#w|vMINMr)F5ohe2d<= z4QXh)Xw(QE?-#&04^^q*%-$0cBeJ^59i!AgsP3O9u{L6Mu}ZY9Y7aO`4! 
zyt=SHKq)=dw8123Ni~tvgX(G`^9H*vNOuJFjG5Y={W0dSxOG3by8bu$+6Ps;W&9kB z#H){cH7}bpseiDhA*2&l?@#TO+uhS?A_99mNruY{Q~k+M>7koFSv$i(ZkL`zMRJ>U z>m`*v#u+jQS6!bLhfE=aTxtfXli7kBJJSb`T zZZ+5RJxfROO7>H;!>-fR|JR1CD66X^Rt%(j?k|I;YS5j9bZERCx^g8&ymKylZUJXh zb$RCnxSFs-n$VKtB``I$Om8Dl-=M&p$b=>Ar~d^_@(Bj$W(oT-k#B-NnL0?O2^?@L z-7t0c*wFt?1tMp@aP@`{Np4LgsW&KU_+}x9_BHsIE-3wv>nn?YzI66{s~!c z5XC7tR*Y1XXm#d;)y2@T;V5oXGa%zM7@&?FvR*0=Ik;%T37EUh-_Cz_kil|DHG>gp*wRT48E6m5I zuUdQKd|1tuIP)9lSv4gV1Dw0?@m*4@gFbC2e|hubz70-yg{vPjTfO>xj#PE!2`&rf zU1NB2JOgJjjo{K1VeLWjAugn`dnj>;F`8F)1+UXxya41QR_5a7M1G-=oP+K*=7z=+-hX&d4*wQrtV`^@m zA6NA*{_uOHK$ZSGv#QpUbw%=S#kWh{la@KD57wq$GiYJy*Vt*aH!s_6Wty-<)6ctb zQ=5Bzd{o!;4y-8)Wj`gFF|s z=3lWsw7}M5t~%RZp}%I0(%p#Ijp@4^G&@)qT!)X!f0TI0=6@DBmv}G#Rxqd6%N#nT zu%VOv>OTr0$XtKls&QmCMxaLK{puOxsL5e>-^wGG5(A#BR+-RDBikh760bdq`O`}- z6oqa3VjS_lW&fz2YEEIzz~_gP-Yz+pbg|G}BJ@31ceVbtL~mw|)WYMkQxk(%G+wr2YP=03h-N(<*s3cTxQonTRxuoy&&yxk(Mm$yo zvJ+FhjyLf}HjKc#+foify*(lF)BN${D7}wp6pMK$2rc_tVe7z1U`W}+(LSI&cMnI$ z^#5>S`@^FRvhnX8OzPGroEcrur;$hn(>0PTV;MnRr84K4v7Gf%MoDAbzrAb@XifPw zb&UOZ8TLb630obLyIuNw@{0TQp4%$l37Y+uakpe2wWAIoxztWhl6`p`Ryj;Q?ekB z&9+IL`5PZxyLN3|G9;fYV3#fJ0CL@v&eB`i(3Ci&HN4_LTEiC*`Hq8NU`Y^8_|bO~ zB(&(V{R$Su`_K{|FJNy+%4}Vxbn35KwQq_@A(1<-UB9jpg#LUIYXfTKPh+{X%9|58 zBL8tZ3a^?aBNK%l*`XHJ6Tcm^SUP?s8eF{0e^o6zSDO3O{>WXh&MAgn9>~EsR96aF z)#t4(xy8f0S{QrVx*%@c&dtZxob5lp$T$gJiSIwuChvGLxLv8{rJ+w$PwtI~1EY%6 zy>xq$dGZ3IrxYz-|Kj8Gj`Cz>?pr}yN&=K~itKw9e^Jsf;SZ2H>MQa1(esYQFKcUB zBiNzK20YA40zUUvjqe$-sayA$$4h;})-?DiEH3)c{zJ9(P%^tXaaCS?`S?41JGyPs z4n!}w9ng{neeRm+q$67 zWuULKQQ&IVkGu&Ia!(VD)I*;K3yZjGh%x_NIqh7j(H{JxZ5rFX!>jMHd)9NI?&N8n z1+^Ndd~UAU^7ybAN(l4noi8bj-XFarD z5CQ;ZzP^x%sm4;yce9T@@j@+_7TcTD&h_VSSYp1%@8^KAmKMXcxV$xS-Vw~;1%AvK z$__{ci1!OZi--(RXyd%i^|}`R?2BN3k*2N>;qhYgj2zI4Ax8c-2Ktwk?2yhJ0_ma1 zJham46iW3bGS5?&KFg3^o$A88y@&ChRqAMRxAgyO}6l)=>^#R9X9Ua$vyH;vXAbCQAxQJ-TX(Lp>M2 zgV~e=P!;2uSZUKtnpt z->2SyhrArr&Sj!1PAm+Vt7kJWYrE)WYX*ZZ#co=r*d8RAXVAcfeOL;S&vpazN5_7Q|`XkOQK z)Sa#q`y5WZHxPnpXUO~~X$}6ae;7q3S5$P}xN)Ny^Ex-!*Oj4BBsn&u3|b_SLzl_) zshTrx?cuI#V3x8jcB6ny;220Jv#=(Hz5=83*=@pfjWZbBqbah)*`+ZcjP&DbKhH^O z$qc;m=#EeKX^U;HkvAtR+fV*WOw}6$!iN`T!;B%3Kij%K+{rjbG2eeu>cF|4Ka7(c z;$_7@6P`dml8~gh*7ruaWq=#Ol)?j_Mji}QBNK?aLFykcj@PAZ5I#F8RgC$BwFo$@ zBCzE?Lws!v_eoKO(SLMW=D{8}+G>DqnZ~9tv0Oy^EO;9@1`chwCH7hKm{#VY4FYYG z{F%21GXKBE|7X%AFeSe0*KWTLXXa+K`nUIeT4a|SDv8)bJFnKR6(l*nrx915Y6IGD z2@J(xUv(4?EReSoErfi$!A;_bjBinZavT3@rn?5Ha+@39M#J1tI3m&D0#G}xiuv#= z{w$f3(w-*xGmVLSCl;S?m5n`2XCe9C2HYLC@EoMFsy~cN*z?!W1t-Ao zaX~=i1Nw4I_rDY-Ri<$!puLU4*&D^MbadcY*m!HTlhXnyIht=e$Cs*_Hw`{sV+hSJ zCUjAd#;$HVpxQw5X5a~E$Nt^%((94`E2{->5XPO@7rR0Jz6$ai^L1YaI4*ew1xKGW z{aj?LG*vA6s?K}2dI4=P%#2+d*V{SyPbz)oDsp~LIz^tp{+=!%C@2B;9T9!R zUf}+T?jxxy{VT9H$-ky+$X@|zG3AyBNMy1Y^~ypdG!I7gWd|2jdR2AVSX-Y$O!q}r z^TlHx^2u4d8E;vuzs;YjlGtg4)fQq?m-6*dEY7J0x_+S=m0c{mg=$5oS6VkTG4GjyE zhh}gFvZaoemKP1r-5{{_u6J;KIeSiSKg5{6II4Vs`BDWz3UXKx*vb3L8@K)0lyN@;DB|%Z&VXxT6xa>h*Cfr3m1gdSA@0+fzT-E-QfV&18ALyBH;V9M%NgCoP zf|{*ud4D6A6Kh}B&yWbmtnnIUDO=e17uqZ&b`+Fkl4@$TMh#$CVkwCzjdQSl4<;J` zaKe)ve|~pV+S4EkauvzgaQ@!8Z2lIJ;#GpRbEpzH7~m|Ubc__SRD+>~k!HG{V6}6^ z41|NV2y-4EOAD}x=jOTqiBL!S8e)eA&-DW%^9{1K>mdgU{8-I&6}$E*a5Wt0PzY9q zeJy$4phM=@#cP*IExbS13VQgX`Cx9V8aKhN)&2E0-g{S-gc`~OfH4V zEj4wT17C#@e!svBDGF2v!p96`jPI6|kfso!aT`i0)Gn8nI7(0{5ttgBb(B z?;L5bbB#efk;dzuEmt9)ty^FGk-?0l3&{cDza=_C113sY6V#Or51BY!8GAn+a)vsI$O6! 
zen(MPe`U1mz{A4>X|QPA4&>c`9{^@Su%Kx2kMDsozce<9Kzy~g#vC42Igo(jSJSK4 zj7Q)+2Zl>AIskH8+CkLe1uK@AkQ;l^4EVq$3=*F=FTdepguv&xi?0s|!v_>8wTqsh z9YtXdjlV&!DA9>QyQ~*axm+h2tcF@t5NC6uHZPe;b7+B2@qG7|>UJl*AFJC>`pCv( zCe=#&cHjm)UdKqZgU&dVUQI~WT6p&uhByIs8^C2oG0mAe(2Q=M8}4M$8PEinckIDu z=tay;Ly;JxievW_&W@z^kLkfNSORL~-QImD^TE!E={$MjL=?&e|A^ktjT2^a-`)`a z=_N$PLKtlG?Ct-Fr9w|{(eM2Z#eG=SdJs5))$ddF#}`_X90&?Xy`w1o3A{~2x>YII ztvn5t<=&1eg`XW3gP@{tk^lkqT_os7s9R3IO}z9_WiCWeB*r0uF<$v|At9$Q%_09% zh7@+BKLw$k3SZhi3UwDBt}5VDsE@2B!nhN5bP4}#i{)y1&_Lyhf0XRK+Rodxd2+{| zG`;rp_k6_fxT89JfZL%_#vjKXICt~f-UyDMiLU)=f%Oh2%WR*aU=dB(?F^^ny8B{D z&OZ%Rj#Ox-Y{6YkDS6(eu4$W6OEh-S%POv~Mz@Qsw->nu(=d80Mf}gq_37aJ>XOgH*Q-;4&CE-1HoEC(?BE|7)Z}0?Iqm)$d~1Izl=nIA$s0A zN*V}Nox;~ZzTqO00YbV{k{_Nv*{y%?h<^-rHxKhVamU#QtBRf7ltYVl9)eEpd|24W zA=xrI6{ioM0YwqD8$F&VbI-Ybb1m z{unPH-GnQlD20>Z-t=(%;q6!nwYmdle|H`ZB4_Y}nnv!NNM7qY!j?F4g7^FQ!+u0m zQgo8$iAW^mFo^E81$!me?G6lWsY%->M}kGq!Tv_ZVYDAAD;C&LqWduY0IR$dvP~ik zW985SV&g*lhz+p89fqt6joAP2;{oy$v;d_$Ra`%bf;-{(>!*9hmux-~5K;m`2pe7pu*P|k5 zp0R}RzDX7y0>s;x*o zY=!5>X~dM(&MzMz(?Bn(i^o8#idzr{0Sk`CGS`p1Gq9D(nhbd$+t0<&kn96vflfm9 z%EOR50`!khfHQnS!f2s=1@Nap=ObV5>fqmj@aTHx3y$PCGEIX<*m4?q^AL#zoeG^d zS~f?^r1iWuLY*S2>cWfsnT0X-A=RGo0`5jLmmXZP?j3rhc}%K299<_No?!w|!Kb~Pg2F1=JOy+SzkVlOXt`slFJbZc7 zOec}_&qWubgQen%g{<CJH=ih z{*pfh3%qt6P(z=AC98w;BRX&YPg+Zc>`fktbd7ZVcBQ9T_QFPG{3boPDNq`?ioNwmN@d} zjT?-xWPAYzw%u1kXP@x#6$kmDRR;*O4!#s5r;A8R13`%v-eO zegK5T0xf_RMv|C>%02W+=i>WIPvefFke-G@88x^>IR~IabHJwi$O4Wyt5}>z_BJVr zHg$Ih9l?p_{63gs_ICwZgZZGsQGo)GD+i+EV}P^T`VTwp!Z(I~DB8}wcwxwKaxi9^ zy1&gAFfxagqCnXojN@96Xl}P9r1+kKN62rua@c;}W8Bc-=6?bs|E=lyIeoV2?nkRH Sr5G{z4_jw7>#p|JqyG&Ic;&Xu zvwJ9%RF^(h(=;G^eezzA=|Y40X0PHK=e;$`F+R#C3>0yEVveUwU+!6JnD)iZeZBp# z$VQ{du7tvW%Od?URtm3EVU3Q8eJg1(R?JD8ijq9hf{D~%tn9rceAc(;;26!w=pTLQ zsg>HsNSsBqZ%_6CU3bu;Vz2Mh^Iv6Yi71x!u~_ER7aC6m@A|R8kKF8h@Gk=y1sY8n zNu-6v5;=r1piMOXKYqDCPV(8=hYue*^gdPXlQr-=g=LNs)YobZIZjSb7pzOCLz@x( z+8e)>S5QE4eE1tTy}@Qnht@$yG>1tw9x*y~Cbo0mo#3y_MlQs3pAj~C8gG)P8zUKo zCm35>bC{a3*y`e9>eP~7y%O#hVlYYn71%(yxNK~~2C*8H`Cmh`^sIoxKVz8ieoCN@ z8AC{JMJbXe88zSfccA*WcDrEtG9Ir*NAjIZT$g)Im?XBF6Pu}pWxmbMx5THZFB#>t1iP;{ilI)=Wy2ywykN5PGBbLbTH<<=JT#4!YaxDCJlm&^)<^h|t# zDp?n4mtobMw}HSB748=!`mKGKO;~Zj*`wG`rws7;Z^*Cl0cT}VE_V_L7x^(%Cmr4j z2in?lABCthGBJ(cEGsFA8efkV)W^rfXnJ~z`zhJt8oYF~Pxi^C56Kdbar;|i2<5DD zM88(@a~%(UEug|y%NlvEyt;H_^7qvJjpxsvp+XvYuLW(94wu@-O-xMC-M{G(&qHZ0 zKxbaByq1l{&Ps|CV+Ul5Eh~uWRd*NyfB#GoIcn>G=$jM#qt}Ly5-^d%|*!u*^4F2=r|$el7D`d{R>MTU%Rt7cOX7S&iN3 zEU~R;m7nk?+4+-JYiVnn(8(LhMX!D&Jryud%bw5?=DwWc-uZz_M8r%Gqr@+6%_kv2 zzTSqbDJsg}^-0gnbo%{sGE4ccSN(cLMMc&*4^~0_mP(iInQce=PH^L631Y|h;EzIiHsYYk5H6dW3i<| zr+CvO$Mu~aN~~0(&o&6@f60KR>_pX4s9y0bDA@K6_QPPLH6$+-36RNr|>4NaORuJ{D?9+wj|9T^SDa6zI+l-Iew|KF=W8CDTW`*DH( zFPKcyox*HhtYJ@cG=!KA$|J?9CrZ|(^U1KXY2D0w;vls~XEZg#7Pg zd(AW>9UT(ouQ8&5G8)9m?(+P)QuE|f#(G$jcz6Rdj#9VbGjg|~mh1Xgbxg{A=5-Zv z3~laC-YFmRT;JOmuc0R;CDqf{Z^okq?4{Ym%`X;@q3*}?sGIuSyNZdma~L4Ds`flN zTy`zVw%*@i>ixaVLnFx_rk0j?ojBI!jo*3CpA#7vG;cgve(KmtZ{}6bnS&>oS)Ra8 zZ*(qxd9t;fmoyh6@>JvAJ;lABljC)vUml;6=15Su{hfl5kEtZzn==(UJdHUA_8yW1^NxEtlTnTJX6HajWng<-65CUze42 z#z}p^x5u%~N6@P+mouz9a&LVu#-^fHngnq>1$T0+&@eNTm6IbzO?b%{n|rMO-QN$0 z=T>GF(2dua{t#T@+WT}kST_$1Ex0q8UmGgZLi9ps6?^5J#HN^7;;@{5*8v{iVqob< z;7ri{zO*zS`tNsWf5}yO_5@w{in9kxxt&!v2Bf_v6pEorV+r=q@UvuZTwe7x5N0Z^ z`yqyElAjJDFT#ZSk3N>!umuA}GF)sGaJ)NWVr3O`u)Wj@ zwWp@W6HA!Mxa#%5Fu!$geO$(O@zT!9z*78@+xS*U;o5bQ%WE>Cg)`~GSmvA-VU1$* z)2|G3zIlzq(CGgYaJ0Y0t{xJUKg5)mnes#0;hN9}DWo1VYcSt)F_Hf8UxD=RLy&~s zaR{>s|9Q$Ens|Z7cO<`pBgygO>m9SLIOVEP!kh2HS54L(!x&8b7{8JK>J9zNRQCQC 
z)#vMHnRS}86@8`cn@}l2w&|smtotl1=T|xU1r|QbOEIE##2h4&J34gVZ*_3)A8&*v z!P&9=p1jLz7q}v1vT&l!eUSfjbGW0Vqz=|Z&oa#IF`0fU0}~TT<8)G17Q356wMLqq zh=9n)*oMw`?LaPa5_n^Y*yzZLVMHU=SVHEtxHTk7c%vtkQ6IcQ3bZCJs8<>MVIp&Z8c}wX(8;`BWA)WZ6micC;8Gh^vhI~1?J zHJ$k9PoQXQIAp>6kJn|aW|y08r+OCn+&5Ww*Q%TptmDEE4Efg#{W80we*L=rIa`rQ zekwo!#@mQP_ZR^8)t~!J*7k^Yv=<4f4%`c}^YJ+k z$d!oKrFB#T`9|s+(o>a{XSfu+XR7qsatuW-T)41#{ChgDs!BG?B{xM-zYAY81NdPy z0s4a0vUBXh!a~5}BUkBwO?x<WMrkx2s=0y+4DCuH8p&){_cEN ziU4F#S9iCvQ&>S!?14j-gz$dQZr^Nj<@JHc5HLpql1E-v4Q}FM}k-9H{6qdI6<>lF~ zzd0Eb6C)}mb&EFHqpX2dM@MJ0(uH~|a8*KO`}5gyOL0_KQIQz(b3s|zc@q;ftGFdY z`tJciYSuOMW`TM*~ChKb^xyMnXauOfRLi?^bPF1u_ z6U%%d@(v+4K~TTsL4S*51)rkgN#2lseky8e6AKI6ix+~PW0j<_glF&GvBSZLii=|n z3=Hb;=m_eEe{>G-cZ=+Iqp_Y8j-Wf~!@5>D0}wHmj(>#M#Ed1R@n6+?lFBa%WGW4!h=%*@O)*Z%!si`Nx+Wt5w^wzg(irmQO^aJo5ObzxRsNEL0u z7fFUMDiW2H)P?+XbaHxLB=+^|*UdN3J*u31eSIqew&wiD7khK*QCTYY5EwpjQYiOI z4VGH%pp+}N2r8J+4VY65+LNq7>qV)CNOGP4)U6K(!Z7sW*z5v`;BeSw93u-VCH4IY zERtp6Vio~YCPr94naG|ffP#`p0AmS!aFmC?H`vS!amyKQ54u-7eO$eje#GFlV#-nd ztGm+dG@~`Qcx@!v)Q&c_1%-uU#qzbCMXdY3UokP+Km*z_>7%4(Nc$M6yz!n1kXtjr z#CTdxxtRy#JPdGG?<`uq>Q4KAK8bA@LYGJ}lkI@4v$OLIj4<4*8^&hm3hu_<&EWYc zwCv7Rl&FA57Ox0iZ5q1DIP zH3R~HftGA$s>t2p+{Zu06A>{)!|>mQSQBPv7nj)&hd-5X?oE1+oE?kiEt)Vkqdl_( zfuj&wOB?r;o9}zB%`Wf7_3PKKneBtVd?VhYUzv@}W|wg*lkL&H7ZUS6mbH8yqKpyM zqCEbg9{k5Ipuw)IlE%y|B{`X|GMjGTYd!lG zVr?D!?VGj9f?ok(x$6;h-cs`zZGVr@j;3keoNd*4IOxD7 zOmdk^INel1MppUnkr5w;O1s6H&#Q(LXlRzAudr?cM-(!J4GdB$5~C+yoUc?R6Jp8! z8i~hbma`%)+R+i?DaYx*{w+%V?^cql%_ljo6=%wo3Sq9|xfiimWpll>=cXcH1!v~)E&Lnjw+~IlN;1jQhVm(T0g|l8>OU@QN z?a%8G*1d`wVz+8dkHMofGs|HdP$qRoAe49G{WcvPuUh=)&zGS8{4VZ2*v|bIA0AE& zu=QwBzVX4xc*sy=S>s=h?Zq#xe|A?RZ2yw3Jas((@HOQfuUv$?Ba(i(lS8I?Ck);-_#KFPwjr*#dUE-b9;aT8#QA6&ktE(I| z(R86wt^)#Nu^gH7=e#6uUBss7cfEZm=q%nQ- zlvBw0^7?OKQS8f$nT~CE^k#sH=&4f=dY3$CO;8t;h zDUOmgk51jRHYJm1{8X7fcJNW8KOr+c-G06!VR;~*Ue+MuqqCCJbo7vWR8f()QclUN zL(MqRvt6~+nyEl)938iojt;5trK~WA)@ULW03#q;BnAUB{`JpAFXX>h&$H77RCK?a zGwkn;oL`Tz*c^S!uw_3bQ&Us>=5W&GuP>=(4WOpfcm?zd4qIH+|zusy*W<70DiUm{x=slXAH*sYkKX=PAuRq{h zY!u)Pr}}AES62aLWo63+yDFe)Drzidl2WOjy8R0b=sTj%S0AYSJKEMWG9pLOaL6uK zJ+GaW6quqh8}P2GP5~VWeSXwb*shEX2!Q(WiqWjMg7^5%n%SA=2tbvwrCY3`MbUY? zJP2*Pqu@Z~2x!W4RQkn>7tiYIIDSv~(*OCrAyt(93}3_HkkCKRU?Cz%pb%VR%n&g3`D&V6S7(&&|`u z8F0OYH-hWwp35`#WXX`gFjENJ5&hZtFT`(k*uYd#b3~#)d|iF;S$}?$XCE>lms2UZ zI}W!xdFMZhGqG;ajoVS7W2>sJAgjU`r{VAagN29 zzk46Tbyp4%Cg3qV9so6-%egz%{6aqsM8(+E(t5hw(7%ep29NzZ=0}RHB0`VmdCRR! 
zpw2rcYJEYJV|u~H!s5`KAput_^h^Ak(OfXp)PgQBMJwLvL5 zfAi-0NfPXL?w=>OzO>%$c2{Lj7b>Q~5a)-H0q-)y0Fb8~9~-m$gWN}szm z8XZjrXgfZh2Ki(`!Dmo{=QNzHI$~K)mxmk&+Jqb|=v}<%5nKu+B0o2T3WfqqnD5`e z`+Yi8HjB5@tvNw`;Zu%=bIHU-tjPs8kv}(UCZn4p$mgLcEKfBYTB%PY=nBXhc&s17 z<4Zrfw4PWC8sQeqDlXr}&&^L&@AuW*x`z5vl37=$6rjTMZt~}Fw;g8Z(%b*k9vTNC zVic#mR~t~6XQEVqulgH9LqF#z|E`*cG&*zk>=vR5Ta?)WqS4dSYpUP)5XX7zJ$gFi zPy&WCz`q|qete6H{BpguEry8*#gnvDMYrEG2w=YEgPQo-ZVX;2wa;*e<+SwV@E^mc874y{MOkYy5C(p{wL2Vs2PCDH~!r%>yxF8CCIYnv3(O_@}ra!xD>s} z3JUW(re86~kS0^E*@i>PLk~*Gi)JiLm+$b`@Cl;(Ntu384#Np$eYtKE zdc3>*?d@rJD=5fqB~c;5s#n$(0wxlK8^sVU{UXLsEG>$fZEm|)9XGNX1v(f&Ga2t0 z4HKa|u@i!)e{cjV{M>2bU>XZn!!nOW+G_jM+9>!fGhFX|nve^_cORliW-x$#IB;XG zL>Kt+s09oRDs;aPdXL4jY(zjt1`QJK=vxmq7>OW_>A;m-*Hu&3K@4(hD(J>E_Tv5r z&W(9?0Nzf?%}u($(SS_2H?F;`W`_9{>`zFXuL+%ZM4JRKeI*{N^%eQl-HptQA~9%U z(X^b=Ko)@);6)1u^iXxemc~J!DRl;(F)7pvpkWj;@{QNrI`81X54i%NaJJIDa)l{y z_ea#xQ4m__P7x4@po%go+JulcD@^G3lV}}bXtE&v%>S(4AVUSgPlyC4S-*D1*+r-G z!o}KCa;CM9jYpP2Uo{zJ9F8fy!NDqF9l1VUBLG>}ohjA1UNfCY$s~*yD;)SPY&E;H zZ?fX4Rf9#~*(~6icR*Q3D@XuEo`ElgG!*rtS$Y#RAKe?CLOP}kVjR*huD)NsjmHyX zJekceU81>s`SNCu@`gQVfQUSu{P1B^D~>*|wA9elDyOAjfM5P&?3|NLPBOj)!N%g< z(C#c70?$na|F(rsQ{bXg`Lmn{Iv=A~-HNcuOVS)cqA>vSRK8kl>DJH`LE(geLU3-} zICc5TmFEL1*oQ+mD$~SAEAtucbfQwy#28K38TURe{65@jV=}Ubmg?s2-s0P0#JL2*`=0;InlOsP7jhg4Su##!8IrcKKojz+tM}gDB^(B+1*o|G zZMsuG={m_43uDgMl(zFnYyNc%p*oUoUP4SP92&+m%?M+FyaMMFpPf(Qaa9Q!ufB#CW5xVlpNaKTVM&=YG>O#JbpJD%_;3O*VYrrNz{#(*B$`BL@pMFhbUhQ zaog&yMP<`uh5!5)wCQlh@WfQBA+6Lz6($8TG6lUP6W- z5Zv?H+9Xs|R6Xp*gq;g-ovxAom;a1)JYA6R(l@`!zMJ$C6cTcPfUq3}t)%Ny018MD zx|o3&7@#PWjC0eG|NL<1rLxnfz2Bb{aw4~P#9?FhG9Z@cFTO@~T_cUcV^YddDJ~~L zk6Ya(M#aX)+E--Xv!;JNLGm zO@m!haj`hCPktdGp%<(rKc0HF_Jeitw2$%}hCnUkrm|e>;2q+2f1(TsICn4S>sfO z9JJ!?I-14yA{b2e(cfOH1n#&Y0qsoUJ#TGoU4PLipW}=(1)|u3@00fN1ZsaE_wNa4 z$e`eX)ZMhSt(md#rrKG{fFMEUpTI z{Z_40gUkJk<&kj#aYV@ViN1HVFjVR(q1|^W&MrIq`DERrjKjAul3>f97Ro`g5kB%- zAD_iyms(NiV%Jz&u=u?1A0sYbohS`MbKr|nW0UE~Ke}`aLWIp06k5F9D_lZ1>3B^~ zgdh#?`RqB!e|9RqVZjgq@wUulczfW|Cw;E3om$EUd~L_h7p~8k{LsGnxx2iUTLue; zubOGKOdhx9CxjtfIvVrSD z3~ji)bJ?dnR{LIzXpppqR+zZ{@>9FCf2QQHSwYGJLY)E}~7c7(TpMj|x0k$lL;!!4G}{q8uI_Z1Z=2IjC{%Th4<}<08L;_UQlbJUAWtiA|VEd4o2IpqhOU;SkYQKvlK=O@H#t32Ihx zjoY`+RQX;K9r9P*H69c|6cSr|c_nmIL^(xrboAmQ_q5bh6bg7`L1E#INhy6sQ!`bk z0HE^k4;G&wd0Oo`M%2){W zt_&yMolhDm-VMAos@?HHyQ88jwNBMkp+8sA?L0un1@v*AA_P%zalZnxEVh121u`tI zs0hJOpaEJ;_;hl+d3v^0-yBDvE|enz21|&XDlDb1e`ZH%SXrH9)>#%+9?%vuM)RL&EXcLo>3el4Rv9Md=FwlC$mbd5s#!LM1lM<8 zy?GGyyy54&R?q)5(1-5@a`0N$EUPq7eEj@G01$7~tb-hfLhbw*qK;#gU^Fqu#hyEv z>>Q`5Pl^zRJ~latWHUD4qtiNtGh!SBu;kNAlcE{}^M(8L##nmx?~_c`FKKQCgk zQ{8kp8!vCo?sulNOTTH3_P+Ov;e@S-Gby~QxRZWu!@#7SLz>b#^{E_aqK?{&!2q~_rvSS!F3;P=F zM1I{KZXe|Pf^K6&DR$xN-fYwtp?7CQh?vTjV;?;7k>(t;(86X$6EQabacLu5(-nVp z{El2M=vv~gj>~>~(M{N)n0s~PH<6wjU8RsXgCR6YYY@Vq(N&O~^}Z6KbU`e~vi_K2 zYQmqryE@(8d4oBgH|v2zZHx^r2QNuQA{g6M8EIGOfAl9mbM^u6!rbfQp@8{m+~)Kx z*5?}&QMME-Rg%-L_ClLnwY9Yk?A87^ia{2kC=fpap1?j(M%}Tq8b0Sl8xT5TvZitw_sWTObDu@_ zz%(F;3RD`N##rH8;>IE(RJ;;pU=B@$VYHn{vuL;Uve2^2NwNl-2=SbW{IusIAEke| zWbK@}Bof}`>_#V)_WVGlqk3r|M7K;=v0H?@rM!9FhQ)Ik;6OTKpcH$$Y}dfhsF z7d5z-;`c)iqm)@rnT(h>03yjz2!IC_<-+QRjSq)(@Ukq*zA20dkpp-Bl;87c+5PEw zenZl~;4zsQN!68*qQ_(gB=18=c^avbEHvk1yl?EBa@#q$T66B{=&;{hMdvDRe1uDn zmPftF)^6XFR;`gLx2UDe4X{_ytL8Fy~KE;r73dz&K% zVp!Ue$36P>){E}6wHd)TBMg;gvvHYuE9{%)O->p&^BntIIBw z$7da9r8yZ&TIQ~p|L^-O@Av28z+hGZa1?R)vz~rB^q(qdVWHpdIX@cN@72jL40`r- zS2>{b$k*f?@V4eq4BsFV5h0tRWKrXWn&HjEcS3#ES~_O4X0kBx)LtG`jFdHvRDLOI zd0nu1@OCk`q_fc2Oe11gSlsA@&|_W?>nFi(Azw0rCGOnN@aS{5-aas;?K7Ojr7e-6 zvI83Au0Fd_QvbNdLj?Zpx!rs^e%o_f#`+V9Iz~Lczpk}@ 
zbRYj}_|LXmc%|6L{q9lWzi&O;>1T7bm9GVh#3>#;zFglJ@kVWz=7}k7dHeF_Kd+DH z)qU4=%()c7u+KIW0l@{7f7p6SNl%Xi@6q}Cb*%weQ!_Jub#-1a|4 zsh^;O7;iwCo(BEWuecTcIO2j7e_&%xRQ_U9yJG7rTfJ)9F8}eXJ<$VdFN|nwhXvkV zNxZTB$Mt^c@FHoI9PVkMZpr$=_=o&AgC#e)Y9~g7Nz40Nz!qR4-!ny<>7uT90an<

pQe5ycN4abPJ(KL<53N)QnSb(K;Q$6^78TZ5>0I^Hd9!f z_yp?u=d1^ye9#-I?lL1{Rn{e8#Oy~9Cs2Y4@U;+%gk(wJ-rBtmUuhLQUdhnXriD1{ zL-1jBVvGA$r}|$sDmfz~8i2OOK-Dms9oGD8VDY4ufrcJK=wUKJmi=->eh^2NFk={n zzN&?`yrO?0uzwr^am{=6iY%6Zmt4vQ()Zr3Z23%h56~tP=7J)zy7XsXSNtFXA&uAk zPH771Gt!W0n;wmy<46EomQ6+BeAx){e%P|oEG^!~nuYmoOmTaRl(sMBhVpzBsD$+f zSlfs=Iy%B1c6{r!_t67x*xx2~gH9?W|?saFP1qBHFR z0Yz&5?h;6{2lIP*=ym4u<#2acm;n{&9aNZ3CZj2ip0}eVin_x2BV+%|6O)65S1R24 zF|Y*C|I#lq>_efhEw7#e>>&g;sLeGt6MZR3*1LJHYDlHf@q4xKRkz2(UKw~jR*8S- zd!rKNfe{W{;?zE1sE*WfoPyphxv55!E21(R3VT0JZs5Q*tz^TtbCR^A2=O25NHjCs2 zg@ZYu4L4o5UAuX--8;WKa_zjWZO-!Ta4j=4aoaj(IHk_z{ybP}`AVI-q#wL(-gf)wbiM4>5c*0`z0D#X6V7XaJcf?8? z_N-}mRM=fyTnHDbI1C2LZ!cVcM`OHq?;fF>@-eJayocoi0K?7G|Nh>6XH$yjW=FHJ zk-r(E0n!H^xTQEN^1z7HSX%!x93ZZEH=^^Trlt9;mekB;HXaFm01Ah1G}(b2N`@-m zB}30|Rg@3~M%xR0K|B5Dq2r)R-_21C4i6LR>a3Bdg17)rGYf?#gP27iP7$iB&Hb!2 z5&0F-Ol~#ovdS97r5_4>yKx^EeCN)cRoev_8JRxW1R^cR^{EEVI5w${_xH9T_l4@R zpKS%G6qJ;-1CcN>HH|#yF^rcyC5nh?J1Y^VOWzSAwvMw^^Z>|K=EevLM?_JAPTY*Q z!X^+Ah-+(mTU*uCQtH#EiLm8LgaRiH>CEO z@K-s0=QEG&SOMin7%%!#H6i%~@~Vux7s0dnP(w@a=1mE;Kg$BBu-+wY zJ3AgSS`NW?MKhPpSP<00S;D+jCGBd-DD6%l=;vyv3 zc=YQ{DGf&{9ghlL>cc*9U;_`VNC43X(RlxRni10U&;q)6$*s$qx>^_v6~!$xGAwbvvS zIJV#(>Z}!HfjN$m!#scLQMaX)6>K>_$HXD}!neS+;fU-|jW&^$=>SechEGoqT@WHAyFv?zPICq6sNCIC)RC?Jzz zi#IH-tKVMY866xv0kPseUOo@o6#Gg4_UT{b=TisBV{>p45CD)g=C@F8PsPh#YELZO zejfVoFPwqphag2nkA%{6TJC#ZP+W{`aK(eb{;ay1wYIhvY|Ty>8;PGjU09oHQ2sH} z{i>@|x&PD;ZpGUtkS~r%Ul6lA*gzm5qiF&Pnw*_Y1GdoJ>0mZWN=jsr0JK+RZ3zq= zWDx`)1hS&&sgr{axW9B^rH)=l``9|NS3{nyPKh UYn_Z>SnWsY>lk87wd}+H4~tj=p#T5? literal 0 HcmV?d00001 diff --git a/docs/auto_examples/1eof/index.rst b/docs/auto_examples/1eof/index.rst index bc6bffd..7ecc9cd 100644 --- a/docs/auto_examples/1eof/index.rst +++ b/docs/auto_examples/1eof/index.rst @@ -12,6 +12,23 @@

+.. raw:: html
+
+    <div class="sphx-glr-thumbcontainer" tooltip="Extended EOF analysis">
+
+.. only:: html
+
+  .. image:: /auto_examples/1eof/images/thumb/sphx_glr_plot_eeof_thumb.png
+    :alt:
+
+  :ref:`sphx_glr_auto_examples_1eof_plot_eeof.py`
+
+.. raw:: html
+
+      <div class="sphx-glr-thumbnail-title">Extended EOF analysis</div>
+    </div>
+
@@ -139,6 +156,7 @@ .. toctree:: :hidden: + /auto_examples/1eof/plot_eeof /auto_examples/1eof/plot_eof-tmode /auto_examples/1eof/plot_eof-smode /auto_examples/1eof/plot_multivariate-eof diff --git a/docs/auto_examples/1eof/plot_eeof.ipynb b/docs/auto_examples/1eof/plot_eeof.ipynb new file mode 100644 index 0000000..a3a7b80 --- /dev/null +++ b/docs/auto_examples/1eof/plot_eeof.ipynb @@ -0,0 +1,151 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# Extented EOF analysis\n\nThis example demonstrates Extended EOF (EEOF) analysis on ``xarray`` tutorial \ndata. EEOF analysis, also termed as Multivariate/Multichannel Singular \nSpectrum Analysis, advances traditional EOF analysis to capture propagating \nsignals or oscillations in multivariate datasets. At its core, this \ninvolves the formulation of a lagged covariance matrix that encapsulates \nboth spatial and temporal correlations. Subsequently, this matrix is \ndecomposed to yield its eigenvectors (components) and eigenvalues (explained variance).\n\nLet's begin by setting up the required packages and fetching the data:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import xarray as xr\nimport xeofs as xe\nimport matplotlib.pyplot as plt\n\nxr.set_options(display_expand_data=False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Load the tutorial data.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "t2m = xr.tutorial.load_dataset(\"air_temperature\").air" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Prior to conducting the EEOF analysis, it's essential to determine the\nstructure of the lagged covariance matrix. This entails defining the time\ndelay ``tau`` and the ``embedding`` dimension. The former signifies the\ninterval between the original and lagged time series, while the latter\ndictates the number of time-lagged copies in the delay-coordinate space,\nrepresenting the system's dynamics.\nFor illustration, using ``tau=4`` and ``embedding=40``, we generate 40\ndelayed versions of the time series, each offset by 4 time steps, resulting\nin a maximum shift of ``tau x embedding = 160``. Given our dataset's\n6-hour intervals, tau = 4 translates to a 24-hour shift.\nIt's obvious that this way of constructing the lagged covariance matrix\nand subsequently decomposing it can be computationally expensive. For example,\ngiven our dataset's dimensions,\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "t2m.shape" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "the extended dataset would have 40 x 25 x 53 = 53000 features\nwhich is much larger than the original dataset's 1325 features.\nTo mitigate this, we can first preprocess the data using PCA / EOF analysis\nand then perform EEOF analysis on the resulting PCA / EOF scores. 
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "collapsed": false
+      },
+      "outputs": [],
+      "source": [
+        "model = xe.models.ExtendedEOF(\n    n_modes=10, tau=4, embedding=40, n_pca_modes=50, use_coslat=True\n)\nmodel.fit(t2m, dim=\"time\")\nscores = model.scores()\ncomponents = model.components()\ncomponents"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "A notable distinction from standard EOF analysis is the incorporation of an\nextra ``embedding`` dimension in the components. Nonetheless, the\noverarching methodology mirrors traditional EOF practices. The results,\nfor instance, can be assessed by examining the explained variance ratio.\n\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "collapsed": false
+      },
+      "outputs": [],
+      "source": [
+        "model.explained_variance_ratio().plot()\nplt.show()"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Additionally, we can look into the scores; let's spotlight mode 4.\n\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "collapsed": false
+      },
+      "outputs": [],
+      "source": [
+        "scores.sel(mode=4).plot()\nplt.show()"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "In wrapping up, we visualize the corresponding EEOF component of mode 4.\nFor visualization purposes, we'll focus on the component at a specific\nlatitude, in this instance, 60 degrees north.\n\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "collapsed": false
+      },
+      "outputs": [],
+      "source": [
+        "components.sel(mode=4, lat=60).plot()\nplt.show()"
+      ]
+    }
+  ],
+  "metadata": {
+    "kernelspec": {
+      "display_name": "Python 3",
+      "language": "python",
+      "name": "python3"
+    },
+    "language_info": {
+      "codemirror_mode": {
+        "name": "ipython",
+        "version": 3
+      },
+      "file_extension": ".py",
+      "mimetype": "text/x-python",
+      "name": "python",
+      "nbconvert_exporter": "python",
+      "pygments_lexer": "ipython3",
+      "version": "3.11.4"
+    }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 0
+}
\ No newline at end of file
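Editor's note on the tau/embedding construction used in this example: it amounts to stacking lagged copies of the series along a new dimension. A minimal sketch of that idea follows, assuming a DataArray with a "time" dimension; delay_embed is an illustrative helper written for this note, not the actual xeofs implementation.

import xarray as xr


def delay_embed(da: xr.DataArray, tau: int, embedding: int) -> xr.DataArray:
    """Stack `embedding` copies of `da`, each lagged by a multiple of `tau`."""
    # Shift the series by 0, tau, 2*tau, ... steps; shifting pads the
    # start of each copy with NaN.
    lags = [da.shift(time=i * tau) for i in range(embedding)]
    # Concatenate the lagged copies along a new "embedding" dimension and
    # keep only the time steps where every copy is defined.
    stacked = xr.concat(lags, dim="embedding")
    return stacked.dropna("time")


# With the example's settings this turns (time, lat, lon) data into
# (embedding, time, lat, lon) data, multiplying the feature count by 40:
# lagged = delay_embed(t2m, tau=4, embedding=40)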
+t2m = xr.tutorial.load_dataset("air_temperature").air
+
+
+# %%
+# Prior to conducting the EEOF analysis, it's essential to determine the
+# structure of the lagged covariance matrix. This entails defining the time
+# delay ``tau`` and the ``embedding`` dimension. The former signifies the
+# interval between the original and lagged time series, while the latter
+# dictates the number of time-lagged copies in the delay-coordinate space,
+# representing the system's dynamics.
+# For illustration, using ``tau=4`` and ``embedding=40``, we generate 40
+# delayed versions of the time series, each offset by 4 time steps, resulting
+# in a maximum shift of ``(embedding - 1) x tau = 156``. Given our dataset's
+# 6-hour intervals, ``tau=4`` translates to a 24-hour shift.
+# Clearly, constructing the lagged covariance matrix in this way
+# and subsequently decomposing it can be computationally expensive. For example,
+# given our dataset's dimensions,
+
+t2m.shape
+
+# %%
+# the extended dataset would have 40 x 25 x 53 = 53000 features,
+# which is much larger than the original dataset's 1325 features.
+# To mitigate this, we can first preprocess the data using PCA / EOF analysis
+# and then perform EEOF analysis on the resulting PCA / EOF scores. Here,
+# we'll use ``n_pca_modes=50`` to retain the first 50 PCA modes, so we end
+# up with 40 x 50 = 2000 (latent) features.
+# With these parameters set, we proceed to instantiate the ``ExtendedEOF``
+# model and fit our data.
+
+model = xe.models.ExtendedEOF(
+    n_modes=10, tau=4, embedding=40, n_pca_modes=50, use_coslat=True
+)
+model.fit(t2m, dim="time")
+scores = model.scores()
+components = model.components()
+components
+
+# %%
+# A notable distinction from standard EOF analysis is the incorporation of an
+# extra ``embedding`` dimension in the components. Nonetheless, the
+# overarching methodology mirrors traditional EOF practices. The results,
+# for instance, can be assessed by examining the explained variance ratio.
+
+model.explained_variance_ratio().plot()
+plt.show()
+
+# %%
+# Additionally, we can look into the scores; let's spotlight mode 4.
+
+scores.sel(mode=4).plot()
+plt.show()
+
+# %%
+# In wrapping up, we visualize the corresponding EEOF component of mode 4.
+# For visualization purposes, we'll focus on the component at a specific
+# latitude, in this instance, 60 degrees north.
+
+components.sel(mode=4, lat=60).plot()
+plt.show()
diff --git a/docs/auto_examples/1eof/plot_eeof.py.md5 b/docs/auto_examples/1eof/plot_eeof.py.md5
new file mode 100644
index 0000000..48d6006
--- /dev/null
+++ b/docs/auto_examples/1eof/plot_eeof.py.md5
@@ -0,0 +1 @@
+7f3b66c7aec555c78dde9031213be3ad
\ No newline at end of file
diff --git a/docs/auto_examples/1eof/plot_eeof.rst b/docs/auto_examples/1eof/plot_eeof.rst
new file mode 100644
index 0000000..494c85a
--- /dev/null
+++ b/docs/auto_examples/1eof/plot_eeof.rst
@@ -0,0 +1,693 @@
+
+.. DO NOT EDIT.
+.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY.
+.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE:
+.. "auto_examples/1eof/plot_eeof.py"
+.. LINE NUMBERS ARE GIVEN BELOW.
+
+.. only:: html
+
+    .. note::
+        :class: sphx-glr-download-link-note
+
+        :ref:`Go to the end <sphx_glr_download_auto_examples_1eof_plot_eeof.py>`
+        to download the full example code
+
+.. rst-class:: sphx-glr-example-title
+
+.. _sphx_glr_auto_examples_1eof_plot_eeof.py:
+
+
+Extended EOF analysis
+=====================
+
+This example demonstrates Extended EOF (EEOF) analysis on ``xarray`` tutorial
+data. EEOF analysis, also termed Multivariate/Multichannel Singular
+Spectrum Analysis, extends traditional EOF analysis to capture propagating
+signals or oscillations in multivariate datasets. At its core, this
+involves the formulation of a lagged covariance matrix that encapsulates
+both spatial and temporal correlations. Subsequently, this matrix is
+decomposed to yield its eigenvectors (components) and eigenvalues (explained variance).
+
+Let's begin by setting up the required packages and fetching the data:
+
+.. GENERATED FROM PYTHON SOURCE LINES 15-22
+
+.. code-block:: default
+
+
+    import xarray as xr
+    import xeofs as xe
+    import matplotlib.pyplot as plt
+
+    xr.set_options(display_expand_data=False)
+
+
+
+
+.. rst-class:: sphx-glr-script-out
+
+ .. code-block:: none
+
+
+
+
+
+.. GENERATED FROM PYTHON SOURCE LINES 23-24
+
+Load the tutorial data.
+
+.. GENERATED FROM PYTHON SOURCE LINES 24-27
+
+.. code-block:: default
+
+    t2m = xr.tutorial.load_dataset("air_temperature").air
+
+
+
+
+
+
+
+.. GENERATED FROM PYTHON SOURCE LINES 28-41
+
+Prior to conducting the EEOF analysis, it's essential to determine the
+structure of the lagged covariance matrix. This entails defining the time
+delay ``tau`` and the ``embedding`` dimension. The former signifies the
+interval between the original and lagged time series, while the latter
+dictates the number of time-lagged copies in the delay-coordinate space,
+representing the system's dynamics.
+For illustration, using ``tau=4`` and ``embedding=40``, we generate 40
+delayed versions of the time series, each offset by 4 time steps, resulting
+in a maximum shift of ``(embedding - 1) x tau = 156``. Given our dataset's
+6-hour intervals, ``tau=4`` translates to a 24-hour shift.
+Clearly, constructing the lagged covariance matrix in this way
+and subsequently decomposing it can be computationally expensive. For example,
+given our dataset's dimensions,
+
+.. GENERATED FROM PYTHON SOURCE LINES 41-44
+
+.. code-block:: default
+
+
+    t2m.shape
+
+
+
+
+.. rst-class:: sphx-glr-script-out
+
+ .. code-block:: none
+
+
+    (2920, 25, 53)
+
+
+
+.. GENERATED FROM PYTHON SOURCE LINES 45-53
+
+the extended dataset would have 40 x 25 x 53 = 53000 features,
+which is much larger than the original dataset's 1325 features.
+To mitigate this, we can first preprocess the data using PCA / EOF analysis
+and then perform EEOF analysis on the resulting PCA / EOF scores. Here,
+we'll use ``n_pca_modes=50`` to retain the first 50 PCA modes, so we end
+up with 40 x 50 = 2000 (latent) features.
+With these parameters set, we proceed to instantiate the ``ExtendedEOF``
+model and fit our data.
+
+.. GENERATED FROM PYTHON SOURCE LINES 53-62
+
+.. code-block:: default
+
+
+    model = xe.models.ExtendedEOF(
+        n_modes=10, tau=4, embedding=40, n_pca_modes=50, use_coslat=True
+    )
+    model.fit(t2m, dim="time")
+    scores = model.scores()
+    components = model.components()
+    components
+
+
+
+
+
+.. raw:: html
+
+    [HTML table markup of the xarray repr omitted; plain-text repr follows]
+    <xarray.DataArray 'components' (mode: 10, embedding: 40, lat: 25, lon: 53)>
+    0.0003857 0.0003649 0.0003575 0.0003567 ... -0.001347 -0.0009396 -0.0005447
+    Coordinates:
+      * lat        (lat) float32 15.0 17.5 20.0 22.5 25.0 ... 67.5 70.0 72.5 75.0
+      * lon        (lon) float32 200.0 202.5 205.0 207.5 ... 322.5 325.0 327.5 330.0
+      * embedding  (embedding) int64 0 4 8 12 16 20 24 ... 136 140 144 148 152 156
+      * mode       (mode) int64 1 2 3 4 5 6 7 8 9 10
+    Attributes:
+        model:        Extended EOF Analysis
+        n_modes:      10
+        center:       True
+        standardize:  False
+        use_coslat:   True
+        solver:       auto
+        software:     xeofs
+        version:      1.0.3
+        date:         2023-10-23 11:30:31
+
+
+
+
+.. GENERATED FROM PYTHON SOURCE LINES 63-67
+
+A notable distinction from standard EOF analysis is the incorporation of an
+extra ``embedding`` dimension in the components. Nonetheless, the
+overarching methodology mirrors traditional EOF practices. The results,
+for instance, can be assessed by examining the explained variance ratio.
+
+.. GENERATED FROM PYTHON SOURCE LINES 67-71
+
+.. code-block:: default
+
+
+    model.explained_variance_ratio().plot()
+    plt.show()
+
+
+
+
+.. image-sg:: /auto_examples/1eof/images/sphx_glr_plot_eeof_001.png
+   :alt: plot eeof
+   :srcset: /auto_examples/1eof/images/sphx_glr_plot_eeof_001.png
+   :class: sphx-glr-single-img
+
+
+
+
+
+.. GENERATED FROM PYTHON SOURCE LINES 72-73
+
+Additionally, we can look into the scores; let's spotlight mode 4.
+
+.. GENERATED FROM PYTHON SOURCE LINES 73-77
+
+.. code-block:: default
+
+
+    scores.sel(mode=4).plot()
+    plt.show()
+
+
+
+
+.. image-sg:: /auto_examples/1eof/images/sphx_glr_plot_eeof_002.png
+   :alt: mode = 4
+   :srcset: /auto_examples/1eof/images/sphx_glr_plot_eeof_002.png
+   :class: sphx-glr-single-img
+
+
+
+
+
+.. GENERATED FROM PYTHON SOURCE LINES 78-81
+
+In wrapping up, we visualize the corresponding EEOF component of mode 4.
+For visualization purposes, we'll focus on the component at a specific
+latitude, in this instance, 60 degrees north.
+
+.. GENERATED FROM PYTHON SOURCE LINES 81-84
+
+.. code-block:: default
+
+
+    components.sel(mode=4, lat=60).plot()
+    plt.show()
+
+
+
+.. image-sg:: /auto_examples/1eof/images/sphx_glr_plot_eeof_003.png
+   :alt: lat = 60.0 [degrees_north], mode = 4
+   :srcset: /auto_examples/1eof/images/sphx_glr_plot_eeof_003.png
+   :class: sphx-glr-single-img
+
+
+
+
+
+.. rst-class:: sphx-glr-timing
+
+   **Total running time of the script:** (0 minutes 3.585 seconds)
+
+
+.. _sphx_glr_download_auto_examples_1eof_plot_eeof.py:
+
+.. only:: html
+
+  .. container:: sphx-glr-footer sphx-glr-footer-example
+
+
+    .. container:: sphx-glr-download sphx-glr-download-python
+
+      :download:`Download Python source code: plot_eeof.py <plot_eeof.py>`
+
+    .. container:: sphx-glr-download sphx-glr-download-jupyter
+
+      :download:`Download Jupyter notebook: plot_eeof.ipynb <plot_eeof.ipynb>`
+
+
+.. only:: html
+
+ .. rst-class:: sphx-glr-signature
+
+    `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_
diff --git a/docs/auto_examples/1eof/plot_eeof_codeobj.pickle b/docs/auto_examples/1eof/plot_eeof_codeobj.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..9f9912cc42c85eb286d4a2b14a477fbc2325000f
GIT binary patch
literal 10010
[base85-encoded binary payload omitted]
literal 0
HcmV?d00001

diff --git a/docs/auto_examples/1eof/sg_execution_times.rst b/docs/auto_examples/1eof/sg_execution_times.rst
index 36a3329..7752aa4 100644
--- a/docs/auto_examples/1eof/sg_execution_times.rst
+++ b/docs/auto_examples/1eof/sg_execution_times.rst
@@ -6,15 +6,17 @@
 Computation times
 =================
 
-**00:33.235** total execution time for **auto_examples_1eof** files:
+**00:03.585** total execution time for **auto_examples_1eof** files:
 
 +--------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_auto_examples_1eof_plot_gwpca.py` (``plot_gwpca.py``)                       | 00:33.235 | 0.0 MB |
+| :ref:`sphx_glr_auto_examples_1eof_plot_eeof.py` (``plot_eeof.py``)                         | 00:03.585 | 0.0 MB |
 +--------------------------------------------------------------------------------------------+-----------+--------+
 | :ref:`sphx_glr_auto_examples_1eof_plot_eof-smode.py` (``plot_eof-smode.py``)               | 00:00.000 | 0.0 MB |
 +--------------------------------------------------------------------------------------------+-----------+--------+
 | :ref:`sphx_glr_auto_examples_1eof_plot_eof-tmode.py` (``plot_eof-tmode.py``)               | 00:00.000 | 0.0 MB |
 +--------------------------------------------------------------------------------------------+-----------+--------+
+| :ref:`sphx_glr_auto_examples_1eof_plot_gwpca.py` (``plot_gwpca.py``)                       | 00:00.000 | 0.0 MB |
++--------------------------------------------------------------------------------------------+-----------+--------+
 | :ref:`sphx_glr_auto_examples_1eof_plot_mreof.py` (``plot_mreof.py``)                       | 00:00.000 | 0.0 MB |
 +--------------------------------------------------------------------------------------------+-----------+--------+
 | :ref:`sphx_glr_auto_examples_1eof_plot_multivariate-eof.py` (``plot_multivariate-eof.py``) | 00:00.000 | 0.0 MB |
diff --git a/docs/auto_examples/auto_examples_jupyter.zip b/docs/auto_examples/auto_examples_jupyter.zip
index bb6eaf3b621331898c6f6a46251f9d610e2b12b1..09abd69776501c105e13483f0a02f01ed452b82f 100644
GIT binary patch
delta 3103
[base85-encoded binary delta omitted]

diff --git a/docs/auto_examples/auto_examples_python.zip b/docs/auto_examples/auto_examples_python.zip
index 41853d3d284f9d5d20998d3beb3445854f4fdf1c..1074732efdc94159e6b1e047ba89b614a1be9585 100644
GIT binary patch
delta 3123
[base85-encoded binary delta omitted]

diff --git a/docs/auto_examples/index.rst b/docs/auto_examples/index.rst
index 9f5f34a..2877785 100644
--- a/docs/auto_examples/index.rst
+++ b/docs/auto_examples/index.rst
@@ -27,6 +27,23 @@ Examples
+.. raw:: html + +
+ +.. only:: html + + .. image:: /auto_examples/1eof/images/thumb/sphx_glr_plot_eeof_thumb.png + :alt: + + :ref:`sphx_glr_auto_examples_1eof_plot_eeof.py` + +.. raw:: html + +
Extended EOF analysis
+
+ + .. raw:: html
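For reference, the delay embedding that the new example describes can be sketched in a few lines of plain xarray. The following toy snippet is illustrative only (made-up sizes and variable names, not part of the patch); it mirrors the construction that ``ExtendedEOF._fit_algorithm`` performs further below:

    import numpy as np
    import xarray as xr

    # Toy series: 20 samples, 3 features; small tau/embedding for brevity.
    x = xr.DataArray(np.random.rand(20, 3), dims=("sample", "feature"))
    tau, embedding = 2, 4
    shifts = np.arange(embedding) * tau  # lags 0, 2, 4, 6

    # Stack lagged copies along a new "embedding" dimension, then drop the
    # trailing samples that the shifts filled with NaNs.
    lagged = xr.concat([x.shift(sample=-s) for s in shifts], dim="embedding")
    lagged = lagged.isel(sample=slice(None, -(embedding - 1) * tau))
    lagged = lagged.assign_coords(embedding=shifts)
    print(lagged.sizes)  # embedding: 4, sample: 14, feature: 3

The number of features grows by a factor of ``embedding``, which is exactly why the example preprocesses the data with PCA before decomposing the lagged matrix.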
diff --git a/examples/1eof/plot_eeof.png b/examples/1eof/plot_eeof.png
new file mode 100644
index 0000000..71cc545863977612b7bdda7730ddd3515ef3b529
GIT binary patch
literal 32860
[base85-encoded PNG image data omitted]

literal 0
HcmV?d00001

diff --git a/examples/1eof/plot_eeof.py b/examples/1eof/plot_eeof.py
new file mode 100644
index 0000000..2a7ab1d
--- /dev/null
+++ b/examples/1eof/plot_eeof.py
@@ -0,0 +1,83 @@
+"""
+Extended EOF analysis
+=====================
+
+This example demonstrates Extended EOF (EEOF) analysis on ``xarray`` tutorial
+data. EEOF analysis, also termed Multivariate/Multichannel Singular
+Spectrum Analysis, extends traditional EOF analysis to capture propagating
+signals or oscillations in multivariate datasets. At its core, this
+involves the formulation of a lagged covariance matrix that encapsulates
+both spatial and temporal correlations. Subsequently, this matrix is
+decomposed to yield its eigenvectors (components) and eigenvalues (explained variance).
+
+Let's begin by setting up the required packages and fetching the data:
+"""
+
+import xarray as xr
+import xeofs as xe
+import matplotlib.pyplot as plt
+
+xr.set_options(display_expand_data=False)
+
+# %%
+# Load the tutorial data.
+t2m = xr.tutorial.load_dataset("air_temperature").air
+
+
+# %%
+# Prior to conducting the EEOF analysis, it's essential to determine the
+# structure of the lagged covariance matrix. This entails defining the time
+# delay ``tau`` and the ``embedding`` dimension. The former signifies the
+# interval between the original and lagged time series, while the latter
+# dictates the number of time-lagged copies in the delay-coordinate space,
+# representing the system's dynamics.
+# For illustration, using ``tau=4`` and ``embedding=40``, we generate 40
+# delayed versions of the time series, each offset by 4 time steps, resulting
+# in a maximum shift of ``(embedding - 1) x tau = 156``. Given our dataset's
+# 6-hour intervals, ``tau=4`` translates to a 24-hour shift.
+# Clearly, constructing the lagged covariance matrix in this way
+# and subsequently decomposing it can be computationally expensive. For example,
+# given our dataset's dimensions,
+
+t2m.shape
+
+# %%
+# the extended dataset would have 40 x 25 x 53 = 53000 features,
+# which is much larger than the original dataset's 1325 features.
+# To mitigate this, we can first preprocess the data using PCA / EOF analysis
+# and then perform EEOF analysis on the resulting PCA / EOF scores. Here,
+# we'll use ``n_pca_modes=50`` to retain the first 50 PCA modes, so we end
+# up with 40 x 50 = 2000 (latent) features.
+# With these parameters set, we proceed to instantiate the ``ExtendedEOF``
+# model and fit our data.
+
+model = xe.models.ExtendedEOF(
+    n_modes=10, tau=4, embedding=40, n_pca_modes=50, use_coslat=True
+)
+model.fit(t2m, dim="time")
+scores = model.scores()
+components = model.components()
+components
+
+# %%
+# A notable distinction from standard EOF analysis is the incorporation of an
+# extra ``embedding`` dimension in the components. Nonetheless, the
+# overarching methodology mirrors traditional EOF practices. The results,
+# for instance, can be assessed by examining the explained variance ratio.
+
+model.explained_variance_ratio().plot()
+plt.show()
+
+# %%
+# Additionally, we can look into the scores; let's spotlight mode 4.
+
+scores.sel(mode=4).plot()
+plt.show()
+
+# %%
+# In wrapping up, we visualize the corresponding EEOF component of mode 4.
+# For visualization purposes, we'll focus on the component at a specific
+# latitude, in this instance, 60 degrees north.
+ +components.sel(mode=4, lat=60).plot() +plt.show() diff --git a/tests/models/test_eeof.py b/tests/models/test_eeof.py new file mode 100644 index 0000000..b9b2b98 --- /dev/null +++ b/tests/models/test_eeof.py @@ -0,0 +1,439 @@ +import numpy as np +import xarray as xr +import pytest +import dask.array as da +from numpy.testing import assert_allclose + +from xeofs.models.eeof import ExtendedEOF + + +def test_init(): + """Tests the initialization of the ExtendedEOF class""" + eof = ExtendedEOF(n_modes=5, tau=2, embedding=2) + + # Assert preprocessor has been initialized + assert hasattr(eof, "_params") + assert hasattr(eof, "preprocessor") + + +@pytest.mark.parametrize( + "dim", + [ + (("time",)), + (("lat", "lon")), + (("lon", "lat")), + ], +) +def test_fit(dim, mock_data_array): + """Tests the fit method of the ExtendedEOF class""" + + eof = ExtendedEOF(n_modes=5, tau=2, embedding=2) + eof.fit(mock_data_array, dim) + + # Assert the required attributes have been set + assert hasattr(eof, "preprocessor") + assert hasattr(eof, "data") + + +@pytest.mark.parametrize( + "dim", + [ + (("time",)), + (("lat", "lon")), + (("lon", "lat")), + ], +) +def test_singular_values(dim, mock_data_array): + """Tests the singular_values method of the ExtendedEOF class""" + + eof = ExtendedEOF(n_modes=5, tau=2, embedding=2) + eof.fit(mock_data_array, dim) + + # Test singular_values method + singular_values = eof.singular_values() + assert isinstance(singular_values, xr.DataArray) + + +@pytest.mark.parametrize( + "dim", + [ + (("time",)), + (("lat", "lon")), + (("lon", "lat")), + ], +) +def test_explained_variance(dim, mock_data_array): + """Tests the explained_variance method of the ExtendedEOF class""" + eof = ExtendedEOF(n_modes=5, tau=2, embedding=2) + eof.fit(mock_data_array, dim) + + # Test explained_variance method + explained_variance = eof.explained_variance() + assert isinstance(explained_variance, xr.DataArray) + # Explained variance must be positive + assert (explained_variance > 0).all() + + +@pytest.mark.parametrize( + "dim", + [ + (("time",)), + (("lat", "lon")), + (("lon", "lat")), + ], +) +def test_explained_variance_ratio(dim, mock_data_array): + """Tests the explained_variance_ratio method of the ExtendedEOF class""" + eof = ExtendedEOF(n_modes=5, tau=2, embedding=2) + eof.fit(mock_data_array, dim) + + # Test explained_variance_ratio method + explained_variance_ratio = eof.explained_variance_ratio() + assert isinstance(explained_variance_ratio, xr.DataArray) + # Explained variance ratio must be positive + assert ( + explained_variance_ratio > 0 + ).all(), "The explained variance ratio must be positive" + # The sum of the explained variance ratio must be <= 1 + assert ( + explained_variance_ratio.sum() <= 1 + 1e-5 + ), "The sum of the explained variance ratio must be <= 1" + + +@pytest.mark.parametrize( + "dim", + [ + (("time",)), + (("lat", "lon")), + (("lon", "lat")), + ], +) +def test_isolated_nans(dim, mock_data_array_isolated_nans): + """Tests the components method of the ExtendedEOF class""" + eof = ExtendedEOF(n_modes=5, tau=2, embedding=2) + with pytest.raises(ValueError): + eof.fit(mock_data_array_isolated_nans, dim) + + +@pytest.mark.parametrize( + "dim", + [ + (("time",)), + (("lat", "lon")), + (("lon", "lat")), + ], +) +def test_components(dim, mock_data_array): + """Tests the components method of the ExtendedEOF class""" + eof = ExtendedEOF(n_modes=5, tau=2, embedding=2) + eof.fit(mock_data_array, dim) + + # Test components method + components = eof.components() + feature_dims = 
tuple(set(mock_data_array.dims) - set(dim)) + assert isinstance(components, xr.DataArray), "Components is not a DataArray" + given_dims = set(components.dims) + expected_dims = set(feature_dims + ("mode", "embedding")) + assert ( + given_dims == expected_dims + ), "Components does not have the right feature dimensions" + + +@pytest.mark.parametrize( + "dim", + [ + (("time",)), + (("lat", "lon")), + (("lon", "lat")), + ], +) +def test_components_fulldim_nans(dim, mock_data_array_full_dimensional_nans): + """Tests the components method of the ExtendedEOF class""" + eof = ExtendedEOF(n_modes=5, tau=2, embedding=2) + eof.fit(mock_data_array_full_dimensional_nans, dim) + + # Test components method + components = eof.components() + feature_dims = tuple(set(mock_data_array_full_dimensional_nans.dims) - set(dim)) + assert isinstance(components, xr.DataArray), "Components is not a DataArray" + given_dims = set(components.dims) + expected_dims = set(feature_dims + ("mode", "embedding")) + assert ( + given_dims == expected_dims + ), "Components does not have the right feature dimensions" + + +@pytest.mark.parametrize( + "dim", + [ + (("time",)), + (("lat", "lon")), + (("lon", "lat")), + ], +) +def test_components_boundary_nans(dim, mock_data_array_boundary_nans): + """Tests the components method of the ExtendedEOF class""" + eof = ExtendedEOF(n_modes=5, tau=2, embedding=2) + eof.fit(mock_data_array_boundary_nans, dim) + + # Test components method + components = eof.components() + feature_dims = tuple(set(mock_data_array_boundary_nans.dims) - set(dim)) + assert isinstance(components, xr.DataArray), "Components is not a DataArray" + given_dims = set(components.dims) + expected_dims = set(feature_dims + ("mode", "embedding")) + assert ( + given_dims == expected_dims + ), "Components does not have the right feature dimensions" + + +@pytest.mark.parametrize( + "dim", + [ + (("time",)), + (("lat", "lon")), + (("lon", "lat")), + ], +) +def test_components_dataset(dim, mock_dataset): + """Tests the components method of the ExtendedEOF class""" + eof = ExtendedEOF(n_modes=5, tau=2, embedding=2) + eof.fit(mock_dataset, dim) + + # Test components method + components = eof.components() + feature_dims = tuple(set(mock_dataset.dims) - set(dim)) + assert isinstance(components, xr.Dataset), "Components is not a Dataset" + assert set(components.data_vars) == set( + mock_dataset.data_vars + ), "Components does not have the same data variables as the input Dataset" + given_dims = set(components.dims) + expected_dims = set(feature_dims + ("mode", "embedding")) + assert ( + given_dims == expected_dims + ), "Components does not have the right feature dimensions" + + +@pytest.mark.parametrize( + "dim", + [ + (("time",)), + (("lat", "lon")), + (("lon", "lat")), + ], +) +def test_components_dataarray_list(dim, mock_data_array_list): + """Tests the components method of the ExtendedEOF class""" + eof = ExtendedEOF(n_modes=5, tau=2, embedding=2) + eof.fit(mock_data_array_list, dim) + + # Test components method + components = eof.components() + feature_dims = [tuple(set(data.dims) - set(dim)) for data in mock_data_array_list] + assert isinstance(components, list), "Components is not a list" + assert len(components) == len( + mock_data_array_list + ), "Components does not have the same length as the input list" + assert isinstance( + components[0], xr.DataArray + ), "Components is not a list of DataArrays" + for comp, feat_dims in zip(components, feature_dims): + given_dims = set(comp.dims) + expected_dims = set(feat_dims + 
("mode", "embedding")) + assert ( + given_dims == expected_dims + ), "Components does not have the right feature dimensions" + + +@pytest.mark.parametrize( + "dim", + [ + (("time",)), + (("lat", "lon")), + (("lon", "lat")), + ], +) +def test_scores(dim, mock_data_array): + """Tests the scores method of the ExtendedEOF class""" + eof = ExtendedEOF(n_modes=5, tau=2, embedding=2) + eof.fit(mock_data_array, dim) + + # Test scores method + scores = eof.scores() + assert isinstance(scores, xr.DataArray), "Scores is not a DataArray" + assert set(scores.dims) == set( + (dim + ("mode",)) + ), "Scores does not have the right dimensions" + + +@pytest.mark.parametrize( + "dim", + [ + (("time",)), + (("lat", "lon")), + (("lon", "lat")), + ], +) +def test_scores_fulldim_nans(dim, mock_data_array_full_dimensional_nans): + """Tests the scores method of the ExtendedEOF class""" + eof = ExtendedEOF(n_modes=5, tau=2, embedding=2) + eof.fit(mock_data_array_full_dimensional_nans, dim) + + # Test scores method + scores = eof.scores() + assert isinstance(scores, xr.DataArray), "Scores is not a DataArray" + assert set(scores.dims) == set( + (dim + ("mode",)) + ), "Scores does not have the right dimensions" + + +@pytest.mark.parametrize( + "dim", + [ + (("time",)), + (("lat", "lon")), + (("lon", "lat")), + ], +) +def test_scores_boundary_nans(dim, mock_data_array_boundary_nans): + """Tests the scores method of the ExtendedEOF class""" + eof = ExtendedEOF(n_modes=5, tau=2, embedding=2) + eof.fit(mock_data_array_boundary_nans, dim) + + # Test scores method + scores = eof.scores() + assert isinstance(scores, xr.DataArray), "Scores is not a DataArray" + assert set(scores.dims) == set( + (dim + ("mode",)) + ), "Scores does not have the right dimensions" + + +@pytest.mark.parametrize( + "dim", + [ + (("time",)), + (("lat", "lon")), + (("lon", "lat")), + ], +) +def test_scores_dataset(dim, mock_dataset): + """Tests the scores method of the ExtendedEOF class""" + eof = ExtendedEOF(n_modes=5, tau=2, embedding=2) + eof.fit(mock_dataset, dim) + + # Test scores method + scores = eof.scores() + assert isinstance(scores, xr.DataArray) + assert set(scores.dims) == set( + (dim + ("mode",)) + ), "Scores does not have the right dimensions" + + +@pytest.mark.parametrize( + "dim", + [ + (("time",)), + (("lat", "lon")), + (("lon", "lat")), + ], +) +def test_scores_dataarray_list(dim, mock_data_array_list): + """Tests the scores method of the ExtendedEOF class""" + eof = ExtendedEOF(n_modes=5, tau=2, embedding=2) + eof.fit(mock_data_array_list, dim) + + # Test scores method + scores = eof.scores() + assert isinstance(scores, xr.DataArray) + assert set(scores.dims) == set( + (dim + ("mode",)) + ), "Scores does not have the right dimensions" + + +def test_get_params(): + """Tests the get_params method of the ExtendedEOF class""" + eof = ExtendedEOF(n_modes=5, tau=2, embedding=2) + + # Test get_params method + params = eof.get_params() + assert isinstance(params, dict) + assert params.get("n_modes") == 5 + assert params.get("tau") == 2 + assert params.get("embedding") == 2 + assert params.get("solver") == "auto" + + +@pytest.mark.parametrize( + "dim", + [ + (("time",)), + (("lat", "lon")), + (("lon", "lat")), + ], +) +def test_transform(dim, mock_data_array): + """Test projecting new unseen data onto the components (EOFs/eigenvectors)""" + + # Create a xarray DataArray with random data + model = ExtendedEOF(n_modes=5, tau=2, embedding=2, solver="full") + model.fit(mock_data_array, dim) + scores = model.scores() + + # Create a new xarray 
DataArray with random data + new_data = mock_data_array + + with pytest.raises(NotImplementedError): + projections = model.transform(new_data) + + # # Check that the projection has the right dimensions + # assert projections.dims == scores.dims, "Projection has wrong dimensions" # type: ignore + + # # Check that the projection has the right data type + # assert isinstance(projections, xr.DataArray), "Projection is not a DataArray" + + # # Check that the projection has the right name + # assert projections.name == "scores", "Projection has wrong name: {}".format( + # projections.name + # ) + + # # Check that the projection's data is the same as the scores + # np.testing.assert_allclose( + # scores.sel(mode=slice(1, 3)), projections.sel(mode=slice(1, 3)), rtol=1e-3 + # ) + + +@pytest.mark.parametrize( + "dim", + [ + (("time",)), + (("lat", "lon")), + (("lon", "lat")), + ], +) +def test_inverse_transform(dim, mock_data_array): + """Test inverse_transform method in ExtendedEOF class.""" + + # instantiate the ExtendedEOF class with necessary parameters + eof = ExtendedEOF(n_modes=5, tau=2, embedding=2) + + # fit the ExtendedEOF model + eof.fit(mock_data_array, dim=dim) + + # Test with scalar + mode = 1 + with pytest.raises(NotImplementedError): + reconstructed_data = eof.inverse_transform(mode) + # assert isinstance(reconstructed_data, xr.DataArray) + + # # Test with slice + # mode = slice(1, 2) + # reconstructed_data = eof.inverse_transform(mode) + # assert isinstance(reconstructed_data, xr.DataArray) + + # # Test with array of tick labels + # mode = np.array([1, 3]) + # reconstructed_data = eof.inverse_transform(mode) + # assert isinstance(reconstructed_data, xr.DataArray) + + # # Check that the reconstructed data has the same dimensions as the original data + # assert set(reconstructed_data.dims) == set(mock_data_array.dims) diff --git a/xeofs/models/__init__.py b/xeofs/models/__init__.py index a45ac7d..49ee8ec 100644 --- a/xeofs/models/__init__.py +++ b/xeofs/models/__init__.py @@ -1,5 +1,6 @@ from .eof import EOF, ComplexEOF from .mca import MCA, ComplexMCA +from .eeof import ExtendedEOF from .opa import OPA from .gwpca import GWPCA from .rotator_factory import RotatorFactory @@ -11,6 +12,7 @@ __all__ = [ "EOF", "ComplexEOF", + "ExtendedEOF", "EOFRotator", "ComplexEOFRotator", "OPA", diff --git a/xeofs/models/_base_model.py b/xeofs/models/_base_model.py index cef12a9..251f081 100644 --- a/xeofs/models/_base_model.py +++ b/xeofs/models/_base_model.py @@ -34,6 +34,8 @@ class _BaseModel(ABC): ---------- n_modes: int, default=10 Number of modes to calculate. + center: bool, default=True + Whether to center the input data. standardize: bool, default=False Whether to standardize the input data. use_coslat: bool, default=False diff --git a/xeofs/models/eeof.py b/xeofs/models/eeof.py new file mode 100644 index 0000000..765cb59 --- /dev/null +++ b/xeofs/models/eeof.py @@ -0,0 +1,146 @@ +from typing import Optional + +import numpy as np +import xarray as xr + +from ._base_model import _BaseModel +from .eof import EOF +from .decomposer import Decomposer +from ..utils.data_types import DataArray, Data, Dims +from ..data_container import DataContainer +from ..utils.xarray_utils import total_variance as compute_total_variance + + +class ExtendedEOF(EOF): + """Extended EOF analysis. 
+
+    Extended EOF (EEOF) analysis [1]_ [2]_, often referred to as
+    Multivariate/Multichannel Singular Spectrum Analysis, enhances
+    traditional EOF analysis by identifying propagating signals or
+    oscillations in multivariate datasets. This approach integrates the
+    spatial correlation of EOFs with the temporal auto- and cross-correlation
+    derived from the lagged covariance matrix.
+
+    Parameters
+    ----------
+    n_modes : int
+        Number of modes to be computed.
+    tau : int
+        Time delay used to construct a time-delayed version of the original time series.
+    embedding : int
+        Number of dimensions in the delay-coordinate space used to represent
+        the dynamics of the system. It determines the number of delayed copies
+        of the time series that are used to construct the delay-coordinate space.
+    n_pca_modes : Optional[int]
+        If provided, the input data is first preprocessed using PCA with the
+        specified number of modes. The EEOF analysis is then performed on the
+        resulting PCA scores. This approach can lead to significant computational
+        savings.
+    **kwargs :
+        Additional keyword arguments passed to the EOF model.
+
+    References
+    ----------
+    .. [1] Weare, B. C. & Nasstrom, J. S. Examples of Extended Empirical Orthogonal Function Analyses. Monthly Weather Review 110, 481–485 (1982).
+    .. [2] Broomhead, D. S. & King, G. P. Extracting qualitative dynamics from experimental data. Physica D: Nonlinear Phenomena 20, 217–236 (1986).
+
+
+    Examples
+    --------
+    >>> from xeofs.models import ExtendedEOF
+    >>> model = ExtendedEOF(n_modes=5, tau=1, embedding=20, n_pca_modes=20)
+    >>> model.fit(data, dim="time")
+
+    Retrieve the extended empirical orthogonal functions (EEOFs) and their explained variance:
+
+    >>> eeofs = model.components()
+    >>> exp_var = model.explained_variance()
+
+    Retrieve the time-dependent coefficients corresponding to the EEOF modes:
+
+    >>> scores = model.scores()
+    """
+
+    def __init__(
+        self,
+        n_modes: int,
+        tau: int,
+        embedding: int,
+        n_pca_modes: Optional[int] = None,
+        **kwargs,
+    ):
+        super().__init__(n_modes=n_modes, **kwargs)
+        self.attrs.update({"model": "Extended EOF Analysis"})
+        self._params.update(
+            {"tau": tau, "embedding": embedding, "n_pca_modes": n_pca_modes}
+        )
+
+        # Initialize the DataContainer to store the results
+        self.data = DataContainer()
+        self.pca = (
+            EOF(
+                n_modes=n_pca_modes,
+                center=True,
+                standardize=False,
+                use_coslat=False,
+                sample_name=self.sample_name,
+                feature_name=self.feature_name,
+            )
+            if n_pca_modes
+            else None
+        )
+
+    def _fit_algorithm(self, X: DataArray):
+        self.data.add(X.copy(), "input_data", allow_compute=False)
+
+        # Preprocess the data using PCA
+        if self.pca:
+            self.pca.fit(X, dim=self.sample_name)
+            X = self.pca.data["scores"]
+            X = X.rename({"mode": self.feature_name})
+
+        # Construct the time-delayed version of the original time series
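+        # With embedding=40 and tau=4, for example, the loop below stacks 40
+        # copies of X shifted by 0, 4, 8, ..., 156 samples along a new
+        # "embedding" dimension; the trailing (embedding - 1) * tau samples,
+        # which the shifts fill with NaNs, are truncated afterwards.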
solver_kwargs=self._solver_kwargs,
+        )
+        model.fit(X_extended, dim=self.sample_name)
+
+        self.model = model
+        self.data = model.data
+        self.data["components"] = model.components()
+        self.data["scores"] = model.scores(normalized=False)
+
+        if self.pca:
+            self.data["components"] = xr.dot(
+                self.pca.data["components"].rename({"mode": "temp"}),
+                self.data["components"].rename({"feature": "temp"}),
+                dims="temp",
+            )
+
+        self.data.set_attrs(self.attrs)
+
+    def _transform_algorithm(self, X):
+        raise NotImplementedError("EEOF does not currently support transform")
+
+    def _inverse_transform_algorithm(self, X):
+        raise NotImplementedError("EEOF does not currently support inverse transform")
diff --git a/xeofs/models/eof.py b/xeofs/models/eof.py
index 6e4a20b..b458f83 100644
--- a/xeofs/models/eof.py
+++ b/xeofs/models/eof.py
@@ -38,16 +38,22 @@ class EOF(_BaseModel):
     def __init__(
         self,
         n_modes=10,
+        center=True,
         standardize=False,
         use_coslat=False,
+        sample_name="sample",
+        feature_name="feature",
         solver="auto",
         solver_kwargs={},
         **kwargs,
     ):
         super().__init__(
             n_modes=n_modes,
+            center=center,
             standardize=standardize,
             use_coslat=use_coslat,
+            sample_name=sample_name,
+            feature_name=feature_name,
             solver=solver,
             solver_kwargs=solver_kwargs,
             **kwargs,
@@ -219,7 +225,7 @@ def explained_variance_ratio(self) -> DataArray:
 class ComplexEOF(EOF):
     """Complex Empirical Orthogonal Functions (Complex EOF) analysis.
 
-    The Complex EOF analysis [1]_ [2]_ (also known as Hilbert EOF analysis) applies a Hilbert transform
+    The Complex EOF analysis [1]_ [2]_ [3]_ [4]_ (also known as Hilbert EOF analysis) applies a Hilbert transform
     to the data before performing the standard EOF analysis. The Hilbert transform is applied to each
     feature of the data individually.
 
@@ -252,8 +258,10 @@ class ComplexEOF(EOF):
 
     References
     ----------
-    .. [1] Horel, J., 1984. Complex Principal Component Analysis: Theory and Examples. J. Climate Appl. Meteor. 23, 1660–1673. https://doi.org/10.1175/1520-0450(1984)023<1660:CPCATA>2.0.CO;2
-    .. [2] Hannachi, A., Jolliffe, I., Stephenson, D., 2007. Empirical orthogonal functions and related techniques in atmospheric science: A review. International Journal of Climatology 27, 1119–1152. https://doi.org/10.1002/joc.1499
+    .. [1] Rasmusson, E. M., Arkin, P. A., Chen, W.-Y. & Jalickee, J. B. Biennial variations in surface temperature over the United States as revealed by singular decomposition. Monthly Weather Review 109, 587–598 (1981).
+    .. [2] Barnett, T. P. Interaction of the Monsoon and Pacific Trade Wind System at Interannual Time Scales Part I: The Equatorial Zone. Monthly Weather Review 111, 756–773 (1983).
+    .. [3] Horel, J., 1984. Complex Principal Component Analysis: Theory and Examples. J. Climate Appl. Meteor. 23, 1660–1673. https://doi.org/10.1175/1520-0450(1984)023<1660:CPCATA>2.0.CO;2
+    .. [4] Hannachi, A., Jolliffe, I., Stephenson, D., 2007. Empirical orthogonal functions and related techniques in atmospheric science: A review. International Journal of Climatology 27, 1119–1152. https://doi.org/10.1002/joc.1499
 
     Examples
     --------

From 807b7e896990c4cfb788478644e7647ab3a9ffa1 Mon Sep 17 00:00:00 2001
From: Niclas Rieger
Date: Mon, 23 Oct 2023 15:22:12 +0200
Subject: [PATCH 34/43] perf(dask): compute SVD result immediately

A new parameter lets users choose whether dask models are computed
immediately after decomposition. Previously, all dask objects were
delayed until the very end, which resulted in redundant dask
computations.
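
A minimal sketch of the new behaviour (``X`` is a placeholder for any
dask-backed DataArray; n_modes and the dimension name are arbitrary):

    from xeofs.models import EOF

    model = EOF(n_modes=5, compute=False)  # keep the SVD result lazy
    model.fit(X, dim="time")               # nothing is evaluated yet
    model.compute()                        # one explicit, shared computation

With the default compute=True, fit() evaluates the SVD result right
away, so subsequent calls such as components() or scores() no longer
trigger redundant evaluations of the same dask graph.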
--- tests/models/test_decomposer.py | 26 ++++++-- tests/models/test_eof_rotator.py | 56 ++++++---------- tests/models/test_mca.py | 35 +++++----- tests/models/test_mca_rotator.py | 103 +++++++++-------------------- xeofs/models/_base_cross_model.py | 12 +++- xeofs/models/_base_model.py | 8 ++- xeofs/models/cca.py | 8 ++- xeofs/models/decomposer.py | 37 ++++++++++- xeofs/models/eeof.py | 2 + xeofs/models/eof.py | 46 ++++++++++--- xeofs/models/eof_rotator.py | 11 +++- xeofs/models/mca.py | 104 +++++++++++++++++++++++++----- xeofs/models/mca_rotator.py | 9 ++- xeofs/models/opa.py | 51 +++++++++++++-- xeofs/utils/rotation.py | 6 +- 15 files changed, 345 insertions(+), 169 deletions(-) diff --git a/tests/models/test_decomposer.py b/tests/models/test_decomposer.py index 35ad47b..861563e 100644 --- a/tests/models/test_decomposer.py +++ b/tests/models/test_decomposer.py @@ -6,6 +6,7 @@ from scipy.sparse.linalg import svds as complex_svd # type: ignore from dask.array.linalg import svd_compressed as dask_svd from xeofs.models.decomposer import Decomposer +from ..utilities import data_is_dask @pytest.fixture @@ -99,25 +100,36 @@ def test_fit_dask_full(mock_dask_data_array): assert decomposer.V_.shape[1] == 2 -def test_fit_dask_randomized(mock_dask_data_array): +@pytest.mark.parametrize("compute", [True, False]) +def test_fit_dask_randomized(mock_dask_data_array, compute): # The Dask SVD solver has no parameter 'random_state' but 'seed' instead, # so let's create a new decomposer for this case - decomposer = Decomposer(n_modes=2, solver="randomized", seed=42) + decomposer = Decomposer(n_modes=2, solver="randomized", compute=compute, seed=42) decomposer.fit(mock_dask_data_array) assert "U_" in decomposer.__dict__ assert "s_" in decomposer.__dict__ assert "V_" in decomposer.__dict__ - # Check if the Dask SVD solver has been used - assert isinstance(decomposer.U_.data, DaskArray) - assert isinstance(decomposer.s_.data, DaskArray) - assert isinstance(decomposer.V_.data, DaskArray) - # Check that indeed 2 modes are returned assert decomposer.U_.shape[1] == 2 assert decomposer.s_.shape[0] == 2 assert decomposer.V_.shape[1] == 2 + is_dask_before = data_is_dask(mock_dask_data_array) + U_is_dask_after = data_is_dask(decomposer.U_) + s_is_dask_after = data_is_dask(decomposer.s_) + V_is_dask_after = data_is_dask(decomposer.V_) + # Check if the Dask SVD solver has been used + assert is_dask_before + if compute: + assert not U_is_dask_after + assert not s_is_dask_after + assert not V_is_dask_after + else: + assert U_is_dask_after + assert s_is_dask_after + assert V_is_dask_after + def test_fit_complex(mock_complex_data_array): decomposer = Decomposer(n_modes=2, solver="randomized", random_state=42) diff --git a/tests/models/test_eof_rotator.py b/tests/models/test_eof_rotator.py index 1633ff5..3fb26eb 100644 --- a/tests/models/test_eof_rotator.py +++ b/tests/models/test_eof_rotator.py @@ -5,6 +5,7 @@ from xeofs.models import EOF, EOFRotator from xeofs.data_container import DataContainer +from ..utilities import data_is_dask @pytest.fixture @@ -16,7 +17,7 @@ def eof_model(mock_data_array, dim): @pytest.fixture def eof_model_delayed(mock_dask_data_array, dim): - eof = EOF(n_modes=5) + eof = EOF(n_modes=5, compute=False) eof.fit(mock_dask_data_array, dim) return eof @@ -168,43 +169,26 @@ def test_scores(eof_model): @pytest.mark.parametrize( - "dim", + "dim, compute", [ - (("time",)), - (("lat", "lon")), - (("lon", "lat")), + (("time",), True), + (("lat", "lon"), True), + (("lon", "lat"), True), + (("time",), False), 
+ (("lat", "lon"), False), + (("lon", "lat"), False), ], ) -def test_compute(eof_model_delayed): - eof_rotator = EOFRotator(n_modes=5) +def test_compute(eof_model_delayed, compute): + eof_rotator = EOFRotator(n_modes=5, compute=compute) eof_rotator.fit(eof_model_delayed) - # before computation, the attributes should be dask arrays - assert isinstance( - eof_rotator.data["explained_variance"].data, DaskArray - ), "The attribute _explained_variance should be a dask array." - assert isinstance( - eof_rotator.data["components"].data, DaskArray - ), "The attribute _components should be a dask array." - assert isinstance( - eof_rotator.data["rotation_matrix"].data, DaskArray - ), "The attribute _rotation_matrix should be a dask array." - assert isinstance( - eof_rotator.data["scores"].data, DaskArray - ), "The attribute _scores should be a dask array." - - eof_rotator.compute() - - # after computation, the attributes should be numpy ndarrays - assert isinstance( - eof_rotator.data["explained_variance"].data, np.ndarray - ), "The attribute _explained_variance should be a numpy ndarray." - assert isinstance( - eof_rotator.data["components"].data, np.ndarray - ), "The attribute _components should be a numpy ndarray." - assert isinstance( - eof_rotator.data["rotation_matrix"].data, np.ndarray - ), "The attribute _rotation_matrix should be a numpy ndarray." - assert isinstance( - eof_rotator.data["scores"].data, np.ndarray - ), "The attribute _scores should be a numpy ndarray." + if compute: + assert not data_is_dask(eof_rotator.data["explained_variance"]) + assert not data_is_dask(eof_rotator.data["components"]) + assert not data_is_dask(eof_rotator.data["rotation_matrix"]) + + else: + assert data_is_dask(eof_rotator.data["explained_variance"]) + assert data_is_dask(eof_rotator.data["components"]) + assert data_is_dask(eof_rotator.data["rotation_matrix"]) diff --git a/tests/models/test_mca.py b/tests/models/test_mca.py index c04b7bd..afa4c51 100644 --- a/tests/models/test_mca.py +++ b/tests/models/test_mca.py @@ -5,6 +5,7 @@ from numpy.testing import assert_allclose from xeofs.models.mca import MCA +from ..utilities import data_is_dask @pytest.fixture @@ -357,26 +358,26 @@ def test_heterogeneous_patterns(mca_model, mock_data_array, dim): @pytest.mark.parametrize( - "dim", + "dim, compute", [ - (("time",)), - (("lat", "lon")), - (("lon", "lat")), + (("time",), True), + (("lat", "lon"), True), + (("lon", "lat"), True), + (("time",), False), + (("lat", "lon"), False), + (("lon", "lat"), False), ], ) -def test_compute(mca_model, mock_dask_data_array, dim): +def test_compute(mock_dask_data_array, dim, compute): + mca_model = MCA(n_modes=10, compute=compute) mca_model.fit(mock_dask_data_array, mock_dask_data_array, (dim)) - assert isinstance(mca_model.data["squared_covariance"].data, DaskArray) - assert isinstance(mca_model.data["components1"].data, DaskArray) - assert isinstance(mca_model.data["components2"].data, DaskArray) - assert isinstance(mca_model.data["scores1"].data, DaskArray) - assert isinstance(mca_model.data["scores2"].data, DaskArray) - - mca_model.compute() + if compute: + assert not data_is_dask(mca_model.data["squared_covariance"]) + assert not data_is_dask(mca_model.data["components1"]) + assert not data_is_dask(mca_model.data["components2"]) - assert isinstance(mca_model.data["squared_covariance"].data, np.ndarray) - assert isinstance(mca_model.data["components1"].data, np.ndarray) - assert isinstance(mca_model.data["components2"].data, np.ndarray) - assert 
isinstance(mca_model.data["scores1"].data, np.ndarray) - assert isinstance(mca_model.data["scores2"].data, np.ndarray) + else: + assert data_is_dask(mca_model.data["squared_covariance"]) + assert data_is_dask(mca_model.data["components1"]) + assert data_is_dask(mca_model.data["components2"]) diff --git a/tests/models/test_mca_rotator.py b/tests/models/test_mca_rotator.py index f056c56..0cd9c59 100644 --- a/tests/models/test_mca_rotator.py +++ b/tests/models/test_mca_rotator.py @@ -5,6 +5,7 @@ # Import the classes from your modules from xeofs.models import MCA, MCARotator +from ..utilities import data_is_dask @pytest.fixture @@ -16,7 +17,7 @@ def mca_model(mock_data_array, dim): @pytest.fixture def mca_model_delayed(mock_dask_data_array, dim): - mca = MCA(n_modes=5) + mca = MCA(n_modes=5, compute=False) mca.fit(mock_dask_data_array, mock_dask_data_array, dim) return mca @@ -213,82 +214,38 @@ def test_heterogeneous_patterns(mca_model, mock_data_array, dim): @pytest.mark.parametrize( - "dim", + "dim, compute", [ - (("time",)), - (("lat", "lon")), - (("lon", "lat")), + (("time",), True), + (("lat", "lon"), True), + (("lon", "lat"), True), + (("time",), False), + (("lat", "lon"), False), + (("lon", "lat"), False), ], ) -def test_compute(mca_model_delayed): +def test_compute(mca_model_delayed, compute): """Test the compute method of the MCARotator class.""" - mca_rotator = MCARotator(n_modes=4, rtol=1e-5) + mca_rotator = MCARotator(n_modes=4, compute=compute, rtol=1e-5) mca_rotator.fit(mca_model_delayed) - assert isinstance( - mca_rotator.data["squared_covariance"].data, DaskArray - ), "squared_covariance is not a delayed object" - assert isinstance( - mca_rotator.data["components1"].data, DaskArray - ), "components1 is not a delayed object" - assert isinstance( - mca_rotator.data["components2"].data, DaskArray - ), "components2 is not a delayed object" - assert isinstance( - mca_rotator.data["scores1"].data, DaskArray - ), "scores1 is not a delayed object" - assert isinstance( - mca_rotator.data["scores2"].data, DaskArray - ), "scores2 is not a delayed object" - assert isinstance( - mca_rotator.data["rotation_matrix"].data, DaskArray - ), "rotation_matrix is not a delayed object" - assert isinstance( - mca_rotator.data["phi_matrix"].data, DaskArray - ), "phi_matrix is not a delayed object" - assert isinstance( - mca_rotator.data["norm1"].data, DaskArray - ), "norm1 is not a delayed object" - assert isinstance( - mca_rotator.data["norm2"].data, DaskArray - ), "norm2 is not a delayed object" - assert isinstance( - mca_rotator.data["modes_sign"].data, DaskArray - ), "modes_sign is not a delayed object" - - mca_rotator.compute() - - assert isinstance( - mca_rotator.data["squared_covariance"].data, np.ndarray - ), "squared_covariance is not computed" - assert isinstance( - mca_rotator.data["total_squared_covariance"].data, np.ndarray - ), "total_squared_covariance is not computed" - assert isinstance( - mca_rotator.data["components1"].data, np.ndarray - ), "components1 is not computed" - assert isinstance( - mca_rotator.data["components2"].data, np.ndarray - ), "components2 is not computed" - assert isinstance( - mca_rotator.data["scores1"].data, np.ndarray - ), "scores1 is not computed" - assert isinstance( - mca_rotator.data["scores2"].data, np.ndarray - ), "scores2 is not computed" - assert isinstance( - mca_rotator.data["rotation_matrix"].data, np.ndarray - ), "rotation_matrix is not computed" - assert isinstance( - mca_rotator.data["phi_matrix"].data, np.ndarray - ), "phi_matrix is not 
computed" - assert isinstance( - mca_rotator.data["norm1"].data, np.ndarray - ), "norm1 is not computed" - assert isinstance( - mca_rotator.data["norm2"].data, np.ndarray - ), "norm2 is not computed" - assert isinstance( - mca_rotator.data["modes_sign"].data, np.ndarray - ), "modes_sign is not computed" + if compute: + assert not data_is_dask(mca_rotator.data["squared_covariance"]) + assert not data_is_dask(mca_rotator.data["components1"]) + assert not data_is_dask(mca_rotator.data["components2"]) + assert not data_is_dask(mca_rotator.data["rotation_matrix"]) + assert not data_is_dask(mca_rotator.data["phi_matrix"]) + assert not data_is_dask(mca_rotator.data["norm1"]) + assert not data_is_dask(mca_rotator.data["norm2"]) + assert not data_is_dask(mca_rotator.data["modes_sign"]) + + else: + assert data_is_dask(mca_rotator.data["squared_covariance"]) + assert data_is_dask(mca_rotator.data["components1"]) + assert data_is_dask(mca_rotator.data["components2"]) + assert data_is_dask(mca_rotator.data["rotation_matrix"]) + assert data_is_dask(mca_rotator.data["phi_matrix"]) + assert data_is_dask(mca_rotator.data["norm1"]) + assert data_is_dask(mca_rotator.data["norm2"]) + assert data_is_dask(mca_rotator.data["modes_sign"]) diff --git a/xeofs/models/_base_cross_model.py b/xeofs/models/_base_cross_model.py index cec95fd..128ebf9 100644 --- a/xeofs/models/_base_cross_model.py +++ b/xeofs/models/_base_cross_model.py @@ -27,6 +27,8 @@ class _BaseCrossModel(ABC): Whether to use cosine of latitude for scaling. n_pca_modes: int, default=None Number of PCA modes to calculate. + compute : bool, default=True + Whether to compute the decomposition immediately. sample_name: str, default="sample" Name of the new sample dimension. feature_name: str, default="feature" @@ -45,6 +47,7 @@ def __init__( standardize=False, use_coslat=False, n_pca_modes=None, + compute=True, sample_name="sample", feature_name="feature", solver="auto", @@ -52,6 +55,7 @@ def __init__( ): self.sample_name = sample_name self.feature_name = feature_name + self._compute = compute # Define model parameters self._params = { @@ -90,8 +94,12 @@ def __init__( self.data = DataContainer() # Initialize PCA objects - self.pca1 = EOF(n_modes=n_pca_modes) if n_pca_modes else None - self.pca2 = EOF(n_modes=n_pca_modes) if n_pca_modes else None + self.pca1 = ( + EOF(n_modes=n_pca_modes, compute=self._compute) if n_pca_modes else None + ) + self.pca2 = ( + EOF(n_modes=n_pca_modes, compute=self._compute) if n_pca_modes else None + ) def fit( self, diff --git a/xeofs/models/_base_model.py b/xeofs/models/_base_model.py index 251f081..18e942d 100644 --- a/xeofs/models/_base_model.py +++ b/xeofs/models/_base_model.py @@ -44,6 +44,10 @@ class _BaseModel(ABC): Name of the sample dimension. feature_name: str, default="feature" Name of the feature dimension. + compute: bool, default=True + Whether to compute the decomposition immediately. This is recommended + if the SVD result for the first ``n_modes`` can be accommodated in memory, as it + boosts computational efficiency compared to deferring the computation. solver: {"auto", "full", "randomized"}, default="auto" Solver to use for the SVD computation. 
 solver_kwargs: dict, default={}
@@ -59,12 +63,14 @@ def __init__(
         self,
         use_coslat=False,
         sample_name="sample",
         feature_name="feature",
+        compute=True,
         solver="auto",
         solver_kwargs={},
     ):
+        self.n_modes = n_modes
         self.sample_name = sample_name
         self.feature_name = feature_name
-        self.n_modes = n_modes
+        self._compute = compute
 
         # Define model parameters
         self._params = {
             "n_modes": n_modes,
diff --git a/xeofs/models/cca.py b/xeofs/models/cca.py
index bf32e5c..b2d0d0e 100644
--- a/xeofs/models/cca.py
+++ b/xeofs/models/cca.py
@@ -50,6 +50,7 @@ def __init__(
         pca: bool = False,
         variance_fraction: float = 0.99,
         init_pca_modes: int | float = 0.75,
+        compute: bool = True,
         sample_name: str = "sample",
         feature_name: str = "feature",
     ):
@@ -58,6 +59,7 @@
         self.n_modes = n_modes
         self.use_coslat = use_coslat
         self.pca = pca
+        self._compute = compute
         self.variance_fraction = variance_fraction
         self.init_pca_modes = init_pca_modes
 
@@ -167,7 +169,7 @@ def _apply_pca(self, views: DataList):
         view_transformed = []
 
         for i, view in enumerate(views):
-            pca = EOF(n_modes=n_pca_modes[i])
+            pca = EOF(n_modes=n_pca_modes[i], compute=self._compute)
             pca.fit(view, dim=self.sample_name)
             self.pca_models.append(pca)
 
@@ -237,6 +239,8 @@ class CCA(CCABaseModel):
         A value of 1.0 will perform a full SVD of the data. Choosing a smaller value can increase computation speed. Default 0.75
     c : Sequence[float] | float, optional
         Regularisation parameter, by default 0 (no regularization)
+    compute : bool, optional
+        Whether to compute the decomposition immediately, by default True
 
 
     Notes
@@ -267,12 +271,14 @@ def __init__(
         pca=True,
         variance_fraction=0.99,
         init_pca_modes=0.75,
+        compute=True,
         eps=1e-6,
     ):
         super().__init__(
             n_modes=n_modes,
             use_coslat=use_coslat,
             pca=pca,
+            compute=compute,
             variance_fraction=variance_fraction,
             init_pca_modes=init_pca_modes,
         )
diff --git a/xeofs/models/decomposer.py b/xeofs/models/decomposer.py
index 05ebb39..b8d7383 100644
--- a/xeofs/models/decomposer.py
+++ b/xeofs/models/decomposer.py
@@ -1,6 +1,7 @@
 import numpy as np
 import xarray as xr
 from dask.array import Array as DaskArray  # type: ignore
+from dask.diagnostics.progress import ProgressBar
 from numpy.linalg import svd
 from sklearn.utils.extmath import randomized_svd
 from scipy.sparse.linalg import svds as complex_svd  # type: ignore
@@ -18,6 +19,10 @@ class Decomposer:
     ----------
     n_modes : int
         Number of components to be computed.
+    flip_signs : bool, default=True
+        Whether to flip the sign of the components to ensure deterministic output.
+    compute : bool, default=True
+        Whether to compute the decomposition immediately.
     solver : {'auto', 'full', 'randomized'}, default='auto'
         The solver is selected by a default policy based on size of `X` and `n_modes`:
         if the input data is larger than 500x500 and the number of modes to extract is lower
         than 80% of the smallest dimension of the data, then the more efficient `randomized`
         method is enabled. Otherwise the exact full SVD is computed and optionally truncated afterwards.
     **kwargs
         Additional keyword arguments passed to the SVD solver.
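+
+    Examples
+    --------
+    A sketch of the deferred mode; ``X`` is a placeholder for any
+    dask-backed 2D DataArray:
+
+    >>> decomposer = Decomposer(n_modes=2, compute=False)
+    >>> decomposer.fit(X)
+    >>> decomposer.U_.data          # still a lazy dask array
+    >>> U = decomposer.U_.compute() # evaluate explicitly when needed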
""" - def __init__(self, n_modes=100, flip_signs=True, solver="auto", **kwargs): + def __init__(self, n_modes, flip_signs=True, compute=True, solver="auto", **kwargs): self.n_modes = n_modes self.flip_signs = flip_signs + self.compute = compute self.solver = solver self.solver_kwargs = kwargs @@ -106,6 +112,7 @@ def fit(self, X, dims=("sample", "feature")): elif (not use_complex) and use_dask: self.solver_kwargs.update({"k": self.n_modes}) U, s, VT = self._svd(X, dims, dask_svd, self.solver_kwargs) + U, s, VT = self._compute_svd_result(U, s, VT) else: err_msg = ( "Complex data together with dask is currently not implemented. See dask issue 7639 " @@ -180,3 +187,31 @@ def _svd(self, X, dims, func, kwargs): "1. Check for and remove any isolated NaNs in your dataset.\n" "2. If the error persists, please raise an issue at https://github.com/nicrie/xeofs/issues." ) + + def _compute_svd_result(self, U, s, VT): + """Computes the SVD result. + + Parameters + ---------- + U : DataArray + Left singular vectors. + s : DataArray + Singular values. + VT : DataArray + Right singular vectors. + + Returns + ------- + U : DataArray + Left singular vectors. + s : DataArray + Singular values. + VT : DataArray + Right singular vectors. + """ + if self.compute: + with ProgressBar(): + U = U.compute() + s = s.compute() + VT = VT.compute() + return U, s, VT diff --git a/xeofs/models/eeof.py b/xeofs/models/eeof.py index 765cb59..d522108 100644 --- a/xeofs/models/eeof.py +++ b/xeofs/models/eeof.py @@ -83,6 +83,7 @@ def __init__( center=True, standardize=False, use_coslat=False, + compute=self._compute, sample_name=self.sample_name, feature_name=self.feature_name, ) @@ -118,6 +119,7 @@ def _fit_algorithm(self, X: DataArray): center=True, standardize=False, use_coslat=False, + compute=self._compute, sample_name=self.sample_name, feature_name=self.feature_name, solver=self._params["solver"], diff --git a/xeofs/models/eof.py b/xeofs/models/eof.py index b458f83..6a4e13e 100644 --- a/xeofs/models/eof.py +++ b/xeofs/models/eof.py @@ -18,10 +18,20 @@ class EOF(_BaseModel): ---------- n_modes: int, default=10 Number of modes to calculate. + center: bool, default=True + Whether to center the input data. standardize: bool, default=False Whether to standardize the input data. use_coslat: bool, default=False Whether to use cosine of latitude for scaling. + sample_name: str, default="sample" + Name of the sample dimension. + feature_name: str, default="feature" + Name of the feature dimension. + compute: bool, default=True + Whether to compute the decomposition immediately. This is recommended + if the SVD result for the first ``n_modes`` can be accommodated in memory, as it + boosts computational efficiency compared to deferring the computation. solver: {"auto", "full", "randomized"}, default="auto" Solver to use for the SVD computation. 
solver_kwargs: dict, default={} @@ -43,6 +53,7 @@ def __init__( use_coslat=False, sample_name="sample", feature_name="feature", + compute: bool = True, solver="auto", solver_kwargs={}, **kwargs, @@ -54,6 +65,7 @@ def __init__( use_coslat=use_coslat, sample_name=sample_name, feature_name=feature_name, + compute=compute, solver=solver, solver_kwargs=solver_kwargs, **kwargs, @@ -71,7 +83,10 @@ def _fit_algorithm(self, data: DataArray) -> Self: n_modes = self._params["n_modes"] decomposer = Decomposer( - n_modes=n_modes, solver=self._params["solver"], **self._solver_kwargs + n_modes=n_modes, + solver=self._params["solver"], + compute=self._compute, + **self._solver_kwargs, ) decomposer.fit(data, dims=(sample_name, feature_name)) @@ -236,12 +251,6 @@ class ComplexEOF(EOF): ---------- n_modes : int Number of modes to calculate. - standardize : bool - Whether to standardize the input data. - use_coslat : bool - Whether to use cosine of latitude for scaling. - use_weights : bool - Whether to use weights. padding : str, optional Specifies the method used for padding the data prior to applying the Hilbert transform. This can help to mitigate the effect of spectral leakage. @@ -253,6 +262,24 @@ class ComplexEOF(EOF): A smaller value (e.g. 0.05) is recommended for data with high variability, while a larger value (e.g. 0.2) is recommended for data with low variability. Default is 0.2. + center: bool, default=True + Whether to center the input data. + standardize : bool + Whether to standardize the input data. + use_coslat : bool + Whether to use cosine of latitude for scaling. + sample_name: str, default="sample" + Name of the sample dimension. + feature_name: str, default="feature" + Name of the feature dimension. + compute: bool, default=True + Whether to compute the decomposition immediately. This is recommended + if the SVD result for the first ``n_modes`` can be accommodated in memory, as it + boosts computational efficiency compared to deferring the computation. + solver: {"auto", "full", "randomized"}, default="auto" + Solver to use for the SVD computation. + solver_kwargs: dict, default={} + Additional keyword arguments to be passed to the SVD solver. solver_kwargs : dict, optional Additional keyword arguments to be passed to the SVD solver. @@ -296,7 +323,10 @@ def _fit_algorithm(self, data: DataArray) -> Self: n_modes = self._params["n_modes"] decomposer = Decomposer( - n_modes=n_modes, solver=self._params["solver"], **self._solver_kwargs + n_modes=n_modes, + solver=self._params["solver"], + compute=self._compute, + **self._solver_kwargs, ) decomposer.fit(data) diff --git a/xeofs/models/eof_rotator.py b/xeofs/models/eof_rotator.py index 5c7e7fe..258b9ba 100644 --- a/xeofs/models/eof_rotator.py +++ b/xeofs/models/eof_rotator.py @@ -32,6 +32,8 @@ class EOFRotator(EOF): rtol : float, default=1e-8 Define the relative tolerance required to achieve convergence and terminate the iterative process. + compute: bool, default=True + Whether to compute the decomposition immediately. 
References ---------- @@ -53,7 +55,9 @@ def __init__( power: int = 1, max_iter: int = 1000, rtol: float = 1e-8, + compute: bool = True, ): + self._compute = compute # Define model parameters self._params = { "n_modes": n_modes, @@ -106,7 +110,10 @@ def _fit_algorithm(self, model) -> Self: loadings = components * np.sqrt(expvar) promax_kwargs = {"power": power, "max_iter": max_iter, "rtol": rtol} rot_loadings, rot_matrix, phi_matrix = promax( - loadings, feature_dim=self.feature_name, **promax_kwargs + loadings, + feature_dim=self.feature_name, + compute=self._compute, + **promax_kwargs ) # Assign coordinates to the rotation/correlation matrices @@ -271,6 +278,8 @@ class ComplexEOFRotator(EOFRotator, ComplexEOF): rtol : float, default=1e-8 Define the relative tolerance required to achieve convergence and terminate the iterative process. + compute: bool, default=True + Whether to compute the decomposition immediately. References ---------- diff --git a/xeofs/models/mca.py b/xeofs/models/mca.py index 0de2da7..4a03d1d 100644 --- a/xeofs/models/mca.py +++ b/xeofs/models/mca.py @@ -20,16 +20,12 @@ class MCA(_BaseCrossModel): ---------- n_modes: int, default=10 Number of modes to calculate. + center: bool, default=True + Whether to center the input data. standardize: bool, default=False Whether to standardize the input data. use_coslat: bool, default=False Whether to use cosine of latitude for scaling. - use_weights: bool, default=False - Whether to use additional weights. - solver: {"auto", "full", "randomized"}, default="auto" - Solver to use for the SVD computation. - solver_kwargs: dict, default={} - Additional keyword arguments passed to the SVD solver. n_pca_modes: int, default=None The number of principal components to retain during the PCA preprocessing step applied to both data sets prior to executing MCA. @@ -38,6 +34,16 @@ class MCA(_BaseCrossModel): only the specified number of principal components. This reduction in dimensionality can be especially beneficial when dealing with high-dimensional data, where computing the cross-covariance matrix can become computationally intensive or in scenarios where multicollinearity is a concern. + compute: bool, default=True + Whether to compute the decomposition immediately. + sample_name: str, default="sample" + Name of the new sample dimension. + feature_name: str, default="feature" + Name of the new feature dimension. + solver: {"auto", "full", "randomized"}, default="auto" + Solver to use for the SVD computation. + solver_kwargs: dict, default={} + Additional keyword arguments passed to the SVD solver. 
Notes ----- @@ -57,8 +63,31 @@ class MCA(_BaseCrossModel): """ - def __init__(self, solver_kwargs={}, **kwargs): - super().__init__(solver_kwargs=solver_kwargs, **kwargs) + def __init__( + self, + n_modes=10, + center=True, + standardize=False, + use_coslat=False, + n_pca_modes=None, + compute=True, + sample_name="sample", + feature_name="feature", + solver="auto", + solver_kwargs={}, + ): + super().__init__( + n_modes=n_modes, + center=center, + standardize=standardize, + use_coslat=use_coslat, + n_pca_modes=n_pca_modes, + compute=compute, + sample_name=sample_name, + feature_name=feature_name, + solver=solver, + solver_kwargs=solver_kwargs, + ) self.attrs.update({"model": "MCA"}) def _compute_cross_covariance_matrix(self, X1, X2): @@ -87,6 +116,7 @@ def _fit_algorithm( decomposer = Decomposer( n_modes=self._params["n_modes"], solver=self._params["solver"], + compute=self._compute, **self._solver_kwargs, ) @@ -533,12 +563,6 @@ class ComplexMCA(MCA): ---------- n_modes: int, default=10 Number of modes to calculate. - standardize: bool, default=False - Whether to standardize the input data. - use_coslat: bool, default=False - Whether to use cosine of latitude for scaling. - use_weights: bool, default=False - Whether to use additional weights. padding : str, optional Specifies the method used for padding the data prior to applying the Hilbert transform. This can help to mitigate the effect of spectral leakage. @@ -550,6 +574,28 @@ class ComplexMCA(MCA): A smaller value (e.g. 0.05) is recommended for data with high variability, while a larger value (e.g. 0.2) is recommended for data with low variability. Default is 0.2. + center: bool, default=True + Whether to center the input data. + standardize: bool, default=False + Whether to standardize the input data. + use_coslat: bool, default=False + Whether to use cosine of latitude for scaling. + n_pca_modes: int, default=None + The number of principal components to retain during the PCA preprocessing + step applied to both data sets prior to executing MCA. + If set to None, PCA preprocessing will be bypassed, and the MCA will be performed on the original datasets. + Specifying an integer value greater than 0 for `n_pca_modes` will trigger the PCA preprocessing, retaining + only the specified number of principal components. This reduction in dimensionality can be especially beneficial + when dealing with high-dimensional data, where computing the cross-covariance matrix can become computationally + intensive or in scenarios where multicollinearity is a concern. + compute: bool, default=True + Whether to compute the decomposition immediately. + sample_name: str, default="sample" + Name of the new sample dimension. + feature_name: str, default="feature" + Name of the new feature dimension. + solver: {"auto", "full", "randomized"}, default="auto" + Solver to use for the SVD computation. solver_kwargs: dict, default={} Additional keyword arguments passed to the SVD solver. 
@@ -575,8 +621,33 @@ class ComplexMCA(MCA): """ - def __init__(self, padding="exp", decay_factor=0.2, **kwargs): - super().__init__(**kwargs) + def __init__( + self, + n_modes=10, + padding="exp", + decay_factor=0.2, + center=True, + standardize=False, + use_coslat=False, + n_pca_modes=None, + compute=True, + sample_name="sample", + feature_name="feature", + solver="auto", + solver_kwargs={}, + ): + super().__init__( + n_modes=n_modes, + center=center, + standardize=standardize, + use_coslat=use_coslat, + n_pca_modes=n_pca_modes, + compute=compute, + sample_name=sample_name, + feature_name=feature_name, + solver=solver, + solver_kwargs=solver_kwargs, + ) self.attrs.update({"model": "Complex MCA"}) self._params.update({"padding": padding, "decay_factor": decay_factor}) @@ -594,6 +665,7 @@ def _fit_algorithm(self, data1: DataArray, data2: DataArray) -> Self: decomposer = Decomposer( n_modes=self._params["n_modes"], solver=self._params["solver"], + compute=self._compute, **self._solver_kwargs, ) diff --git a/xeofs/models/mca_rotator.py b/xeofs/models/mca_rotator.py index f2dc14c..d73a93b 100644 --- a/xeofs/models/mca_rotator.py +++ b/xeofs/models/mca_rotator.py @@ -37,6 +37,8 @@ class MCARotator(MCA): conserving the squared covariance under rotation. This allows estimation of mode importance after rotation. If False, the combined vectors are loaded with the square root of the singular values, following the method described by Cheng & Dunkerton [1]_. + compute : bool, default=True + Whether to compute the decomposition immediately. References ---------- @@ -59,7 +61,9 @@ def __init__( max_iter: int = 1000, rtol: float = 1e-8, squared_loadings: bool = False, + compute: bool = True, ): + self._compute = compute # Define model parameters self._params = { "n_modes": n_modes, @@ -158,7 +162,10 @@ def fit(self, model: MCA | ComplexMCA): # Rotate loadings promax_kwargs = {"power": power, "max_iter": max_iter, "rtol": rtol} rot_loadings, rot_matrix, phi_matrix = promax( - loadings=loadings, feature_dim=feature_name, **promax_kwargs + loadings=loadings, + feature_dim=feature_name, + compute=self._compute, + **promax_kwargs ) # Assign coordinates to the rotation/correlation matrices diff --git a/xeofs/models/opa.py b/xeofs/models/opa.py index f0fe69d..97fde8a 100644 --- a/xeofs/models/opa.py +++ b/xeofs/models/opa.py @@ -24,8 +24,24 @@ class OPA(_BaseModel): Number of optimal persistence patterns (OPP) to be computed. tau_max : int Maximum time lag for the computation of the covariance matrix. + center : bool, default=True + Whether to center the input data. + standardize : bool, default=False + Whether to standardize the input data. + use_coslat : bool, default=False + Whether to use cosine of latitude for scaling. n_pca_modes : int Number of modes to be computed in the pre-processing step using EOF. + compute : bool, default=True + Whether to compute the decomposition immediately. + sample_name : str, default="sample" + Name of the sample dimension. + feature_name : str, default="feature" + Name of the feature dimension. + solver : {"auto", "full", "randomized"}, default="auto" + Solver to use for the SVD computation. + solver_kwargs : dict, default={} + Additional keyword arguments to pass to the solver. 
 References
 ----------
@@ -48,12 +64,35 @@ class OPA(_BaseModel):
     >>> decorrelation_time = model.decorrelation_time()
     """
 
-    def __init__(self, n_modes, tau_max, n_pca_modes, **kwargs):
+    def __init__(
+        self,
+        n_modes,
+        tau_max,
+        center=True,
+        standardize=False,
+        use_coslat=False,
+        n_pca_modes=100,
+        compute=True,
+        sample_name="sample",
+        feature_name="feature",
+        solver="auto",
+        solver_kwargs={},
+    ):
         if n_modes > n_pca_modes:
             raise ValueError(
                 f"n_modes must be smaller or equal to n_pca_modes (n_modes={n_modes}, n_pca_modes={n_pca_modes})"
             )
-        super().__init__(n_modes=n_modes, **kwargs)
+        super().__init__(
+            n_modes=n_modes,
+            center=center,
+            standardize=standardize,
+            use_coslat=use_coslat,
+            compute=compute,
+            sample_name=sample_name,
+            feature_name=feature_name,
+            solver=solver,
+            solver_kwargs=solver_kwargs,
+        )
         self.attrs.update({"model": "OPA"})
         self._params.update({"tau_max": tau_max, "n_pca_modes": n_pca_modes})
@@ -86,7 +125,9 @@ def _fit_algorithm(self, data: DataArray) -> Self:
         feature_name = self.feature_name
 
         # Perform PCA as a pre-processing step
-        pca = EOF(n_modes=self._params["n_pca_modes"], use_coslat=False)
+        pca = EOF(
+            n_modes=self._params["n_pca_modes"], use_coslat=False, compute=self._compute
+        )
         pca.fit(data, dim=sample_name)
         n_samples = data.coords[sample_name].size
         comps = pca.data["components"] * np.sqrt(n_samples - 1)
@@ -119,7 +160,9 @@ def _fit_algorithm(self, data: DataArray) -> Self:
         # using a symmetric matrix given in
         # A. Hannachi (2021), Patterns Identification and
         # Data Mining in Weather and Climate, Equation (8.20)
-        decomposer = Decomposer(n_modes=C0.shape[0], flip_signs=False, solver="full")
+        decomposer = Decomposer(
+            n_modes=C0.shape[0], flip_signs=False, compute=self._compute, solver="full"
+        )
         decomposer.fit(C0, dims=("feature1", "feature2"))
         C0_sqrt = decomposer.U_ * np.sqrt(decomposer.s_)
         # -> C0_sqrt (feature1 x mode)
diff --git a/xeofs/utils/rotation.py b/xeofs/utils/rotation.py
index 33577b8..8f57d9d 100644
--- a/xeofs/utils/rotation.py
+++ b/xeofs/utils/rotation.py
@@ -4,7 +4,7 @@
 from .data_types import DataArray
 
-def promax(loadings: DataArray, feature_dim, **kwargs):
+def promax(loadings: DataArray, feature_dim, compute=True, **kwargs):
     rotated, rot_mat, phi_mat = xr.apply_ufunc(
         _promax,
         loadings,
@@ -17,6 +17,10 @@
         kwargs=kwargs,
         dask="allowed",
     )
+    if compute:
+        rotated = rotated.compute()
+        rot_mat = rot_mat.compute()
+        phi_mat = phi_mat.compute()
 
     return rotated, rot_mat, phi_mat

From 43e9274a3eb270a6e331e2f73f77a785e3eb6654 Mon Sep 17 00:00:00 2001
From: Niclas Rieger
Date: Mon, 23 Oct 2023 15:34:25 +0200
Subject: [PATCH 35/43] fix(GWPCA): raise error in scores

Scores aren't supported by GWPCA yet.
---
 xeofs/models/gwpca.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/xeofs/models/gwpca.py b/xeofs/models/gwpca.py
index 4b1e2ac..e5d202f 100644
--- a/xeofs/models/gwpca.py
+++ b/xeofs/models/gwpca.py
@@ -244,6 +244,9 @@ def largest_locally_weighted_components(self):
         llwc.name = "largest_locally_weighted_components"
         return self.preprocessor.inverse_transform_scores(llwc)
 
+    def scores(self):
+        raise NotImplementedError("GWPCA does not support scores() yet.")
+
     def _transform_algorithm(self, data: DataArray) -> DataArray:
         raise NotImplementedError("GWPCA does not support transform() yet.")

From 98c5cef82e542daa7cf7f003c52666bbedba994b Mon Sep 17 00:00:00 2001
From: Niclas Rieger
Date: Mon, 23 Oct 2023 15:54:42 +0200
Subject: [PATCH 36/43] fix: streamline model
attribute types

Boolean and None values cannot be serialized by xarray, which
previously required converting attributes manually before to_netcdf
could be used. Model attributes are now streamlined internally by
converting any boolean or None values to strings (resolves #89).
---
 xeofs/data_container/data_container.py | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/xeofs/data_container/data_container.py b/xeofs/data_container/data_container.py
index 38a6340..0f1e25f 100644
--- a/xeofs/data_container/data_container.py
+++ b/xeofs/data_container/data_container.py
@@ -1,3 +1,4 @@
+from typing import Dict
 from dask.diagnostics.progress import ProgressBar
 
 from ..utils.data_types import DataArray
@@ -34,6 +35,17 @@ def compute(self, verbose=False):
             else:
                 self[k] = v.compute()
 
-    def set_attrs(self, attrs: dict):
+    def _validate_attrs(self, attrs: Dict) -> Dict:
+        """Convert any boolean and None values to strings"""
+        for key, value in attrs.items():
+            if isinstance(value, bool):
+                attrs[key] = str(value)
+            elif value is None:
+                attrs[key] = "None"
+
+        return attrs
+
+    def set_attrs(self, attrs: Dict):
+        attrs = self._validate_attrs(attrs)
         for key in self.keys():
             self[key].attrs = attrs

From cf8b641fffa92892fb7168b0e20dcdd7275cc736 Mon Sep 17 00:00:00 2001
From: Niclas Rieger
Date: Mon, 23 Oct 2023 16:54:45 +0200
Subject: [PATCH 37/43] feat: provide standard kwarg for random_state

resolves #90
---
 tests/models/test_decomposer.py   | 33 ++++++++++++++++++++++++++++++-
 xeofs/models/_base_cross_model.py | 21 ++++++++++++++++----
 xeofs/models/_base_model.py       | 12 +++++++++--
 xeofs/models/decomposer.py        | 28 ++++++++++++++++++++++----
 xeofs/models/eeof.py              |  4 ++--
 xeofs/models/eof.py               | 16 ++++-----------
 xeofs/models/eof_rotator.py       |  4 ++--
 xeofs/models/mca.py               | 22 ++++++++++-----------
 xeofs/models/opa.py               | 17 ++++++++++++++--
 9 files changed, 116 insertions(+), 41 deletions(-)

diff --git a/tests/models/test_decomposer.py b/tests/models/test_decomposer.py
index 861563e..2cdb86a 100644
--- a/tests/models/test_decomposer.py
+++ b/tests/models/test_decomposer.py
@@ -38,7 +38,7 @@ def test_complex_dask_data_array(mock_complex_data_array):
 
 def test_init(decomposer):
     assert decomposer.n_modes == 2
-    assert decomposer.solver_kwargs["random_state"] == 42
+    assert decomposer.random_state == 42
 
 def test_fit_full(mock_data_array):
@@ -146,3 +146,34 @@ def test_fit_complex(mock_complex_data_array):
     # Check that U and V are complex
     assert np.iscomplexobj(decomposer.U_.data)
     assert np.iscomplexobj(decomposer.V_.data)
+
+
+@pytest.mark.parametrize(
+    "data",
+    ["real", "complex", "dask_real"],
+)
+def test_random_state(
+    data, mock_data_array, mock_complex_data_array, mock_dask_data_array
+):
+    match data:
+        case "real":
+            X = mock_data_array
+        case "complex":
+            X = mock_complex_data_array
+        case "dask_real":
+            X = mock_dask_data_array
+        case _:
+            raise ValueError(f"Unrecognized data type '{data}'.")
+
+    decomposer = Decomposer(
+        n_modes=2, solver="randomized", random_state=42, compute=True
+    )
+    decomposer.fit(X)
+    U1 = decomposer.U_.data
+
+    # Refit
+    decomposer.fit(X)
+    U2 = decomposer.U_.data
+
+    # Check that the results are the same
+    assert np.all(U1 == U2)
diff --git a/xeofs/models/_base_cross_model.py b/xeofs/models/_base_cross_model.py
index 128ebf9..85467a2 100644
--- a/xeofs/models/_base_cross_model.py
+++ b/xeofs/models/_base_cross_model.py
@@ -51,11 +51,12 @@ def __init__(
         sample_name="sample",
         feature_name="feature",
         solver="auto",
+        random_state=None,
         solver_kwargs={},
     ):
+
self.n_modes = n_modes self.sample_name = sample_name self.feature_name = feature_name - self._compute = compute # Define model parameters self._params = { @@ -64,9 +65,17 @@ def __init__( "standardize": standardize, "use_coslat": use_coslat, "n_pca_modes": n_pca_modes, + "compute": compute, + "sample_name": sample_name, + "feature_name": feature_name, "solver": solver, + "random_state": random_state, } + self._solver_kwargs = solver_kwargs + self._solver_kwargs.update( + {"solver": solver, "random_state": random_state, "compute": compute} + ) self._preprocessor_kwargs = { "sample_name": sample_name, "feature_name": feature_name, @@ -77,7 +86,6 @@ def __init__( # Define analysis-relevant meta data self.attrs = {"model": "BaseCrossModel"} - self.attrs.update(self._params) self.attrs.update( { "software": "xeofs", @@ -85,6 +93,7 @@ def __init__( "date": datetime.now().strftime("%Y-%m-%d %H:%M:%S"), } ) + self.attrs.update(self._params) # Initialize preprocessors to scale and stack left (1) and right (2) data self.preprocessor1 = Preprocessor(**self._preprocessor_kwargs) @@ -95,10 +104,14 @@ def __init__( # Initialize PCA objects self.pca1 = ( - EOF(n_modes=n_pca_modes, compute=self._compute) if n_pca_modes else None + EOF(n_modes=n_pca_modes, compute=self._params["compute"]) + if n_pca_modes + else None ) self.pca2 = ( - EOF(n_modes=n_pca_modes, compute=self._compute) if n_pca_modes else None + EOF(n_modes=n_pca_modes, compute=self._params["compute"]) + if n_pca_modes + else None ) def fit( diff --git a/xeofs/models/_base_model.py b/xeofs/models/_base_model.py index 18e942d..42350ae 100644 --- a/xeofs/models/_base_model.py +++ b/xeofs/models/_base_model.py @@ -64,26 +64,33 @@ def __init__( sample_name="sample", feature_name="feature", compute=True, + random_state=None, solver="auto", solver_kwargs={}, ): self.n_modes = n_modes self.sample_name = sample_name self.feature_name = feature_name - self._compute = compute + # Define model parameters self._params = { "n_modes": n_modes, "center": center, "standardize": standardize, "use_coslat": use_coslat, + "sample_name": sample_name, + "feature_name": feature_name, + "random_state": random_state, + "compute": compute, "solver": solver, } self._solver_kwargs = solver_kwargs + self._solver_kwargs.update( + {"solver": solver, "random_state": random_state, "compute": compute} + ) # Define analysis-relevant meta data self.attrs = {"model": "BaseModel"} - self.attrs.update(self._params) self.attrs.update( { "software": "xeofs", @@ -91,6 +98,7 @@ def __init__( "date": datetime.now().strftime("%Y-%m-%d %H:%M:%S"), } ) + self.attrs.update(self._params) # Initialize the Preprocessor to scale and stack the data self.preprocessor = Preprocessor( diff --git a/xeofs/models/decomposer.py b/xeofs/models/decomposer.py index b8d7383..2416308 100644 --- a/xeofs/models/decomposer.py +++ b/xeofs/models/decomposer.py @@ -6,6 +6,7 @@ from sklearn.utils.extmath import randomized_svd from scipy.sparse.linalg import svds as complex_svd # type: ignore from dask.array.linalg import svd_compressed as dask_svd +from typing import Optional class Decomposer: @@ -29,15 +30,26 @@ class Decomposer: than 80% of the smallest dimension of the data, then the more efficient `randomized` method is enabled. Otherwise the exact full SVD is computed and optionally truncated afterwards. + random_state : Optional[int], default=None + Seed for the random number generator. **kwargs Additional keyword arguments passed to the SVD solver. 
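+
+    Examples
+    --------
+    A sketch of a reproducible randomized decomposition, mirroring the
+    test above (``X`` is a placeholder for any supported 2D DataArray):
+
+    >>> decomposer = Decomposer(n_modes=2, solver="randomized", random_state=42)
+    >>> decomposer.fit(X)
+    >>> U1 = decomposer.U_.data
+    >>> decomposer.fit(X)              # refit with the same seed
+    >>> bool(np.all(U1 == decomposer.U_.data))
+    True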
""" - def __init__(self, n_modes, flip_signs=True, compute=True, solver="auto", **kwargs): + def __init__( + self, + n_modes: int, + flip_signs: bool = True, + compute: bool = True, + solver: str = "auto", + random_state: Optional[int] = None, + **kwargs, + ): self.n_modes = n_modes self.flip_signs = flip_signs self.compute = compute self.solver = solver + self.random_state = random_state self.solver_kwargs = kwargs def fit(self, X, dims=("sample", "feature")): @@ -95,13 +107,21 @@ def fit(self, X, dims=("sample", "feature")): # Use randomized SVD for large, real-valued data sets elif (not use_complex) and (not use_dask): - self.solver_kwargs.update({"n_components": self.n_modes}) + self.solver_kwargs.update( + {"n_components": self.n_modes, "random_state": self.random_state} + ) U, s, VT = self._svd(X, dims, randomized_svd, self.solver_kwargs) # Use scipy sparse SVD for large, complex-valued data sets elif use_complex and (not use_dask): # Scipy sparse version - self.solver_kwargs.update({"k": self.n_modes, "solver": "lobpcg"}) + self.solver_kwargs.update( + { + "k": self.n_modes, + "solver": "lobpcg", + "random_state": self.random_state, + } + ) U, s, VT = self._svd(X, dims, complex_svd, self.solver_kwargs) idx_sort = np.argsort(s)[::-1] U = U[:, idx_sort] @@ -110,7 +130,7 @@ def fit(self, X, dims=("sample", "feature")): # Use dask SVD for large, real-valued, delayed data sets elif (not use_complex) and use_dask: - self.solver_kwargs.update({"k": self.n_modes}) + self.solver_kwargs.update({"k": self.n_modes, "seed": self.random_state}) U, s, VT = self._svd(X, dims, dask_svd, self.solver_kwargs) U, s, VT = self._compute_svd_result(U, s, VT) else: diff --git a/xeofs/models/eeof.py b/xeofs/models/eeof.py index d522108..019e5b0 100644 --- a/xeofs/models/eeof.py +++ b/xeofs/models/eeof.py @@ -83,7 +83,7 @@ def __init__( center=True, standardize=False, use_coslat=False, - compute=self._compute, + compute=self._params["compute"], sample_name=self.sample_name, feature_name=self.feature_name, ) @@ -119,7 +119,7 @@ def _fit_algorithm(self, X: DataArray): center=True, standardize=False, use_coslat=False, - compute=self._compute, + compute=self._params["compute"], sample_name=self.sample_name, feature_name=self.feature_name, solver=self._params["solver"], diff --git a/xeofs/models/eof.py b/xeofs/models/eof.py index 6a4e13e..bcf1be7 100644 --- a/xeofs/models/eof.py +++ b/xeofs/models/eof.py @@ -54,6 +54,7 @@ def __init__( sample_name="sample", feature_name="feature", compute: bool = True, + random_state=None, solver="auto", solver_kwargs={}, **kwargs, @@ -66,6 +67,7 @@ def __init__( sample_name=sample_name, feature_name=feature_name, compute=compute, + random_state=random_state, solver=solver, solver_kwargs=solver_kwargs, **kwargs, @@ -82,12 +84,7 @@ def _fit_algorithm(self, data: DataArray) -> Self: # Decompose the data n_modes = self._params["n_modes"] - decomposer = Decomposer( - n_modes=n_modes, - solver=self._params["solver"], - compute=self._compute, - **self._solver_kwargs, - ) + decomposer = Decomposer(n_modes=n_modes, **self._solver_kwargs) decomposer.fit(data, dims=(sample_name, feature_name)) singular_values = decomposer.s_ @@ -322,12 +319,7 @@ def _fit_algorithm(self, data: DataArray) -> Self: # Decompose the complex data n_modes = self._params["n_modes"] - decomposer = Decomposer( - n_modes=n_modes, - solver=self._params["solver"], - compute=self._compute, - **self._solver_kwargs, - ) + decomposer = Decomposer(n_modes=n_modes, **self._solver_kwargs) decomposer.fit(data) singular_values = 
decomposer.s_ diff --git a/xeofs/models/eof_rotator.py b/xeofs/models/eof_rotator.py index 258b9ba..7047c9d 100644 --- a/xeofs/models/eof_rotator.py +++ b/xeofs/models/eof_rotator.py @@ -57,13 +57,13 @@ def __init__( rtol: float = 1e-8, compute: bool = True, ): - self._compute = compute # Define model parameters self._params = { "n_modes": n_modes, "power": power, "max_iter": max_iter, "rtol": rtol, + "compute": compute, } # Define analysis-relevant meta data @@ -112,7 +112,7 @@ def _fit_algorithm(self, model) -> Self: rot_loadings, rot_matrix, phi_matrix = promax( loadings, feature_dim=self.feature_name, - compute=self._compute, + compute=self._params["compute"], **promax_kwargs ) diff --git a/xeofs/models/mca.py b/xeofs/models/mca.py index 4a03d1d..ff777d6 100644 --- a/xeofs/models/mca.py +++ b/xeofs/models/mca.py @@ -42,6 +42,8 @@ class MCA(_BaseCrossModel): Name of the new feature dimension. solver: {"auto", "full", "randomized"}, default="auto" Solver to use for the SVD computation. + random_state: int, default=None + Seed for the random number generator. solver_kwargs: dict, default={} Additional keyword arguments passed to the SVD solver. @@ -74,6 +76,7 @@ def __init__( sample_name="sample", feature_name="feature", solver="auto", + random_state=None, solver_kwargs={}, ): super().__init__( @@ -86,6 +89,7 @@ def __init__( sample_name=sample_name, feature_name=feature_name, solver=solver, + random_state=random_state, solver_kwargs=solver_kwargs, ) self.attrs.update({"model": "MCA"}) @@ -113,12 +117,7 @@ def _fit_algorithm( feature_name = self.feature_name # Initialize the SVD decomposer - decomposer = Decomposer( - n_modes=self._params["n_modes"], - solver=self._params["solver"], - compute=self._compute, - **self._solver_kwargs, - ) + decomposer = Decomposer(n_modes=self._params["n_modes"], **self._solver_kwargs) # Perform SVD on PCA-reduced data if (self.pca1 is not None) and (self.pca2 is not None): @@ -596,6 +595,8 @@ class ComplexMCA(MCA): Name of the new feature dimension. solver: {"auto", "full", "randomized"}, default="auto" Solver to use for the SVD computation. + random_state: int, optional + Random state for randomized SVD solver. solver_kwargs: dict, default={} Additional keyword arguments passed to the SVD solver. 
@@ -634,6 +635,7 @@ def __init__( sample_name="sample", feature_name="feature", solver="auto", + random_state=None, solver_kwargs={}, ): super().__init__( @@ -646,6 +648,7 @@ def __init__( sample_name=sample_name, feature_name=feature_name, solver=solver, + random_state=random_state, solver_kwargs=solver_kwargs, ) self.attrs.update({"model": "Complex MCA"}) @@ -662,12 +665,7 @@ def _fit_algorithm(self, data1: DataArray, data2: DataArray) -> Self: } # Initialize the SVD decomposer - decomposer = Decomposer( - n_modes=self._params["n_modes"], - solver=self._params["solver"], - compute=self._compute, - **self._solver_kwargs, - ) + decomposer = Decomposer(n_modes=self._params["n_modes"], **self._solver_kwargs) # Perform SVD on PCA-reduced data if (self.pca1 is not None) and (self.pca2 is not None): diff --git a/xeofs/models/opa.py b/xeofs/models/opa.py index 97fde8a..e98266b 100644 --- a/xeofs/models/opa.py +++ b/xeofs/models/opa.py @@ -76,6 +76,7 @@ def __init__( sample_name="sample", feature_name="feature", solver="auto", + random_state=None, solver_kwargs={}, ): if n_modes > n_pca_modes: @@ -91,6 +92,7 @@ def __init__( sample_name=sample_name, feature_name=feature_name, solver=solver, + random_state=random_state, solver_kwargs=solver_kwargs, ) self.attrs.update({"model": "OPA"}) @@ -126,7 +128,15 @@ def _fit_algorithm(self, data: DataArray) -> Self: # Perform PCA as a pre-processing step pca = EOF( - n_modes=self._params["n_pca_modes"], use_coslat=False, compute=self._compute + n_modes=self._params["n_pca_modes"], + standardize=False, + use_coslat=False, + sample_name=self.sample_name, + feature_name=self.feature_name, + solver=self._params["solver"], + compute=self._params["compute"], + random_state=self._params["random_state"], + solver_kwargs=self._solver_kwargs, ) pca.fit(data, dim=sample_name) n_samples = data.coords[sample_name].size @@ -161,7 +171,10 @@ def _fit_algorithm(self, data: DataArray) -> Self: # A. 
Hannachi (2021), Patterns Identification and # Data Mining in Weather and Climate, Equation (8.20) decomposer = Decomposer( - n_modes=C0.shape[0], flip_signs=False, compute=self._compute, solver="full" + n_modes=C0.shape[0], + flip_signs=False, + compute=self._params["compute"], + solver="full", ) decomposer.fit(C0, dims=("feature1", "feature2")) C0_sqrt = decomposer.U_ * np.sqrt(decomposer.s_) From 6be892354a208a95d7fe56ebbd3ca90ec86dc31b Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Mon, 23 Oct 2023 17:57:45 +0200 Subject: [PATCH 38/43] docs: provide top-level type hints --- xeofs/models/cca.py | 16 +++---- xeofs/models/eeof.py | 23 ++++++++- xeofs/models/eof.py | 93 +++++++++++++++++++++++-------------- xeofs/models/eof_rotator.py | 19 ++++++-- xeofs/models/mca.py | 54 ++++++++++----------- xeofs/models/opa.py | 26 +++++------ 6 files changed, 142 insertions(+), 89 deletions(-) diff --git a/xeofs/models/cca.py b/xeofs/models/cca.py index b2d0d0e..a41678c 100644 --- a/xeofs/models/cca.py +++ b/xeofs/models/cca.py @@ -265,14 +265,14 @@ class CCA(CCABaseModel): def __init__( self, - n_modes, - use_coslat=False, - c=0, - pca=True, - variance_fraction=0.99, - init_pca_modes=0.75, - compute=True, - eps=1e-6, + n_modes: int = 2, + use_coslat: bool = False, + c: float = 0, + pca: bool = True, + variance_fraction: float = 0.99, + init_pca_modes: float = 0.75, + compute: bool = True, + eps: float = 1e-6, ): super().__init__( n_modes=n_modes, diff --git a/xeofs/models/eeof.py b/xeofs/models/eeof.py index 019e5b0..82a8654 100644 --- a/xeofs/models/eeof.py +++ b/xeofs/models/eeof.py @@ -67,9 +67,28 @@ def __init__( tau: int, embedding: int, n_pca_modes: Optional[int] = None, - **kwargs, + center: bool = True, + standardize: bool = False, + use_coslat: bool = False, + sample_name: str = "sample", + feature_name: str = "feature", + compute: bool = True, + solver: str = "auto", + random_state: Optional[int] = None, + solver_kwargs: dict = {}, ): - super().__init__(n_modes=n_modes, **kwargs) + super().__init__( + n_modes=n_modes, + center=center, + standardize=standardize, + use_coslat=use_coslat, + sample_name=sample_name, + feature_name=feature_name, + compute=compute, + solver=solver, + random_state=random_state, + solver_kwargs=solver_kwargs, + ) self.attrs.update({"model": "Extended EOF Analysis"}) self._params.update( {"tau": tau, "embedding": embedding, "n_pca_modes": n_pca_modes} diff --git a/xeofs/models/eof.py b/xeofs/models/eof.py index bcf1be7..3265be5 100644 --- a/xeofs/models/eof.py +++ b/xeofs/models/eof.py @@ -1,4 +1,4 @@ -from typing import Self +from typing import Self, Optional, Dict import numpy as np import xarray as xr @@ -12,7 +12,7 @@ class EOF(_BaseModel): """Empirical Orthogonal Functions (EOF) analysis. - EOF analysis is more commonly referend to as principal component analysis (PCA). + More commonly known as Principal Component Analysis (PCA). 
 Parameters
 ----------
@@ -47,16 +47,16 @@ class EOF(_BaseModel):
 
     def __init__(
         self,
-        n_modes=10,
-        center=True,
-        standardize=False,
-        use_coslat=False,
-        sample_name="sample",
-        feature_name="feature",
+        n_modes: int = 2,
+        center: bool = True,
+        standardize: bool = False,
+        use_coslat: bool = False,
+        sample_name: str = "sample",
+        feature_name: str = "feature",
         compute: bool = True,
-        random_state=None,
-        solver="auto",
-        solver_kwargs={},
+        random_state: Optional[int] = None,
+        solver: str = "auto",
+        solver_kwargs: Dict = {},
         **kwargs,
     ):
         super().__init__(
@@ -162,7 +162,7 @@ def components(self) -> DataObject:
         """
         return super().components()
 
-    def scores(self, normalized=True) -> DataArray:
+    def scores(self, normalized: bool = True) -> DataArray:
         """Return the (PC) scores.
 
         The scores in EOF analysis are the projection of the data matrix onto the
@@ -259,26 +259,26 @@ class ComplexEOF(EOF):
         A smaller value (e.g. 0.05) is recommended for
         data with high variability, while a larger value (e.g. 0.2) is recommended
         for data with low variability. Default is 0.2.
-        center: bool, default=True
-            Whether to center the input data.
-        standardize : bool
-            Whether to standardize the input data.
-        use_coslat : bool
-            Whether to use cosine of latitude for scaling.
-        sample_name: str, default="sample"
-            Name of the sample dimension.
-        feature_name: str, default="feature"
-            Name of the feature dimension.
-        compute: bool, default=True
-            Whether to compute the decomposition immediately. This is recommended
-            if the SVD result for the first ``n_modes`` can be accommodated in memory, as it
-            boosts computational efficiency compared to deferring the computation.
-        solver: {"auto", "full", "randomized"}, default="auto"
-            Solver to use for the SVD computation.
-        solver_kwargs: dict, default={}
-            Additional keyword arguments to be passed to the SVD solver.
-        solver_kwargs : dict, optional
-            Additional keyword arguments to be passed to the SVD solver.
+    center: bool, default=True
+        Whether to center the input data.
+    standardize : bool
+        Whether to standardize the input data.
+    use_coslat : bool
+        Whether to use cosine of latitude for scaling.
+    sample_name: str, default="sample"
+        Name of the sample dimension.
+    feature_name: str, default="feature"
+        Name of the feature dimension.
+    compute: bool, default=True
+        Whether to compute the decomposition immediately. This is recommended
+        if the SVD result for the first ``n_modes`` can be accommodated in memory, as it
+        boosts computational efficiency compared to deferring the computation.
+    solver: {"auto", "full", "randomized"}, default="auto"
+        Solver to use for the SVD computation.
+    solver_kwargs: dict, default={}
+        Additional keyword arguments to be passed to the SVD solver.
References ---------- @@ -294,8 +294,33 @@ class ComplexEOF(EOF): """ - def __init__(self, padding="exp", decay_factor=0.2, **kwargs): - super().__init__(**kwargs) + def __init__( + self, + n_modes: int = 2, + padding: str = "exp", + decay_factor: float = 0.2, + center: bool = True, + standardize: bool = False, + use_coslat: bool = False, + sample_name: str = "sample", + feature_name: str = "feature", + compute: bool = True, + random_state: Optional[int] = None, + solver: str = "auto", + solver_kwargs: Dict = {}, + ): + super().__init__( + n_modes=n_modes, + center=center, + standardize=standardize, + use_coslat=use_coslat, + sample_name=sample_name, + feature_name=feature_name, + compute=compute, + random_state=random_state, + solver=solver, + solver_kwargs=solver_kwargs, + ) self.attrs.update({"model": "Complex EOF analysis"}) self._params.update({"padding": padding, "decay_factor": decay_factor}) diff --git a/xeofs/models/eof_rotator.py b/xeofs/models/eof_rotator.py index 7047c9d..5af1535 100644 --- a/xeofs/models/eof_rotator.py +++ b/xeofs/models/eof_rotator.py @@ -21,7 +21,7 @@ class EOFRotator(EOF): Parameters ---------- - n_modes : int, default=10 + n_modes : int, default=2 Specify the number of modes to be rotated. power : int, default=1 Set the power for the Promax rotation. A ``power`` value of 1 results @@ -51,7 +51,7 @@ class EOFRotator(EOF): def __init__( self, - n_modes: int = 10, + n_modes: int = 2, power: int = 1, max_iter: int = 1000, rtol: float = 1e-8, @@ -267,7 +267,7 @@ class ComplexEOFRotator(EOFRotator, ComplexEOF): Parameters ---------- - n_modes : int, default=10 + n_modes : int, default=2 Specify the number of modes to be rotated. power : int, default=1 Set the power for the Promax rotation. A ``power`` value of 1 results @@ -298,8 +298,17 @@ class ComplexEOFRotator(EOFRotator, ComplexEOF): """ - def __init__(self, **kwargs): - super().__init__(**kwargs) + def __init__( + self, + n_modes: int = 2, + power: int = 1, + max_iter: int = 1000, + rtol: float = 1e-8, + compute: bool = True, + ): + super().__init__( + n_modes=n_modes, power=power, max_iter=max_iter, rtol=rtol, compute=compute + ) self.attrs.update({"model": "Rotated Complex EOF analysis"}) def _transform_algorithm(self, data: DataArray) -> DataArray: diff --git a/xeofs/models/mca.py b/xeofs/models/mca.py index ff777d6..89b33f7 100644 --- a/xeofs/models/mca.py +++ b/xeofs/models/mca.py @@ -1,4 +1,4 @@ -from typing import Tuple, Optional, Sequence, Self +from typing import Tuple, Optional, Sequence, Self, Dict import numpy as np import xarray as xr @@ -18,7 +18,7 @@ class MCA(_BaseCrossModel): Parameters ---------- - n_modes: int, default=10 + n_modes: int, default=2 Number of modes to calculate. center: bool, default=True Whether to center the input data. 
@@ -67,17 +67,17 @@ class MCA(_BaseCrossModel):
 
     def __init__(
        self,
-        n_modes=10,
-        center=True,
-        standardize=False,
-        use_coslat=False,
-        n_pca_modes=None,
-        compute=True,
-        sample_name="sample",
-        feature_name="feature",
-        solver="auto",
-        random_state=None,
-        solver_kwargs={},
+        n_modes: int = 2,
+        center: bool = True,
+        standardize: bool = False,
+        use_coslat: bool = False,
+        n_pca_modes: Optional[int] = None,
+        compute: bool = True,
+        sample_name: str = "sample",
+        feature_name: str = "feature",
+        solver: str = "auto",
+        random_state: Optional[int] = None,
+        solver_kwargs: Dict = {},
     ):
         super().__init__(
             n_modes=n_modes,
@@ -560,7 +560,7 @@ class ComplexMCA(MCA):
 
     Parameters
     ----------
-    n_modes: int, default=10
+    n_modes: int, default=2
         Number of modes to calculate.
     padding : str, optional
         Specifies the method used for padding the data prior to applying the Hilbert
@@ -624,19 +624,19 @@ class ComplexMCA(MCA):
 
     def __init__(
         self,
-        n_modes=10,
-        padding="exp",
-        decay_factor=0.2,
-        center=True,
-        standardize=False,
-        use_coslat=False,
-        n_pca_modes=None,
-        compute=True,
-        sample_name="sample",
-        feature_name="feature",
-        solver="auto",
-        random_state=None,
-        solver_kwargs={},
+        n_modes: int = 2,
+        padding: str = "exp",
+        decay_factor: float = 0.2,
+        center: bool = True,
+        standardize: bool = False,
+        use_coslat: bool = False,
+        n_pca_modes: Optional[int] = None,
+        compute: bool = True,
+        sample_name: str = "sample",
+        feature_name: str = "feature",
+        solver: str = "auto",
+        random_state: Optional[int] = None,
+        solver_kwargs: Dict = {},
     ):
         super().__init__(
             n_modes=n_modes,
diff --git a/xeofs/models/opa.py b/xeofs/models/opa.py
index e98266b..da25eb5 100644
--- a/xeofs/models/opa.py
+++ b/xeofs/models/opa.py
@@ -1,4 +1,4 @@
-from typing import Optional, Self
+from typing import Optional, Self, Dict
 
 import xarray as xr
 import numpy as np
@@ -66,18 +66,18 @@ class OPA(_BaseModel):
 
     def __init__(
         self,
-        n_modes,
-        tau_max,
-        center=True,
-        standardize=False,
-        use_coslat=False,
-        n_pca_modes=100,
-        compute=True,
-        sample_name="sample",
-        feature_name="feature",
-        solver="auto",
-        random_state=None,
-        solver_kwargs={},
+        n_modes: int,
+        tau_max: int,
+        center: bool = True,
+        standardize: bool = False,
+        use_coslat: bool = False,
+        n_pca_modes: int = 100,
+        compute: bool = True,
+        sample_name: str = "sample",
+        feature_name: str = "feature",
+        solver: str = "auto",
+        random_state: Optional[int] = None,
+        solver_kwargs: Dict = {},
     ):
         if n_modes > n_pca_modes:
             raise ValueError(

From 76c0b1bbe7f64d16b340ae10789f3df3234321b0 Mon Sep 17 00:00:00 2001
From: Niclas Rieger
Date: Mon, 23 Oct 2023 21:29:19 +0200
Subject: [PATCH 39/43] fix(CCA): add checks for edge cases

---
 tests/models/test_cca.py | 99 ++++++++++++++++++++++++++++++++++++++++
 xeofs/models/cca.py      | 65 ++++++++++++++++----------
 2 files changed, 140 insertions(+), 24 deletions(-)
 create mode 100644 tests/models/test_cca.py

diff --git a/tests/models/test_cca.py b/tests/models/test_cca.py
new file mode 100644
index 0000000..36e8e62
--- /dev/null
+++ b/tests/models/test_cca.py
@@ -0,0 +1,99 @@
+import numpy as np
+import xarray as xr
+import pytest
+import dask.array as da
+from cca_zoo.linear import MCCA as ReferenceCCA
+from cca_zoo.linear import PCACCA as ReferenceCCA2
+from numpy.testing import assert_allclose
+from ..conftest import generate_list_of_synthetic_dataarrays
+
+from xeofs.models.cca import CCA
+
+
+@pytest.mark.parametrize(
+    "dim",
+    [
+        (("time",)),
+        (("lat", "lon")),
+        (("lon", "lat")),
+    ],
+)
+def test_fit(dim, mock_data_array_list):
+    """Tests the fit method of the CCA class"""
+
+    cca = CCA()
+    cca.fit(mock_data_array_list, dim)
+
+    # Assert the required attributes have been set
+    assert hasattr(cca, "preprocessors")
+    assert hasattr(cca, "data")
+
+
+@pytest.mark.parametrize(
+    "dim",
+    [
+        (("time",)),
+        (("lat", "lon")),
+        (("lon", "lat")),
+    ],
+)
+def test_components(dim, mock_data_array_list):
+    """Tests the components method of the CCA class"""
+
+    cca = CCA()
+    cca.fit(mock_data_array_list, dim)
+
+    comps = cca.components()
+    assert isinstance(comps, list)
+
+
+@pytest.mark.parametrize(
+    "dim",
+    [
+        (("time",)),
+        (("lat", "lon")),
+        (("lon", "lat")),
+    ],
+)
+def test_scores(dim, mock_data_array_list):
+    """Tests the scores method of the CCA class"""
+
+    cca = CCA()
+    cca.fit(mock_data_array_list, dim)
+
+    scores = cca.scores()
+    assert isinstance(scores, list)
+
+
+@pytest.mark.parametrize(
+    "c",
+    [
+        (0.0),
+        (0.5),
+        (1.0),
+    ],
+)
+def test_solution(c):
+    """Check numerical results with cca-zoo reference implementation"""
+
+    dalist = generate_list_of_synthetic_dataarrays(
+        2, 1, 1, "index", "no_nan", "no_dask"
+    )
+    # Ensure that the numpy 2D arrays are in the correct format
+    Xlist = [X.transpose("sample0", "feature0").data for X in dalist]
+
+    cca = CCA(n_modes=2, pca=False, c=c)
+    cca.fit(dalist, dim="sample0")
+    comps = cca.components()
+    scores = cca.scores()
+
+    # Compare with cca-zoo
+    # cca-zoo requires centered data
+    Xlist = [X - X.mean(0) for X in Xlist]
+    cca_ref = ReferenceCCA(latent_dimensions=2, c=c)
+    scores_ref = cca_ref.fit_transform(Xlist)
+    comps_ref = cca_ref.factor_loadings(Xlist)
+
+    for i in range(len(scores)):
+        assert_allclose(abs(comps[i]), abs(comps_ref[i].T), rtol=1e-5)
+        assert_allclose(abs(scores[i]), abs(scores_ref[i].T), rtol=1e-5)
diff --git a/xeofs/models/cca.py b/xeofs/models/cca.py
index a41678c..cf01604 100644
--- a/xeofs/models/cca.py
+++ b/xeofs/models/cca.py
@@ -4,7 +4,7 @@
 
 The original code is licensed under the MIT License.
 
-Copyright (c) 2020 James Chapman
+Copyright (c) 2020-2023 James Chapman
 """
 
 from abc import abstractmethod
@@ -59,7 +59,7 @@ def __init__(
         self.n_modes = n_modes
         self.use_coslat = use_coslat
         self.pca = pca
-        self._compute = compute
+        self.compute = compute
         self.variance_fraction = variance_fraction
         self.init_pca_modes = init_pca_modes
 
@@ -69,7 +69,6 @@ def __init__(
             "sample_name": sample_name,
             "feature_name": feature_name,
             "with_std": False,
-            "with_weights": False,
         }
 
         # Define analysis-relevant meta data
@@ -169,13 +168,23 @@ def _apply_pca(self, views: DataList):
 
         view_transformed = []
 
         for i, view in enumerate(views):
-            pca = EOF(n_modes=n_pca_modes[i], compute=self._compute)
+            pca = EOF(n_modes=n_pca_modes[i], compute=self.compute)
             pca.fit(view, dim=self.sample_name)
+            if self.compute:
+                pca.compute()
             self.pca_models.append(pca)
 
             # TODO: method to get cumulative explained variance
             cum_exp_var_ratio = pca.explained_variance_ratio().cumsum()
-            if cum_exp_var_ratio.isel(mode=-1) < self.variance_fraction:
+            # Ensure that the sum of the explained variance ratio is always less than 1
+            # Due to rounding errors the total sum may be slightly larger than 1,
+            # which we counter by a small correction
+            cum_exp_var_ratio -= 1e-6
+            max_exp_var_ratio = cum_exp_var_ratio.isel(mode=-1).item()
+            if (
+                max_exp_var_ratio <= self.variance_fraction
+                and max_exp_var_ratio <= 0.9999
+            ):
                 print(
                     "Warning: variance fraction {:.4f} is not reached. ".format(
                         self.variance_fraction
                     )
                 )
             n_modes_keep = cum_exp_var_ratio.where(
-                cum_exp_var_ratio < self.variance_fraction, drop=True
+                cum_exp_var_ratio <= self.variance_fraction, drop=True
             ).size
+            if n_modes_keep == 0:
+                n_modes_keep += 1
 
             # TODO: it's more convenient to work with the common scaling of sklearn; provide additional parameter
             # provide this parameter to transform method as well
             scores = pca.scores().isel(mode=slice(0, n_modes_keep))
@@ -205,12 +217,15 @@ def _fit_algorithm(self, views: List[DataArray]) -> Self:
 
 
 class CCA(CCABaseModel):
-    r"""
+    r"""Canonical Correlation Analysis (CCA) model.
+
     Regularised CCA (canonical ridge) model.
 
-    This model adds a regularization term to the CCA objective function to avoid
-    overfitting and improve stability. It uses PCA to perform the optimization
-    efficiently for high dimensional data.
+    CCA identifies linear combinations of variables from multiple datasets that
+    maximize their mutual correlations. An optional regularisation parameter can be used to
+    improve the conditioning of the covariance matrix.
 
-    The objective function of regularised CCA is:
+    The objective function of (regularised) CCA is:
 
     .. math::
 
        w_{opt}=\underset{w}{\mathrm{argmax}}\{ w_1^TX_1^TX_2w_2  \}\\
 
        \text{subject to:}
 
        (1-c_1)w_1^TX_1^TX_1w_1+c_1w_1^Tw_1=n
@@ -222,7 +237,7 @@ class CCA(CCABaseModel):
 
     (1-c_2)w_2^TX_2^TX_2w_2+c_2w_2^Tw_2=n
 
-    where :math:`c_i` are the regularization parameters for each view.
+    where :math:`c_i` are the regularization parameters for each dataset.
 
     Parameters
     ----------
@@ -614,6 +629,14 @@ def _transform(self, views: Sequence[DataArray]) -> List[DataArray]:
         return transformed_views
 
     def transform(self, views: Sequence[DataObject]) -> List[DataArray]:
+        """Transform the input data into the canonical space.
+
+        Parameters
+        ----------
+        views : List[DataArray | Dataset]
+            Input data to transform
+
+        """
         view_preprocessed = []
         for i, view in enumerate(views):
             view_preprocessed = self.preprocessors[i].transform(view)
@@ -626,7 +649,8 @@ def transform(self, views: Sequence[DataObject]) -> List[DataArray]:
             unstacked_transformed_views.append(unstacked_view)
         return unstacked_transformed_views
 
-    def canonical_loadings(self, normalize: bool = True) -> List[DataObject]:
+    def components(self, normalize: bool = True) -> List[DataObject]:
+        """Get the canonical loadings for each view."""
         can_loads = self.data["canonical_loadings"]
         input_data = self.data["input_data"]
         variates = self.data["variates"]
@@ -651,7 +675,8 @@ def components(self, normalize: bool = True) -> List[DataObject]:
         ]
         return loadings
 
-    def canonical_variates(self) -> List[DataArray]:
+    def scores(self) -> List[DataArray]:
+        """Get the canonical variates for each view."""
         variates = []
         for i, view in enumerate(self.data["variates"]):
             vari = self.preprocessors[i].inverse_transform_scores(view)
             variates.append(vari)
         return variates
 
     def explained_variance(self) -> List[DataArray]:
+        """Get the explained variance for each view."""
         return self.data["explained_variance"]
 
     def explained_variance_ratio(self) -> List[DataArray]:
+        """Get the explained variance ratio for each view."""
         return self.data["explained_variance_ratio"]
 
     def explained_covariance(self) -> DataArray:
-        """
-        Calculates the covariance matrix of the transformed components for each view.
-
-        Parameters
-        ----------
-        views : list/tuple of numpy arrays or array likes with the same number of rows (samples)
-
-        Returns
-        -------
-        explained_covariances : list of numpy arrays
-            Covariance matrices for the transformed components of each view.
-        """
+        """Get the explained covariance."""
         return self.data["explained_covariance"]
 
     def explained_covariance_ratio(self) -> DataArray:
+        """Get the explained covariance ratio."""
         return self.data["explained_covariance_ratio"]
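
The _apply_pca hunk above guards two edge cases: the cumulative explained
variance overshooting 1.0 through floating-point rounding, and the truncation
rule selecting zero PCA modes. A minimal standalone sketch of that truncation
rule (plain NumPy, hypothetical numbers; not part of the patch itself):

    import numpy as np

    # Hypothetical explained-variance ratios from the PCA preprocessing step
    exp_var_ratio = np.array([0.60, 0.25, 0.10, 0.04, 0.01])

    # Small downward correction so rounding errors cannot push the total above 1
    cum_exp_var_ratio = exp_var_ratio.cumsum() - 1e-6

    variance_fraction = 0.99
    # Keep every mode whose cumulative ratio stays within the requested fraction
    n_modes_keep = int((cum_exp_var_ratio <= variance_fraction).sum())
    # Edge case guarded by the patch: never keep zero modes
    n_modes_keep = max(n_modes_keep, 1)  # -> 4 for these numbers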
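
The same patch renames canonical_loadings and canonical_variates to
components and scores, aligning CCA with the vocabulary of the other models.
A minimal usage sketch of the renamed interface (synthetic data; the dimension
names and the regularisation value are illustrative, and the import path
follows the test module above):

    import numpy as np
    import xarray as xr

    from xeofs.models.cca import CCA

    rng = np.random.default_rng(42)
    # Two "views" sharing the sample dimension "time"
    X = xr.DataArray(rng.standard_normal((100, 10)), dims=("time", "lon"),
                     coords={"time": np.arange(100), "lon": np.arange(10)})
    Y = xr.DataArray(rng.standard_normal((100, 15)), dims=("time", "station"),
                     coords={"time": np.arange(100), "station": np.arange(15)})

    cca = CCA(n_modes=2, c=0.5)  # moderate ridge regularisation
    cca.fit([X, Y], dim="time")

    loadings = cca.components()  # canonical loadings, one entry per view
    variates = cca.scores()      # canonical variates, one entry per view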

From 10faa036da1555b5fadbd714ccd529f4849bb2b9 Mon Sep 17 00:00:00 2001
From: Niclas Rieger
Date: Mon, 23 Oct 2023 21:47:51 +0200
Subject: [PATCH 40/43] build: py3.10 requires typing_extensions for Self

---
 xeofs/models/_base_cross_model.py            | 3 ++-
 xeofs/models/_base_model.py                  | 3 ++-
 xeofs/models/cca.py                          | 3 ++-
 xeofs/models/eof.py                          | 3 ++-
 xeofs/models/eof_rotator.py                  | 2 +-
 xeofs/models/gwpca.py                        | 3 ++-
 xeofs/models/mca.py                          | 3 ++-
 xeofs/models/opa.py                          | 3 ++-
 xeofs/preprocessing/concatenator.py          | 3 ++-
 xeofs/preprocessing/dimension_renamer.py     | 2 +-
 xeofs/preprocessing/list_processor.py        | 3 ++-
 xeofs/preprocessing/multi_index_converter.py | 4 ++--
 xeofs/preprocessing/sanitizer.py             | 4 ++--
 xeofs/preprocessing/scaler.py                | 3 ++-
 xeofs/preprocessing/stacker.py               | 3 ++-
 xeofs/preprocessing/transformer.py           | 3 ++-
 16 files changed, 30 insertions(+), 18 deletions(-)

diff --git a/xeofs/models/_base_cross_model.py b/xeofs/models/_base_cross_model.py
index 85467a2..5075e6c 100644
--- a/xeofs/models/_base_cross_model.py
+++ b/xeofs/models/_base_cross_model.py
@@ -1,4 +1,5 @@
-from typing import Tuple, Hashable, Sequence, Dict, Self, Optional, List
+from typing import Tuple, Hashable, Sequence, Dict, Optional, List
+from typing_extensions import Self
 
 from abc import ABC, abstractmethod
 from datetime import datetime
diff --git a/xeofs/models/_base_model.py b/xeofs/models/_base_model.py
index 42350ae..bbf0639 100644
--- a/xeofs/models/_base_model.py
+++ b/xeofs/models/_base_model.py
@@ -1,5 +1,6 @@
 import warnings
-from typing import Optional, Sequence, Hashable, Dict, Any, Self, List, TypeVar, Tuple
+from typing import Optional, Sequence, Hashable, Dict, Any, List, TypeVar, Tuple
+from typing_extensions import Self
 
 from abc import ABC, abstractmethod
 from datetime import datetime
diff --git a/xeofs/models/cca.py b/xeofs/models/cca.py
index cf01604..0d3029e 100644
--- a/xeofs/models/cca.py
+++ b/xeofs/models/cca.py
@@ -9,7 +9,8 @@
 
 from abc import abstractmethod
 from datetime import datetime
-from typing import Sequence, Self, List, Hashable
+from typing import Sequence, List, Hashable
+from typing_extensions import Self
 
 import dask.array as da
 import numpy as np
diff --git a/xeofs/models/eof.py b/xeofs/models/eof.py
index 3265be5..027094a 100644
--- a/xeofs/models/eof.py
+++ b/xeofs/models/eof.py
@@ -1,4 +1,5 @@
-from typing import Self, Optional, Dict
+from typing import Optional, Dict
+from typing_extensions import Self
 
 import numpy as np
 import xarray as xr
diff --git a/xeofs/models/eof_rotator.py b/xeofs/models/eof_rotator.py
index 5af1535..fa97d3c 100644
--- a/xeofs/models/eof_rotator.py
+++ b/xeofs/models/eof_rotator.py
@@ -1,7 +1,7 @@
 from datetime import datetime
 import numpy as np
 import xarray as xr
-from typing import Self
+from typing_extensions import Self
 
 from .eof import EOF, ComplexEOF
 from ..data_container import DataContainer
diff --git a/xeofs/models/gwpca.py b/xeofs/models/gwpca.py
index e5d202f..1293077 100644
--- a/xeofs/models/gwpca.py
+++ b/xeofs/models/gwpca.py
@@ -1,4 +1,5 @@
-from typing import Self, List, Sequence, Hashable, Optional, Callable
+from typing import Sequence, Hashable, Optional, Callable
+from typing_extensions import Self
 
 from sklearn.utils.extmath import randomized_svd
diff --git a/xeofs/models/mca.py b/xeofs/models/mca.py
index 89b33f7..f4e26b6 100644
--- a/xeofs/models/mca.py
+++ b/xeofs/models/mca.py
@@ -1,4 +1,5 @@
-from typing import Tuple, Optional, Sequence, Self, Dict
+from typing import Tuple, Optional, Sequence, Dict
+from typing_extensions import Self
 
 import numpy as np
 import xarray as xr
diff --git a/xeofs/models/opa.py b/xeofs/models/opa.py
index da25eb5..6db9916 100644
--- a/xeofs/models/opa.py
+++ b/xeofs/models/opa.py
@@ -1,4 +1,5 @@
-from typing import Optional, Self, Dict
+from typing import Optional, Dict
+from typing_extensions import Self
 
 import xarray as xr
 import numpy as np
diff --git a/xeofs/preprocessing/concatenator.py b/xeofs/preprocessing/concatenator.py
index 1e138dc..1eaf893 100644
--- a/xeofs/preprocessing/concatenator.py
+++ b/xeofs/preprocessing/concatenator.py
@@ -1,4 +1,5 @@
-from typing import List, Self, Optional
+from typing import List, Optional
+from typing_extensions import Self
 
 import pandas as pd
 import numpy as np
diff --git a/xeofs/preprocessing/dimension_renamer.py b/xeofs/preprocessing/dimension_renamer.py
index 7647ba7..7824173 100644
--- a/xeofs/preprocessing/dimension_renamer.py
+++ b/xeofs/preprocessing/dimension_renamer.py
@@ -1,4 +1,4 @@
-from typing import Self
+from typing_extensions import Self
 
 from .transformer import Transformer
 from ..utils.data_types import Dims, DataArray, DataSet, Data, DataVar, DataVarBound
diff --git a/xeofs/preprocessing/list_processor.py b/xeofs/preprocessing/list_processor.py
index 1d413df..d7e910b 100644
--- a/xeofs/preprocessing/list_processor.py
+++ b/xeofs/preprocessing/list_processor.py
@@ -1,4 +1,5 @@
-from typing import List, Self, TypeVar, Generic, Type, Dict, Any
+from typing import List, TypeVar, Generic, Type, Dict, Any
+from typing_extensions import Self
 
 from .dimension_renamer import DimensionRenamer
 from .scaler import Scaler
diff --git a/xeofs/preprocessing/multi_index_converter.py b/xeofs/preprocessing/multi_index_converter.py
index 626bb15..74c497a 100644
--- a/xeofs/preprocessing/multi_index_converter.py
+++ b/xeofs/preprocessing/multi_index_converter.py
@@ -1,5 +1,5 @@
-from typing import List, Self, Optional
-
+from typing import List, Optional
+from typing_extensions import Self
 import pandas as pd
 
 from .transformer import Transformer
diff --git a/xeofs/preprocessing/sanitizer.py b/xeofs/preprocessing/sanitizer.py
index 8b9ad95..f9e05a3 100644
--- a/xeofs/preprocessing/sanitizer.py
+++ b/xeofs/preprocessing/sanitizer.py
@@ -1,5 +1,5 @@
-from typing import Self, Optional
-
+from typing import Optional
+from typing_extensions import Self
 import xarray as xr
 
 from .transformer import Transformer
diff --git a/xeofs/preprocessing/scaler.py b/xeofs/preprocessing/scaler.py
index d90143a..c7eb920 100644
--- a/xeofs/preprocessing/scaler.py
+++ b/xeofs/preprocessing/scaler.py
@@ -1,4 +1,5 @@
-from typing import Optional, Self
+from typing import Optional
+from typing_extensions import Self
 
 import numpy as np
 import xarray as xr
diff --git a/xeofs/preprocessing/stacker.py b/xeofs/preprocessing/stacker.py
index 975d450..3f599b5 100644
--- a/xeofs/preprocessing/stacker.py
+++ b/xeofs/preprocessing/stacker.py
@@ -1,5 +1,6 @@
 from abc import abstractmethod
-from typing import List, Optional, Self, Type
+from typing import List, Optional, Type
+from typing_extensions import Self
 
 import numpy as np
 import pandas as pd
diff --git a/xeofs/preprocessing/transformer.py b/xeofs/preprocessing/transformer.py
index 9ccc74a..26e33d5 100644
--- a/xeofs/preprocessing/transformer.py
+++ b/xeofs/preprocessing/transformer.py
@@ -1,4 +1,5 @@
-from typing import Self, Optional +from typing import Optional +from typing_extensions import Self from abc import abstractmethod from sklearn.base import BaseEstimator, TransformerMixin From 47e6909f88fc15ae1891ac5b15d76020c179299b Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Mon, 23 Oct 2023 21:55:43 +0200 Subject: [PATCH 41/43] build: add cca-zoo as dev dependency --- poetry.lock | 965 ++++++++++++++++++++++++++++++++++++++++++++++++- pyproject.toml | 1 + 2 files changed, 961 insertions(+), 5 deletions(-) diff --git a/poetry.lock b/poetry.lock index 973645c..1f9e470 100644 --- a/poetry.lock +++ b/poetry.lock @@ -14,6 +14,128 @@ files = [ [package.dependencies] pygments = ">=1.5" +[[package]] +name = "aiohttp" +version = "3.8.6" +description = "Async http client/server framework (asyncio)" +optional = false +python-versions = ">=3.6" +files = [ + {file = "aiohttp-3.8.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:41d55fc043954cddbbd82503d9cc3f4814a40bcef30b3569bc7b5e34130718c1"}, + {file = "aiohttp-3.8.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1d84166673694841d8953f0a8d0c90e1087739d24632fe86b1a08819168b4566"}, + {file = "aiohttp-3.8.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:253bf92b744b3170eb4c4ca2fa58f9c4b87aeb1df42f71d4e78815e6e8b73c9e"}, + {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3fd194939b1f764d6bb05490987bfe104287bbf51b8d862261ccf66f48fb4096"}, + {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c5f938d199a6fdbdc10bbb9447496561c3a9a565b43be564648d81e1102ac22"}, + {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2817b2f66ca82ee699acd90e05c95e79bbf1dc986abb62b61ec8aaf851e81c93"}, + {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fa375b3d34e71ccccf172cab401cd94a72de7a8cc01847a7b3386204093bb47"}, + {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9de50a199b7710fa2904be5a4a9b51af587ab24c8e540a7243ab737b45844543"}, + {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e1d8cb0b56b3587c5c01de3bf2f600f186da7e7b5f7353d1bf26a8ddca57f965"}, + {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8e31e9db1bee8b4f407b77fd2507337a0a80665ad7b6c749d08df595d88f1cf5"}, + {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:7bc88fc494b1f0311d67f29fee6fd636606f4697e8cc793a2d912ac5b19aa38d"}, + {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ec00c3305788e04bf6d29d42e504560e159ccaf0be30c09203b468a6c1ccd3b2"}, + {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ad1407db8f2f49329729564f71685557157bfa42b48f4b93e53721a16eb813ed"}, + {file = "aiohttp-3.8.6-cp310-cp310-win32.whl", hash = "sha256:ccc360e87341ad47c777f5723f68adbb52b37ab450c8bc3ca9ca1f3e849e5fe2"}, + {file = "aiohttp-3.8.6-cp310-cp310-win_amd64.whl", hash = "sha256:93c15c8e48e5e7b89d5cb4613479d144fda8344e2d886cf694fd36db4cc86865"}, + {file = "aiohttp-3.8.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6e2f9cc8e5328f829f6e1fb74a0a3a939b14e67e80832975e01929e320386b34"}, + {file = "aiohttp-3.8.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e6a00ffcc173e765e200ceefb06399ba09c06db97f401f920513a10c803604ca"}, + {file = "aiohttp-3.8.6-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:41bdc2ba359032e36c0e9de5a3bd00d6fb7ea558a6ce6b70acedf0da86458321"}, + {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14cd52ccf40006c7a6cd34a0f8663734e5363fd981807173faf3a017e202fec9"}, + {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2d5b785c792802e7b275c420d84f3397668e9d49ab1cb52bd916b3b3ffcf09ad"}, + {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1bed815f3dc3d915c5c1e556c397c8667826fbc1b935d95b0ad680787896a358"}, + {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96603a562b546632441926cd1293cfcb5b69f0b4159e6077f7c7dbdfb686af4d"}, + {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d76e8b13161a202d14c9584590c4df4d068c9567c99506497bdd67eaedf36403"}, + {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e3f1e3f1a1751bb62b4a1b7f4e435afcdade6c17a4fd9b9d43607cebd242924a"}, + {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:76b36b3124f0223903609944a3c8bf28a599b2cc0ce0be60b45211c8e9be97f8"}, + {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:a2ece4af1f3c967a4390c284797ab595a9f1bc1130ef8b01828915a05a6ae684"}, + {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:16d330b3b9db87c3883e565340d292638a878236418b23cc8b9b11a054aaa887"}, + {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:42c89579f82e49db436b69c938ab3e1559e5a4409eb8639eb4143989bc390f2f"}, + {file = "aiohttp-3.8.6-cp311-cp311-win32.whl", hash = "sha256:efd2fcf7e7b9d7ab16e6b7d54205beded0a9c8566cb30f09c1abe42b4e22bdcb"}, + {file = "aiohttp-3.8.6-cp311-cp311-win_amd64.whl", hash = "sha256:3b2ab182fc28e7a81f6c70bfbd829045d9480063f5ab06f6e601a3eddbbd49a0"}, + {file = "aiohttp-3.8.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:fdee8405931b0615220e5ddf8cd7edd8592c606a8e4ca2a00704883c396e4479"}, + {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d25036d161c4fe2225d1abff2bd52c34ed0b1099f02c208cd34d8c05729882f0"}, + {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5d791245a894be071d5ab04bbb4850534261a7d4fd363b094a7b9963e8cdbd31"}, + {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0cccd1de239afa866e4ce5c789b3032442f19c261c7d8a01183fd956b1935349"}, + {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f13f60d78224f0dace220d8ab4ef1dbc37115eeeab8c06804fec11bec2bbd07"}, + {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8a9b5a0606faca4f6cc0d338359d6fa137104c337f489cd135bb7fbdbccb1e39"}, + {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:13da35c9ceb847732bf5c6c5781dcf4780e14392e5d3b3c689f6d22f8e15ae31"}, + {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:4d4cbe4ffa9d05f46a28252efc5941e0462792930caa370a6efaf491f412bc66"}, + {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:229852e147f44da0241954fc6cb910ba074e597f06789c867cb7fb0621e0ba7a"}, + {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_s390x.whl", hash = 
"sha256:713103a8bdde61d13490adf47171a1039fd880113981e55401a0f7b42c37d071"}, + {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:45ad816b2c8e3b60b510f30dbd37fe74fd4a772248a52bb021f6fd65dff809b6"}, + {file = "aiohttp-3.8.6-cp36-cp36m-win32.whl", hash = "sha256:2b8d4e166e600dcfbff51919c7a3789ff6ca8b3ecce16e1d9c96d95dd569eb4c"}, + {file = "aiohttp-3.8.6-cp36-cp36m-win_amd64.whl", hash = "sha256:0912ed87fee967940aacc5306d3aa8ba3a459fcd12add0b407081fbefc931e53"}, + {file = "aiohttp-3.8.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e2a988a0c673c2e12084f5e6ba3392d76c75ddb8ebc6c7e9ead68248101cd446"}, + {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebf3fd9f141700b510d4b190094db0ce37ac6361a6806c153c161dc6c041ccda"}, + {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3161ce82ab85acd267c8f4b14aa226047a6bee1e4e6adb74b798bd42c6ae1f80"}, + {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d95fc1bf33a9a81469aa760617b5971331cdd74370d1214f0b3109272c0e1e3c"}, + {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c43ecfef7deaf0617cee936836518e7424ee12cb709883f2c9a1adda63cc460"}, + {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca80e1b90a05a4f476547f904992ae81eda5c2c85c66ee4195bb8f9c5fb47f28"}, + {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:90c72ebb7cb3a08a7f40061079817133f502a160561d0675b0a6adf231382c92"}, + {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:bb54c54510e47a8c7c8e63454a6acc817519337b2b78606c4e840871a3e15349"}, + {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:de6a1c9f6803b90e20869e6b99c2c18cef5cc691363954c93cb9adeb26d9f3ae"}, + {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:a3628b6c7b880b181a3ae0a0683698513874df63783fd89de99b7b7539e3e8a8"}, + {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:fc37e9aef10a696a5a4474802930079ccfc14d9f9c10b4662169671ff034b7df"}, + {file = "aiohttp-3.8.6-cp37-cp37m-win32.whl", hash = "sha256:f8ef51e459eb2ad8e7a66c1d6440c808485840ad55ecc3cafefadea47d1b1ba2"}, + {file = "aiohttp-3.8.6-cp37-cp37m-win_amd64.whl", hash = "sha256:b2fe42e523be344124c6c8ef32a011444e869dc5f883c591ed87f84339de5976"}, + {file = "aiohttp-3.8.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:9e2ee0ac5a1f5c7dd3197de309adfb99ac4617ff02b0603fd1e65b07dc772e4b"}, + {file = "aiohttp-3.8.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:01770d8c04bd8db568abb636c1fdd4f7140b284b8b3e0b4584f070180c1e5c62"}, + {file = "aiohttp-3.8.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3c68330a59506254b556b99a91857428cab98b2f84061260a67865f7f52899f5"}, + {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89341b2c19fb5eac30c341133ae2cc3544d40d9b1892749cdd25892bbc6ac951"}, + {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71783b0b6455ac8f34b5ec99d83e686892c50498d5d00b8e56d47f41b38fbe04"}, + {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f628dbf3c91e12f4d6c8b3f092069567d8eb17814aebba3d7d60c149391aee3a"}, + {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:b04691bc6601ef47c88f0255043df6f570ada1a9ebef99c34bd0b72866c217ae"}, + {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ee912f7e78287516df155f69da575a0ba33b02dd7c1d6614dbc9463f43066e3"}, + {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9c19b26acdd08dd239e0d3669a3dddafd600902e37881f13fbd8a53943079dbc"}, + {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:99c5ac4ad492b4a19fc132306cd57075c28446ec2ed970973bbf036bcda1bcc6"}, + {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:f0f03211fd14a6a0aed2997d4b1c013d49fb7b50eeb9ffdf5e51f23cfe2c77fa"}, + {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:8d399dade330c53b4106160f75f55407e9ae7505263ea86f2ccca6bfcbdb4921"}, + {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ec4fd86658c6a8964d75426517dc01cbf840bbf32d055ce64a9e63a40fd7b771"}, + {file = "aiohttp-3.8.6-cp38-cp38-win32.whl", hash = "sha256:33164093be11fcef3ce2571a0dccd9041c9a93fa3bde86569d7b03120d276c6f"}, + {file = "aiohttp-3.8.6-cp38-cp38-win_amd64.whl", hash = "sha256:bdf70bfe5a1414ba9afb9d49f0c912dc524cf60141102f3a11143ba3d291870f"}, + {file = "aiohttp-3.8.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d52d5dc7c6682b720280f9d9db41d36ebe4791622c842e258c9206232251ab2b"}, + {file = "aiohttp-3.8.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4ac39027011414dbd3d87f7edb31680e1f430834c8cef029f11c66dad0670aa5"}, + {file = "aiohttp-3.8.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3f5c7ce535a1d2429a634310e308fb7d718905487257060e5d4598e29dc17f0b"}, + {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b30e963f9e0d52c28f284d554a9469af073030030cef8693106d918b2ca92f54"}, + {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:918810ef188f84152af6b938254911055a72e0f935b5fbc4c1a4ed0b0584aed1"}, + {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:002f23e6ea8d3dd8d149e569fd580c999232b5fbc601c48d55398fbc2e582e8c"}, + {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4fcf3eabd3fd1a5e6092d1242295fa37d0354b2eb2077e6eb670accad78e40e1"}, + {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:255ba9d6d5ff1a382bb9a578cd563605aa69bec845680e21c44afc2670607a95"}, + {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d67f8baed00870aa390ea2590798766256f31dc5ed3ecc737debb6e97e2ede78"}, + {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:86f20cee0f0a317c76573b627b954c412ea766d6ada1a9fcf1b805763ae7feeb"}, + {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:39a312d0e991690ccc1a61f1e9e42daa519dcc34ad03eb6f826d94c1190190dd"}, + {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:e827d48cf802de06d9c935088c2924e3c7e7533377d66b6f31ed175c1620e05e"}, + {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bd111d7fc5591ddf377a408ed9067045259ff2770f37e2d94e6478d0f3fc0c17"}, + {file = "aiohttp-3.8.6-cp39-cp39-win32.whl", hash = "sha256:caf486ac1e689dda3502567eb89ffe02876546599bbf915ec94b1fa424eeffd4"}, + {file = "aiohttp-3.8.6-cp39-cp39-win_amd64.whl", hash = "sha256:3f0e27e5b733803333bb2371249f41cf42bae8884863e8e8965ec69bebe53132"}, + 
{file = "aiohttp-3.8.6.tar.gz", hash = "sha256:b0cf2a4501bff9330a8a5248b4ce951851e415bdcce9dc158e76cfd55e15085c"}, +] + +[package.dependencies] +aiosignal = ">=1.1.2" +async-timeout = ">=4.0.0a3,<5.0" +attrs = ">=17.3.0" +charset-normalizer = ">=2.0,<4.0" +frozenlist = ">=1.1.1" +multidict = ">=4.5,<7.0" +yarl = ">=1.0,<2.0" + +[package.extras] +speedups = ["Brotli", "aiodns", "cchardet"] + +[[package]] +name = "aiosignal" +version = "1.3.1" +description = "aiosignal: a list of registered asynchronous callbacks" +optional = false +python-versions = ">=3.7" +files = [ + {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, + {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, +] + +[package.dependencies] +frozenlist = ">=1.1.0" + [[package]] name = "alabaster" version = "0.7.13" @@ -25,11 +147,22 @@ files = [ {file = "alabaster-0.7.13.tar.gz", hash = "sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"}, ] +[[package]] +name = "async-timeout" +version = "4.0.3" +description = "Timeout context manager for asyncio programs" +optional = false +python-versions = ">=3.7" +files = [ + {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, + {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, +] + [[package]] name = "attrs" version = "23.1.0" description = "Classes Without Boilerplate" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"}, @@ -135,6 +268,29 @@ webencodings = "*" [package.extras] css = ["tinycss2 (>=1.1.0,<1.2)"] +[[package]] +name = "cca-zoo" +version = "2.3.11" +description = "Canonical Correlation Analysis Zoo: A collection of Regularized, Deep Learning based, Kernel, and Probabilistic methods in a scikit-learn style framework" +optional = false +python-versions = ">=3.8,<4.0.0" +files = [ + {file = "cca_zoo-2.3.11-py3-none-any.whl", hash = "sha256:378b8e5b3f3a7a39b2ac738b84beb0abf2129bfc25c94f713abc0a3cf0214814"}, + {file = "cca_zoo-2.3.11.tar.gz", hash = "sha256:21411d38de49b102e27cce52692b4785ef4ddd4be96d18b365dee8d6af23dce2"}, +] + +[package.dependencies] +joblib = "*" +lightning = "*" +mvlearn = "*" +numpy = "*" +pandas = "*" +scikit-learn = ">=1.2.2,<2.0.0" +scipy = "*" +tensorly = "*" +torch = {version = ">=2.0.1,<3.0.0", markers = "sys_platform == \"darwin\" or sys_platform == \"linux\" or sys_platform == \"win32\""} +tqdm = "*" + [[package]] name = "certifi" version = "2023.7.22" @@ -437,6 +593,21 @@ files = [ [package.extras] toml = ["tomli"] +[[package]] +name = "cycler" +version = "0.12.1" +description = "Composable style cycles" +optional = false +python-versions = ">=3.8" +files = [ + {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"}, + {file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"}, +] + +[package.extras] +docs = ["ipython", "matplotlib", "numpydoc", "sphinx"] +tests = ["pytest", "pytest-cov", "pytest-xdist"] + [[package]] name = "dask" version = "2023.7.1" @@ -516,6 +687,22 @@ files = [ [package.extras] devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benchmark", 
"pytest-cache", "validictory"] +[[package]] +name = "filelock" +version = "3.12.4" +description = "A platform independent file lock." +optional = false +python-versions = ">=3.8" +files = [ + {file = "filelock-3.12.4-py3-none-any.whl", hash = "sha256:08c21d87ded6e2b9da6728c3dff51baf1dcecf973b768ef35bcbc3447edb9ad4"}, + {file = "filelock-3.12.4.tar.gz", hash = "sha256:2e6f249f1f3654291606e046b09f1fd5eac39b360664c27f5aad072012f8bcbd"}, +] + +[package.extras] +docs = ["furo (>=2023.7.26)", "sphinx (>=7.1.2)", "sphinx-autodoc-typehints (>=1.24)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.3)", "diff-cover (>=7.7)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)", "pytest-timeout (>=2.1)"] +typing = ["typing-extensions (>=4.7.1)"] + [[package]] name = "flake8" version = "4.0.1" @@ -532,6 +719,76 @@ mccabe = ">=0.6.0,<0.7.0" pycodestyle = ">=2.8.0,<2.9.0" pyflakes = ">=2.4.0,<2.5.0" +[[package]] +name = "frozenlist" +version = "1.4.0" +description = "A list-like structure which implements collections.abc.MutableSequence" +optional = false +python-versions = ">=3.8" +files = [ + {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:764226ceef3125e53ea2cb275000e309c0aa5464d43bd72abd661e27fffc26ab"}, + {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d6484756b12f40003c6128bfcc3fa9f0d49a687e171186c2d85ec82e3758c559"}, + {file = "frozenlist-1.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9ac08e601308e41eb533f232dbf6b7e4cea762f9f84f6357136eed926c15d12c"}, + {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d081f13b095d74b67d550de04df1c756831f3b83dc9881c38985834387487f1b"}, + {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71932b597f9895f011f47f17d6428252fc728ba2ae6024e13c3398a087c2cdea"}, + {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:981b9ab5a0a3178ff413bca62526bb784249421c24ad7381e39d67981be2c326"}, + {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e41f3de4df3e80de75845d3e743b3f1c4c8613c3997a912dbf0229fc61a8b963"}, + {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6918d49b1f90821e93069682c06ffde41829c346c66b721e65a5c62b4bab0300"}, + {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e5c8764c7829343d919cc2dfc587a8db01c4f70a4ebbc49abde5d4b158b007b"}, + {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8d0edd6b1c7fb94922bf569c9b092ee187a83f03fb1a63076e7774b60f9481a8"}, + {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e29cda763f752553fa14c68fb2195150bfab22b352572cb36c43c47bedba70eb"}, + {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:0c7c1b47859ee2cac3846fde1c1dc0f15da6cec5a0e5c72d101e0f83dcb67ff9"}, + {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:901289d524fdd571be1c7be054f48b1f88ce8dddcbdf1ec698b27d4b8b9e5d62"}, + {file = "frozenlist-1.4.0-cp310-cp310-win32.whl", hash = "sha256:1a0848b52815006ea6596c395f87449f693dc419061cc21e970f139d466dc0a0"}, + {file = "frozenlist-1.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:b206646d176a007466358aa21d85cd8600a415c67c9bd15403336c331a10d956"}, + {file = 
"frozenlist-1.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:de343e75f40e972bae1ef6090267f8260c1446a1695e77096db6cfa25e759a95"}, + {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad2a9eb6d9839ae241701d0918f54c51365a51407fd80f6b8289e2dfca977cc3"}, + {file = "frozenlist-1.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd7bd3b3830247580de99c99ea2a01416dfc3c34471ca1298bccabf86d0ff4dc"}, + {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdf1847068c362f16b353163391210269e4f0569a3c166bc6a9f74ccbfc7e839"}, + {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38461d02d66de17455072c9ba981d35f1d2a73024bee7790ac2f9e361ef1cd0c"}, + {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5a32087d720c608f42caed0ef36d2b3ea61a9d09ee59a5142d6070da9041b8f"}, + {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dd65632acaf0d47608190a71bfe46b209719bf2beb59507db08ccdbe712f969b"}, + {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261b9f5d17cac914531331ff1b1d452125bf5daa05faf73b71d935485b0c510b"}, + {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b89ac9768b82205936771f8d2eb3ce88503b1556324c9f903e7156669f521472"}, + {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:008eb8b31b3ea6896da16c38c1b136cb9fec9e249e77f6211d479db79a4eaf01"}, + {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e74b0506fa5aa5598ac6a975a12aa8928cbb58e1f5ac8360792ef15de1aa848f"}, + {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:490132667476f6781b4c9458298b0c1cddf237488abd228b0b3650e5ecba7467"}, + {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:76d4711f6f6d08551a7e9ef28c722f4a50dd0fc204c56b4bcd95c6cc05ce6fbb"}, + {file = "frozenlist-1.4.0-cp311-cp311-win32.whl", hash = "sha256:a02eb8ab2b8f200179b5f62b59757685ae9987996ae549ccf30f983f40602431"}, + {file = "frozenlist-1.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:515e1abc578dd3b275d6a5114030b1330ba044ffba03f94091842852f806f1c1"}, + {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f0ed05f5079c708fe74bf9027e95125334b6978bf07fd5ab923e9e55e5fbb9d3"}, + {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ca265542ca427bf97aed183c1676e2a9c66942e822b14dc6e5f42e038f92a503"}, + {file = "frozenlist-1.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:491e014f5c43656da08958808588cc6c016847b4360e327a62cb308c791bd2d9"}, + {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17ae5cd0f333f94f2e03aaf140bb762c64783935cc764ff9c82dff626089bebf"}, + {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e78fb68cf9c1a6aa4a9a12e960a5c9dfbdb89b3695197aa7064705662515de2"}, + {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5655a942f5f5d2c9ed93d72148226d75369b4f6952680211972a33e59b1dfdc"}, + {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c11b0746f5d946fecf750428a95f3e9ebe792c1ee3b1e96eeba145dc631a9672"}, + {file 
= "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e66d2a64d44d50d2543405fb183a21f76b3b5fd16f130f5c99187c3fb4e64919"}, + {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:88f7bc0fcca81f985f78dd0fa68d2c75abf8272b1f5c323ea4a01a4d7a614efc"}, + {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5833593c25ac59ede40ed4de6d67eb42928cca97f26feea219f21d0ed0959b79"}, + {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:fec520865f42e5c7f050c2a79038897b1c7d1595e907a9e08e3353293ffc948e"}, + {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:b826d97e4276750beca7c8f0f1a4938892697a6bcd8ec8217b3312dad6982781"}, + {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ceb6ec0a10c65540421e20ebd29083c50e6d1143278746a4ef6bcf6153171eb8"}, + {file = "frozenlist-1.4.0-cp38-cp38-win32.whl", hash = "sha256:2b8bcf994563466db019fab287ff390fffbfdb4f905fc77bc1c1d604b1c689cc"}, + {file = "frozenlist-1.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:a6c8097e01886188e5be3e6b14e94ab365f384736aa1fca6a0b9e35bd4a30bc7"}, + {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6c38721585f285203e4b4132a352eb3daa19121a035f3182e08e437cface44bf"}, + {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a0c6da9aee33ff0b1a451e867da0c1f47408112b3391dd43133838339e410963"}, + {file = "frozenlist-1.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93ea75c050c5bb3d98016b4ba2497851eadf0ac154d88a67d7a6816206f6fa7f"}, + {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f61e2dc5ad442c52b4887f1fdc112f97caeff4d9e6ebe78879364ac59f1663e1"}, + {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa384489fefeb62321b238e64c07ef48398fe80f9e1e6afeff22e140e0850eef"}, + {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10ff5faaa22786315ef57097a279b833ecab1a0bfb07d604c9cbb1c4cdc2ed87"}, + {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:007df07a6e3eb3e33e9a1fe6a9db7af152bbd8a185f9aaa6ece10a3529e3e1c6"}, + {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f4f399d28478d1f604c2ff9119907af9726aed73680e5ed1ca634d377abb087"}, + {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c5374b80521d3d3f2ec5572e05adc94601985cc526fb276d0c8574a6d749f1b3"}, + {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ce31ae3e19f3c902de379cf1323d90c649425b86de7bbdf82871b8a2a0615f3d"}, + {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7211ef110a9194b6042449431e08c4d80c0481e5891e58d429df5899690511c2"}, + {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:556de4430ce324c836789fa4560ca62d1591d2538b8ceb0b4f68fb7b2384a27a"}, + {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7645a8e814a3ee34a89c4a372011dcd817964ce8cb273c8ed6119d706e9613e3"}, + {file = "frozenlist-1.4.0-cp39-cp39-win32.whl", hash = "sha256:19488c57c12d4e8095a922f328df3f179c820c212940a498623ed39160bc3c2f"}, + {file = "frozenlist-1.4.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:6221d84d463fb110bdd7619b69cb43878a11d51cbb9394ae3105d082d5199167"}, + {file = "frozenlist-1.4.0.tar.gz", hash = "sha256:09163bdf0b2907454042edb19f887c6d33806adc71fbd54afc14908bfdc22251"}, +] + [[package]] name = "fsspec" version = "2023.6.0" @@ -543,6 +800,10 @@ files = [ {file = "fsspec-2023.6.0.tar.gz", hash = "sha256:d0b2f935446169753e7a5c5c55681c54ea91996cc67be93c39a154fb3a2742af"}, ] +[package.dependencies] +aiohttp = {version = "<4.0.0a0 || >4.0.0a0,<4.0.0a1 || >4.0.0a1", optional = true, markers = "extra == \"http\""} +requests = {version = "*", optional = true, markers = "extra == \"http\""} + [package.extras] abfs = ["adlfs"] adl = ["adlfs"] @@ -623,7 +884,7 @@ files = [ name = "jinja2" version = "3.1.2" description = "A very fast and expressive template engine." -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, @@ -735,6 +996,200 @@ files = [ {file = "jupyterlab_pygments-0.2.2.tar.gz", hash = "sha256:7405d7fde60819d905a9fa8ce89e4cd830e318cdad22a0030f7a901da705585d"}, ] +[[package]] +name = "kiwisolver" +version = "1.4.5" +description = "A fast implementation of the Cassowary constraint solver" +optional = false +python-versions = ">=3.7" +files = [ + {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:05703cf211d585109fcd72207a31bb170a0f22144d68298dc5e61b3c946518af"}, + {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:146d14bebb7f1dc4d5fbf74f8a6cb15ac42baadee8912eb84ac0b3b2a3dc6ac3"}, + {file = "kiwisolver-1.4.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ef7afcd2d281494c0a9101d5c571970708ad911d028137cd558f02b851c08b4"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9eaa8b117dc8337728e834b9c6e2611f10c79e38f65157c4c38e9400286f5cb1"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ec20916e7b4cbfb1f12380e46486ec4bcbaa91a9c448b97023fde0d5bbf9e4ff"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39b42c68602539407884cf70d6a480a469b93b81b7701378ba5e2328660c847a"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa12042de0171fad672b6c59df69106d20d5596e4f87b5e8f76df757a7c399aa"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a40773c71d7ccdd3798f6489aaac9eee213d566850a9533f8d26332d626b82c"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:19df6e621f6d8b4b9c4d45f40a66839294ff2bb235e64d2178f7522d9170ac5b"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:83d78376d0d4fd884e2c114d0621624b73d2aba4e2788182d286309ebdeed770"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e391b1f0a8a5a10ab3b9bb6afcfd74f2175f24f8975fb87ecae700d1503cdee0"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:852542f9481f4a62dbb5dd99e8ab7aedfeb8fb6342349a181d4036877410f525"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59edc41b24031bc25108e210c0def6f6c2191210492a972d585a06ff246bb79b"}, + {file = "kiwisolver-1.4.5-cp310-cp310-win32.whl", hash = "sha256:a6aa6315319a052b4ee378aa171959c898a6183f15c1e541821c5c59beaa0238"}, + {file = 
"kiwisolver-1.4.5-cp310-cp310-win_amd64.whl", hash = "sha256:d0ef46024e6a3d79c01ff13801cb19d0cad7fd859b15037aec74315540acc276"}, + {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:11863aa14a51fd6ec28688d76f1735f8f69ab1fabf388851a595d0721af042f5"}, + {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8ab3919a9997ab7ef2fbbed0cc99bb28d3c13e6d4b1ad36e97e482558a91be90"}, + {file = "kiwisolver-1.4.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fcc700eadbbccbf6bc1bcb9dbe0786b4b1cb91ca0dcda336eef5c2beed37b797"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dfdd7c0b105af050eb3d64997809dc21da247cf44e63dc73ff0fd20b96be55a9"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76c6a5964640638cdeaa0c359382e5703e9293030fe730018ca06bc2010c4437"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bbea0db94288e29afcc4c28afbf3a7ccaf2d7e027489c449cf7e8f83c6346eb9"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ceec1a6bc6cab1d6ff5d06592a91a692f90ec7505d6463a88a52cc0eb58545da"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:040c1aebeda72197ef477a906782b5ab0d387642e93bda547336b8957c61022e"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f91de7223d4c7b793867797bacd1ee53bfe7359bd70d27b7b58a04efbb9436c8"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:faae4860798c31530dd184046a900e652c95513796ef51a12bc086710c2eec4d"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b0157420efcb803e71d1b28e2c287518b8808b7cf1ab8af36718fd0a2c453eb0"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:06f54715b7737c2fecdbf140d1afb11a33d59508a47bf11bb38ecf21dc9ab79f"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fdb7adb641a0d13bdcd4ef48e062363d8a9ad4a182ac7647ec88f695e719ae9f"}, + {file = "kiwisolver-1.4.5-cp311-cp311-win32.whl", hash = "sha256:bb86433b1cfe686da83ce32a9d3a8dd308e85c76b60896d58f082136f10bffac"}, + {file = "kiwisolver-1.4.5-cp311-cp311-win_amd64.whl", hash = "sha256:6c08e1312a9cf1074d17b17728d3dfce2a5125b2d791527f33ffbe805200a355"}, + {file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:32d5cf40c4f7c7b3ca500f8985eb3fb3a7dfc023215e876f207956b5ea26632a"}, + {file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f846c260f483d1fd217fe5ed7c173fb109efa6b1fc8381c8b7552c5781756192"}, + {file = "kiwisolver-1.4.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5ff5cf3571589b6d13bfbfd6bcd7a3f659e42f96b5fd1c4830c4cf21d4f5ef45"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7269d9e5f1084a653d575c7ec012ff57f0c042258bf5db0954bf551c158466e7"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da802a19d6e15dffe4b0c24b38b3af68e6c1a68e6e1d8f30148c83864f3881db"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3aba7311af82e335dd1e36ffff68aaca609ca6290c2cb6d821a39aa075d8e3ff"}, + {file = 
"kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:763773d53f07244148ccac5b084da5adb90bfaee39c197554f01b286cf869228"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2270953c0d8cdab5d422bee7d2007f043473f9d2999631c86a223c9db56cbd16"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d099e745a512f7e3bbe7249ca835f4d357c586d78d79ae8f1dcd4d8adeb9bda9"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:74db36e14a7d1ce0986fa104f7d5637aea5c82ca6326ed0ec5694280942d1162"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e5bab140c309cb3a6ce373a9e71eb7e4873c70c2dda01df6820474f9889d6d4"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0f114aa76dc1b8f636d077979c0ac22e7cd8f3493abbab152f20eb8d3cda71f3"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:88a2df29d4724b9237fc0c6eaf2a1adae0cdc0b3e9f4d8e7dc54b16812d2d81a"}, + {file = "kiwisolver-1.4.5-cp312-cp312-win32.whl", hash = "sha256:72d40b33e834371fd330fb1472ca19d9b8327acb79a5821d4008391db8e29f20"}, + {file = "kiwisolver-1.4.5-cp312-cp312-win_amd64.whl", hash = "sha256:2c5674c4e74d939b9d91dda0fae10597ac7521768fec9e399c70a1f27e2ea2d9"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3a2b053a0ab7a3960c98725cfb0bf5b48ba82f64ec95fe06f1d06c99b552e130"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cd32d6c13807e5c66a7cbb79f90b553642f296ae4518a60d8d76243b0ad2898"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59ec7b7c7e1a61061850d53aaf8e93db63dce0c936db1fda2658b70e4a1be709"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da4cfb373035def307905d05041c1d06d8936452fe89d464743ae7fb8371078b"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2400873bccc260b6ae184b2b8a4fec0e4082d30648eadb7c3d9a13405d861e89"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1b04139c4236a0f3aff534479b58f6f849a8b351e1314826c2d230849ed48985"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:4e66e81a5779b65ac21764c295087de82235597a2293d18d943f8e9e32746265"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:7931d8f1f67c4be9ba1dd9c451fb0eeca1a25b89e4d3f89e828fe12a519b782a"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:b3f7e75f3015df442238cca659f8baa5f42ce2a8582727981cbfa15fee0ee205"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:bbf1d63eef84b2e8c89011b7f2235b1e0bf7dacc11cac9431fc6468e99ac77fb"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4c380469bd3f970ef677bf2bcba2b6b0b4d5c75e7a020fb863ef75084efad66f"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-win32.whl", hash = "sha256:9408acf3270c4b6baad483865191e3e582b638b1654a007c62e3efe96f09a9a3"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-win_amd64.whl", hash = "sha256:5b94529f9b2591b7af5f3e0e730a4e0a41ea174af35a4fd067775f9bdfeee01a"}, + {file = "kiwisolver-1.4.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:11c7de8f692fc99816e8ac50d1d1aef4f75126eefc33ac79aac02c099fd3db71"}, + {file = 
"kiwisolver-1.4.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:53abb58632235cd154176ced1ae8f0d29a6657aa1aa9decf50b899b755bc2b93"}, + {file = "kiwisolver-1.4.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:88b9f257ca61b838b6f8094a62418421f87ac2a1069f7e896c36a7d86b5d4c29"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3195782b26fc03aa9c6913d5bad5aeb864bdc372924c093b0f1cebad603dd712"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc579bf0f502e54926519451b920e875f433aceb4624a3646b3252b5caa9e0b6"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a580c91d686376f0f7c295357595c5a026e6cbc3d77b7c36e290201e7c11ecb"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cfe6ab8da05c01ba6fbea630377b5da2cd9bcbc6338510116b01c1bc939a2c18"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:d2e5a98f0ec99beb3c10e13b387f8db39106d53993f498b295f0c914328b1333"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a51a263952b1429e429ff236d2f5a21c5125437861baeed77f5e1cc2d2c7c6da"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3edd2fa14e68c9be82c5b16689e8d63d89fe927e56debd6e1dbce7a26a17f81b"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:74d1b44c6cfc897df648cc9fdaa09bc3e7679926e6f96df05775d4fb3946571c"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:76d9289ed3f7501012e05abb8358bbb129149dbd173f1f57a1bf1c22d19ab7cc"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:92dea1ffe3714fa8eb6a314d2b3c773208d865a0e0d35e713ec54eea08a66250"}, + {file = "kiwisolver-1.4.5-cp38-cp38-win32.whl", hash = "sha256:5c90ae8c8d32e472be041e76f9d2f2dbff4d0b0be8bd4041770eddb18cf49a4e"}, + {file = "kiwisolver-1.4.5-cp38-cp38-win_amd64.whl", hash = "sha256:c7940c1dc63eb37a67721b10d703247552416f719c4188c54e04334321351ced"}, + {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9407b6a5f0d675e8a827ad8742e1d6b49d9c1a1da5d952a67d50ef5f4170b18d"}, + {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15568384086b6df3c65353820a4473575dbad192e35010f622c6ce3eebd57af9"}, + {file = "kiwisolver-1.4.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0dc9db8e79f0036e8173c466d21ef18e1befc02de8bf8aa8dc0813a6dc8a7046"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:cdc8a402aaee9a798b50d8b827d7ecf75edc5fb35ea0f91f213ff927c15f4ff0"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6c3bd3cde54cafb87d74d8db50b909705c62b17c2099b8f2e25b461882e544ff"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:955e8513d07a283056b1396e9a57ceddbd272d9252c14f154d450d227606eb54"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:346f5343b9e3f00b8db8ba359350eb124b98c99efd0b408728ac6ebf38173958"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9098e0049e88c6a24ff64545cdfc50807818ba6c1b739cae221bbbcbc58aad3"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:00bd361b903dc4bbf4eb165f24d1acbee754fce22ded24c3d56eec268658a5cf"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7b8b454bac16428b22560d0a1cf0a09875339cab69df61d7805bf48919415901"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:f1d072c2eb0ad60d4c183f3fb44ac6f73fb7a8f16a2694a91f988275cbf352f9"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:31a82d498054cac9f6d0b53d02bb85811185bcb477d4b60144f915f3b3126342"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6512cb89e334e4700febbffaaa52761b65b4f5a3cf33f960213d5656cea36a77"}, + {file = "kiwisolver-1.4.5-cp39-cp39-win32.whl", hash = "sha256:9db8ea4c388fdb0f780fe91346fd438657ea602d58348753d9fb265ce1bca67f"}, + {file = "kiwisolver-1.4.5-cp39-cp39-win_amd64.whl", hash = "sha256:59415f46a37f7f2efeec758353dd2eae1b07640d8ca0f0c42548ec4125492635"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5c7b3b3a728dc6faf3fc372ef24f21d1e3cee2ac3e9596691d746e5a536de920"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:620ced262a86244e2be10a676b646f29c34537d0d9cc8eb26c08f53d98013390"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:378a214a1e3bbf5ac4a8708304318b4f890da88c9e6a07699c4ae7174c09a68d"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf7be1207676ac608a50cd08f102f6742dbfc70e8d60c4db1c6897f62f71523"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ba55dce0a9b8ff59495ddd050a0225d58bd0983d09f87cfe2b6aec4f2c1234e4"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fd32ea360bcbb92d28933fc05ed09bffcb1704ba3fc7942e81db0fd4f81a7892"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5e7139af55d1688f8b960ee9ad5adafc4ac17c1c473fe07133ac092310d76544"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dced8146011d2bc2e883f9bd68618b8247387f4bbec46d7392b3c3b032640126"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9bf3325c47b11b2e51bca0824ea217c7cd84491d8ac4eefd1e409705ef092bd"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5794cf59533bc3f1b1c821f7206a3617999db9fbefc345360aafe2e067514929"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e368f200bbc2e4f905b8e71eb38b3c04333bddaa6a2464a6355487b02bb7fb09"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5d706eba36b4c4d5bc6c6377bb6568098765e990cfc21ee16d13963fab7b3e7"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85267bd1aa8880a9c88a8cb71e18d3d64d2751a790e6ca6c27b8ccc724bcd5ad"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:210ef2c3a1f03272649aff1ef992df2e724748918c4bc2d5a90352849eb40bea"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:11d011a7574eb3b82bcc9c1a1d35c1d7075677fdd15de527d91b46bd35e935ee"}, + {file = "kiwisolver-1.4.5.tar.gz", hash = 
"sha256:e57e563a57fb22a142da34f38acc2fc1a5c864bc29ca1517a88abc963e60d6ec"}, +] + +[[package]] +name = "lightning" +version = "2.1.0" +description = "The Deep Learning framework to train, deploy, and ship AI products Lightning fast." +optional = false +python-versions = ">=3.8" +files = [ + {file = "lightning-2.1.0-py3-none-any.whl", hash = "sha256:c12bd10bd28b9e29a8e877be039350a585f248c10b76360faa2aa2497f980de6"}, + {file = "lightning-2.1.0.tar.gz", hash = "sha256:1f78f5995ae7dcffa1edf34320db136902b73a0d1b304404c48ec8be165b3a93"}, +] + +[package.dependencies] +fsspec = {version = ">2021.06.0,<2025.0", extras = ["http"]} +lightning-utilities = ">=0.8.0,<2.0" +numpy = ">=1.17.2,<3.0" +packaging = ">=20.0,<25.0" +pytorch-lightning = "*" +PyYAML = ">=5.4,<8.0" +torch = ">=1.12.0,<4.0" +torchmetrics = ">=0.7.0,<3.0" +tqdm = ">=4.57.0,<6.0" +typing-extensions = ">=4.0.0,<6.0" + +[package.extras] +all = ["Jinja2 (<4.0)", "Pillow (>=9.5.0)", "PyYAML (<7.0)", "aiohttp (>=3.8.0,<4.0)", "arrow (>=1.2.0,<2.0)", "backoff (>=2.2.1,<3.0)", "beautifulsoup4 (>=4.8.0,<5.0)", "click (<9.0)", "croniter (>=1.3.0,<1.5.0)", "dateutils (<1.0)", "deepdiff (>=5.7.0,<7.0)", "deepspeed (>=0.8.2,<=0.9.3)", "docker (>=5.0.0,<7.0)", "fastapi (>=0.92.0,<1.0)", "fsspec (>=2022.5.0,<2024.0)", "fsspec[http] (>2021.06.0,<2024.0)", "gym[classic-control] (>=0.17.0,<1.0)", "hydra-core (>=1.0.5,<2.0)", "inquirer (>=2.10.0,<4.0)", "ipython[all] (<9.0)", "jsonargparse[signatures] (>=4.18.0,<5.0)", "lightning-api-access (>=0.0.3)", "lightning-cloud (==0.5.39)", "lightning-fabric (>=1.9.0)", "lightning-utilities (>=0.8.0,<1.0)", "matplotlib (>3.1,<4.0)", "omegaconf (>=2.0.5,<3.0)", "packaging", "panel (>=1.0.0,<2.0)", "psutil (<6.0)", "pydantic (>=1.7.4)", "python-multipart (>=0.0.5,<1.0)", "pytorch-lightning (>=1.9.0)", "redis (>=4.0.1,<6.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "s3fs (>=2022.5.0,<2024.0)", "starlette", "starsessions (>=1.2.1,<2.0)", "streamlit (>=1.13.0,<2.0)", "tensorboardX (>=2.2,<3.0)", "torch (>0.14.0,<3.0)", "torchdata (>0.5.9,<1.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.13.0,<1.0)", "torchvision (>=0.15.2,<1.0)", "traitlets (>=5.3.0,<6.0)", "typing-extensions (>=4.0.0,<5.0)", "urllib3 (<3.0)", "uvicorn (<1.0)", "websocket-client (<2.0)", "websockets (<12.0)"] +app = ["Jinja2 (<4.0)", "PyYAML (<7.0)", "arrow (>=1.2.0,<2.0)", "backoff (>=2.2.1,<3.0)", "beautifulsoup4 (>=4.8.0,<5.0)", "click (<9.0)", "croniter (>=1.3.0,<1.5.0)", "dateutils (<1.0)", "deepdiff (>=5.7.0,<7.0)", "fastapi (>=0.92.0,<1.0)", "fsspec (>=2022.5.0,<2024.0)", "inquirer (>=2.10.0,<4.0)", "lightning-cloud (==0.5.39)", "lightning-utilities (>=0.8.0,<1.0)", "packaging", "psutil (<6.0)", "pydantic (>=1.7.4)", "python-multipart (>=0.0.5,<1.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "starlette", "starsessions (>=1.2.1,<2.0)", "traitlets (>=5.3.0,<6.0)", "typing-extensions (>=4.0.0,<5.0)", "urllib3 (<3.0)", "uvicorn (<1.0)", "websocket-client (<2.0)", "websockets (<12.0)"] +app-all = ["Jinja2 (<4.0)", "PyYAML (<7.0)", "aiohttp (>=3.8.0,<4.0)", "arrow (>=1.2.0,<2.0)", "backoff (>=2.2.1,<3.0)", "beautifulsoup4 (>=4.8.0,<5.0)", "click (<9.0)", "croniter (>=1.3.0,<1.5.0)", "dateutils (<1.0)", "deepdiff (>=5.7.0,<7.0)", "docker (>=5.0.0,<7.0)", "fastapi (>=0.92.0,<1.0)", "fsspec (>=2022.5.0,<2024.0)", "inquirer (>=2.10.0,<4.0)", "lightning-api-access (>=0.0.3)", "lightning-cloud (==0.5.39)", "lightning-fabric (>=1.9.0)", "lightning-utilities (>=0.8.0,<1.0)", "packaging", "panel (>=1.0.0,<2.0)", "psutil (<6.0)", "pydantic 
(>=1.7.4)", "python-multipart (>=0.0.5,<1.0)", "pytorch-lightning (>=1.9.0)", "redis (>=4.0.1,<6.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "s3fs (>=2022.5.0,<2024.0)", "starlette", "starsessions (>=1.2.1,<2.0)", "streamlit (>=1.13.0,<2.0)", "traitlets (>=5.3.0,<6.0)", "typing-extensions (>=4.0.0,<5.0)", "urllib3 (<3.0)", "uvicorn (<1.0)", "websocket-client (<2.0)", "websockets (<12.0)"] +app-cloud = ["docker (>=5.0.0,<7.0)", "redis (>=4.0.1,<6.0)", "s3fs (>=2022.5.0,<2024.0)"] +app-components = ["aiohttp (>=3.8.0,<4.0)", "lightning-api-access (>=0.0.3)", "lightning-fabric (>=1.9.0)", "pytorch-lightning (>=1.9.0)"] +app-dev = ["Jinja2 (<4.0)", "PyYAML (<7.0)", "aiohttp (>=3.8.0,<4.0)", "arrow (>=1.2.0,<2.0)", "backoff (>=2.2.1,<3.0)", "beautifulsoup4 (>=4.8.0,<5.0)", "click (<9.0)", "coverage (==7.3.1)", "croniter (>=1.3.0,<1.5.0)", "dateutils (<1.0)", "deepdiff (>=5.7.0,<7.0)", "docker (>=5.0.0,<7.0)", "fastapi (>=0.92.0,<1.0)", "fsspec (>=2022.5.0,<2024.0)", "httpx (==0.25.0)", "inquirer (>=2.10.0,<4.0)", "lightning-api-access (>=0.0.3)", "lightning-cloud (==0.5.39)", "lightning-fabric (>=1.9.0)", "lightning-utilities (>=0.8.0,<1.0)", "packaging", "panel (>=1.0.0,<2.0)", "playwright (==1.38.0)", "psutil (<6.0)", "pydantic (>=1.7.4)", "pympler", "pytest (==7.4.0)", "pytest-asyncio (==0.21.1)", "pytest-cov (==4.1.0)", "pytest-doctestplus (==0.9.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "pytest-xdist (==3.3.1)", "python-multipart (>=0.0.5,<1.0)", "pytorch-lightning (>=1.9.0)", "redis (>=4.0.1,<6.0)", "requests (<3.0)", "requests-mock (==1.11.0)", "rich (>=12.3.0,<14.0)", "s3fs (>=2022.5.0,<2024.0)", "setuptools (<69.0)", "starlette", "starsessions (>=1.2.1,<2.0)", "streamlit (>=1.13.0,<2.0)", "traitlets (>=5.3.0,<6.0)", "trio (<0.22.0)", "typing-extensions (>=4.0.0,<5.0)", "urllib3 (<3.0)", "uvicorn (<1.0)", "websocket-client (<2.0)", "websockets (<12.0)"] +app-extra = ["Jinja2 (<4.0)", "PyYAML (<7.0)", "aiohttp (>=3.8.0,<4.0)", "arrow (>=1.2.0,<2.0)", "backoff (>=2.2.1,<3.0)", "beautifulsoup4 (>=4.8.0,<5.0)", "click (<9.0)", "croniter (>=1.3.0,<1.5.0)", "dateutils (<1.0)", "deepdiff (>=5.7.0,<7.0)", "docker (>=5.0.0,<7.0)", "fastapi (>=0.92.0,<1.0)", "fsspec (>=2022.5.0,<2024.0)", "inquirer (>=2.10.0,<4.0)", "lightning-api-access (>=0.0.3)", "lightning-cloud (==0.5.39)", "lightning-fabric (>=1.9.0)", "lightning-utilities (>=0.8.0,<1.0)", "packaging", "panel (>=1.0.0,<2.0)", "psutil (<6.0)", "pydantic (>=1.7.4)", "python-multipart (>=0.0.5,<1.0)", "pytorch-lightning (>=1.9.0)", "redis (>=4.0.1,<6.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "s3fs (>=2022.5.0,<2024.0)", "starlette", "starsessions (>=1.2.1,<2.0)", "streamlit (>=1.13.0,<2.0)", "traitlets (>=5.3.0,<6.0)", "typing-extensions (>=4.0.0,<5.0)", "urllib3 (<3.0)", "uvicorn (<1.0)", "websocket-client (<2.0)", "websockets (<12.0)"] +app-test = ["coverage (==7.3.1)", "httpx (==0.25.0)", "playwright (==1.38.0)", "psutil (<6.0)", "pympler", "pytest (==7.4.0)", "pytest-asyncio (==0.21.1)", "pytest-cov (==4.1.0)", "pytest-doctestplus (==0.9.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "pytest-xdist (==3.3.1)", "requests-mock (==1.11.0)", "setuptools (<69.0)", "trio (<0.22.0)"] +app-ui = ["panel (>=1.0.0,<2.0)", "streamlit (>=1.13.0,<2.0)"] +cloud = ["docker (>=5.0.0,<7.0)", "fsspec[http] (>2021.06.0,<2024.0)", "redis (>=4.0.1,<6.0)", "s3fs (>=2022.5.0,<2024.0)"] +components = ["aiohttp (>=3.8.0,<4.0)", "lightning-api-access (>=0.0.3)", "lightning-fabric (>=1.9.0)", "pytorch-lightning 
(>=1.9.0)"] +data = ["Jinja2 (<4.0)", "PyYAML (<7.0)", "arrow (>=1.2.0,<2.0)", "backoff (>=2.2.1,<3.0)", "beautifulsoup4 (>=4.8.0,<5.0)", "click (<9.0)", "croniter (>=1.3.0,<1.5.0)", "dateutils (<1.0)", "deepdiff (>=5.7.0,<7.0)", "fastapi (>=0.92.0,<1.0)", "fsspec (>=2022.5.0,<2024.0)", "inquirer (>=2.10.0,<4.0)", "lightning-cloud (==0.5.39)", "lightning-utilities (>=0.8.0,<1.0)", "packaging", "psutil (<6.0)", "pydantic (>=1.7.4)", "python-multipart (>=0.0.5,<1.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "starlette", "starsessions (>=1.2.1,<2.0)", "torch (>0.14.0,<3.0)", "torchdata (>0.5.9,<1.0)", "traitlets (>=5.3.0,<6.0)", "typing-extensions (>=4.0.0,<5.0)", "urllib3 (<3.0)", "uvicorn (<1.0)", "websocket-client (<2.0)", "websockets (<12.0)"] +data-all = ["Jinja2 (<4.0)", "Pillow (>=9.5.0)", "PyYAML (<7.0)", "arrow (>=1.2.0,<2.0)", "backoff (>=2.2.1,<3.0)", "beautifulsoup4 (>=4.8.0,<5.0)", "click (<9.0)", "croniter (>=1.3.0,<1.5.0)", "dateutils (<1.0)", "deepdiff (>=5.7.0,<7.0)", "fastapi (>=0.92.0,<1.0)", "fsspec (>=2022.5.0,<2024.0)", "fsspec[http] (>2021.06.0,<2024.0)", "inquirer (>=2.10.0,<4.0)", "lightning-cloud (==0.5.39)", "lightning-utilities (>=0.8.0,<1.0)", "packaging", "psutil (<6.0)", "pydantic (>=1.7.4)", "python-multipart (>=0.0.5,<1.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "s3fs (>=2022.5.0,<2024.0)", "starlette", "starsessions (>=1.2.1,<2.0)", "torch (>0.14.0,<3.0)", "torchdata (>0.5.9,<1.0)", "torchvision (>=0.15.2,<1.0)", "traitlets (>=5.3.0,<6.0)", "typing-extensions (>=4.0.0,<5.0)", "urllib3 (<3.0)", "uvicorn (<1.0)", "websocket-client (<2.0)", "websockets (<12.0)"] +data-cloud = ["fsspec[http] (>2021.06.0,<2024.0)", "s3fs (>=2022.5.0,<2024.0)"] +data-dev = ["Jinja2 (<4.0)", "Pillow (>=9.5.0)", "PyYAML (<7.0)", "arrow (>=1.2.0,<2.0)", "backoff (>=2.2.1,<3.0)", "beautifulsoup4 (>=4.8.0,<5.0)", "click (<9.0)", "coverage (==7.3.1)", "croniter (>=1.3.0,<1.5.0)", "dateutils (<1.0)", "deepdiff (>=5.7.0,<7.0)", "fastapi (>=0.92.0,<1.0)", "fsspec (>=2022.5.0,<2024.0)", "fsspec[http] (>2021.06.0,<2024.0)", "inquirer (>=2.10.0,<4.0)", "lightning-cloud (==0.5.39)", "lightning-utilities (>=0.8.0,<1.0)", "packaging", "psutil (<6.0)", "pydantic (>=1.7.4)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "python-multipart (>=0.0.5,<1.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "s3fs (>=2022.5.0,<2024.0)", "starlette", "starsessions (>=1.2.1,<2.0)", "torch (>0.14.0,<3.0)", "torchdata (>0.5.9,<1.0)", "torchvision (>=0.15.2,<1.0)", "traitlets (>=5.3.0,<6.0)", "typing-extensions (>=4.0.0,<5.0)", "urllib3 (<3.0)", "uvicorn (<1.0)", "websocket-client (<2.0)", "websockets (<12.0)"] +data-examples = ["Pillow (>=9.5.0)", "torchvision (>=0.15.2,<1.0)"] +data-test = ["coverage (==7.3.1)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)"] +dev = ["Jinja2 (<4.0)", "Pillow (>=9.5.0)", "PyYAML (<7.0)", "aiohttp (>=3.8.0,<4.0)", "arrow (>=1.2.0,<2.0)", "backoff (>=2.2.1,<3.0)", "beautifulsoup4 (>=4.8.0,<5.0)", "click (<9.0)", "click (==8.1.7)", "cloudpickle (>=1.3,<3.0)", "coverage (==7.3.1)", "croniter (>=1.3.0,<1.5.0)", "dateutils (<1.0)", "deepdiff (>=5.7.0,<7.0)", "deepspeed (>=0.8.2,<=0.9.3)", "docker (>=5.0.0,<7.0)", "fastapi", "fastapi (>=0.92.0,<1.0)", "fsspec (>=2022.5.0,<2024.0)", "fsspec[http] (>2021.06.0,<2024.0)", "gym[classic-control] (>=0.17.0,<1.0)", "httpx (==0.25.0)", "hydra-core 
(>=1.0.5,<2.0)", "inquirer (>=2.10.0,<4.0)", "ipython[all] (<9.0)", "jsonargparse[signatures] (>=4.18.0,<5.0)", "lightning-api-access (>=0.0.3)", "lightning-cloud (==0.5.39)", "lightning-fabric (>=1.9.0)", "lightning-utilities (>=0.8.0,<1.0)", "matplotlib (>3.1,<4.0)", "omegaconf (>=2.0.5,<3.0)", "onnx (>=0.14.0,<2.0)", "onnxruntime (>=0.15.0,<2.0)", "packaging", "pandas (>1.0,<3.0)", "panel (>=1.0.0,<2.0)", "playwright (==1.38.0)", "psutil (<6.0)", "pydantic (>=1.7.4)", "pympler", "pytest (==7.4.0)", "pytest-asyncio (==0.21.1)", "pytest-cov (==4.1.0)", "pytest-doctestplus (==0.9.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "pytest-xdist (==3.3.1)", "python-multipart (>=0.0.5,<1.0)", "pytorch-lightning (>=1.9.0)", "redis (>=4.0.1,<6.0)", "requests (<3.0)", "requests-mock (==1.11.0)", "rich (>=12.3.0,<14.0)", "s3fs (>=2022.5.0,<2024.0)", "scikit-learn (>0.22.1,<2.0)", "setuptools (<69.0)", "starlette", "starsessions (>=1.2.1,<2.0)", "streamlit (>=1.13.0,<2.0)", "tensorboard (>=2.9.1,<3.0)", "tensorboardX (>=2.2,<3.0)", "torch (>0.14.0,<3.0)", "torchdata (>0.5.9,<1.0)", "torchmetrics (>=0.10.0,<2.0)", "torchmetrics (>=0.7.0,<2.0)", "torchvision (>=0.13.0,<1.0)", "torchvision (>=0.15.2,<1.0)", "traitlets (>=5.3.0,<6.0)", "trio (<0.22.0)", "typing-extensions (>=4.0.0,<5.0)", "urllib3 (<3.0)", "uvicorn", "uvicorn (<1.0)", "websocket-client (<2.0)", "websockets (<12.0)"] +examples = ["Pillow (>=9.5.0)", "gym[classic-control] (>=0.17.0,<1.0)", "ipython[all] (<9.0)", "lightning-utilities (>=0.8.0,<1.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.13.0,<1.0)", "torchvision (>=0.15.2,<1.0)"] +extra = ["Jinja2 (<4.0)", "PyYAML (<7.0)", "aiohttp (>=3.8.0,<4.0)", "arrow (>=1.2.0,<2.0)", "backoff (>=2.2.1,<3.0)", "beautifulsoup4 (>=4.8.0,<5.0)", "click (<9.0)", "croniter (>=1.3.0,<1.5.0)", "dateutils (<1.0)", "deepdiff (>=5.7.0,<7.0)", "docker (>=5.0.0,<7.0)", "fastapi (>=0.92.0,<1.0)", "fsspec (>=2022.5.0,<2024.0)", "hydra-core (>=1.0.5,<2.0)", "inquirer (>=2.10.0,<4.0)", "jsonargparse[signatures] (>=4.18.0,<5.0)", "lightning-api-access (>=0.0.3)", "lightning-cloud (==0.5.39)", "lightning-fabric (>=1.9.0)", "lightning-utilities (>=0.8.0,<1.0)", "matplotlib (>3.1,<4.0)", "omegaconf (>=2.0.5,<3.0)", "packaging", "panel (>=1.0.0,<2.0)", "psutil (<6.0)", "pydantic (>=1.7.4)", "python-multipart (>=0.0.5,<1.0)", "pytorch-lightning (>=1.9.0)", "redis (>=4.0.1,<6.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "s3fs (>=2022.5.0,<2024.0)", "starlette", "starsessions (>=1.2.1,<2.0)", "streamlit (>=1.13.0,<2.0)", "tensorboardX (>=2.2,<3.0)", "traitlets (>=5.3.0,<6.0)", "typing-extensions (>=4.0.0,<5.0)", "urllib3 (<3.0)", "uvicorn (<1.0)", "websocket-client (<2.0)", "websockets (<12.0)"] +fabric-all = ["deepspeed (>=0.8.2,<=0.9.3)", "lightning-utilities (>=0.8.0,<1.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.13.0,<1.0)"] +fabric-dev = ["click (==8.1.7)", "coverage (==7.3.1)", "deepspeed (>=0.8.2,<=0.9.3)", "lightning-utilities (>=0.8.0,<1.0)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "tensorboardX (>=2.2,<3.0)", "torchmetrics (>=0.10.0,<2.0)", "torchmetrics (>=0.7.0,<2.0)", "torchvision (>=0.13.0,<1.0)"] +fabric-examples = ["lightning-utilities (>=0.8.0,<1.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.13.0,<1.0)"] +fabric-strategies = ["deepspeed (>=0.8.2,<=0.9.3)"] +fabric-test = ["click (==8.1.7)", "coverage (==7.3.1)", "pytest 
(==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "tensorboardX (>=2.2,<3.0)", "torchmetrics (>=0.7.0,<2.0)"] +pytorch-all = ["deepspeed (>=0.8.2,<=0.9.3)", "gym[classic-control] (>=0.17.0,<1.0)", "hydra-core (>=1.0.5,<2.0)", "ipython[all] (<9.0)", "jsonargparse[signatures] (>=4.18.0,<5.0)", "lightning-utilities (>=0.8.0,<1.0)", "matplotlib (>3.1,<4.0)", "omegaconf (>=2.0.5,<3.0)", "rich (>=12.3.0,<14.0)", "tensorboardX (>=2.2,<3.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.13.0,<1.0)"] +pytorch-dev = ["cloudpickle (>=1.3,<3.0)", "coverage (==7.3.1)", "deepspeed (>=0.8.2,<=0.9.3)", "fastapi", "gym[classic-control] (>=0.17.0,<1.0)", "hydra-core (>=1.0.5,<2.0)", "ipython[all] (<9.0)", "jsonargparse[signatures] (>=4.18.0,<5.0)", "lightning-utilities (>=0.8.0,<1.0)", "matplotlib (>3.1,<4.0)", "omegaconf (>=2.0.5,<3.0)", "onnx (>=0.14.0,<2.0)", "onnxruntime (>=0.15.0,<2.0)", "pandas (>1.0,<3.0)", "psutil (<6.0)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "rich (>=12.3.0,<14.0)", "scikit-learn (>0.22.1,<2.0)", "tensorboard (>=2.9.1,<3.0)", "tensorboardX (>=2.2,<3.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.13.0,<1.0)", "uvicorn"] +pytorch-examples = ["gym[classic-control] (>=0.17.0,<1.0)", "ipython[all] (<9.0)", "lightning-utilities (>=0.8.0,<1.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.13.0,<1.0)"] +pytorch-extra = ["hydra-core (>=1.0.5,<2.0)", "jsonargparse[signatures] (>=4.18.0,<5.0)", "matplotlib (>3.1,<4.0)", "omegaconf (>=2.0.5,<3.0)", "rich (>=12.3.0,<14.0)", "tensorboardX (>=2.2,<3.0)"] +pytorch-strategies = ["deepspeed (>=0.8.2,<=0.9.3)"] +pytorch-test = ["cloudpickle (>=1.3,<3.0)", "coverage (==7.3.1)", "fastapi", "onnx (>=0.14.0,<2.0)", "onnxruntime (>=0.15.0,<2.0)", "pandas (>1.0,<3.0)", "psutil (<6.0)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "scikit-learn (>0.22.1,<2.0)", "tensorboard (>=2.9.1,<3.0)", "uvicorn"] +store = ["Jinja2 (<4.0)", "PyYAML (<7.0)", "arrow (>=1.2.0,<2.0)", "backoff (>=2.2.1,<3.0)", "beautifulsoup4 (>=4.8.0,<5.0)", "click (<9.0)", "croniter (>=1.3.0,<1.5.0)", "dateutils (<1.0)", "deepdiff (>=5.7.0,<7.0)", "fastapi (>=0.92.0,<1.0)", "fsspec (>=2022.5.0,<2024.0)", "inquirer (>=2.10.0,<4.0)", "lightning-cloud (==0.5.39)", "lightning-utilities (>=0.8.0,<1.0)", "packaging", "psutil (<6.0)", "pydantic (>=1.7.4)", "python-multipart (>=0.0.5,<1.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "starlette", "starsessions (>=1.2.1,<2.0)", "traitlets (>=5.3.0,<6.0)", "typing-extensions (>=4.0.0,<5.0)", "urllib3 (<3.0)", "uvicorn (<1.0)", "websocket-client (<2.0)", "websockets (<12.0)"] +store-test = ["coverage (==7.3.1)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)"] +strategies = ["deepspeed (>=0.8.2,<=0.9.3)"] +test = ["click (==8.1.7)", "cloudpickle (>=1.3,<3.0)", "coverage (==7.3.1)", "fastapi", "httpx (==0.25.0)", "onnx (>=0.14.0,<2.0)", "onnxruntime (>=0.15.0,<2.0)", "pandas (>1.0,<3.0)", "playwright (==1.38.0)", "psutil (<6.0)", "pympler", "pytest (==7.4.0)", "pytest-asyncio (==0.21.1)", "pytest-cov (==4.1.0)", "pytest-doctestplus (==0.9.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "pytest-xdist (==3.3.1)", 
"requests-mock (==1.11.0)", "scikit-learn (>0.22.1,<2.0)", "setuptools (<69.0)", "tensorboard (>=2.9.1,<3.0)", "tensorboardX (>=2.2,<3.0)", "torchmetrics (>=0.7.0,<2.0)", "trio (<0.22.0)", "uvicorn"] +ui = ["panel (>=1.0.0,<2.0)", "streamlit (>=1.13.0,<2.0)"] + +[[package]] +name = "lightning-utilities" +version = "0.9.0" +description = "PyTorch Lightning Sample project." +optional = false +python-versions = ">=3.7" +files = [ + {file = "lightning-utilities-0.9.0.tar.gz", hash = "sha256:efbf2c488c257f942abdfd06cf646fb84ca215a9663b60081811e22a15ee033b"}, + {file = "lightning_utilities-0.9.0-py3-none-any.whl", hash = "sha256:918dd90c775719e3855631db6282ad75c14da4c5727c4cebdd1589d865fad03d"}, +] + +[package.dependencies] +packaging = ">=17.1" +typing-extensions = "*" + +[package.extras] +cli = ["fire"] +docs = ["requests (>=2.0.0)"] +typing = ["mypy (>=1.0.0)"] + [[package]] name = "llvmlite" version = "0.40.1" @@ -783,7 +1238,7 @@ files = [ name = "markupsafe" version = "2.1.3" description = "Safely add untrusted strings to HTML/XML markup." -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, @@ -838,6 +1293,48 @@ files = [ {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, ] +[[package]] +name = "matplotlib" +version = "3.3.4" +description = "Python plotting package" +optional = false +python-versions = ">=3.6" +files = [ + {file = "matplotlib-3.3.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:672960dd114e342b7c610bf32fb99d14227f29919894388b41553217457ba7ef"}, + {file = "matplotlib-3.3.4-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:7c155437ae4fd366e2700e2716564d1787700687443de46bcb895fe0f84b761d"}, + {file = "matplotlib-3.3.4-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:a17f0a10604fac7627ec82820439e7db611722e80c408a726cd00d8c974c2fb3"}, + {file = "matplotlib-3.3.4-cp36-cp36m-win32.whl", hash = "sha256:215e2a30a2090221a9481db58b770ce56b8ef46f13224ae33afe221b14b24dc1"}, + {file = "matplotlib-3.3.4-cp36-cp36m-win_amd64.whl", hash = "sha256:348e6032f666ffd151b323342f9278b16b95d4a75dfacae84a11d2829a7816ae"}, + {file = "matplotlib-3.3.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:94bdd1d55c20e764d8aea9d471d2ae7a7b2c84445e0fa463f02e20f9730783e1"}, + {file = "matplotlib-3.3.4-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:a1acb72f095f1d58ecc2538ed1b8bca0b57df313b13db36ed34b8cdf1868e674"}, + {file = "matplotlib-3.3.4-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:46b1a60a04e6d884f0250d5cc8dc7bd21a9a96c584a7acdaab44698a44710bab"}, + {file = "matplotlib-3.3.4-cp37-cp37m-win32.whl", hash = "sha256:ed4a9e6dcacba56b17a0a9ac22ae2c72a35b7f0ef0693aa68574f0b2df607a89"}, + {file = "matplotlib-3.3.4-cp37-cp37m-win_amd64.whl", hash = "sha256:c24c05f645aef776e8b8931cb81e0f1632d229b42b6d216e30836e2e145a2b40"}, + {file = "matplotlib-3.3.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7310e353a4a35477c7f032409966920197d7df3e757c7624fd842f3eeb307d3d"}, + {file = "matplotlib-3.3.4-cp38-cp38-manylinux1_i686.whl", hash = "sha256:451cc89cb33d6652c509fc6b588dc51c41d7246afdcc29b8624e256b7663ed1f"}, + {file = "matplotlib-3.3.4-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:3d2eb9c1cc254d0ffa90bc96fde4b6005d09c2228f99dfd493a4219c1af99644"}, + {file = "matplotlib-3.3.4-cp38-cp38-win32.whl", hash = 
"sha256:e15fa23d844d54e7b3b7243afd53b7567ee71c721f592deb0727ee85e668f96a"}, + {file = "matplotlib-3.3.4-cp38-cp38-win_amd64.whl", hash = "sha256:1de0bb6cbfe460725f0e97b88daa8643bcf9571c18ba90bb8e41432aaeca91d6"}, + {file = "matplotlib-3.3.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f44149a0ef5b4991aaef12a93b8e8d66d6412e762745fea1faa61d98524e0ba9"}, + {file = "matplotlib-3.3.4-cp39-cp39-manylinux1_i686.whl", hash = "sha256:746a1df55749629e26af7f977ea426817ca9370ad1569436608dc48d1069b87c"}, + {file = "matplotlib-3.3.4-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:5f571b92a536206f7958f7cb2d367ff6c9a1fa8229dc35020006e4cdd1ca0acd"}, + {file = "matplotlib-3.3.4-cp39-cp39-win32.whl", hash = "sha256:9265ae0fb35e29f9b8cc86c2ab0a2e3dcddc4dd9de4b85bf26c0f63fe5c1c2ca"}, + {file = "matplotlib-3.3.4-cp39-cp39-win_amd64.whl", hash = "sha256:9a79e5dd7bb797aa611048f5b70588b23c5be05b63eefd8a0d152ac77c4243db"}, + {file = "matplotlib-3.3.4-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1e850163579a8936eede29fad41e202b25923a0a8d5ffd08ce50fc0a97dcdc93"}, + {file = "matplotlib-3.3.4-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:d738acfdfb65da34c91acbdb56abed46803db39af259b7f194dc96920360dbe4"}, + {file = "matplotlib-3.3.4-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:aa49571d8030ad0b9ac39708ee77bd2a22f87815e12bdee52ecaffece9313ed8"}, + {file = "matplotlib-3.3.4-pp37-pypy37_pp73-manylinux2010_x86_64.whl", hash = "sha256:cf3a7e54eff792f0815dbbe9b85df2f13d739289c93d346925554f71d484be78"}, + {file = "matplotlib-3.3.4.tar.gz", hash = "sha256:3e477db76c22929e4c6876c44f88d790aacdf3c3f8f3a90cb1975c0bf37825b0"}, +] + +[package.dependencies] +cycler = ">=0.10" +kiwisolver = ">=1.0.1" +numpy = ">=1.15" +pillow = ">=6.2.0" +pyparsing = ">=2.0.3,<2.0.4 || >2.0.4,<2.1.2 || >2.1.2,<2.1.6 || >2.1.6" +python-dateutil = ">=2.1" + [[package]] name = "mccabe" version = "0.6.1" @@ -860,6 +1357,129 @@ files = [ {file = "mistune-3.0.1.tar.gz", hash = "sha256:e912116c13aa0944f9dc530db38eb88f6a77087ab128f49f84a48f4c05ea163c"}, ] +[[package]] +name = "mpmath" +version = "1.3.0" +description = "Python library for arbitrary-precision floating-point arithmetic" +optional = false +python-versions = "*" +files = [ + {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, + {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, +] + +[package.extras] +develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"] +docs = ["sphinx"] +gmpy = ["gmpy2 (>=2.1.0a4)"] +tests = ["pytest (>=4.6)"] + +[[package]] +name = "multidict" +version = "6.0.4" +description = "multidict implementation" +optional = false +python-versions = ">=3.7" +files = [ + {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"}, + {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171"}, + {file = "multidict-6.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5"}, + {file = "multidict-6.0.4-cp310-cp310-win32.whl", hash = "sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8"}, + {file = "multidict-6.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc"}, + {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03"}, + {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3"}, + {file = "multidict-6.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = 
"sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461"}, + {file = "multidict-6.0.4-cp311-cp311-win32.whl", hash = "sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636"}, + {file = "multidict-6.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0"}, + {file = "multidict-6.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d"}, + {file = "multidict-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775"}, + {file = "multidict-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e"}, + {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c"}, + {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161"}, + {file = "multidict-6.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2"}, + {file = 
"multidict-6.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1"}, + {file = "multidict-6.0.4-cp38-cp38-win32.whl", hash = "sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779"}, + {file = "multidict-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480"}, + {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664"}, + {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35"}, + {file = "multidict-6.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95"}, + {file = "multidict-6.0.4-cp39-cp39-win32.whl", hash = "sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313"}, + {file = 
"multidict-6.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2"}, + {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"}, +] + +[[package]] +name = "mvlearn" +version = "0.5.0" +description = "A set of python modules for multiview learning" +optional = false +python-versions = "*" +files = [ + {file = "mvlearn-0.5.0-py3-none-any.whl", hash = "sha256:449a5c649176d4a61a0408844ad45908cfcf6825cc029aa5b876b7624a244df6"}, + {file = "mvlearn-0.5.0.tar.gz", hash = "sha256:5e1a53318dec2c5f41e627080af657364e497aa7a3cdaaaacef879fc3d427ba5"}, +] + +[package.dependencies] +joblib = ">=0.11" +matplotlib = ">=3.0.0,<=3.3.4" +numpy = ">=1.17.0" +scikit-learn = ">=0.19.1" +scipy = ">=1.5.0" +seaborn = ">=0.9.0" + +[package.extras] +multiviewica = ["multiviewica (>=0.0.1)", "python-picard (>=0.4)"] +torch = ["torch (>=1.1.0)", "tqdm"] + [[package]] name = "mypy-extensions" version = "1.0.0" @@ -1011,6 +1631,24 @@ certifi = "*" cftime = "*" numpy = "*" +[[package]] +name = "networkx" +version = "3.2" +description = "Python package for creating and manipulating graphs and networks" +optional = false +python-versions = ">=3.9" +files = [ + {file = "networkx-3.2-py3-none-any.whl", hash = "sha256:8b25f564bd28f94ac821c58b04ae1a3109e73b001a7d476e4bb0d00d63706bf8"}, + {file = "networkx-3.2.tar.gz", hash = "sha256:bda29edf392d9bfa5602034c767d28549214ec45f620081f0b74dc036a1fbbc1"}, +] + +[package.extras] +default = ["matplotlib (>=3.5)", "numpy (>=1.22)", "pandas (>=1.4)", "scipy (>=1.9,!=1.11.0,!=1.11.1)"] +developer = ["changelist (==0.4)", "mypy (>=1.1)", "pre-commit (>=3.2)", "rtoml"] +doc = ["nb2plots (>=0.7)", "nbconvert (<7.9)", "numpydoc (>=1.6)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.14)", "sphinx (>=7)", "sphinx-gallery (>=0.14)", "texext (>=0.6.7)"] +extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.11)", "sympy (>=1.10)"] +test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] + [[package]] name = "numba" version = "0.57.1" @@ -1220,6 +1858,73 @@ six = "*" [package.extras] test = ["pytest", "pytest-cov", "scipy"] +[[package]] +name = "pillow" +version = "10.1.0" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "Pillow-10.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1ab05f3db77e98f93964697c8efc49c7954b08dd61cff526b7f2531a22410106"}, + {file = "Pillow-10.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6932a7652464746fcb484f7fc3618e6503d2066d853f68a4bd97193a3996e273"}, + {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f63b5a68daedc54c7c3464508d8c12075e56dcfbd42f8c1bf40169061ae666"}, + {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0949b55eb607898e28eaccb525ab104b2d86542a85c74baf3a6dc24002edec2"}, + {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ae88931f93214777c7a3aa0a8f92a683f83ecde27f65a45f95f22d289a69e593"}, + {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b0eb01ca85b2361b09480784a7931fc648ed8b7836f01fb9241141b968feb1db"}, + {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d27b5997bdd2eb9fb199982bb7eb6164db0426904020dc38c10203187ae2ff2f"}, + {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7df5608bc38bd37ef585ae9c38c9cd46d7c81498f086915b0f97255ea60c2818"}, + {file = 
"Pillow-10.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:41f67248d92a5e0a2076d3517d8d4b1e41a97e2df10eb8f93106c89107f38b57"}, + {file = "Pillow-10.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1fb29c07478e6c06a46b867e43b0bcdb241b44cc52be9bc25ce5944eed4648e7"}, + {file = "Pillow-10.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2cdc65a46e74514ce742c2013cd4a2d12e8553e3a2563c64879f7c7e4d28bce7"}, + {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50d08cd0a2ecd2a8657bd3d82c71efd5a58edb04d9308185d66c3a5a5bed9610"}, + {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:062a1610e3bc258bff2328ec43f34244fcec972ee0717200cb1425214fe5b839"}, + {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:61f1a9d247317fa08a308daaa8ee7b3f760ab1809ca2da14ecc88ae4257d6172"}, + {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a646e48de237d860c36e0db37ecaecaa3619e6f3e9d5319e527ccbc8151df061"}, + {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:47e5bf85b80abc03be7455c95b6d6e4896a62f6541c1f2ce77a7d2bb832af262"}, + {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a92386125e9ee90381c3369f57a2a50fa9e6aa8b1cf1d9c4b200d41a7dd8e992"}, + {file = "Pillow-10.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:0f7c276c05a9767e877a0b4c5050c8bee6a6d960d7f0c11ebda6b99746068c2a"}, + {file = "Pillow-10.1.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:a89b8312d51715b510a4fe9fc13686283f376cfd5abca8cd1c65e4c76e21081b"}, + {file = "Pillow-10.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:00f438bb841382b15d7deb9a05cc946ee0f2c352653c7aa659e75e592f6fa17d"}, + {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d929a19f5469b3f4df33a3df2983db070ebb2088a1e145e18facbc28cae5b27"}, + {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a92109192b360634a4489c0c756364c0c3a2992906752165ecb50544c251312"}, + {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:0248f86b3ea061e67817c47ecbe82c23f9dd5d5226200eb9090b3873d3ca32de"}, + {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9882a7451c680c12f232a422730f986a1fcd808da0fd428f08b671237237d651"}, + {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1c3ac5423c8c1da5928aa12c6e258921956757d976405e9467c5f39d1d577a4b"}, + {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:806abdd8249ba3953c33742506fe414880bad78ac25cc9a9b1c6ae97bedd573f"}, + {file = "Pillow-10.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:eaed6977fa73408b7b8a24e8b14e59e1668cfc0f4c40193ea7ced8e210adf996"}, + {file = "Pillow-10.1.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:fe1e26e1ffc38be097f0ba1d0d07fcade2bcfd1d023cda5b29935ae8052bd793"}, + {file = "Pillow-10.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7a7e3daa202beb61821c06d2517428e8e7c1aab08943e92ec9e5755c2fc9ba5e"}, + {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24fadc71218ad2b8ffe437b54876c9382b4a29e030a05a9879f615091f42ffc2"}, + {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa1d323703cfdac2036af05191b969b910d8f115cf53093125e4058f62012c9a"}, + {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = 
"sha256:912e3812a1dbbc834da2b32299b124b5ddcb664ed354916fd1ed6f193f0e2d01"}, + {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:7dbaa3c7de82ef37e7708521be41db5565004258ca76945ad74a8e998c30af8d"}, + {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9d7bc666bd8c5a4225e7ac71f2f9d12466ec555e89092728ea0f5c0c2422ea80"}, + {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:baada14941c83079bf84c037e2d8b7506ce201e92e3d2fa0d1303507a8538212"}, + {file = "Pillow-10.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:2ef6721c97894a7aa77723740a09547197533146fba8355e86d6d9a4a1056b14"}, + {file = "Pillow-10.1.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0a026c188be3b443916179f5d04548092e253beb0c3e2ee0a4e2cdad72f66099"}, + {file = "Pillow-10.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:04f6f6149f266a100374ca3cc368b67fb27c4af9f1cc8cb6306d849dcdf12616"}, + {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb40c011447712d2e19cc261c82655f75f32cb724788df315ed992a4d65696bb"}, + {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a8413794b4ad9719346cd9306118450b7b00d9a15846451549314a58ac42219"}, + {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c9aeea7b63edb7884b031a35305629a7593272b54f429a9869a4f63a1bf04c34"}, + {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b4005fee46ed9be0b8fb42be0c20e79411533d1fd58edabebc0dd24626882cfd"}, + {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4d0152565c6aa6ebbfb1e5d8624140a440f2b99bf7afaafbdbf6430426497f28"}, + {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d921bc90b1defa55c9917ca6b6b71430e4286fc9e44c55ead78ca1a9f9eba5f2"}, + {file = "Pillow-10.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:cfe96560c6ce2f4c07d6647af2d0f3c54cc33289894ebd88cfbb3bcd5391e256"}, + {file = "Pillow-10.1.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:937bdc5a7f5343d1c97dc98149a0be7eb9704e937fe3dc7140e229ae4fc572a7"}, + {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1c25762197144e211efb5f4e8ad656f36c8d214d390585d1d21281f46d556ba"}, + {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:afc8eef765d948543a4775f00b7b8c079b3321d6b675dde0d02afa2ee23000b4"}, + {file = "Pillow-10.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:883f216eac8712b83a63f41b76ddfb7b2afab1b74abbb413c5df6680f071a6b9"}, + {file = "Pillow-10.1.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b920e4d028f6442bea9a75b7491c063f0b9a3972520731ed26c83e254302eb1e"}, + {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c41d960babf951e01a49c9746f92c5a7e0d939d1652d7ba30f6b3090f27e412"}, + {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1fafabe50a6977ac70dfe829b2d5735fd54e190ab55259ec8aea4aaea412fa0b"}, + {file = "Pillow-10.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3b834f4b16173e5b92ab6566f0473bfb09f939ba14b23b8da1f54fa63e4b623f"}, + {file = "Pillow-10.1.0.tar.gz", hash = "sha256:e6bf8de6c36ed96c86ea3b6e1d5273c53f46ef518a062464cd7ef5dd2cf92e38"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] +tests = ["check-manifest", "coverage", 
"defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] + [[package]] name = "platformdirs" version = "3.10.0" @@ -1345,6 +2050,20 @@ files = [ [package.extras] plugins = ["importlib-metadata"] +[[package]] +name = "pyparsing" +version = "3.1.1" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +optional = false +python-versions = ">=3.6.8" +files = [ + {file = "pyparsing-3.1.1-py3-none-any.whl", hash = "sha256:32c7c0b711493c72ff18a981d24f28aaf9c1fb7ed5e9667c9e84e3db623bdbfb"}, + {file = "pyparsing-3.1.1.tar.gz", hash = "sha256:ede28a1a32462f5a9705e07aea48001a08f7cf81a021585011deba701581a0db"}, +] + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] + [[package]] name = "pytest" version = "7.4.0" @@ -1381,6 +2100,37 @@ files = [ [package.dependencies] six = ">=1.5" +[[package]] +name = "pytorch-lightning" +version = "2.1.0" +description = "PyTorch Lightning is the lightweight PyTorch wrapper for ML researchers. Scale your models. Write less boilerplate." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytorch-lightning-2.1.0.tar.gz", hash = "sha256:bf9e26b293e1ccda5f8e146fe58716eecfd77e9639ef3ec2210b0dcba51c4593"}, + {file = "pytorch_lightning-2.1.0-py3-none-any.whl", hash = "sha256:2802d683ef513235dfc211f6bc45d7086e8982feaac1625aafd2886c5e5b96f8"}, +] + +[package.dependencies] +fsspec = {version = ">2021.06.0", extras = ["http"]} +lightning-utilities = ">=0.8.0" +numpy = ">=1.17.2" +packaging = ">=20.0" +PyYAML = ">=5.4" +torch = ">=1.12.0" +torchmetrics = ">=0.7.0" +tqdm = ">=4.57.0" +typing-extensions = ">=4.0.0" + +[package.extras] +all = ["deepspeed (>=0.8.2,<=0.9.3)", "gym[classic-control] (>=0.17.0)", "hydra-core (>=1.0.5)", "ipython[all] (<8.15.0)", "jsonargparse[signatures] (>=4.18.0)", "lightning-utilities (>=0.8.0)", "matplotlib (>3.1)", "omegaconf (>=2.0.5)", "rich (>=12.3.0)", "tensorboardX (>=2.2)", "torchmetrics (>=0.10.0)", "torchvision (>=0.13.0)"] +deepspeed = ["deepspeed (>=0.8.2,<=0.9.3)"] +dev = ["cloudpickle (>=1.3)", "coverage (==7.3.1)", "deepspeed (>=0.8.2,<=0.9.3)", "fastapi", "gym[classic-control] (>=0.17.0)", "hydra-core (>=1.0.5)", "ipython[all] (<8.15.0)", "jsonargparse[signatures] (>=4.18.0)", "lightning-utilities (>=0.8.0)", "matplotlib (>3.1)", "omegaconf (>=2.0.5)", "onnx (>=0.14.0)", "onnxruntime (>=0.15.0)", "pandas (>1.0)", "psutil (<5.9.6)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "rich (>=12.3.0)", "scikit-learn (>0.22.1)", "tensorboard (>=2.9.1)", "tensorboardX (>=2.2)", "torchmetrics (>=0.10.0)", "torchvision (>=0.13.0)", "uvicorn"] +examples = ["gym[classic-control] (>=0.17.0)", "ipython[all] (<8.15.0)", "lightning-utilities (>=0.8.0)", "torchmetrics (>=0.10.0)", "torchvision (>=0.13.0)"] +extra = ["hydra-core (>=1.0.5)", "jsonargparse[signatures] (>=4.18.0)", "matplotlib (>3.1)", "omegaconf (>=2.0.5)", "rich (>=12.3.0)", "tensorboardX (>=2.2)"] +strategies = ["deepspeed (>=0.8.2,<=0.9.3)"] +test = ["cloudpickle (>=1.3)", "coverage (==7.3.1)", "fastapi", "onnx (>=0.14.0)", "onnxruntime (>=0.15.0)", "pandas (>1.0)", "psutil (<5.9.6)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "scikit-learn (>0.22.1)", "tensorboard (>=2.9.1)", "uvicorn"] + [[package]] name = "pytz" version = "2023.3" @@ -1802,6 +2552,27 @@ dev = ["flake8", "mypy", 
"pycodestyle", "typing_extensions"] doc = ["matplotlib (>2)", "numpydoc", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-panels (>=0.5.2)", "sphinx-tabs"] test = ["asv", "gmpy2", "mpmath", "pytest", "pytest-cov", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] +[[package]] +name = "seaborn" +version = "0.13.0" +description = "Statistical data visualization" +optional = false +python-versions = ">=3.8" +files = [ + {file = "seaborn-0.13.0-py3-none-any.whl", hash = "sha256:70d740828c48de0f402bb17234e475eda687e3c65f4383ea25d0cc4728f7772e"}, + {file = "seaborn-0.13.0.tar.gz", hash = "sha256:0e76abd2ec291c655b516703c6a022f0fd5afed26c8e714e8baef48150f73598"}, +] + +[package.dependencies] +matplotlib = ">=3.3,<3.6.1 || >3.6.1" +numpy = ">=1.20,<1.24.0 || >1.24.0" +pandas = ">=1.2" + +[package.extras] +dev = ["flake8", "flit", "mypy", "pandas-stubs", "pre-commit", "pytest", "pytest-cov", "pytest-xdist"] +docs = ["ipykernel", "nbconvert", "numpydoc", "pydata_sphinx_theme (==0.10.0rc2)", "pyyaml", "sphinx (<6.0.0)", "sphinx-copybutton", "sphinx-design", "sphinx-issues"] +stats = ["scipy (>=1.7)", "statsmodels (>=0.12)"] + [[package]] name = "six" version = "1.16.0" @@ -2058,6 +2829,35 @@ build = ["cython (>=0.29.26)"] develop = ["colorama", "cython (>=0.29.26)", "cython (>=0.29.28,<3.0.0)", "flake8", "isort", "joblib", "matplotlib (>=3)", "oldest-supported-numpy (>=2022.4.18)", "pytest (>=7.0.1,<7.1.0)", "pytest-randomly", "pytest-xdist", "pywinpty", "setuptools-scm[toml] (>=7.0.0,<7.1.0)"] docs = ["ipykernel", "jupyter-client", "matplotlib", "nbconvert", "nbformat", "numpydoc", "pandas-datareader", "sphinx"] +[[package]] +name = "sympy" +version = "1.12" +description = "Computer algebra system (CAS) in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "sympy-1.12-py3-none-any.whl", hash = "sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5"}, + {file = "sympy-1.12.tar.gz", hash = "sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8"}, +] + +[package.dependencies] +mpmath = ">=0.19" + +[[package]] +name = "tensorly" +version = "0.8.1" +description = "Tensor learning in Python." 
+optional = false +python-versions = "*" +files = [ + {file = "tensorly-0.8.1-py3-none-any.whl", hash = "sha256:08988dbc5e433c3f255d0e00855f99a613fe273d50a1627b7e82b03ff2a6da9a"}, + {file = "tensorly-0.8.1.tar.gz", hash = "sha256:cf78e4ffe612feca3510214002845c6831b267b1f2c1181154d41430310b237d"}, +] + +[package.dependencies] +numpy = "*" +scipy = "*" + [[package]] name = "threadpoolctl" version = "3.2.0" @@ -2109,6 +2909,74 @@ files = [ {file = "toolz-0.12.0.tar.gz", hash = "sha256:88c570861c440ee3f2f6037c4654613228ff40c93a6c25e0eba70d17282c6194"}, ] +[[package]] +name = "torch" +version = "2.1.0" +description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "torch-2.1.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:bf57f8184b2c317ef81fb33dc233ce4d850cd98ef3f4a38be59c7c1572d175db"}, + {file = "torch-2.1.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:a04a0296d47f28960f51c18c5489a8c3472f624ec3b5bcc8e2096314df8c3342"}, + {file = "torch-2.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:0bd691efea319b14ef239ede16d8a45c246916456fa3ed4f217d8af679433cc6"}, + {file = "torch-2.1.0-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:101c139152959cb20ab370fc192672c50093747906ee4ceace44d8dd703f29af"}, + {file = "torch-2.1.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:a6b7438a90a870e4cdeb15301519ae6c043c883fcd224d303c5b118082814767"}, + {file = "torch-2.1.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:2224622407ca52611cbc5b628106fde22ed8e679031f5a99ce286629fc696128"}, + {file = "torch-2.1.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:8132efb782cd181cc2dcca5e58effbe4217cdb2581206ac71466d535bf778867"}, + {file = "torch-2.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:5c3bfa91ce25ba10116c224c59d5b64cdcce07161321d978bd5a1f15e1ebce72"}, + {file = "torch-2.1.0-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:601b0a2a9d9233fb4b81f7d47dca9680d4f3a78ca3f781078b6ad1ced8a90523"}, + {file = "torch-2.1.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:3cd1dedff13884d890f18eea620184fb4cd8fd3c68ce3300498f427ae93aa962"}, + {file = "torch-2.1.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:fb7bf0cc1a3db484eb5d713942a93172f3bac026fcb377a0cd107093d2eba777"}, + {file = "torch-2.1.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:761822761fffaa1c18a62c5deb13abaa780862577d3eadc428f1daa632536905"}, + {file = "torch-2.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:458a6d6d8f7d2ccc348ac4d62ea661b39a3592ad15be385bebd0a31ced7e00f4"}, + {file = "torch-2.1.0-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:c8bf7eaf9514465e5d9101e05195183470a6215bb50295c61b52302a04edb690"}, + {file = "torch-2.1.0-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:05661c32ec14bc3a157193d0f19a7b19d8e61eb787b33353cad30202c295e83b"}, + {file = "torch-2.1.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:556d8dd3e0c290ed9d4d7de598a213fb9f7c59135b4fee144364a8a887016a55"}, + {file = "torch-2.1.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:de7d63c6ecece118684415a3dbd4805af4a4c1ee1490cccf7405d8c240a481b4"}, + {file = "torch-2.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:2419cf49aaf3b2336c7aa7a54a1b949fa295b1ae36f77e2aecb3a74e3a947255"}, + {file = "torch-2.1.0-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:6ad491e70dbe4288d17fdbfc7fbfa766d66cbe219bc4871c7a8096f4a37c98df"}, + {file = "torch-2.1.0-cp39-none-macosx_11_0_arm64.whl", hash = 
"sha256:421739685eba5e0beba42cb649740b15d44b0d565c04e6ed667b41148734a75b"}, +] + +[package.dependencies] +filelock = "*" +fsspec = "*" +jinja2 = "*" +networkx = "*" +sympy = "*" +typing-extensions = "*" + +[package.extras] +opt-einsum = ["opt-einsum (>=3.3)"] + +[[package]] +name = "torchmetrics" +version = "1.2.0" +description = "PyTorch native Metrics" +optional = false +python-versions = ">=3.8" +files = [ + {file = "torchmetrics-1.2.0-py3-none-any.whl", hash = "sha256:da2cb18822b285786d082c40efb9e1d861aac425f58230234fe6ce233cf002f8"}, + {file = "torchmetrics-1.2.0.tar.gz", hash = "sha256:7eb28340bde45e13187a9ad54a4a7010a50417815d8181a5df6131f116ffe1b7"}, +] + +[package.dependencies] +lightning-utilities = ">=0.8.0" +numpy = ">1.20.0" +torch = ">=1.8.1" + +[package.extras] +all = ["SciencePlots (>=2.0.0)", "lpips (<=0.1.4)", "matplotlib (>=3.2.0)", "mypy (==1.5.1)", "nltk (>=3.6)", "piq (<=0.8.0)", "pycocotools (>2.0.0)", "pystoi (>=0.3.0)", "regex (>=2021.9.24)", "scipy (>1.0.0)", "torch-fidelity (<=0.4.0)", "torchaudio (>=0.10.0)", "torchvision (>=0.8)", "tqdm (>=4.41.0)", "transformers (>4.4.0)", "transformers (>=4.10.0)", "types-PyYAML", "types-emoji", "types-protobuf", "types-requests", "types-setuptools", "types-six", "types-tabulate"] +audio = ["pystoi (>=0.3.0)", "torchaudio (>=0.10.0)"] +detection = ["pycocotools (>2.0.0)", "torchvision (>=0.8)"] +dev = ["SciencePlots (>=2.0.0)", "bert-score (==0.3.13)", "cloudpickle (>1.3)", "coverage (==7.3.1)", "dython (<=0.7.4)", "fairlearn", "fast-bss-eval (>=0.1.0)", "faster-coco-eval (>=1.3.3)", "fire (<=0.5.0)", "huggingface-hub (<0.18)", "jiwer (>=2.3.0)", "kornia (>=0.6.7)", "lpips (<=0.1.4)", "matplotlib (>=3.2.0)", "mir-eval (>=0.6)", "mypy (==1.5.1)", "netcal (>1.0.0)", "nltk (>=3.6)", "numpy (<1.25.0)", "pandas (>1.0.0)", "pandas (>=1.4.0)", "phmdoctest (==1.4.0)", "piq (<=0.8.0)", "psutil (<=5.9.5)", "pycocotools (>2.0.0)", "pystoi (>=0.3.0)", "pytest (==7.4.2)", "pytest-cov (==4.1.0)", "pytest-doctestplus (==1.0.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "pytorch-msssim (==1.0.0)", "regex (>=2021.9.24)", "requests (<=2.31.0)", "rouge-score (>0.1.0)", "sacrebleu (>=2.0.0)", "scikit-image (>=0.19.0)", "scikit-learn (>=1.1.1)", "scipy (>1.0.0)", "sewar (>=0.4.4)", "statsmodels (>0.13.5)", "torch-complex (<=0.4.3)", "torch-fidelity (<=0.4.0)", "torchaudio (>=0.10.0)", "torchvision (>=0.8)", "tqdm (>=4.41.0)", "transformers (>4.4.0)", "transformers (>=4.10.0)", "types-PyYAML", "types-emoji", "types-protobuf", "types-requests", "types-setuptools", "types-six", "types-tabulate"] +image = ["lpips (<=0.1.4)", "scipy (>1.0.0)", "torch-fidelity (<=0.4.0)", "torchvision (>=0.8)"] +multimodal = ["piq (<=0.8.0)", "transformers (>=4.10.0)"] +test = ["bert-score (==0.3.13)", "cloudpickle (>1.3)", "coverage (==7.3.1)", "dython (<=0.7.4)", "fairlearn", "fast-bss-eval (>=0.1.0)", "faster-coco-eval (>=1.3.3)", "fire (<=0.5.0)", "huggingface-hub (<0.18)", "jiwer (>=2.3.0)", "kornia (>=0.6.7)", "mir-eval (>=0.6)", "netcal (>1.0.0)", "numpy (<1.25.0)", "pandas (>1.0.0)", "pandas (>=1.4.0)", "phmdoctest (==1.4.0)", "psutil (<=5.9.5)", "pytest (==7.4.2)", "pytest-cov (==4.1.0)", "pytest-doctestplus (==1.0.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "pytorch-msssim (==1.0.0)", "requests (<=2.31.0)", "rouge-score (>0.1.0)", "sacrebleu (>=2.0.0)", "scikit-image (>=0.19.0)", "scikit-learn (>=1.1.1)", "scipy (>1.0.0)", "sewar (>=0.4.4)", "statsmodels (>0.13.5)", "torch-complex (<=0.4.3)"] +text = ["nltk (>=3.6)", 
"regex (>=2021.9.24)", "tqdm (>=4.41.0)", "transformers (>4.4.0)"] +typing = ["mypy (==1.5.1)", "types-PyYAML", "types-emoji", "types-protobuf", "types-requests", "types-setuptools", "types-six", "types-tabulate"] +visual = ["SciencePlots (>=2.0.0)", "matplotlib (>=3.2.0)"] + [[package]] name = "tornado" version = "6.3.2" @@ -2168,7 +3036,7 @@ test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"] name = "typing-extensions" version = "4.7.1" description = "Backported and Experimental Type Hints for Python 3.7+" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, @@ -2255,6 +3123,93 @@ io = ["cftime", "fsspec", "h5netcdf", "netCDF4", "pooch", "pydap", "scipy", "zar parallel = ["dask[complete]"] viz = ["matplotlib", "nc-time-axis", "seaborn"] +[[package]] +name = "yarl" +version = "1.9.2" +description = "Yet another URL library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8c2ad583743d16ddbdf6bb14b5cd76bf43b0d0006e918809d5d4ddf7bde8dd82"}, + {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:82aa6264b36c50acfb2424ad5ca537a2060ab6de158a5bd2a72a032cc75b9eb8"}, + {file = "yarl-1.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c0c77533b5ed4bcc38e943178ccae29b9bcf48ffd1063f5821192f23a1bd27b9"}, + {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee4afac41415d52d53a9833ebae7e32b344be72835bbb589018c9e938045a560"}, + {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9bf345c3a4f5ba7f766430f97f9cc1320786f19584acc7086491f45524a551ac"}, + {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a96c19c52ff442a808c105901d0bdfd2e28575b3d5f82e2f5fd67e20dc5f4ea"}, + {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:891c0e3ec5ec881541f6c5113d8df0315ce5440e244a716b95f2525b7b9f3608"}, + {file = "yarl-1.9.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c3a53ba34a636a256d767c086ceb111358876e1fb6b50dfc4d3f4951d40133d5"}, + {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:566185e8ebc0898b11f8026447eacd02e46226716229cea8db37496c8cdd26e0"}, + {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2b0738fb871812722a0ac2154be1f049c6223b9f6f22eec352996b69775b36d4"}, + {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:32f1d071b3f362c80f1a7d322bfd7b2d11e33d2adf395cc1dd4df36c9c243095"}, + {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:e9fdc7ac0d42bc3ea78818557fab03af6181e076a2944f43c38684b4b6bed8e3"}, + {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56ff08ab5df8429901ebdc5d15941b59f6253393cb5da07b4170beefcf1b2528"}, + {file = "yarl-1.9.2-cp310-cp310-win32.whl", hash = "sha256:8ea48e0a2f931064469bdabca50c2f578b565fc446f302a79ba6cc0ee7f384d3"}, + {file = "yarl-1.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:50f33040f3836e912ed16d212f6cc1efb3231a8a60526a407aeb66c1c1956dde"}, + {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:646d663eb2232d7909e6601f1a9107e66f9791f290a1b3dc7057818fe44fc2b6"}, + {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:aff634b15beff8902d1f918012fc2a42e0dbae6f469fce134c8a0dc51ca423bb"}, + {file = "yarl-1.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a83503934c6273806aed765035716216cc9ab4e0364f7f066227e1aaea90b8d0"}, + {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b25322201585c69abc7b0e89e72790469f7dad90d26754717f3310bfe30331c2"}, + {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22a94666751778629f1ec4280b08eb11815783c63f52092a5953faf73be24191"}, + {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ec53a0ea2a80c5cd1ab397925f94bff59222aa3cf9c6da938ce05c9ec20428d"}, + {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:159d81f22d7a43e6eabc36d7194cb53f2f15f498dbbfa8edc8a3239350f59fe7"}, + {file = "yarl-1.9.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:832b7e711027c114d79dffb92576acd1bd2decc467dec60e1cac96912602d0e6"}, + {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:95d2ecefbcf4e744ea952d073c6922e72ee650ffc79028eb1e320e732898d7e8"}, + {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d4e2c6d555e77b37288eaf45b8f60f0737c9efa3452c6c44626a5455aeb250b9"}, + {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:783185c75c12a017cc345015ea359cc801c3b29a2966c2655cd12b233bf5a2be"}, + {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:b8cc1863402472f16c600e3e93d542b7e7542a540f95c30afd472e8e549fc3f7"}, + {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:822b30a0f22e588b32d3120f6d41e4ed021806418b4c9f0bc3048b8c8cb3f92a"}, + {file = "yarl-1.9.2-cp311-cp311-win32.whl", hash = "sha256:a60347f234c2212a9f0361955007fcf4033a75bf600a33c88a0a8e91af77c0e8"}, + {file = "yarl-1.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:be6b3fdec5c62f2a67cb3f8c6dbf56bbf3f61c0f046f84645cd1ca73532ea051"}, + {file = "yarl-1.9.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:38a3928ae37558bc1b559f67410df446d1fbfa87318b124bf5032c31e3447b74"}, + {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac9bb4c5ce3975aeac288cfcb5061ce60e0d14d92209e780c93954076c7c4367"}, + {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3da8a678ca8b96c8606bbb8bfacd99a12ad5dd288bc6f7979baddd62f71c63ef"}, + {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13414591ff516e04fcdee8dc051c13fd3db13b673c7a4cb1350e6b2ad9639ad3"}, + {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf74d08542c3a9ea97bb8f343d4fcbd4d8f91bba5ec9d5d7f792dbe727f88938"}, + {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e7221580dc1db478464cfeef9b03b95c5852cc22894e418562997df0d074ccc"}, + {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:494053246b119b041960ddcd20fd76224149cfea8ed8777b687358727911dd33"}, + {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:52a25809fcbecfc63ac9ba0c0fb586f90837f5425edfd1ec9f3372b119585e45"}, + {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:e65610c5792870d45d7b68c677681376fcf9cc1c289f23e8e8b39c1485384185"}, + {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = 
"sha256:1b1bba902cba32cdec51fca038fd53f8beee88b77efc373968d1ed021024cc04"}, + {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:662e6016409828ee910f5d9602a2729a8a57d74b163c89a837de3fea050c7582"}, + {file = "yarl-1.9.2-cp37-cp37m-win32.whl", hash = "sha256:f364d3480bffd3aa566e886587eaca7c8c04d74f6e8933f3f2c996b7f09bee1b"}, + {file = "yarl-1.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6a5883464143ab3ae9ba68daae8e7c5c95b969462bbe42e2464d60e7e2698368"}, + {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5610f80cf43b6202e2c33ba3ec2ee0a2884f8f423c8f4f62906731d876ef4fac"}, + {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b9a4e67ad7b646cd6f0938c7ebfd60e481b7410f574c560e455e938d2da8e0f4"}, + {file = "yarl-1.9.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:83fcc480d7549ccebe9415d96d9263e2d4226798c37ebd18c930fce43dfb9574"}, + {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fcd436ea16fee7d4207c045b1e340020e58a2597301cfbcfdbe5abd2356c2fb"}, + {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84e0b1599334b1e1478db01b756e55937d4614f8654311eb26012091be109d59"}, + {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3458a24e4ea3fd8930e934c129b676c27452e4ebda80fbe47b56d8c6c7a63a9e"}, + {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:838162460b3a08987546e881a2bfa573960bb559dfa739e7800ceeec92e64417"}, + {file = "yarl-1.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f4e2d08f07a3d7d3e12549052eb5ad3eab1c349c53ac51c209a0e5991bbada78"}, + {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:de119f56f3c5f0e2fb4dee508531a32b069a5f2c6e827b272d1e0ff5ac040333"}, + {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:149ddea5abf329752ea5051b61bd6c1d979e13fbf122d3a1f9f0c8be6cb6f63c"}, + {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:674ca19cbee4a82c9f54e0d1eee28116e63bc6fd1e96c43031d11cbab8b2afd5"}, + {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:9b3152f2f5677b997ae6c804b73da05a39daa6a9e85a512e0e6823d81cdad7cc"}, + {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5415d5a4b080dc9612b1b63cba008db84e908b95848369aa1da3686ae27b6d2b"}, + {file = "yarl-1.9.2-cp38-cp38-win32.whl", hash = "sha256:f7a3d8146575e08c29ed1cd287068e6d02f1c7bdff8970db96683b9591b86ee7"}, + {file = "yarl-1.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:63c48f6cef34e6319a74c727376e95626f84ea091f92c0250a98e53e62c77c72"}, + {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:75df5ef94c3fdc393c6b19d80e6ef1ecc9ae2f4263c09cacb178d871c02a5ba9"}, + {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c027a6e96ef77d401d8d5a5c8d6bc478e8042f1e448272e8d9752cb0aff8b5c8"}, + {file = "yarl-1.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3b078dbe227f79be488ffcfc7a9edb3409d018e0952cf13f15fd6512847f3f7"}, + {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59723a029760079b7d991a401386390c4be5bfec1e7dd83e25a6a0881859e716"}, + {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b03917871bf859a81ccb180c9a2e6c1e04d2f6a51d953e6a5cdd70c93d4e5a2a"}, + {file = 
"yarl-1.9.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1012fa63eb6c032f3ce5d2171c267992ae0c00b9e164efe4d73db818465fac3"}, + {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a74dcbfe780e62f4b5a062714576f16c2f3493a0394e555ab141bf0d746bb955"}, + {file = "yarl-1.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c56986609b057b4839968ba901944af91b8e92f1725d1a2d77cbac6972b9ed1"}, + {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2c315df3293cd521033533d242d15eab26583360b58f7ee5d9565f15fee1bef4"}, + {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:b7232f8dfbd225d57340e441d8caf8652a6acd06b389ea2d3222b8bc89cbfca6"}, + {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:53338749febd28935d55b41bf0bcc79d634881195a39f6b2f767870b72514caf"}, + {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:066c163aec9d3d073dc9ffe5dd3ad05069bcb03fcaab8d221290ba99f9f69ee3"}, + {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8288d7cd28f8119b07dd49b7230d6b4562f9b61ee9a4ab02221060d21136be80"}, + {file = "yarl-1.9.2-cp39-cp39-win32.whl", hash = "sha256:b124e2a6d223b65ba8768d5706d103280914d61f5cae3afbc50fc3dfcc016623"}, + {file = "yarl-1.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:61016e7d582bc46a5378ffdd02cd0314fb8ba52f40f9cf4d9a5e7dbef88dee18"}, + {file = "yarl-1.9.2.tar.gz", hash = "sha256:04ab9d4b9f587c06d801c2abfe9317b77cdf996c65a90d5e84ecc45010823571"}, +] + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" + [[package]] name = "zipp" version = "3.16.2" @@ -2273,4 +3228,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "71e4691a62ab430759f19da70b074878428cdaeb31de7dc1dc438d92dc5f4ac2" +content-hash = "99fac1aa63c4cfcfac25aa4b1adf09283d35bc1df94faef8fea7541151618184" diff --git a/pyproject.toml b/pyproject.toml index cfeca38..d66bae1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,6 +30,7 @@ coverage = "^6.3.1" [tool.poetry.group.dev.dependencies] black = "~23.7.0" +cca-zoo = "^2.3.11" [tool.poetry.group.docs.dependencies] rpy2 = {version = ">=3.5", optional = true} From 6ed06f0654a2a8b1f44f57ea54159f6b142a04ae Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Mon, 23 Oct 2023 22:16:31 +0200 Subject: [PATCH 42/43] test: remove cca-zoo test Conflict between poetry and torch version that cannot be resolved. Therefore remove cca-zoo dependency and associated tests. 
--- .gitignore | 2 + poetry.lock | 965 +-------------------------------------- pyproject.toml | 8 +- tests/models/test_cca.py | 36 -- 4 files changed, 9 insertions(+), 1002 deletions(-) diff --git a/.gitignore b/.gitignore index 9fb7292..91b8f5c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,7 @@ # Personal .vscode/ +# Test related to CCA +tests/models/test_cca_solution.py # Byte-compiled / optimized / DLL files __pycache__/ diff --git a/poetry.lock b/poetry.lock index 1f9e470..5de8f24 100644 --- a/poetry.lock +++ b/poetry.lock @@ -14,128 +14,6 @@ files = [ [package.dependencies] pygments = ">=1.5" -[[package]] -name = "aiohttp" -version = "3.8.6" -description = "Async http client/server framework (asyncio)" -optional = false -python-versions = ">=3.6" -files = [ - {file = "aiohttp-3.8.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:41d55fc043954cddbbd82503d9cc3f4814a40bcef30b3569bc7b5e34130718c1"}, - {file = "aiohttp-3.8.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1d84166673694841d8953f0a8d0c90e1087739d24632fe86b1a08819168b4566"}, - {file = "aiohttp-3.8.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:253bf92b744b3170eb4c4ca2fa58f9c4b87aeb1df42f71d4e78815e6e8b73c9e"}, - {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3fd194939b1f764d6bb05490987bfe104287bbf51b8d862261ccf66f48fb4096"}, - {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c5f938d199a6fdbdc10bbb9447496561c3a9a565b43be564648d81e1102ac22"}, - {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2817b2f66ca82ee699acd90e05c95e79bbf1dc986abb62b61ec8aaf851e81c93"}, - {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fa375b3d34e71ccccf172cab401cd94a72de7a8cc01847a7b3386204093bb47"}, - {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9de50a199b7710fa2904be5a4a9b51af587ab24c8e540a7243ab737b45844543"}, - {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e1d8cb0b56b3587c5c01de3bf2f600f186da7e7b5f7353d1bf26a8ddca57f965"}, - {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8e31e9db1bee8b4f407b77fd2507337a0a80665ad7b6c749d08df595d88f1cf5"}, - {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:7bc88fc494b1f0311d67f29fee6fd636606f4697e8cc793a2d912ac5b19aa38d"}, - {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ec00c3305788e04bf6d29d42e504560e159ccaf0be30c09203b468a6c1ccd3b2"}, - {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ad1407db8f2f49329729564f71685557157bfa42b48f4b93e53721a16eb813ed"}, - {file = "aiohttp-3.8.6-cp310-cp310-win32.whl", hash = "sha256:ccc360e87341ad47c777f5723f68adbb52b37ab450c8bc3ca9ca1f3e849e5fe2"}, - {file = "aiohttp-3.8.6-cp310-cp310-win_amd64.whl", hash = "sha256:93c15c8e48e5e7b89d5cb4613479d144fda8344e2d886cf694fd36db4cc86865"}, - {file = "aiohttp-3.8.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6e2f9cc8e5328f829f6e1fb74a0a3a939b14e67e80832975e01929e320386b34"}, - {file = "aiohttp-3.8.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e6a00ffcc173e765e200ceefb06399ba09c06db97f401f920513a10c803604ca"}, - {file = "aiohttp-3.8.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:41bdc2ba359032e36c0e9de5a3bd00d6fb7ea558a6ce6b70acedf0da86458321"}, - {file = 
"aiohttp-3.8.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14cd52ccf40006c7a6cd34a0f8663734e5363fd981807173faf3a017e202fec9"}, - {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2d5b785c792802e7b275c420d84f3397668e9d49ab1cb52bd916b3b3ffcf09ad"}, - {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1bed815f3dc3d915c5c1e556c397c8667826fbc1b935d95b0ad680787896a358"}, - {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96603a562b546632441926cd1293cfcb5b69f0b4159e6077f7c7dbdfb686af4d"}, - {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d76e8b13161a202d14c9584590c4df4d068c9567c99506497bdd67eaedf36403"}, - {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e3f1e3f1a1751bb62b4a1b7f4e435afcdade6c17a4fd9b9d43607cebd242924a"}, - {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:76b36b3124f0223903609944a3c8bf28a599b2cc0ce0be60b45211c8e9be97f8"}, - {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:a2ece4af1f3c967a4390c284797ab595a9f1bc1130ef8b01828915a05a6ae684"}, - {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:16d330b3b9db87c3883e565340d292638a878236418b23cc8b9b11a054aaa887"}, - {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:42c89579f82e49db436b69c938ab3e1559e5a4409eb8639eb4143989bc390f2f"}, - {file = "aiohttp-3.8.6-cp311-cp311-win32.whl", hash = "sha256:efd2fcf7e7b9d7ab16e6b7d54205beded0a9c8566cb30f09c1abe42b4e22bdcb"}, - {file = "aiohttp-3.8.6-cp311-cp311-win_amd64.whl", hash = "sha256:3b2ab182fc28e7a81f6c70bfbd829045d9480063f5ab06f6e601a3eddbbd49a0"}, - {file = "aiohttp-3.8.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:fdee8405931b0615220e5ddf8cd7edd8592c606a8e4ca2a00704883c396e4479"}, - {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d25036d161c4fe2225d1abff2bd52c34ed0b1099f02c208cd34d8c05729882f0"}, - {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5d791245a894be071d5ab04bbb4850534261a7d4fd363b094a7b9963e8cdbd31"}, - {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0cccd1de239afa866e4ce5c789b3032442f19c261c7d8a01183fd956b1935349"}, - {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f13f60d78224f0dace220d8ab4ef1dbc37115eeeab8c06804fec11bec2bbd07"}, - {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8a9b5a0606faca4f6cc0d338359d6fa137104c337f489cd135bb7fbdbccb1e39"}, - {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:13da35c9ceb847732bf5c6c5781dcf4780e14392e5d3b3c689f6d22f8e15ae31"}, - {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:4d4cbe4ffa9d05f46a28252efc5941e0462792930caa370a6efaf491f412bc66"}, - {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:229852e147f44da0241954fc6cb910ba074e597f06789c867cb7fb0621e0ba7a"}, - {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:713103a8bdde61d13490adf47171a1039fd880113981e55401a0f7b42c37d071"}, - {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = 
"sha256:45ad816b2c8e3b60b510f30dbd37fe74fd4a772248a52bb021f6fd65dff809b6"}, - {file = "aiohttp-3.8.6-cp36-cp36m-win32.whl", hash = "sha256:2b8d4e166e600dcfbff51919c7a3789ff6ca8b3ecce16e1d9c96d95dd569eb4c"}, - {file = "aiohttp-3.8.6-cp36-cp36m-win_amd64.whl", hash = "sha256:0912ed87fee967940aacc5306d3aa8ba3a459fcd12add0b407081fbefc931e53"}, - {file = "aiohttp-3.8.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e2a988a0c673c2e12084f5e6ba3392d76c75ddb8ebc6c7e9ead68248101cd446"}, - {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebf3fd9f141700b510d4b190094db0ce37ac6361a6806c153c161dc6c041ccda"}, - {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3161ce82ab85acd267c8f4b14aa226047a6bee1e4e6adb74b798bd42c6ae1f80"}, - {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d95fc1bf33a9a81469aa760617b5971331cdd74370d1214f0b3109272c0e1e3c"}, - {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c43ecfef7deaf0617cee936836518e7424ee12cb709883f2c9a1adda63cc460"}, - {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca80e1b90a05a4f476547f904992ae81eda5c2c85c66ee4195bb8f9c5fb47f28"}, - {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:90c72ebb7cb3a08a7f40061079817133f502a160561d0675b0a6adf231382c92"}, - {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:bb54c54510e47a8c7c8e63454a6acc817519337b2b78606c4e840871a3e15349"}, - {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:de6a1c9f6803b90e20869e6b99c2c18cef5cc691363954c93cb9adeb26d9f3ae"}, - {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:a3628b6c7b880b181a3ae0a0683698513874df63783fd89de99b7b7539e3e8a8"}, - {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:fc37e9aef10a696a5a4474802930079ccfc14d9f9c10b4662169671ff034b7df"}, - {file = "aiohttp-3.8.6-cp37-cp37m-win32.whl", hash = "sha256:f8ef51e459eb2ad8e7a66c1d6440c808485840ad55ecc3cafefadea47d1b1ba2"}, - {file = "aiohttp-3.8.6-cp37-cp37m-win_amd64.whl", hash = "sha256:b2fe42e523be344124c6c8ef32a011444e869dc5f883c591ed87f84339de5976"}, - {file = "aiohttp-3.8.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:9e2ee0ac5a1f5c7dd3197de309adfb99ac4617ff02b0603fd1e65b07dc772e4b"}, - {file = "aiohttp-3.8.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:01770d8c04bd8db568abb636c1fdd4f7140b284b8b3e0b4584f070180c1e5c62"}, - {file = "aiohttp-3.8.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3c68330a59506254b556b99a91857428cab98b2f84061260a67865f7f52899f5"}, - {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89341b2c19fb5eac30c341133ae2cc3544d40d9b1892749cdd25892bbc6ac951"}, - {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71783b0b6455ac8f34b5ec99d83e686892c50498d5d00b8e56d47f41b38fbe04"}, - {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f628dbf3c91e12f4d6c8b3f092069567d8eb17814aebba3d7d60c149391aee3a"}, - {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b04691bc6601ef47c88f0255043df6f570ada1a9ebef99c34bd0b72866c217ae"}, - {file = 
"aiohttp-3.8.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ee912f7e78287516df155f69da575a0ba33b02dd7c1d6614dbc9463f43066e3"}, - {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9c19b26acdd08dd239e0d3669a3dddafd600902e37881f13fbd8a53943079dbc"}, - {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:99c5ac4ad492b4a19fc132306cd57075c28446ec2ed970973bbf036bcda1bcc6"}, - {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:f0f03211fd14a6a0aed2997d4b1c013d49fb7b50eeb9ffdf5e51f23cfe2c77fa"}, - {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:8d399dade330c53b4106160f75f55407e9ae7505263ea86f2ccca6bfcbdb4921"}, - {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ec4fd86658c6a8964d75426517dc01cbf840bbf32d055ce64a9e63a40fd7b771"}, - {file = "aiohttp-3.8.6-cp38-cp38-win32.whl", hash = "sha256:33164093be11fcef3ce2571a0dccd9041c9a93fa3bde86569d7b03120d276c6f"}, - {file = "aiohttp-3.8.6-cp38-cp38-win_amd64.whl", hash = "sha256:bdf70bfe5a1414ba9afb9d49f0c912dc524cf60141102f3a11143ba3d291870f"}, - {file = "aiohttp-3.8.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d52d5dc7c6682b720280f9d9db41d36ebe4791622c842e258c9206232251ab2b"}, - {file = "aiohttp-3.8.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4ac39027011414dbd3d87f7edb31680e1f430834c8cef029f11c66dad0670aa5"}, - {file = "aiohttp-3.8.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3f5c7ce535a1d2429a634310e308fb7d718905487257060e5d4598e29dc17f0b"}, - {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b30e963f9e0d52c28f284d554a9469af073030030cef8693106d918b2ca92f54"}, - {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:918810ef188f84152af6b938254911055a72e0f935b5fbc4c1a4ed0b0584aed1"}, - {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:002f23e6ea8d3dd8d149e569fd580c999232b5fbc601c48d55398fbc2e582e8c"}, - {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4fcf3eabd3fd1a5e6092d1242295fa37d0354b2eb2077e6eb670accad78e40e1"}, - {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:255ba9d6d5ff1a382bb9a578cd563605aa69bec845680e21c44afc2670607a95"}, - {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d67f8baed00870aa390ea2590798766256f31dc5ed3ecc737debb6e97e2ede78"}, - {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:86f20cee0f0a317c76573b627b954c412ea766d6ada1a9fcf1b805763ae7feeb"}, - {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:39a312d0e991690ccc1a61f1e9e42daa519dcc34ad03eb6f826d94c1190190dd"}, - {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:e827d48cf802de06d9c935088c2924e3c7e7533377d66b6f31ed175c1620e05e"}, - {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bd111d7fc5591ddf377a408ed9067045259ff2770f37e2d94e6478d0f3fc0c17"}, - {file = "aiohttp-3.8.6-cp39-cp39-win32.whl", hash = "sha256:caf486ac1e689dda3502567eb89ffe02876546599bbf915ec94b1fa424eeffd4"}, - {file = "aiohttp-3.8.6-cp39-cp39-win_amd64.whl", hash = "sha256:3f0e27e5b733803333bb2371249f41cf42bae8884863e8e8965ec69bebe53132"}, - {file = "aiohttp-3.8.6.tar.gz", hash = 
"sha256:b0cf2a4501bff9330a8a5248b4ce951851e415bdcce9dc158e76cfd55e15085c"}, -] - -[package.dependencies] -aiosignal = ">=1.1.2" -async-timeout = ">=4.0.0a3,<5.0" -attrs = ">=17.3.0" -charset-normalizer = ">=2.0,<4.0" -frozenlist = ">=1.1.1" -multidict = ">=4.5,<7.0" -yarl = ">=1.0,<2.0" - -[package.extras] -speedups = ["Brotli", "aiodns", "cchardet"] - -[[package]] -name = "aiosignal" -version = "1.3.1" -description = "aiosignal: a list of registered asynchronous callbacks" -optional = false -python-versions = ">=3.7" -files = [ - {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, - {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, -] - -[package.dependencies] -frozenlist = ">=1.1.0" - [[package]] name = "alabaster" version = "0.7.13" @@ -147,22 +25,11 @@ files = [ {file = "alabaster-0.7.13.tar.gz", hash = "sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"}, ] -[[package]] -name = "async-timeout" -version = "4.0.3" -description = "Timeout context manager for asyncio programs" -optional = false -python-versions = ">=3.7" -files = [ - {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, - {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, -] - [[package]] name = "attrs" version = "23.1.0" description = "Classes Without Boilerplate" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"}, @@ -268,29 +135,6 @@ webencodings = "*" [package.extras] css = ["tinycss2 (>=1.1.0,<1.2)"] -[[package]] -name = "cca-zoo" -version = "2.3.11" -description = "Canonical Correlation Analysis Zoo: A collection of Regularized, Deep Learning based, Kernel, and Probabilistic methods in a scikit-learn style framework" -optional = false -python-versions = ">=3.8,<4.0.0" -files = [ - {file = "cca_zoo-2.3.11-py3-none-any.whl", hash = "sha256:378b8e5b3f3a7a39b2ac738b84beb0abf2129bfc25c94f713abc0a3cf0214814"}, - {file = "cca_zoo-2.3.11.tar.gz", hash = "sha256:21411d38de49b102e27cce52692b4785ef4ddd4be96d18b365dee8d6af23dce2"}, -] - -[package.dependencies] -joblib = "*" -lightning = "*" -mvlearn = "*" -numpy = "*" -pandas = "*" -scikit-learn = ">=1.2.2,<2.0.0" -scipy = "*" -tensorly = "*" -torch = {version = ">=2.0.1,<3.0.0", markers = "sys_platform == \"darwin\" or sys_platform == \"linux\" or sys_platform == \"win32\""} -tqdm = "*" - [[package]] name = "certifi" version = "2023.7.22" @@ -593,21 +437,6 @@ files = [ [package.extras] toml = ["tomli"] -[[package]] -name = "cycler" -version = "0.12.1" -description = "Composable style cycles" -optional = false -python-versions = ">=3.8" -files = [ - {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"}, - {file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"}, -] - -[package.extras] -docs = ["ipython", "matplotlib", "numpydoc", "sphinx"] -tests = ["pytest", "pytest-cov", "pytest-xdist"] - [[package]] name = "dask" version = "2023.7.1" @@ -687,22 +516,6 @@ files = [ [package.extras] devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benchmark", "pytest-cache", "validictory"] 
-[[package]] -name = "filelock" -version = "3.12.4" -description = "A platform independent file lock." -optional = false -python-versions = ">=3.8" -files = [ - {file = "filelock-3.12.4-py3-none-any.whl", hash = "sha256:08c21d87ded6e2b9da6728c3dff51baf1dcecf973b768ef35bcbc3447edb9ad4"}, - {file = "filelock-3.12.4.tar.gz", hash = "sha256:2e6f249f1f3654291606e046b09f1fd5eac39b360664c27f5aad072012f8bcbd"}, -] - -[package.extras] -docs = ["furo (>=2023.7.26)", "sphinx (>=7.1.2)", "sphinx-autodoc-typehints (>=1.24)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.3)", "diff-cover (>=7.7)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)", "pytest-timeout (>=2.1)"] -typing = ["typing-extensions (>=4.7.1)"] - [[package]] name = "flake8" version = "4.0.1" @@ -719,76 +532,6 @@ mccabe = ">=0.6.0,<0.7.0" pycodestyle = ">=2.8.0,<2.9.0" pyflakes = ">=2.4.0,<2.5.0" -[[package]] -name = "frozenlist" -version = "1.4.0" -description = "A list-like structure which implements collections.abc.MutableSequence" -optional = false -python-versions = ">=3.8" -files = [ - {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:764226ceef3125e53ea2cb275000e309c0aa5464d43bd72abd661e27fffc26ab"}, - {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d6484756b12f40003c6128bfcc3fa9f0d49a687e171186c2d85ec82e3758c559"}, - {file = "frozenlist-1.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9ac08e601308e41eb533f232dbf6b7e4cea762f9f84f6357136eed926c15d12c"}, - {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d081f13b095d74b67d550de04df1c756831f3b83dc9881c38985834387487f1b"}, - {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71932b597f9895f011f47f17d6428252fc728ba2ae6024e13c3398a087c2cdea"}, - {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:981b9ab5a0a3178ff413bca62526bb784249421c24ad7381e39d67981be2c326"}, - {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e41f3de4df3e80de75845d3e743b3f1c4c8613c3997a912dbf0229fc61a8b963"}, - {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6918d49b1f90821e93069682c06ffde41829c346c66b721e65a5c62b4bab0300"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e5c8764c7829343d919cc2dfc587a8db01c4f70a4ebbc49abde5d4b158b007b"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8d0edd6b1c7fb94922bf569c9b092ee187a83f03fb1a63076e7774b60f9481a8"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e29cda763f752553fa14c68fb2195150bfab22b352572cb36c43c47bedba70eb"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:0c7c1b47859ee2cac3846fde1c1dc0f15da6cec5a0e5c72d101e0f83dcb67ff9"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:901289d524fdd571be1c7be054f48b1f88ce8dddcbdf1ec698b27d4b8b9e5d62"}, - {file = "frozenlist-1.4.0-cp310-cp310-win32.whl", hash = "sha256:1a0848b52815006ea6596c395f87449f693dc419061cc21e970f139d466dc0a0"}, - {file = "frozenlist-1.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:b206646d176a007466358aa21d85cd8600a415c67c9bd15403336c331a10d956"}, - {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_universal2.whl", 
hash = "sha256:de343e75f40e972bae1ef6090267f8260c1446a1695e77096db6cfa25e759a95"}, - {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad2a9eb6d9839ae241701d0918f54c51365a51407fd80f6b8289e2dfca977cc3"}, - {file = "frozenlist-1.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd7bd3b3830247580de99c99ea2a01416dfc3c34471ca1298bccabf86d0ff4dc"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdf1847068c362f16b353163391210269e4f0569a3c166bc6a9f74ccbfc7e839"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38461d02d66de17455072c9ba981d35f1d2a73024bee7790ac2f9e361ef1cd0c"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5a32087d720c608f42caed0ef36d2b3ea61a9d09ee59a5142d6070da9041b8f"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dd65632acaf0d47608190a71bfe46b209719bf2beb59507db08ccdbe712f969b"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261b9f5d17cac914531331ff1b1d452125bf5daa05faf73b71d935485b0c510b"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b89ac9768b82205936771f8d2eb3ce88503b1556324c9f903e7156669f521472"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:008eb8b31b3ea6896da16c38c1b136cb9fec9e249e77f6211d479db79a4eaf01"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e74b0506fa5aa5598ac6a975a12aa8928cbb58e1f5ac8360792ef15de1aa848f"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:490132667476f6781b4c9458298b0c1cddf237488abd228b0b3650e5ecba7467"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:76d4711f6f6d08551a7e9ef28c722f4a50dd0fc204c56b4bcd95c6cc05ce6fbb"}, - {file = "frozenlist-1.4.0-cp311-cp311-win32.whl", hash = "sha256:a02eb8ab2b8f200179b5f62b59757685ae9987996ae549ccf30f983f40602431"}, - {file = "frozenlist-1.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:515e1abc578dd3b275d6a5114030b1330ba044ffba03f94091842852f806f1c1"}, - {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f0ed05f5079c708fe74bf9027e95125334b6978bf07fd5ab923e9e55e5fbb9d3"}, - {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ca265542ca427bf97aed183c1676e2a9c66942e822b14dc6e5f42e038f92a503"}, - {file = "frozenlist-1.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:491e014f5c43656da08958808588cc6c016847b4360e327a62cb308c791bd2d9"}, - {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17ae5cd0f333f94f2e03aaf140bb762c64783935cc764ff9c82dff626089bebf"}, - {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e78fb68cf9c1a6aa4a9a12e960a5c9dfbdb89b3695197aa7064705662515de2"}, - {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5655a942f5f5d2c9ed93d72148226d75369b4f6952680211972a33e59b1dfdc"}, - {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c11b0746f5d946fecf750428a95f3e9ebe792c1ee3b1e96eeba145dc631a9672"}, - {file = 
"frozenlist-1.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e66d2a64d44d50d2543405fb183a21f76b3b5fd16f130f5c99187c3fb4e64919"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:88f7bc0fcca81f985f78dd0fa68d2c75abf8272b1f5c323ea4a01a4d7a614efc"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5833593c25ac59ede40ed4de6d67eb42928cca97f26feea219f21d0ed0959b79"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:fec520865f42e5c7f050c2a79038897b1c7d1595e907a9e08e3353293ffc948e"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:b826d97e4276750beca7c8f0f1a4938892697a6bcd8ec8217b3312dad6982781"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ceb6ec0a10c65540421e20ebd29083c50e6d1143278746a4ef6bcf6153171eb8"}, - {file = "frozenlist-1.4.0-cp38-cp38-win32.whl", hash = "sha256:2b8bcf994563466db019fab287ff390fffbfdb4f905fc77bc1c1d604b1c689cc"}, - {file = "frozenlist-1.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:a6c8097e01886188e5be3e6b14e94ab365f384736aa1fca6a0b9e35bd4a30bc7"}, - {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6c38721585f285203e4b4132a352eb3daa19121a035f3182e08e437cface44bf"}, - {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a0c6da9aee33ff0b1a451e867da0c1f47408112b3391dd43133838339e410963"}, - {file = "frozenlist-1.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93ea75c050c5bb3d98016b4ba2497851eadf0ac154d88a67d7a6816206f6fa7f"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f61e2dc5ad442c52b4887f1fdc112f97caeff4d9e6ebe78879364ac59f1663e1"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa384489fefeb62321b238e64c07ef48398fe80f9e1e6afeff22e140e0850eef"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10ff5faaa22786315ef57097a279b833ecab1a0bfb07d604c9cbb1c4cdc2ed87"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:007df07a6e3eb3e33e9a1fe6a9db7af152bbd8a185f9aaa6ece10a3529e3e1c6"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f4f399d28478d1f604c2ff9119907af9726aed73680e5ed1ca634d377abb087"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c5374b80521d3d3f2ec5572e05adc94601985cc526fb276d0c8574a6d749f1b3"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ce31ae3e19f3c902de379cf1323d90c649425b86de7bbdf82871b8a2a0615f3d"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7211ef110a9194b6042449431e08c4d80c0481e5891e58d429df5899690511c2"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:556de4430ce324c836789fa4560ca62d1591d2538b8ceb0b4f68fb7b2384a27a"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7645a8e814a3ee34a89c4a372011dcd817964ce8cb273c8ed6119d706e9613e3"}, - {file = "frozenlist-1.4.0-cp39-cp39-win32.whl", hash = "sha256:19488c57c12d4e8095a922f328df3f179c820c212940a498623ed39160bc3c2f"}, - {file = "frozenlist-1.4.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:6221d84d463fb110bdd7619b69cb43878a11d51cbb9394ae3105d082d5199167"}, - {file = "frozenlist-1.4.0.tar.gz", hash = "sha256:09163bdf0b2907454042edb19f887c6d33806adc71fbd54afc14908bfdc22251"}, -] - [[package]] name = "fsspec" version = "2023.6.0" @@ -800,10 +543,6 @@ files = [ {file = "fsspec-2023.6.0.tar.gz", hash = "sha256:d0b2f935446169753e7a5c5c55681c54ea91996cc67be93c39a154fb3a2742af"}, ] -[package.dependencies] -aiohttp = {version = "<4.0.0a0 || >4.0.0a0,<4.0.0a1 || >4.0.0a1", optional = true, markers = "extra == \"http\""} -requests = {version = "*", optional = true, markers = "extra == \"http\""} - [package.extras] abfs = ["adlfs"] adl = ["adlfs"] @@ -884,7 +623,7 @@ files = [ name = "jinja2" version = "3.1.2" description = "A very fast and expressive template engine." -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, @@ -996,200 +735,6 @@ files = [ {file = "jupyterlab_pygments-0.2.2.tar.gz", hash = "sha256:7405d7fde60819d905a9fa8ce89e4cd830e318cdad22a0030f7a901da705585d"}, ] -[[package]] -name = "kiwisolver" -version = "1.4.5" -description = "A fast implementation of the Cassowary constraint solver" -optional = false -python-versions = ">=3.7" -files = [ - {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:05703cf211d585109fcd72207a31bb170a0f22144d68298dc5e61b3c946518af"}, - {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:146d14bebb7f1dc4d5fbf74f8a6cb15ac42baadee8912eb84ac0b3b2a3dc6ac3"}, - {file = "kiwisolver-1.4.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ef7afcd2d281494c0a9101d5c571970708ad911d028137cd558f02b851c08b4"}, - {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9eaa8b117dc8337728e834b9c6e2611f10c79e38f65157c4c38e9400286f5cb1"}, - {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ec20916e7b4cbfb1f12380e46486ec4bcbaa91a9c448b97023fde0d5bbf9e4ff"}, - {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39b42c68602539407884cf70d6a480a469b93b81b7701378ba5e2328660c847a"}, - {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa12042de0171fad672b6c59df69106d20d5596e4f87b5e8f76df757a7c399aa"}, - {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a40773c71d7ccdd3798f6489aaac9eee213d566850a9533f8d26332d626b82c"}, - {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:19df6e621f6d8b4b9c4d45f40a66839294ff2bb235e64d2178f7522d9170ac5b"}, - {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:83d78376d0d4fd884e2c114d0621624b73d2aba4e2788182d286309ebdeed770"}, - {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e391b1f0a8a5a10ab3b9bb6afcfd74f2175f24f8975fb87ecae700d1503cdee0"}, - {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:852542f9481f4a62dbb5dd99e8ab7aedfeb8fb6342349a181d4036877410f525"}, - {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59edc41b24031bc25108e210c0def6f6c2191210492a972d585a06ff246bb79b"}, - {file = "kiwisolver-1.4.5-cp310-cp310-win32.whl", hash = "sha256:a6aa6315319a052b4ee378aa171959c898a6183f15c1e541821c5c59beaa0238"}, - {file = 
"kiwisolver-1.4.5-cp310-cp310-win_amd64.whl", hash = "sha256:d0ef46024e6a3d79c01ff13801cb19d0cad7fd859b15037aec74315540acc276"}, - {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:11863aa14a51fd6ec28688d76f1735f8f69ab1fabf388851a595d0721af042f5"}, - {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8ab3919a9997ab7ef2fbbed0cc99bb28d3c13e6d4b1ad36e97e482558a91be90"}, - {file = "kiwisolver-1.4.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fcc700eadbbccbf6bc1bcb9dbe0786b4b1cb91ca0dcda336eef5c2beed37b797"}, - {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dfdd7c0b105af050eb3d64997809dc21da247cf44e63dc73ff0fd20b96be55a9"}, - {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76c6a5964640638cdeaa0c359382e5703e9293030fe730018ca06bc2010c4437"}, - {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bbea0db94288e29afcc4c28afbf3a7ccaf2d7e027489c449cf7e8f83c6346eb9"}, - {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ceec1a6bc6cab1d6ff5d06592a91a692f90ec7505d6463a88a52cc0eb58545da"}, - {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:040c1aebeda72197ef477a906782b5ab0d387642e93bda547336b8957c61022e"}, - {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f91de7223d4c7b793867797bacd1ee53bfe7359bd70d27b7b58a04efbb9436c8"}, - {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:faae4860798c31530dd184046a900e652c95513796ef51a12bc086710c2eec4d"}, - {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b0157420efcb803e71d1b28e2c287518b8808b7cf1ab8af36718fd0a2c453eb0"}, - {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:06f54715b7737c2fecdbf140d1afb11a33d59508a47bf11bb38ecf21dc9ab79f"}, - {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fdb7adb641a0d13bdcd4ef48e062363d8a9ad4a182ac7647ec88f695e719ae9f"}, - {file = "kiwisolver-1.4.5-cp311-cp311-win32.whl", hash = "sha256:bb86433b1cfe686da83ce32a9d3a8dd308e85c76b60896d58f082136f10bffac"}, - {file = "kiwisolver-1.4.5-cp311-cp311-win_amd64.whl", hash = "sha256:6c08e1312a9cf1074d17b17728d3dfce2a5125b2d791527f33ffbe805200a355"}, - {file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:32d5cf40c4f7c7b3ca500f8985eb3fb3a7dfc023215e876f207956b5ea26632a"}, - {file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f846c260f483d1fd217fe5ed7c173fb109efa6b1fc8381c8b7552c5781756192"}, - {file = "kiwisolver-1.4.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5ff5cf3571589b6d13bfbfd6bcd7a3f659e42f96b5fd1c4830c4cf21d4f5ef45"}, - {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7269d9e5f1084a653d575c7ec012ff57f0c042258bf5db0954bf551c158466e7"}, - {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da802a19d6e15dffe4b0c24b38b3af68e6c1a68e6e1d8f30148c83864f3881db"}, - {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3aba7311af82e335dd1e36ffff68aaca609ca6290c2cb6d821a39aa075d8e3ff"}, - {file = 
"kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:763773d53f07244148ccac5b084da5adb90bfaee39c197554f01b286cf869228"}, - {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2270953c0d8cdab5d422bee7d2007f043473f9d2999631c86a223c9db56cbd16"}, - {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d099e745a512f7e3bbe7249ca835f4d357c586d78d79ae8f1dcd4d8adeb9bda9"}, - {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:74db36e14a7d1ce0986fa104f7d5637aea5c82ca6326ed0ec5694280942d1162"}, - {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e5bab140c309cb3a6ce373a9e71eb7e4873c70c2dda01df6820474f9889d6d4"}, - {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0f114aa76dc1b8f636d077979c0ac22e7cd8f3493abbab152f20eb8d3cda71f3"}, - {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:88a2df29d4724b9237fc0c6eaf2a1adae0cdc0b3e9f4d8e7dc54b16812d2d81a"}, - {file = "kiwisolver-1.4.5-cp312-cp312-win32.whl", hash = "sha256:72d40b33e834371fd330fb1472ca19d9b8327acb79a5821d4008391db8e29f20"}, - {file = "kiwisolver-1.4.5-cp312-cp312-win_amd64.whl", hash = "sha256:2c5674c4e74d939b9d91dda0fae10597ac7521768fec9e399c70a1f27e2ea2d9"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3a2b053a0ab7a3960c98725cfb0bf5b48ba82f64ec95fe06f1d06c99b552e130"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cd32d6c13807e5c66a7cbb79f90b553642f296ae4518a60d8d76243b0ad2898"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59ec7b7c7e1a61061850d53aaf8e93db63dce0c936db1fda2658b70e4a1be709"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da4cfb373035def307905d05041c1d06d8936452fe89d464743ae7fb8371078b"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2400873bccc260b6ae184b2b8a4fec0e4082d30648eadb7c3d9a13405d861e89"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1b04139c4236a0f3aff534479b58f6f849a8b351e1314826c2d230849ed48985"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:4e66e81a5779b65ac21764c295087de82235597a2293d18d943f8e9e32746265"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:7931d8f1f67c4be9ba1dd9c451fb0eeca1a25b89e4d3f89e828fe12a519b782a"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:b3f7e75f3015df442238cca659f8baa5f42ce2a8582727981cbfa15fee0ee205"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:bbf1d63eef84b2e8c89011b7f2235b1e0bf7dacc11cac9431fc6468e99ac77fb"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4c380469bd3f970ef677bf2bcba2b6b0b4d5c75e7a020fb863ef75084efad66f"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-win32.whl", hash = "sha256:9408acf3270c4b6baad483865191e3e582b638b1654a007c62e3efe96f09a9a3"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-win_amd64.whl", hash = "sha256:5b94529f9b2591b7af5f3e0e730a4e0a41ea174af35a4fd067775f9bdfeee01a"}, - {file = "kiwisolver-1.4.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:11c7de8f692fc99816e8ac50d1d1aef4f75126eefc33ac79aac02c099fd3db71"}, - {file = 
"kiwisolver-1.4.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:53abb58632235cd154176ced1ae8f0d29a6657aa1aa9decf50b899b755bc2b93"}, - {file = "kiwisolver-1.4.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:88b9f257ca61b838b6f8094a62418421f87ac2a1069f7e896c36a7d86b5d4c29"}, - {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3195782b26fc03aa9c6913d5bad5aeb864bdc372924c093b0f1cebad603dd712"}, - {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc579bf0f502e54926519451b920e875f433aceb4624a3646b3252b5caa9e0b6"}, - {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a580c91d686376f0f7c295357595c5a026e6cbc3d77b7c36e290201e7c11ecb"}, - {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cfe6ab8da05c01ba6fbea630377b5da2cd9bcbc6338510116b01c1bc939a2c18"}, - {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:d2e5a98f0ec99beb3c10e13b387f8db39106d53993f498b295f0c914328b1333"}, - {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a51a263952b1429e429ff236d2f5a21c5125437861baeed77f5e1cc2d2c7c6da"}, - {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3edd2fa14e68c9be82c5b16689e8d63d89fe927e56debd6e1dbce7a26a17f81b"}, - {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:74d1b44c6cfc897df648cc9fdaa09bc3e7679926e6f96df05775d4fb3946571c"}, - {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:76d9289ed3f7501012e05abb8358bbb129149dbd173f1f57a1bf1c22d19ab7cc"}, - {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:92dea1ffe3714fa8eb6a314d2b3c773208d865a0e0d35e713ec54eea08a66250"}, - {file = "kiwisolver-1.4.5-cp38-cp38-win32.whl", hash = "sha256:5c90ae8c8d32e472be041e76f9d2f2dbff4d0b0be8bd4041770eddb18cf49a4e"}, - {file = "kiwisolver-1.4.5-cp38-cp38-win_amd64.whl", hash = "sha256:c7940c1dc63eb37a67721b10d703247552416f719c4188c54e04334321351ced"}, - {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9407b6a5f0d675e8a827ad8742e1d6b49d9c1a1da5d952a67d50ef5f4170b18d"}, - {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15568384086b6df3c65353820a4473575dbad192e35010f622c6ce3eebd57af9"}, - {file = "kiwisolver-1.4.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0dc9db8e79f0036e8173c466d21ef18e1befc02de8bf8aa8dc0813a6dc8a7046"}, - {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:cdc8a402aaee9a798b50d8b827d7ecf75edc5fb35ea0f91f213ff927c15f4ff0"}, - {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6c3bd3cde54cafb87d74d8db50b909705c62b17c2099b8f2e25b461882e544ff"}, - {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:955e8513d07a283056b1396e9a57ceddbd272d9252c14f154d450d227606eb54"}, - {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:346f5343b9e3f00b8db8ba359350eb124b98c99efd0b408728ac6ebf38173958"}, - {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9098e0049e88c6a24ff64545cdfc50807818ba6c1b739cae221bbbcbc58aad3"}, - {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:00bd361b903dc4bbf4eb165f24d1acbee754fce22ded24c3d56eec268658a5cf"}, - {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7b8b454bac16428b22560d0a1cf0a09875339cab69df61d7805bf48919415901"}, - {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:f1d072c2eb0ad60d4c183f3fb44ac6f73fb7a8f16a2694a91f988275cbf352f9"}, - {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:31a82d498054cac9f6d0b53d02bb85811185bcb477d4b60144f915f3b3126342"}, - {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6512cb89e334e4700febbffaaa52761b65b4f5a3cf33f960213d5656cea36a77"}, - {file = "kiwisolver-1.4.5-cp39-cp39-win32.whl", hash = "sha256:9db8ea4c388fdb0f780fe91346fd438657ea602d58348753d9fb265ce1bca67f"}, - {file = "kiwisolver-1.4.5-cp39-cp39-win_amd64.whl", hash = "sha256:59415f46a37f7f2efeec758353dd2eae1b07640d8ca0f0c42548ec4125492635"}, - {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5c7b3b3a728dc6faf3fc372ef24f21d1e3cee2ac3e9596691d746e5a536de920"}, - {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:620ced262a86244e2be10a676b646f29c34537d0d9cc8eb26c08f53d98013390"}, - {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:378a214a1e3bbf5ac4a8708304318b4f890da88c9e6a07699c4ae7174c09a68d"}, - {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf7be1207676ac608a50cd08f102f6742dbfc70e8d60c4db1c6897f62f71523"}, - {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ba55dce0a9b8ff59495ddd050a0225d58bd0983d09f87cfe2b6aec4f2c1234e4"}, - {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fd32ea360bcbb92d28933fc05ed09bffcb1704ba3fc7942e81db0fd4f81a7892"}, - {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5e7139af55d1688f8b960ee9ad5adafc4ac17c1c473fe07133ac092310d76544"}, - {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dced8146011d2bc2e883f9bd68618b8247387f4bbec46d7392b3c3b032640126"}, - {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9bf3325c47b11b2e51bca0824ea217c7cd84491d8ac4eefd1e409705ef092bd"}, - {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5794cf59533bc3f1b1c821f7206a3617999db9fbefc345360aafe2e067514929"}, - {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e368f200bbc2e4f905b8e71eb38b3c04333bddaa6a2464a6355487b02bb7fb09"}, - {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5d706eba36b4c4d5bc6c6377bb6568098765e990cfc21ee16d13963fab7b3e7"}, - {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85267bd1aa8880a9c88a8cb71e18d3d64d2751a790e6ca6c27b8ccc724bcd5ad"}, - {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:210ef2c3a1f03272649aff1ef992df2e724748918c4bc2d5a90352849eb40bea"}, - {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:11d011a7574eb3b82bcc9c1a1d35c1d7075677fdd15de527d91b46bd35e935ee"}, - {file = "kiwisolver-1.4.5.tar.gz", hash = 
"sha256:e57e563a57fb22a142da34f38acc2fc1a5c864bc29ca1517a88abc963e60d6ec"}, -] - -[[package]] -name = "lightning" -version = "2.1.0" -description = "The Deep Learning framework to train, deploy, and ship AI products Lightning fast." -optional = false -python-versions = ">=3.8" -files = [ - {file = "lightning-2.1.0-py3-none-any.whl", hash = "sha256:c12bd10bd28b9e29a8e877be039350a585f248c10b76360faa2aa2497f980de6"}, - {file = "lightning-2.1.0.tar.gz", hash = "sha256:1f78f5995ae7dcffa1edf34320db136902b73a0d1b304404c48ec8be165b3a93"}, -] - -[package.dependencies] -fsspec = {version = ">2021.06.0,<2025.0", extras = ["http"]} -lightning-utilities = ">=0.8.0,<2.0" -numpy = ">=1.17.2,<3.0" -packaging = ">=20.0,<25.0" -pytorch-lightning = "*" -PyYAML = ">=5.4,<8.0" -torch = ">=1.12.0,<4.0" -torchmetrics = ">=0.7.0,<3.0" -tqdm = ">=4.57.0,<6.0" -typing-extensions = ">=4.0.0,<6.0" - -[package.extras] -all = ["Jinja2 (<4.0)", "Pillow (>=9.5.0)", "PyYAML (<7.0)", "aiohttp (>=3.8.0,<4.0)", "arrow (>=1.2.0,<2.0)", "backoff (>=2.2.1,<3.0)", "beautifulsoup4 (>=4.8.0,<5.0)", "click (<9.0)", "croniter (>=1.3.0,<1.5.0)", "dateutils (<1.0)", "deepdiff (>=5.7.0,<7.0)", "deepspeed (>=0.8.2,<=0.9.3)", "docker (>=5.0.0,<7.0)", "fastapi (>=0.92.0,<1.0)", "fsspec (>=2022.5.0,<2024.0)", "fsspec[http] (>2021.06.0,<2024.0)", "gym[classic-control] (>=0.17.0,<1.0)", "hydra-core (>=1.0.5,<2.0)", "inquirer (>=2.10.0,<4.0)", "ipython[all] (<9.0)", "jsonargparse[signatures] (>=4.18.0,<5.0)", "lightning-api-access (>=0.0.3)", "lightning-cloud (==0.5.39)", "lightning-fabric (>=1.9.0)", "lightning-utilities (>=0.8.0,<1.0)", "matplotlib (>3.1,<4.0)", "omegaconf (>=2.0.5,<3.0)", "packaging", "panel (>=1.0.0,<2.0)", "psutil (<6.0)", "pydantic (>=1.7.4)", "python-multipart (>=0.0.5,<1.0)", "pytorch-lightning (>=1.9.0)", "redis (>=4.0.1,<6.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "s3fs (>=2022.5.0,<2024.0)", "starlette", "starsessions (>=1.2.1,<2.0)", "streamlit (>=1.13.0,<2.0)", "tensorboardX (>=2.2,<3.0)", "torch (>0.14.0,<3.0)", "torchdata (>0.5.9,<1.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.13.0,<1.0)", "torchvision (>=0.15.2,<1.0)", "traitlets (>=5.3.0,<6.0)", "typing-extensions (>=4.0.0,<5.0)", "urllib3 (<3.0)", "uvicorn (<1.0)", "websocket-client (<2.0)", "websockets (<12.0)"] -app = ["Jinja2 (<4.0)", "PyYAML (<7.0)", "arrow (>=1.2.0,<2.0)", "backoff (>=2.2.1,<3.0)", "beautifulsoup4 (>=4.8.0,<5.0)", "click (<9.0)", "croniter (>=1.3.0,<1.5.0)", "dateutils (<1.0)", "deepdiff (>=5.7.0,<7.0)", "fastapi (>=0.92.0,<1.0)", "fsspec (>=2022.5.0,<2024.0)", "inquirer (>=2.10.0,<4.0)", "lightning-cloud (==0.5.39)", "lightning-utilities (>=0.8.0,<1.0)", "packaging", "psutil (<6.0)", "pydantic (>=1.7.4)", "python-multipart (>=0.0.5,<1.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "starlette", "starsessions (>=1.2.1,<2.0)", "traitlets (>=5.3.0,<6.0)", "typing-extensions (>=4.0.0,<5.0)", "urllib3 (<3.0)", "uvicorn (<1.0)", "websocket-client (<2.0)", "websockets (<12.0)"] -app-all = ["Jinja2 (<4.0)", "PyYAML (<7.0)", "aiohttp (>=3.8.0,<4.0)", "arrow (>=1.2.0,<2.0)", "backoff (>=2.2.1,<3.0)", "beautifulsoup4 (>=4.8.0,<5.0)", "click (<9.0)", "croniter (>=1.3.0,<1.5.0)", "dateutils (<1.0)", "deepdiff (>=5.7.0,<7.0)", "docker (>=5.0.0,<7.0)", "fastapi (>=0.92.0,<1.0)", "fsspec (>=2022.5.0,<2024.0)", "inquirer (>=2.10.0,<4.0)", "lightning-api-access (>=0.0.3)", "lightning-cloud (==0.5.39)", "lightning-fabric (>=1.9.0)", "lightning-utilities (>=0.8.0,<1.0)", "packaging", "panel (>=1.0.0,<2.0)", "psutil (<6.0)", "pydantic 
(>=1.7.4)", "python-multipart (>=0.0.5,<1.0)", "pytorch-lightning (>=1.9.0)", "redis (>=4.0.1,<6.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "s3fs (>=2022.5.0,<2024.0)", "starlette", "starsessions (>=1.2.1,<2.0)", "streamlit (>=1.13.0,<2.0)", "traitlets (>=5.3.0,<6.0)", "typing-extensions (>=4.0.0,<5.0)", "urllib3 (<3.0)", "uvicorn (<1.0)", "websocket-client (<2.0)", "websockets (<12.0)"] -app-cloud = ["docker (>=5.0.0,<7.0)", "redis (>=4.0.1,<6.0)", "s3fs (>=2022.5.0,<2024.0)"] -app-components = ["aiohttp (>=3.8.0,<4.0)", "lightning-api-access (>=0.0.3)", "lightning-fabric (>=1.9.0)", "pytorch-lightning (>=1.9.0)"] -app-dev = ["Jinja2 (<4.0)", "PyYAML (<7.0)", "aiohttp (>=3.8.0,<4.0)", "arrow (>=1.2.0,<2.0)", "backoff (>=2.2.1,<3.0)", "beautifulsoup4 (>=4.8.0,<5.0)", "click (<9.0)", "coverage (==7.3.1)", "croniter (>=1.3.0,<1.5.0)", "dateutils (<1.0)", "deepdiff (>=5.7.0,<7.0)", "docker (>=5.0.0,<7.0)", "fastapi (>=0.92.0,<1.0)", "fsspec (>=2022.5.0,<2024.0)", "httpx (==0.25.0)", "inquirer (>=2.10.0,<4.0)", "lightning-api-access (>=0.0.3)", "lightning-cloud (==0.5.39)", "lightning-fabric (>=1.9.0)", "lightning-utilities (>=0.8.0,<1.0)", "packaging", "panel (>=1.0.0,<2.0)", "playwright (==1.38.0)", "psutil (<6.0)", "pydantic (>=1.7.4)", "pympler", "pytest (==7.4.0)", "pytest-asyncio (==0.21.1)", "pytest-cov (==4.1.0)", "pytest-doctestplus (==0.9.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "pytest-xdist (==3.3.1)", "python-multipart (>=0.0.5,<1.0)", "pytorch-lightning (>=1.9.0)", "redis (>=4.0.1,<6.0)", "requests (<3.0)", "requests-mock (==1.11.0)", "rich (>=12.3.0,<14.0)", "s3fs (>=2022.5.0,<2024.0)", "setuptools (<69.0)", "starlette", "starsessions (>=1.2.1,<2.0)", "streamlit (>=1.13.0,<2.0)", "traitlets (>=5.3.0,<6.0)", "trio (<0.22.0)", "typing-extensions (>=4.0.0,<5.0)", "urllib3 (<3.0)", "uvicorn (<1.0)", "websocket-client (<2.0)", "websockets (<12.0)"] -app-extra = ["Jinja2 (<4.0)", "PyYAML (<7.0)", "aiohttp (>=3.8.0,<4.0)", "arrow (>=1.2.0,<2.0)", "backoff (>=2.2.1,<3.0)", "beautifulsoup4 (>=4.8.0,<5.0)", "click (<9.0)", "croniter (>=1.3.0,<1.5.0)", "dateutils (<1.0)", "deepdiff (>=5.7.0,<7.0)", "docker (>=5.0.0,<7.0)", "fastapi (>=0.92.0,<1.0)", "fsspec (>=2022.5.0,<2024.0)", "inquirer (>=2.10.0,<4.0)", "lightning-api-access (>=0.0.3)", "lightning-cloud (==0.5.39)", "lightning-fabric (>=1.9.0)", "lightning-utilities (>=0.8.0,<1.0)", "packaging", "panel (>=1.0.0,<2.0)", "psutil (<6.0)", "pydantic (>=1.7.4)", "python-multipart (>=0.0.5,<1.0)", "pytorch-lightning (>=1.9.0)", "redis (>=4.0.1,<6.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "s3fs (>=2022.5.0,<2024.0)", "starlette", "starsessions (>=1.2.1,<2.0)", "streamlit (>=1.13.0,<2.0)", "traitlets (>=5.3.0,<6.0)", "typing-extensions (>=4.0.0,<5.0)", "urllib3 (<3.0)", "uvicorn (<1.0)", "websocket-client (<2.0)", "websockets (<12.0)"] -app-test = ["coverage (==7.3.1)", "httpx (==0.25.0)", "playwright (==1.38.0)", "psutil (<6.0)", "pympler", "pytest (==7.4.0)", "pytest-asyncio (==0.21.1)", "pytest-cov (==4.1.0)", "pytest-doctestplus (==0.9.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "pytest-xdist (==3.3.1)", "requests-mock (==1.11.0)", "setuptools (<69.0)", "trio (<0.22.0)"] -app-ui = ["panel (>=1.0.0,<2.0)", "streamlit (>=1.13.0,<2.0)"] -cloud = ["docker (>=5.0.0,<7.0)", "fsspec[http] (>2021.06.0,<2024.0)", "redis (>=4.0.1,<6.0)", "s3fs (>=2022.5.0,<2024.0)"] -components = ["aiohttp (>=3.8.0,<4.0)", "lightning-api-access (>=0.0.3)", "lightning-fabric (>=1.9.0)", "pytorch-lightning 
(>=1.9.0)"] -data = ["Jinja2 (<4.0)", "PyYAML (<7.0)", "arrow (>=1.2.0,<2.0)", "backoff (>=2.2.1,<3.0)", "beautifulsoup4 (>=4.8.0,<5.0)", "click (<9.0)", "croniter (>=1.3.0,<1.5.0)", "dateutils (<1.0)", "deepdiff (>=5.7.0,<7.0)", "fastapi (>=0.92.0,<1.0)", "fsspec (>=2022.5.0,<2024.0)", "inquirer (>=2.10.0,<4.0)", "lightning-cloud (==0.5.39)", "lightning-utilities (>=0.8.0,<1.0)", "packaging", "psutil (<6.0)", "pydantic (>=1.7.4)", "python-multipart (>=0.0.5,<1.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "starlette", "starsessions (>=1.2.1,<2.0)", "torch (>0.14.0,<3.0)", "torchdata (>0.5.9,<1.0)", "traitlets (>=5.3.0,<6.0)", "typing-extensions (>=4.0.0,<5.0)", "urllib3 (<3.0)", "uvicorn (<1.0)", "websocket-client (<2.0)", "websockets (<12.0)"] -data-all = ["Jinja2 (<4.0)", "Pillow (>=9.5.0)", "PyYAML (<7.0)", "arrow (>=1.2.0,<2.0)", "backoff (>=2.2.1,<3.0)", "beautifulsoup4 (>=4.8.0,<5.0)", "click (<9.0)", "croniter (>=1.3.0,<1.5.0)", "dateutils (<1.0)", "deepdiff (>=5.7.0,<7.0)", "fastapi (>=0.92.0,<1.0)", "fsspec (>=2022.5.0,<2024.0)", "fsspec[http] (>2021.06.0,<2024.0)", "inquirer (>=2.10.0,<4.0)", "lightning-cloud (==0.5.39)", "lightning-utilities (>=0.8.0,<1.0)", "packaging", "psutil (<6.0)", "pydantic (>=1.7.4)", "python-multipart (>=0.0.5,<1.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "s3fs (>=2022.5.0,<2024.0)", "starlette", "starsessions (>=1.2.1,<2.0)", "torch (>0.14.0,<3.0)", "torchdata (>0.5.9,<1.0)", "torchvision (>=0.15.2,<1.0)", "traitlets (>=5.3.0,<6.0)", "typing-extensions (>=4.0.0,<5.0)", "urllib3 (<3.0)", "uvicorn (<1.0)", "websocket-client (<2.0)", "websockets (<12.0)"] -data-cloud = ["fsspec[http] (>2021.06.0,<2024.0)", "s3fs (>=2022.5.0,<2024.0)"] -data-dev = ["Jinja2 (<4.0)", "Pillow (>=9.5.0)", "PyYAML (<7.0)", "arrow (>=1.2.0,<2.0)", "backoff (>=2.2.1,<3.0)", "beautifulsoup4 (>=4.8.0,<5.0)", "click (<9.0)", "coverage (==7.3.1)", "croniter (>=1.3.0,<1.5.0)", "dateutils (<1.0)", "deepdiff (>=5.7.0,<7.0)", "fastapi (>=0.92.0,<1.0)", "fsspec (>=2022.5.0,<2024.0)", "fsspec[http] (>2021.06.0,<2024.0)", "inquirer (>=2.10.0,<4.0)", "lightning-cloud (==0.5.39)", "lightning-utilities (>=0.8.0,<1.0)", "packaging", "psutil (<6.0)", "pydantic (>=1.7.4)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "python-multipart (>=0.0.5,<1.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "s3fs (>=2022.5.0,<2024.0)", "starlette", "starsessions (>=1.2.1,<2.0)", "torch (>0.14.0,<3.0)", "torchdata (>0.5.9,<1.0)", "torchvision (>=0.15.2,<1.0)", "traitlets (>=5.3.0,<6.0)", "typing-extensions (>=4.0.0,<5.0)", "urllib3 (<3.0)", "uvicorn (<1.0)", "websocket-client (<2.0)", "websockets (<12.0)"] -data-examples = ["Pillow (>=9.5.0)", "torchvision (>=0.15.2,<1.0)"] -data-test = ["coverage (==7.3.1)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)"] -dev = ["Jinja2 (<4.0)", "Pillow (>=9.5.0)", "PyYAML (<7.0)", "aiohttp (>=3.8.0,<4.0)", "arrow (>=1.2.0,<2.0)", "backoff (>=2.2.1,<3.0)", "beautifulsoup4 (>=4.8.0,<5.0)", "click (<9.0)", "click (==8.1.7)", "cloudpickle (>=1.3,<3.0)", "coverage (==7.3.1)", "croniter (>=1.3.0,<1.5.0)", "dateutils (<1.0)", "deepdiff (>=5.7.0,<7.0)", "deepspeed (>=0.8.2,<=0.9.3)", "docker (>=5.0.0,<7.0)", "fastapi", "fastapi (>=0.92.0,<1.0)", "fsspec (>=2022.5.0,<2024.0)", "fsspec[http] (>2021.06.0,<2024.0)", "gym[classic-control] (>=0.17.0,<1.0)", "httpx (==0.25.0)", "hydra-core 
(>=1.0.5,<2.0)", "inquirer (>=2.10.0,<4.0)", "ipython[all] (<9.0)", "jsonargparse[signatures] (>=4.18.0,<5.0)", "lightning-api-access (>=0.0.3)", "lightning-cloud (==0.5.39)", "lightning-fabric (>=1.9.0)", "lightning-utilities (>=0.8.0,<1.0)", "matplotlib (>3.1,<4.0)", "omegaconf (>=2.0.5,<3.0)", "onnx (>=0.14.0,<2.0)", "onnxruntime (>=0.15.0,<2.0)", "packaging", "pandas (>1.0,<3.0)", "panel (>=1.0.0,<2.0)", "playwright (==1.38.0)", "psutil (<6.0)", "pydantic (>=1.7.4)", "pympler", "pytest (==7.4.0)", "pytest-asyncio (==0.21.1)", "pytest-cov (==4.1.0)", "pytest-doctestplus (==0.9.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "pytest-xdist (==3.3.1)", "python-multipart (>=0.0.5,<1.0)", "pytorch-lightning (>=1.9.0)", "redis (>=4.0.1,<6.0)", "requests (<3.0)", "requests-mock (==1.11.0)", "rich (>=12.3.0,<14.0)", "s3fs (>=2022.5.0,<2024.0)", "scikit-learn (>0.22.1,<2.0)", "setuptools (<69.0)", "starlette", "starsessions (>=1.2.1,<2.0)", "streamlit (>=1.13.0,<2.0)", "tensorboard (>=2.9.1,<3.0)", "tensorboardX (>=2.2,<3.0)", "torch (>0.14.0,<3.0)", "torchdata (>0.5.9,<1.0)", "torchmetrics (>=0.10.0,<2.0)", "torchmetrics (>=0.7.0,<2.0)", "torchvision (>=0.13.0,<1.0)", "torchvision (>=0.15.2,<1.0)", "traitlets (>=5.3.0,<6.0)", "trio (<0.22.0)", "typing-extensions (>=4.0.0,<5.0)", "urllib3 (<3.0)", "uvicorn", "uvicorn (<1.0)", "websocket-client (<2.0)", "websockets (<12.0)"] -examples = ["Pillow (>=9.5.0)", "gym[classic-control] (>=0.17.0,<1.0)", "ipython[all] (<9.0)", "lightning-utilities (>=0.8.0,<1.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.13.0,<1.0)", "torchvision (>=0.15.2,<1.0)"] -extra = ["Jinja2 (<4.0)", "PyYAML (<7.0)", "aiohttp (>=3.8.0,<4.0)", "arrow (>=1.2.0,<2.0)", "backoff (>=2.2.1,<3.0)", "beautifulsoup4 (>=4.8.0,<5.0)", "click (<9.0)", "croniter (>=1.3.0,<1.5.0)", "dateutils (<1.0)", "deepdiff (>=5.7.0,<7.0)", "docker (>=5.0.0,<7.0)", "fastapi (>=0.92.0,<1.0)", "fsspec (>=2022.5.0,<2024.0)", "hydra-core (>=1.0.5,<2.0)", "inquirer (>=2.10.0,<4.0)", "jsonargparse[signatures] (>=4.18.0,<5.0)", "lightning-api-access (>=0.0.3)", "lightning-cloud (==0.5.39)", "lightning-fabric (>=1.9.0)", "lightning-utilities (>=0.8.0,<1.0)", "matplotlib (>3.1,<4.0)", "omegaconf (>=2.0.5,<3.0)", "packaging", "panel (>=1.0.0,<2.0)", "psutil (<6.0)", "pydantic (>=1.7.4)", "python-multipart (>=0.0.5,<1.0)", "pytorch-lightning (>=1.9.0)", "redis (>=4.0.1,<6.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "s3fs (>=2022.5.0,<2024.0)", "starlette", "starsessions (>=1.2.1,<2.0)", "streamlit (>=1.13.0,<2.0)", "tensorboardX (>=2.2,<3.0)", "traitlets (>=5.3.0,<6.0)", "typing-extensions (>=4.0.0,<5.0)", "urllib3 (<3.0)", "uvicorn (<1.0)", "websocket-client (<2.0)", "websockets (<12.0)"] -fabric-all = ["deepspeed (>=0.8.2,<=0.9.3)", "lightning-utilities (>=0.8.0,<1.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.13.0,<1.0)"] -fabric-dev = ["click (==8.1.7)", "coverage (==7.3.1)", "deepspeed (>=0.8.2,<=0.9.3)", "lightning-utilities (>=0.8.0,<1.0)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "tensorboardX (>=2.2,<3.0)", "torchmetrics (>=0.10.0,<2.0)", "torchmetrics (>=0.7.0,<2.0)", "torchvision (>=0.13.0,<1.0)"] -fabric-examples = ["lightning-utilities (>=0.8.0,<1.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.13.0,<1.0)"] -fabric-strategies = ["deepspeed (>=0.8.2,<=0.9.3)"] -fabric-test = ["click (==8.1.7)", "coverage (==7.3.1)", "pytest 
(==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "tensorboardX (>=2.2,<3.0)", "torchmetrics (>=0.7.0,<2.0)"] -pytorch-all = ["deepspeed (>=0.8.2,<=0.9.3)", "gym[classic-control] (>=0.17.0,<1.0)", "hydra-core (>=1.0.5,<2.0)", "ipython[all] (<9.0)", "jsonargparse[signatures] (>=4.18.0,<5.0)", "lightning-utilities (>=0.8.0,<1.0)", "matplotlib (>3.1,<4.0)", "omegaconf (>=2.0.5,<3.0)", "rich (>=12.3.0,<14.0)", "tensorboardX (>=2.2,<3.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.13.0,<1.0)"] -pytorch-dev = ["cloudpickle (>=1.3,<3.0)", "coverage (==7.3.1)", "deepspeed (>=0.8.2,<=0.9.3)", "fastapi", "gym[classic-control] (>=0.17.0,<1.0)", "hydra-core (>=1.0.5,<2.0)", "ipython[all] (<9.0)", "jsonargparse[signatures] (>=4.18.0,<5.0)", "lightning-utilities (>=0.8.0,<1.0)", "matplotlib (>3.1,<4.0)", "omegaconf (>=2.0.5,<3.0)", "onnx (>=0.14.0,<2.0)", "onnxruntime (>=0.15.0,<2.0)", "pandas (>1.0,<3.0)", "psutil (<6.0)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "rich (>=12.3.0,<14.0)", "scikit-learn (>0.22.1,<2.0)", "tensorboard (>=2.9.1,<3.0)", "tensorboardX (>=2.2,<3.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.13.0,<1.0)", "uvicorn"] -pytorch-examples = ["gym[classic-control] (>=0.17.0,<1.0)", "ipython[all] (<9.0)", "lightning-utilities (>=0.8.0,<1.0)", "torchmetrics (>=0.10.0,<2.0)", "torchvision (>=0.13.0,<1.0)"] -pytorch-extra = ["hydra-core (>=1.0.5,<2.0)", "jsonargparse[signatures] (>=4.18.0,<5.0)", "matplotlib (>3.1,<4.0)", "omegaconf (>=2.0.5,<3.0)", "rich (>=12.3.0,<14.0)", "tensorboardX (>=2.2,<3.0)"] -pytorch-strategies = ["deepspeed (>=0.8.2,<=0.9.3)"] -pytorch-test = ["cloudpickle (>=1.3,<3.0)", "coverage (==7.3.1)", "fastapi", "onnx (>=0.14.0,<2.0)", "onnxruntime (>=0.15.0,<2.0)", "pandas (>1.0,<3.0)", "psutil (<6.0)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "scikit-learn (>0.22.1,<2.0)", "tensorboard (>=2.9.1,<3.0)", "uvicorn"] -store = ["Jinja2 (<4.0)", "PyYAML (<7.0)", "arrow (>=1.2.0,<2.0)", "backoff (>=2.2.1,<3.0)", "beautifulsoup4 (>=4.8.0,<5.0)", "click (<9.0)", "croniter (>=1.3.0,<1.5.0)", "dateutils (<1.0)", "deepdiff (>=5.7.0,<7.0)", "fastapi (>=0.92.0,<1.0)", "fsspec (>=2022.5.0,<2024.0)", "inquirer (>=2.10.0,<4.0)", "lightning-cloud (==0.5.39)", "lightning-utilities (>=0.8.0,<1.0)", "packaging", "psutil (<6.0)", "pydantic (>=1.7.4)", "python-multipart (>=0.0.5,<1.0)", "requests (<3.0)", "rich (>=12.3.0,<14.0)", "starlette", "starsessions (>=1.2.1,<2.0)", "traitlets (>=5.3.0,<6.0)", "typing-extensions (>=4.0.0,<5.0)", "urllib3 (<3.0)", "uvicorn (<1.0)", "websocket-client (<2.0)", "websockets (<12.0)"] -store-test = ["coverage (==7.3.1)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)"] -strategies = ["deepspeed (>=0.8.2,<=0.9.3)"] -test = ["click (==8.1.7)", "cloudpickle (>=1.3,<3.0)", "coverage (==7.3.1)", "fastapi", "httpx (==0.25.0)", "onnx (>=0.14.0,<2.0)", "onnxruntime (>=0.15.0,<2.0)", "pandas (>1.0,<3.0)", "playwright (==1.38.0)", "psutil (<6.0)", "pympler", "pytest (==7.4.0)", "pytest-asyncio (==0.21.1)", "pytest-cov (==4.1.0)", "pytest-doctestplus (==0.9.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "pytest-xdist (==3.3.1)", 
"requests-mock (==1.11.0)", "scikit-learn (>0.22.1,<2.0)", "setuptools (<69.0)", "tensorboard (>=2.9.1,<3.0)", "tensorboardX (>=2.2,<3.0)", "torchmetrics (>=0.7.0,<2.0)", "trio (<0.22.0)", "uvicorn"] -ui = ["panel (>=1.0.0,<2.0)", "streamlit (>=1.13.0,<2.0)"] - -[[package]] -name = "lightning-utilities" -version = "0.9.0" -description = "PyTorch Lightning Sample project." -optional = false -python-versions = ">=3.7" -files = [ - {file = "lightning-utilities-0.9.0.tar.gz", hash = "sha256:efbf2c488c257f942abdfd06cf646fb84ca215a9663b60081811e22a15ee033b"}, - {file = "lightning_utilities-0.9.0-py3-none-any.whl", hash = "sha256:918dd90c775719e3855631db6282ad75c14da4c5727c4cebdd1589d865fad03d"}, -] - -[package.dependencies] -packaging = ">=17.1" -typing-extensions = "*" - -[package.extras] -cli = ["fire"] -docs = ["requests (>=2.0.0)"] -typing = ["mypy (>=1.0.0)"] - [[package]] name = "llvmlite" version = "0.40.1" @@ -1238,7 +783,7 @@ files = [ name = "markupsafe" version = "2.1.3" description = "Safely add untrusted strings to HTML/XML markup." -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, @@ -1293,48 +838,6 @@ files = [ {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, ] -[[package]] -name = "matplotlib" -version = "3.3.4" -description = "Python plotting package" -optional = false -python-versions = ">=3.6" -files = [ - {file = "matplotlib-3.3.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:672960dd114e342b7c610bf32fb99d14227f29919894388b41553217457ba7ef"}, - {file = "matplotlib-3.3.4-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:7c155437ae4fd366e2700e2716564d1787700687443de46bcb895fe0f84b761d"}, - {file = "matplotlib-3.3.4-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:a17f0a10604fac7627ec82820439e7db611722e80c408a726cd00d8c974c2fb3"}, - {file = "matplotlib-3.3.4-cp36-cp36m-win32.whl", hash = "sha256:215e2a30a2090221a9481db58b770ce56b8ef46f13224ae33afe221b14b24dc1"}, - {file = "matplotlib-3.3.4-cp36-cp36m-win_amd64.whl", hash = "sha256:348e6032f666ffd151b323342f9278b16b95d4a75dfacae84a11d2829a7816ae"}, - {file = "matplotlib-3.3.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:94bdd1d55c20e764d8aea9d471d2ae7a7b2c84445e0fa463f02e20f9730783e1"}, - {file = "matplotlib-3.3.4-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:a1acb72f095f1d58ecc2538ed1b8bca0b57df313b13db36ed34b8cdf1868e674"}, - {file = "matplotlib-3.3.4-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:46b1a60a04e6d884f0250d5cc8dc7bd21a9a96c584a7acdaab44698a44710bab"}, - {file = "matplotlib-3.3.4-cp37-cp37m-win32.whl", hash = "sha256:ed4a9e6dcacba56b17a0a9ac22ae2c72a35b7f0ef0693aa68574f0b2df607a89"}, - {file = "matplotlib-3.3.4-cp37-cp37m-win_amd64.whl", hash = "sha256:c24c05f645aef776e8b8931cb81e0f1632d229b42b6d216e30836e2e145a2b40"}, - {file = "matplotlib-3.3.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7310e353a4a35477c7f032409966920197d7df3e757c7624fd842f3eeb307d3d"}, - {file = "matplotlib-3.3.4-cp38-cp38-manylinux1_i686.whl", hash = "sha256:451cc89cb33d6652c509fc6b588dc51c41d7246afdcc29b8624e256b7663ed1f"}, - {file = "matplotlib-3.3.4-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:3d2eb9c1cc254d0ffa90bc96fde4b6005d09c2228f99dfd493a4219c1af99644"}, - {file = "matplotlib-3.3.4-cp38-cp38-win32.whl", hash = 
"sha256:e15fa23d844d54e7b3b7243afd53b7567ee71c721f592deb0727ee85e668f96a"}, - {file = "matplotlib-3.3.4-cp38-cp38-win_amd64.whl", hash = "sha256:1de0bb6cbfe460725f0e97b88daa8643bcf9571c18ba90bb8e41432aaeca91d6"}, - {file = "matplotlib-3.3.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f44149a0ef5b4991aaef12a93b8e8d66d6412e762745fea1faa61d98524e0ba9"}, - {file = "matplotlib-3.3.4-cp39-cp39-manylinux1_i686.whl", hash = "sha256:746a1df55749629e26af7f977ea426817ca9370ad1569436608dc48d1069b87c"}, - {file = "matplotlib-3.3.4-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:5f571b92a536206f7958f7cb2d367ff6c9a1fa8229dc35020006e4cdd1ca0acd"}, - {file = "matplotlib-3.3.4-cp39-cp39-win32.whl", hash = "sha256:9265ae0fb35e29f9b8cc86c2ab0a2e3dcddc4dd9de4b85bf26c0f63fe5c1c2ca"}, - {file = "matplotlib-3.3.4-cp39-cp39-win_amd64.whl", hash = "sha256:9a79e5dd7bb797aa611048f5b70588b23c5be05b63eefd8a0d152ac77c4243db"}, - {file = "matplotlib-3.3.4-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1e850163579a8936eede29fad41e202b25923a0a8d5ffd08ce50fc0a97dcdc93"}, - {file = "matplotlib-3.3.4-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:d738acfdfb65da34c91acbdb56abed46803db39af259b7f194dc96920360dbe4"}, - {file = "matplotlib-3.3.4-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:aa49571d8030ad0b9ac39708ee77bd2a22f87815e12bdee52ecaffece9313ed8"}, - {file = "matplotlib-3.3.4-pp37-pypy37_pp73-manylinux2010_x86_64.whl", hash = "sha256:cf3a7e54eff792f0815dbbe9b85df2f13d739289c93d346925554f71d484be78"}, - {file = "matplotlib-3.3.4.tar.gz", hash = "sha256:3e477db76c22929e4c6876c44f88d790aacdf3c3f8f3a90cb1975c0bf37825b0"}, -] - -[package.dependencies] -cycler = ">=0.10" -kiwisolver = ">=1.0.1" -numpy = ">=1.15" -pillow = ">=6.2.0" -pyparsing = ">=2.0.3,<2.0.4 || >2.0.4,<2.1.2 || >2.1.2,<2.1.6 || >2.1.6" -python-dateutil = ">=2.1" - [[package]] name = "mccabe" version = "0.6.1" @@ -1357,129 +860,6 @@ files = [ {file = "mistune-3.0.1.tar.gz", hash = "sha256:e912116c13aa0944f9dc530db38eb88f6a77087ab128f49f84a48f4c05ea163c"}, ] -[[package]] -name = "mpmath" -version = "1.3.0" -description = "Python library for arbitrary-precision floating-point arithmetic" -optional = false -python-versions = "*" -files = [ - {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, - {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, -] - -[package.extras] -develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"] -docs = ["sphinx"] -gmpy = ["gmpy2 (>=2.1.0a4)"] -tests = ["pytest (>=4.6)"] - -[[package]] -name = "multidict" -version = "6.0.4" -description = "multidict implementation" -optional = false -python-versions = ">=3.7" -files = [ - {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"}, - {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171"}, - {file = "multidict-6.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5"}, - {file = "multidict-6.0.4-cp310-cp310-win32.whl", hash = "sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8"}, - {file = "multidict-6.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc"}, - {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03"}, - {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3"}, - {file = "multidict-6.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = 
"sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461"}, - {file = "multidict-6.0.4-cp311-cp311-win32.whl", hash = "sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636"}, - {file = "multidict-6.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0"}, - {file = "multidict-6.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d"}, - {file = "multidict-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775"}, - {file = "multidict-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e"}, - {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c"}, - {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161"}, - {file = "multidict-6.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2"}, - {file = 
"multidict-6.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1"}, - {file = "multidict-6.0.4-cp38-cp38-win32.whl", hash = "sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779"}, - {file = "multidict-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480"}, - {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664"}, - {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35"}, - {file = "multidict-6.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95"}, - {file = "multidict-6.0.4-cp39-cp39-win32.whl", hash = "sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313"}, - {file = 
"multidict-6.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2"}, - {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"}, -] - -[[package]] -name = "mvlearn" -version = "0.5.0" -description = "A set of python modules for multiview learning" -optional = false -python-versions = "*" -files = [ - {file = "mvlearn-0.5.0-py3-none-any.whl", hash = "sha256:449a5c649176d4a61a0408844ad45908cfcf6825cc029aa5b876b7624a244df6"}, - {file = "mvlearn-0.5.0.tar.gz", hash = "sha256:5e1a53318dec2c5f41e627080af657364e497aa7a3cdaaaacef879fc3d427ba5"}, -] - -[package.dependencies] -joblib = ">=0.11" -matplotlib = ">=3.0.0,<=3.3.4" -numpy = ">=1.17.0" -scikit-learn = ">=0.19.1" -scipy = ">=1.5.0" -seaborn = ">=0.9.0" - -[package.extras] -multiviewica = ["multiviewica (>=0.0.1)", "python-picard (>=0.4)"] -torch = ["torch (>=1.1.0)", "tqdm"] - [[package]] name = "mypy-extensions" version = "1.0.0" @@ -1631,24 +1011,6 @@ certifi = "*" cftime = "*" numpy = "*" -[[package]] -name = "networkx" -version = "3.2" -description = "Python package for creating and manipulating graphs and networks" -optional = false -python-versions = ">=3.9" -files = [ - {file = "networkx-3.2-py3-none-any.whl", hash = "sha256:8b25f564bd28f94ac821c58b04ae1a3109e73b001a7d476e4bb0d00d63706bf8"}, - {file = "networkx-3.2.tar.gz", hash = "sha256:bda29edf392d9bfa5602034c767d28549214ec45f620081f0b74dc036a1fbbc1"}, -] - -[package.extras] -default = ["matplotlib (>=3.5)", "numpy (>=1.22)", "pandas (>=1.4)", "scipy (>=1.9,!=1.11.0,!=1.11.1)"] -developer = ["changelist (==0.4)", "mypy (>=1.1)", "pre-commit (>=3.2)", "rtoml"] -doc = ["nb2plots (>=0.7)", "nbconvert (<7.9)", "numpydoc (>=1.6)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.14)", "sphinx (>=7)", "sphinx-gallery (>=0.14)", "texext (>=0.6.7)"] -extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.11)", "sympy (>=1.10)"] -test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] - [[package]] name = "numba" version = "0.57.1" @@ -1858,73 +1220,6 @@ six = "*" [package.extras] test = ["pytest", "pytest-cov", "scipy"] -[[package]] -name = "pillow" -version = "10.1.0" -description = "Python Imaging Library (Fork)" -optional = false -python-versions = ">=3.8" -files = [ - {file = "Pillow-10.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1ab05f3db77e98f93964697c8efc49c7954b08dd61cff526b7f2531a22410106"}, - {file = "Pillow-10.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6932a7652464746fcb484f7fc3618e6503d2066d853f68a4bd97193a3996e273"}, - {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f63b5a68daedc54c7c3464508d8c12075e56dcfbd42f8c1bf40169061ae666"}, - {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0949b55eb607898e28eaccb525ab104b2d86542a85c74baf3a6dc24002edec2"}, - {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ae88931f93214777c7a3aa0a8f92a683f83ecde27f65a45f95f22d289a69e593"}, - {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b0eb01ca85b2361b09480784a7931fc648ed8b7836f01fb9241141b968feb1db"}, - {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d27b5997bdd2eb9fb199982bb7eb6164db0426904020dc38c10203187ae2ff2f"}, - {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7df5608bc38bd37ef585ae9c38c9cd46d7c81498f086915b0f97255ea60c2818"}, - {file = 
"Pillow-10.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:41f67248d92a5e0a2076d3517d8d4b1e41a97e2df10eb8f93106c89107f38b57"}, - {file = "Pillow-10.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1fb29c07478e6c06a46b867e43b0bcdb241b44cc52be9bc25ce5944eed4648e7"}, - {file = "Pillow-10.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2cdc65a46e74514ce742c2013cd4a2d12e8553e3a2563c64879f7c7e4d28bce7"}, - {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50d08cd0a2ecd2a8657bd3d82c71efd5a58edb04d9308185d66c3a5a5bed9610"}, - {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:062a1610e3bc258bff2328ec43f34244fcec972ee0717200cb1425214fe5b839"}, - {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:61f1a9d247317fa08a308daaa8ee7b3f760ab1809ca2da14ecc88ae4257d6172"}, - {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a646e48de237d860c36e0db37ecaecaa3619e6f3e9d5319e527ccbc8151df061"}, - {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:47e5bf85b80abc03be7455c95b6d6e4896a62f6541c1f2ce77a7d2bb832af262"}, - {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a92386125e9ee90381c3369f57a2a50fa9e6aa8b1cf1d9c4b200d41a7dd8e992"}, - {file = "Pillow-10.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:0f7c276c05a9767e877a0b4c5050c8bee6a6d960d7f0c11ebda6b99746068c2a"}, - {file = "Pillow-10.1.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:a89b8312d51715b510a4fe9fc13686283f376cfd5abca8cd1c65e4c76e21081b"}, - {file = "Pillow-10.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:00f438bb841382b15d7deb9a05cc946ee0f2c352653c7aa659e75e592f6fa17d"}, - {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d929a19f5469b3f4df33a3df2983db070ebb2088a1e145e18facbc28cae5b27"}, - {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a92109192b360634a4489c0c756364c0c3a2992906752165ecb50544c251312"}, - {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:0248f86b3ea061e67817c47ecbe82c23f9dd5d5226200eb9090b3873d3ca32de"}, - {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9882a7451c680c12f232a422730f986a1fcd808da0fd428f08b671237237d651"}, - {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1c3ac5423c8c1da5928aa12c6e258921956757d976405e9467c5f39d1d577a4b"}, - {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:806abdd8249ba3953c33742506fe414880bad78ac25cc9a9b1c6ae97bedd573f"}, - {file = "Pillow-10.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:eaed6977fa73408b7b8a24e8b14e59e1668cfc0f4c40193ea7ced8e210adf996"}, - {file = "Pillow-10.1.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:fe1e26e1ffc38be097f0ba1d0d07fcade2bcfd1d023cda5b29935ae8052bd793"}, - {file = "Pillow-10.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7a7e3daa202beb61821c06d2517428e8e7c1aab08943e92ec9e5755c2fc9ba5e"}, - {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24fadc71218ad2b8ffe437b54876c9382b4a29e030a05a9879f615091f42ffc2"}, - {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa1d323703cfdac2036af05191b969b910d8f115cf53093125e4058f62012c9a"}, - {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = 
"sha256:912e3812a1dbbc834da2b32299b124b5ddcb664ed354916fd1ed6f193f0e2d01"}, - {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:7dbaa3c7de82ef37e7708521be41db5565004258ca76945ad74a8e998c30af8d"}, - {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9d7bc666bd8c5a4225e7ac71f2f9d12466ec555e89092728ea0f5c0c2422ea80"}, - {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:baada14941c83079bf84c037e2d8b7506ce201e92e3d2fa0d1303507a8538212"}, - {file = "Pillow-10.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:2ef6721c97894a7aa77723740a09547197533146fba8355e86d6d9a4a1056b14"}, - {file = "Pillow-10.1.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0a026c188be3b443916179f5d04548092e253beb0c3e2ee0a4e2cdad72f66099"}, - {file = "Pillow-10.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:04f6f6149f266a100374ca3cc368b67fb27c4af9f1cc8cb6306d849dcdf12616"}, - {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb40c011447712d2e19cc261c82655f75f32cb724788df315ed992a4d65696bb"}, - {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a8413794b4ad9719346cd9306118450b7b00d9a15846451549314a58ac42219"}, - {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c9aeea7b63edb7884b031a35305629a7593272b54f429a9869a4f63a1bf04c34"}, - {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b4005fee46ed9be0b8fb42be0c20e79411533d1fd58edabebc0dd24626882cfd"}, - {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4d0152565c6aa6ebbfb1e5d8624140a440f2b99bf7afaafbdbf6430426497f28"}, - {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d921bc90b1defa55c9917ca6b6b71430e4286fc9e44c55ead78ca1a9f9eba5f2"}, - {file = "Pillow-10.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:cfe96560c6ce2f4c07d6647af2d0f3c54cc33289894ebd88cfbb3bcd5391e256"}, - {file = "Pillow-10.1.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:937bdc5a7f5343d1c97dc98149a0be7eb9704e937fe3dc7140e229ae4fc572a7"}, - {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1c25762197144e211efb5f4e8ad656f36c8d214d390585d1d21281f46d556ba"}, - {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:afc8eef765d948543a4775f00b7b8c079b3321d6b675dde0d02afa2ee23000b4"}, - {file = "Pillow-10.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:883f216eac8712b83a63f41b76ddfb7b2afab1b74abbb413c5df6680f071a6b9"}, - {file = "Pillow-10.1.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b920e4d028f6442bea9a75b7491c063f0b9a3972520731ed26c83e254302eb1e"}, - {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c41d960babf951e01a49c9746f92c5a7e0d939d1652d7ba30f6b3090f27e412"}, - {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1fafabe50a6977ac70dfe829b2d5735fd54e190ab55259ec8aea4aaea412fa0b"}, - {file = "Pillow-10.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3b834f4b16173e5b92ab6566f0473bfb09f939ba14b23b8da1f54fa63e4b623f"}, - {file = "Pillow-10.1.0.tar.gz", hash = "sha256:e6bf8de6c36ed96c86ea3b6e1d5273c53f46ef518a062464cd7ef5dd2cf92e38"}, -] - -[package.extras] -docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] -tests = ["check-manifest", "coverage", 
"defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] - [[package]] name = "platformdirs" version = "3.10.0" @@ -2050,20 +1345,6 @@ files = [ [package.extras] plugins = ["importlib-metadata"] -[[package]] -name = "pyparsing" -version = "3.1.1" -description = "pyparsing module - Classes and methods to define and execute parsing grammars" -optional = false -python-versions = ">=3.6.8" -files = [ - {file = "pyparsing-3.1.1-py3-none-any.whl", hash = "sha256:32c7c0b711493c72ff18a981d24f28aaf9c1fb7ed5e9667c9e84e3db623bdbfb"}, - {file = "pyparsing-3.1.1.tar.gz", hash = "sha256:ede28a1a32462f5a9705e07aea48001a08f7cf81a021585011deba701581a0db"}, -] - -[package.extras] -diagrams = ["jinja2", "railroad-diagrams"] - [[package]] name = "pytest" version = "7.4.0" @@ -2100,37 +1381,6 @@ files = [ [package.dependencies] six = ">=1.5" -[[package]] -name = "pytorch-lightning" -version = "2.1.0" -description = "PyTorch Lightning is the lightweight PyTorch wrapper for ML researchers. Scale your models. Write less boilerplate." -optional = false -python-versions = ">=3.8" -files = [ - {file = "pytorch-lightning-2.1.0.tar.gz", hash = "sha256:bf9e26b293e1ccda5f8e146fe58716eecfd77e9639ef3ec2210b0dcba51c4593"}, - {file = "pytorch_lightning-2.1.0-py3-none-any.whl", hash = "sha256:2802d683ef513235dfc211f6bc45d7086e8982feaac1625aafd2886c5e5b96f8"}, -] - -[package.dependencies] -fsspec = {version = ">2021.06.0", extras = ["http"]} -lightning-utilities = ">=0.8.0" -numpy = ">=1.17.2" -packaging = ">=20.0" -PyYAML = ">=5.4" -torch = ">=1.12.0" -torchmetrics = ">=0.7.0" -tqdm = ">=4.57.0" -typing-extensions = ">=4.0.0" - -[package.extras] -all = ["deepspeed (>=0.8.2,<=0.9.3)", "gym[classic-control] (>=0.17.0)", "hydra-core (>=1.0.5)", "ipython[all] (<8.15.0)", "jsonargparse[signatures] (>=4.18.0)", "lightning-utilities (>=0.8.0)", "matplotlib (>3.1)", "omegaconf (>=2.0.5)", "rich (>=12.3.0)", "tensorboardX (>=2.2)", "torchmetrics (>=0.10.0)", "torchvision (>=0.13.0)"] -deepspeed = ["deepspeed (>=0.8.2,<=0.9.3)"] -dev = ["cloudpickle (>=1.3)", "coverage (==7.3.1)", "deepspeed (>=0.8.2,<=0.9.3)", "fastapi", "gym[classic-control] (>=0.17.0)", "hydra-core (>=1.0.5)", "ipython[all] (<8.15.0)", "jsonargparse[signatures] (>=4.18.0)", "lightning-utilities (>=0.8.0)", "matplotlib (>3.1)", "omegaconf (>=2.0.5)", "onnx (>=0.14.0)", "onnxruntime (>=0.15.0)", "pandas (>1.0)", "psutil (<5.9.6)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "rich (>=12.3.0)", "scikit-learn (>0.22.1)", "tensorboard (>=2.9.1)", "tensorboardX (>=2.2)", "torchmetrics (>=0.10.0)", "torchvision (>=0.13.0)", "uvicorn"] -examples = ["gym[classic-control] (>=0.17.0)", "ipython[all] (<8.15.0)", "lightning-utilities (>=0.8.0)", "torchmetrics (>=0.10.0)", "torchvision (>=0.13.0)"] -extra = ["hydra-core (>=1.0.5)", "jsonargparse[signatures] (>=4.18.0)", "matplotlib (>3.1)", "omegaconf (>=2.0.5)", "rich (>=12.3.0)", "tensorboardX (>=2.2)"] -strategies = ["deepspeed (>=0.8.2,<=0.9.3)"] -test = ["cloudpickle (>=1.3)", "coverage (==7.3.1)", "fastapi", "onnx (>=0.14.0)", "onnxruntime (>=0.15.0)", "pandas (>1.0)", "psutil (<5.9.6)", "pytest (==7.4.0)", "pytest-cov (==4.1.0)", "pytest-random-order (==1.1.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "scikit-learn (>0.22.1)", "tensorboard (>=2.9.1)", "uvicorn"] - [[package]] name = "pytz" version = "2023.3" @@ -2552,27 +1802,6 @@ dev = ["flake8", "mypy", 
"pycodestyle", "typing_extensions"] doc = ["matplotlib (>2)", "numpydoc", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-panels (>=0.5.2)", "sphinx-tabs"] test = ["asv", "gmpy2", "mpmath", "pytest", "pytest-cov", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] -[[package]] -name = "seaborn" -version = "0.13.0" -description = "Statistical data visualization" -optional = false -python-versions = ">=3.8" -files = [ - {file = "seaborn-0.13.0-py3-none-any.whl", hash = "sha256:70d740828c48de0f402bb17234e475eda687e3c65f4383ea25d0cc4728f7772e"}, - {file = "seaborn-0.13.0.tar.gz", hash = "sha256:0e76abd2ec291c655b516703c6a022f0fd5afed26c8e714e8baef48150f73598"}, -] - -[package.dependencies] -matplotlib = ">=3.3,<3.6.1 || >3.6.1" -numpy = ">=1.20,<1.24.0 || >1.24.0" -pandas = ">=1.2" - -[package.extras] -dev = ["flake8", "flit", "mypy", "pandas-stubs", "pre-commit", "pytest", "pytest-cov", "pytest-xdist"] -docs = ["ipykernel", "nbconvert", "numpydoc", "pydata_sphinx_theme (==0.10.0rc2)", "pyyaml", "sphinx (<6.0.0)", "sphinx-copybutton", "sphinx-design", "sphinx-issues"] -stats = ["scipy (>=1.7)", "statsmodels (>=0.12)"] - [[package]] name = "six" version = "1.16.0" @@ -2829,35 +2058,6 @@ build = ["cython (>=0.29.26)"] develop = ["colorama", "cython (>=0.29.26)", "cython (>=0.29.28,<3.0.0)", "flake8", "isort", "joblib", "matplotlib (>=3)", "oldest-supported-numpy (>=2022.4.18)", "pytest (>=7.0.1,<7.1.0)", "pytest-randomly", "pytest-xdist", "pywinpty", "setuptools-scm[toml] (>=7.0.0,<7.1.0)"] docs = ["ipykernel", "jupyter-client", "matplotlib", "nbconvert", "nbformat", "numpydoc", "pandas-datareader", "sphinx"] -[[package]] -name = "sympy" -version = "1.12" -description = "Computer algebra system (CAS) in Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "sympy-1.12-py3-none-any.whl", hash = "sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5"}, - {file = "sympy-1.12.tar.gz", hash = "sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8"}, -] - -[package.dependencies] -mpmath = ">=0.19" - -[[package]] -name = "tensorly" -version = "0.8.1" -description = "Tensor learning in Python." 
-optional = false -python-versions = "*" -files = [ - {file = "tensorly-0.8.1-py3-none-any.whl", hash = "sha256:08988dbc5e433c3f255d0e00855f99a613fe273d50a1627b7e82b03ff2a6da9a"}, - {file = "tensorly-0.8.1.tar.gz", hash = "sha256:cf78e4ffe612feca3510214002845c6831b267b1f2c1181154d41430310b237d"}, -] - -[package.dependencies] -numpy = "*" -scipy = "*" - [[package]] name = "threadpoolctl" version = "3.2.0" @@ -2909,74 +2109,6 @@ files = [ {file = "toolz-0.12.0.tar.gz", hash = "sha256:88c570861c440ee3f2f6037c4654613228ff40c93a6c25e0eba70d17282c6194"}, ] -[[package]] -name = "torch" -version = "2.1.0" -description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" -optional = false -python-versions = ">=3.8.0" -files = [ - {file = "torch-2.1.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:bf57f8184b2c317ef81fb33dc233ce4d850cd98ef3f4a38be59c7c1572d175db"}, - {file = "torch-2.1.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:a04a0296d47f28960f51c18c5489a8c3472f624ec3b5bcc8e2096314df8c3342"}, - {file = "torch-2.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:0bd691efea319b14ef239ede16d8a45c246916456fa3ed4f217d8af679433cc6"}, - {file = "torch-2.1.0-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:101c139152959cb20ab370fc192672c50093747906ee4ceace44d8dd703f29af"}, - {file = "torch-2.1.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:a6b7438a90a870e4cdeb15301519ae6c043c883fcd224d303c5b118082814767"}, - {file = "torch-2.1.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:2224622407ca52611cbc5b628106fde22ed8e679031f5a99ce286629fc696128"}, - {file = "torch-2.1.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:8132efb782cd181cc2dcca5e58effbe4217cdb2581206ac71466d535bf778867"}, - {file = "torch-2.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:5c3bfa91ce25ba10116c224c59d5b64cdcce07161321d978bd5a1f15e1ebce72"}, - {file = "torch-2.1.0-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:601b0a2a9d9233fb4b81f7d47dca9680d4f3a78ca3f781078b6ad1ced8a90523"}, - {file = "torch-2.1.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:3cd1dedff13884d890f18eea620184fb4cd8fd3c68ce3300498f427ae93aa962"}, - {file = "torch-2.1.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:fb7bf0cc1a3db484eb5d713942a93172f3bac026fcb377a0cd107093d2eba777"}, - {file = "torch-2.1.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:761822761fffaa1c18a62c5deb13abaa780862577d3eadc428f1daa632536905"}, - {file = "torch-2.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:458a6d6d8f7d2ccc348ac4d62ea661b39a3592ad15be385bebd0a31ced7e00f4"}, - {file = "torch-2.1.0-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:c8bf7eaf9514465e5d9101e05195183470a6215bb50295c61b52302a04edb690"}, - {file = "torch-2.1.0-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:05661c32ec14bc3a157193d0f19a7b19d8e61eb787b33353cad30202c295e83b"}, - {file = "torch-2.1.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:556d8dd3e0c290ed9d4d7de598a213fb9f7c59135b4fee144364a8a887016a55"}, - {file = "torch-2.1.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:de7d63c6ecece118684415a3dbd4805af4a4c1ee1490cccf7405d8c240a481b4"}, - {file = "torch-2.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:2419cf49aaf3b2336c7aa7a54a1b949fa295b1ae36f77e2aecb3a74e3a947255"}, - {file = "torch-2.1.0-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:6ad491e70dbe4288d17fdbfc7fbfa766d66cbe219bc4871c7a8096f4a37c98df"}, - {file = "torch-2.1.0-cp39-none-macosx_11_0_arm64.whl", hash = 
"sha256:421739685eba5e0beba42cb649740b15d44b0d565c04e6ed667b41148734a75b"}, -] - -[package.dependencies] -filelock = "*" -fsspec = "*" -jinja2 = "*" -networkx = "*" -sympy = "*" -typing-extensions = "*" - -[package.extras] -opt-einsum = ["opt-einsum (>=3.3)"] - -[[package]] -name = "torchmetrics" -version = "1.2.0" -description = "PyTorch native Metrics" -optional = false -python-versions = ">=3.8" -files = [ - {file = "torchmetrics-1.2.0-py3-none-any.whl", hash = "sha256:da2cb18822b285786d082c40efb9e1d861aac425f58230234fe6ce233cf002f8"}, - {file = "torchmetrics-1.2.0.tar.gz", hash = "sha256:7eb28340bde45e13187a9ad54a4a7010a50417815d8181a5df6131f116ffe1b7"}, -] - -[package.dependencies] -lightning-utilities = ">=0.8.0" -numpy = ">1.20.0" -torch = ">=1.8.1" - -[package.extras] -all = ["SciencePlots (>=2.0.0)", "lpips (<=0.1.4)", "matplotlib (>=3.2.0)", "mypy (==1.5.1)", "nltk (>=3.6)", "piq (<=0.8.0)", "pycocotools (>2.0.0)", "pystoi (>=0.3.0)", "regex (>=2021.9.24)", "scipy (>1.0.0)", "torch-fidelity (<=0.4.0)", "torchaudio (>=0.10.0)", "torchvision (>=0.8)", "tqdm (>=4.41.0)", "transformers (>4.4.0)", "transformers (>=4.10.0)", "types-PyYAML", "types-emoji", "types-protobuf", "types-requests", "types-setuptools", "types-six", "types-tabulate"] -audio = ["pystoi (>=0.3.0)", "torchaudio (>=0.10.0)"] -detection = ["pycocotools (>2.0.0)", "torchvision (>=0.8)"] -dev = ["SciencePlots (>=2.0.0)", "bert-score (==0.3.13)", "cloudpickle (>1.3)", "coverage (==7.3.1)", "dython (<=0.7.4)", "fairlearn", "fast-bss-eval (>=0.1.0)", "faster-coco-eval (>=1.3.3)", "fire (<=0.5.0)", "huggingface-hub (<0.18)", "jiwer (>=2.3.0)", "kornia (>=0.6.7)", "lpips (<=0.1.4)", "matplotlib (>=3.2.0)", "mir-eval (>=0.6)", "mypy (==1.5.1)", "netcal (>1.0.0)", "nltk (>=3.6)", "numpy (<1.25.0)", "pandas (>1.0.0)", "pandas (>=1.4.0)", "phmdoctest (==1.4.0)", "piq (<=0.8.0)", "psutil (<=5.9.5)", "pycocotools (>2.0.0)", "pystoi (>=0.3.0)", "pytest (==7.4.2)", "pytest-cov (==4.1.0)", "pytest-doctestplus (==1.0.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "pytorch-msssim (==1.0.0)", "regex (>=2021.9.24)", "requests (<=2.31.0)", "rouge-score (>0.1.0)", "sacrebleu (>=2.0.0)", "scikit-image (>=0.19.0)", "scikit-learn (>=1.1.1)", "scipy (>1.0.0)", "sewar (>=0.4.4)", "statsmodels (>0.13.5)", "torch-complex (<=0.4.3)", "torch-fidelity (<=0.4.0)", "torchaudio (>=0.10.0)", "torchvision (>=0.8)", "tqdm (>=4.41.0)", "transformers (>4.4.0)", "transformers (>=4.10.0)", "types-PyYAML", "types-emoji", "types-protobuf", "types-requests", "types-setuptools", "types-six", "types-tabulate"] -image = ["lpips (<=0.1.4)", "scipy (>1.0.0)", "torch-fidelity (<=0.4.0)", "torchvision (>=0.8)"] -multimodal = ["piq (<=0.8.0)", "transformers (>=4.10.0)"] -test = ["bert-score (==0.3.13)", "cloudpickle (>1.3)", "coverage (==7.3.1)", "dython (<=0.7.4)", "fairlearn", "fast-bss-eval (>=0.1.0)", "faster-coco-eval (>=1.3.3)", "fire (<=0.5.0)", "huggingface-hub (<0.18)", "jiwer (>=2.3.0)", "kornia (>=0.6.7)", "mir-eval (>=0.6)", "netcal (>1.0.0)", "numpy (<1.25.0)", "pandas (>1.0.0)", "pandas (>=1.4.0)", "phmdoctest (==1.4.0)", "psutil (<=5.9.5)", "pytest (==7.4.2)", "pytest-cov (==4.1.0)", "pytest-doctestplus (==1.0.0)", "pytest-rerunfailures (==12.0)", "pytest-timeout (==2.1.0)", "pytorch-msssim (==1.0.0)", "requests (<=2.31.0)", "rouge-score (>0.1.0)", "sacrebleu (>=2.0.0)", "scikit-image (>=0.19.0)", "scikit-learn (>=1.1.1)", "scipy (>1.0.0)", "sewar (>=0.4.4)", "statsmodels (>0.13.5)", "torch-complex (<=0.4.3)"] -text = ["nltk (>=3.6)", 
"regex (>=2021.9.24)", "tqdm (>=4.41.0)", "transformers (>4.4.0)"] -typing = ["mypy (==1.5.1)", "types-PyYAML", "types-emoji", "types-protobuf", "types-requests", "types-setuptools", "types-six", "types-tabulate"] -visual = ["SciencePlots (>=2.0.0)", "matplotlib (>=3.2.0)"] - [[package]] name = "tornado" version = "6.3.2" @@ -3036,7 +2168,7 @@ test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"] name = "typing-extensions" version = "4.7.1" description = "Backported and Experimental Type Hints for Python 3.7+" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, @@ -3123,93 +2255,6 @@ io = ["cftime", "fsspec", "h5netcdf", "netCDF4", "pooch", "pydap", "scipy", "zar parallel = ["dask[complete]"] viz = ["matplotlib", "nc-time-axis", "seaborn"] -[[package]] -name = "yarl" -version = "1.9.2" -description = "Yet another URL library" -optional = false -python-versions = ">=3.7" -files = [ - {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8c2ad583743d16ddbdf6bb14b5cd76bf43b0d0006e918809d5d4ddf7bde8dd82"}, - {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:82aa6264b36c50acfb2424ad5ca537a2060ab6de158a5bd2a72a032cc75b9eb8"}, - {file = "yarl-1.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c0c77533b5ed4bcc38e943178ccae29b9bcf48ffd1063f5821192f23a1bd27b9"}, - {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee4afac41415d52d53a9833ebae7e32b344be72835bbb589018c9e938045a560"}, - {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9bf345c3a4f5ba7f766430f97f9cc1320786f19584acc7086491f45524a551ac"}, - {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a96c19c52ff442a808c105901d0bdfd2e28575b3d5f82e2f5fd67e20dc5f4ea"}, - {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:891c0e3ec5ec881541f6c5113d8df0315ce5440e244a716b95f2525b7b9f3608"}, - {file = "yarl-1.9.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c3a53ba34a636a256d767c086ceb111358876e1fb6b50dfc4d3f4951d40133d5"}, - {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:566185e8ebc0898b11f8026447eacd02e46226716229cea8db37496c8cdd26e0"}, - {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2b0738fb871812722a0ac2154be1f049c6223b9f6f22eec352996b69775b36d4"}, - {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:32f1d071b3f362c80f1a7d322bfd7b2d11e33d2adf395cc1dd4df36c9c243095"}, - {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:e9fdc7ac0d42bc3ea78818557fab03af6181e076a2944f43c38684b4b6bed8e3"}, - {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56ff08ab5df8429901ebdc5d15941b59f6253393cb5da07b4170beefcf1b2528"}, - {file = "yarl-1.9.2-cp310-cp310-win32.whl", hash = "sha256:8ea48e0a2f931064469bdabca50c2f578b565fc446f302a79ba6cc0ee7f384d3"}, - {file = "yarl-1.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:50f33040f3836e912ed16d212f6cc1efb3231a8a60526a407aeb66c1c1956dde"}, - {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:646d663eb2232d7909e6601f1a9107e66f9791f290a1b3dc7057818fe44fc2b6"}, - {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:aff634b15beff8902d1f918012fc2a42e0dbae6f469fce134c8a0dc51ca423bb"}, - {file = "yarl-1.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a83503934c6273806aed765035716216cc9ab4e0364f7f066227e1aaea90b8d0"}, - {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b25322201585c69abc7b0e89e72790469f7dad90d26754717f3310bfe30331c2"}, - {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22a94666751778629f1ec4280b08eb11815783c63f52092a5953faf73be24191"}, - {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ec53a0ea2a80c5cd1ab397925f94bff59222aa3cf9c6da938ce05c9ec20428d"}, - {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:159d81f22d7a43e6eabc36d7194cb53f2f15f498dbbfa8edc8a3239350f59fe7"}, - {file = "yarl-1.9.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:832b7e711027c114d79dffb92576acd1bd2decc467dec60e1cac96912602d0e6"}, - {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:95d2ecefbcf4e744ea952d073c6922e72ee650ffc79028eb1e320e732898d7e8"}, - {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d4e2c6d555e77b37288eaf45b8f60f0737c9efa3452c6c44626a5455aeb250b9"}, - {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:783185c75c12a017cc345015ea359cc801c3b29a2966c2655cd12b233bf5a2be"}, - {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:b8cc1863402472f16c600e3e93d542b7e7542a540f95c30afd472e8e549fc3f7"}, - {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:822b30a0f22e588b32d3120f6d41e4ed021806418b4c9f0bc3048b8c8cb3f92a"}, - {file = "yarl-1.9.2-cp311-cp311-win32.whl", hash = "sha256:a60347f234c2212a9f0361955007fcf4033a75bf600a33c88a0a8e91af77c0e8"}, - {file = "yarl-1.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:be6b3fdec5c62f2a67cb3f8c6dbf56bbf3f61c0f046f84645cd1ca73532ea051"}, - {file = "yarl-1.9.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:38a3928ae37558bc1b559f67410df446d1fbfa87318b124bf5032c31e3447b74"}, - {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac9bb4c5ce3975aeac288cfcb5061ce60e0d14d92209e780c93954076c7c4367"}, - {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3da8a678ca8b96c8606bbb8bfacd99a12ad5dd288bc6f7979baddd62f71c63ef"}, - {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13414591ff516e04fcdee8dc051c13fd3db13b673c7a4cb1350e6b2ad9639ad3"}, - {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf74d08542c3a9ea97bb8f343d4fcbd4d8f91bba5ec9d5d7f792dbe727f88938"}, - {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e7221580dc1db478464cfeef9b03b95c5852cc22894e418562997df0d074ccc"}, - {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:494053246b119b041960ddcd20fd76224149cfea8ed8777b687358727911dd33"}, - {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:52a25809fcbecfc63ac9ba0c0fb586f90837f5425edfd1ec9f3372b119585e45"}, - {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:e65610c5792870d45d7b68c677681376fcf9cc1c289f23e8e8b39c1485384185"}, - {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = 
"sha256:1b1bba902cba32cdec51fca038fd53f8beee88b77efc373968d1ed021024cc04"}, - {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:662e6016409828ee910f5d9602a2729a8a57d74b163c89a837de3fea050c7582"}, - {file = "yarl-1.9.2-cp37-cp37m-win32.whl", hash = "sha256:f364d3480bffd3aa566e886587eaca7c8c04d74f6e8933f3f2c996b7f09bee1b"}, - {file = "yarl-1.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6a5883464143ab3ae9ba68daae8e7c5c95b969462bbe42e2464d60e7e2698368"}, - {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5610f80cf43b6202e2c33ba3ec2ee0a2884f8f423c8f4f62906731d876ef4fac"}, - {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b9a4e67ad7b646cd6f0938c7ebfd60e481b7410f574c560e455e938d2da8e0f4"}, - {file = "yarl-1.9.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:83fcc480d7549ccebe9415d96d9263e2d4226798c37ebd18c930fce43dfb9574"}, - {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fcd436ea16fee7d4207c045b1e340020e58a2597301cfbcfdbe5abd2356c2fb"}, - {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84e0b1599334b1e1478db01b756e55937d4614f8654311eb26012091be109d59"}, - {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3458a24e4ea3fd8930e934c129b676c27452e4ebda80fbe47b56d8c6c7a63a9e"}, - {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:838162460b3a08987546e881a2bfa573960bb559dfa739e7800ceeec92e64417"}, - {file = "yarl-1.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f4e2d08f07a3d7d3e12549052eb5ad3eab1c349c53ac51c209a0e5991bbada78"}, - {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:de119f56f3c5f0e2fb4dee508531a32b069a5f2c6e827b272d1e0ff5ac040333"}, - {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:149ddea5abf329752ea5051b61bd6c1d979e13fbf122d3a1f9f0c8be6cb6f63c"}, - {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:674ca19cbee4a82c9f54e0d1eee28116e63bc6fd1e96c43031d11cbab8b2afd5"}, - {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:9b3152f2f5677b997ae6c804b73da05a39daa6a9e85a512e0e6823d81cdad7cc"}, - {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5415d5a4b080dc9612b1b63cba008db84e908b95848369aa1da3686ae27b6d2b"}, - {file = "yarl-1.9.2-cp38-cp38-win32.whl", hash = "sha256:f7a3d8146575e08c29ed1cd287068e6d02f1c7bdff8970db96683b9591b86ee7"}, - {file = "yarl-1.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:63c48f6cef34e6319a74c727376e95626f84ea091f92c0250a98e53e62c77c72"}, - {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:75df5ef94c3fdc393c6b19d80e6ef1ecc9ae2f4263c09cacb178d871c02a5ba9"}, - {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c027a6e96ef77d401d8d5a5c8d6bc478e8042f1e448272e8d9752cb0aff8b5c8"}, - {file = "yarl-1.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3b078dbe227f79be488ffcfc7a9edb3409d018e0952cf13f15fd6512847f3f7"}, - {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59723a029760079b7d991a401386390c4be5bfec1e7dd83e25a6a0881859e716"}, - {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b03917871bf859a81ccb180c9a2e6c1e04d2f6a51d953e6a5cdd70c93d4e5a2a"}, - {file = 
"yarl-1.9.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1012fa63eb6c032f3ce5d2171c267992ae0c00b9e164efe4d73db818465fac3"}, - {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a74dcbfe780e62f4b5a062714576f16c2f3493a0394e555ab141bf0d746bb955"}, - {file = "yarl-1.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c56986609b057b4839968ba901944af91b8e92f1725d1a2d77cbac6972b9ed1"}, - {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2c315df3293cd521033533d242d15eab26583360b58f7ee5d9565f15fee1bef4"}, - {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:b7232f8dfbd225d57340e441d8caf8652a6acd06b389ea2d3222b8bc89cbfca6"}, - {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:53338749febd28935d55b41bf0bcc79d634881195a39f6b2f767870b72514caf"}, - {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:066c163aec9d3d073dc9ffe5dd3ad05069bcb03fcaab8d221290ba99f9f69ee3"}, - {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8288d7cd28f8119b07dd49b7230d6b4562f9b61ee9a4ab02221060d21136be80"}, - {file = "yarl-1.9.2-cp39-cp39-win32.whl", hash = "sha256:b124e2a6d223b65ba8768d5706d103280914d61f5cae3afbc50fc3dfcc016623"}, - {file = "yarl-1.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:61016e7d582bc46a5378ffdd02cd0314fb8ba52f40f9cf4d9a5e7dbef88dee18"}, - {file = "yarl-1.9.2.tar.gz", hash = "sha256:04ab9d4b9f587c06d801c2abfe9317b77cdf996c65a90d5e84ecc45010823571"}, -] - -[package.dependencies] -idna = ">=2.0" -multidict = ">=4.0" - [[package]] name = "zipp" version = "3.16.2" @@ -3228,4 +2273,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "99fac1aa63c4cfcfac25aa4b1adf09283d35bc1df94faef8fea7541151618184" +content-hash = "1cfe94418275b0c9eeb14a85ac855eb8700f42c462ca908b6823e7f3ac42798f" diff --git a/pyproject.toml b/pyproject.toml index d66bae1..2ccc47e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,15 +22,11 @@ statsmodels = ">=0.14.0" netCDF4 = "^1.5.7" numba = "^0.57" -[tool.poetry.dev-dependencies] +[tool.poetry.group.dev.dependencies] flake8 = "^4.0.1" pytest = "^7.0.1" coverage = "^6.3.1" - - -[tool.poetry.group.dev.dependencies] -black = "~23.7.0" -cca-zoo = "^2.3.11" +black = "~23.7.0" [tool.poetry.group.docs.dependencies] rpy2 = {version = ">=3.5", optional = true} diff --git a/tests/models/test_cca.py b/tests/models/test_cca.py index 36e8e62..620e0a0 100644 --- a/tests/models/test_cca.py +++ b/tests/models/test_cca.py @@ -2,8 +2,6 @@ import xarray as xr import pytest import dask.array as da -from cca_zoo.linear import MCCA as ReferenceCCA -from cca_zoo.linear import PCACCA as ReferenceCCA2 from numpy.testing import assert_allclose from ..conftest import generate_list_of_synthetic_dataarrays @@ -63,37 +61,3 @@ def test_scores(dim, mock_data_array_list): scores = cca.scores() assert isinstance(scores, list) - - -@pytest.mark.parametrize( - "c", - [ - (0.0), - (0.5), - (1.0), - ], -) -def test_solution(c): - """Check numerical results with cca-zoo reference implementation""" - - dalist = generate_list_of_synthetic_dataarrays( - 2, 1, 1, "index", "no_nan", "no_dask" - ) - # Ensure that the numpy 2D arrays is in the correct format - Xlist = [X.transpose("sample0", "feature0").data for X in dalist] - - cca = CCA(n_modes=2, pca=False, c=c) - cca.fit(dalist, 
dim="sample0") - comps = cca.components() - scores = cca.scores() - - # Compare with cca-zoo - # cca-zoo requires centered data - Xlist = [X - X.mean(0) for X in Xlist] - cca_ref = ReferenceCCA(latent_dimensions=2, c=c) - scores_ref = cca_ref.fit_transform(Xlist) - comps_ref = cca_ref.factor_loadings(Xlist) - - for i in range(len(scores)): - assert_allclose(abs(comps[i]), abs(comps_ref[i].T), rtol=1e-5) - assert_allclose(abs(scores[i]), abs(scores_ref[i].T), rtol=1e-5) From fb85abea09d98975d1784fd206a00bdfcd5c5e6f Mon Sep 17 00:00:00 2001 From: Niclas Rieger Date: Mon, 23 Oct 2023 22:40:11 +0200 Subject: [PATCH 43/43] build: add typing-extensions Adds Self in python3.10 --- poetry.lock | 14 +++++++------- pyproject.toml | 1 + 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/poetry.lock b/poetry.lock index 5de8f24..4b83264 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2166,13 +2166,13 @@ test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"] [[package]] name = "typing-extensions" -version = "4.7.1" -description = "Backported and Experimental Type Hints for Python 3.7+" -optional = true -python-versions = ">=3.7" +version = "4.8.0" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, - {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, + {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"}, + {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"}, ] [[package]] @@ -2273,4 +2273,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "1cfe94418275b0c9eeb14a85ac855eb8700f42c462ca908b6823e7f3ac42798f" +content-hash = "7b7b136975a0e65a82eb4ad0de78ee240bd719a40f902a1d60c7ce52467491f2" diff --git a/pyproject.toml b/pyproject.toml index 2ccc47e..f34c14c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,6 +21,7 @@ dask = ">=2023.0.1" statsmodels = ">=0.14.0" netCDF4 = "^1.5.7" numba = "^0.57" +typing-extensions = "^4.8.0" [tool.poetry.group.dev.dependencies] flake8 = "^4.0.1"
