diff --git a/docs/notebooks/active_learning_for_binary_classification.pct.py b/docs/notebooks/active_learning_for_binary_classification.pct.py index 58385ed404..9280b5e15f 100644 --- a/docs/notebooks/active_learning_for_binary_classification.pct.py +++ b/docs/notebooks/active_learning_for_binary_classification.pct.py @@ -10,7 +10,6 @@ import trieste from trieste.acquisition.function import BayesianActiveLearningByDisagreement from trieste.acquisition.rule import OBJECTIVE -from trieste.models.gpflow.models import VariationalGaussianProcess from trieste.objectives.utils import mk_observer np.random.seed(1793) diff --git a/docs/notebooks/asynchronous_greedy_multiprocessing.pct.py b/docs/notebooks/asynchronous_greedy_multiprocessing.pct.py index 020d60de30..4906732351 100644 --- a/docs/notebooks/asynchronous_greedy_multiprocessing.pct.py +++ b/docs/notebooks/asynchronous_greedy_multiprocessing.pct.py @@ -211,7 +211,7 @@ def terminate_processes(processes): f"Process {pid}: Main : received data {new_data}", flush=True, ) - except: + except Exception: continue # new_data is a tuple of (point, observation value) diff --git a/docs/notebooks/deep_gaussian_processes.pct.py b/docs/notebooks/deep_gaussian_processes.pct.py index 63e9e9e17b..a020576ceb 100644 --- a/docs/notebooks/deep_gaussian_processes.pct.py +++ b/docs/notebooks/deep_gaussian_processes.pct.py @@ -18,7 +18,6 @@ # The Michalewicz functions are highly non-stationary and have a global minimum that's hard to find, so DGPs might be more suitable than standard GPs, which may struggle because they typically have stationary kernels that cannot easily model non-stationarities. # %% -import gpflow from trieste.objectives import Michalewicz2, Michalewicz5 from trieste.objectives.utils import mk_observer from trieste.experimental.plotting import plot_function_plotly @@ -167,9 +166,6 @@ def build_dgp_model(data, search_space): # We now compare to a GP model with priors over the hyperparameters. We do not expect this to do as well because GP models cannot deal with non-stationary functions well. 
# %% -import gpflow -import tensorflow_probability as tfp - from trieste.models.gpflow import GaussianProcessRegression, build_gpr gpflow_model = build_gpr(initial_data, search_space, likelihood_variance=1e-7) diff --git a/docs/notebooks/explicit_constraints.pct.py b/docs/notebooks/explicit_constraints.pct.py index 8f267b9773..7837e66ad9 100644 --- a/docs/notebooks/explicit_constraints.pct.py +++ b/docs/notebooks/explicit_constraints.pct.py @@ -204,7 +204,6 @@ def constraint(input_data): def plot_bo_results(): dataset = result.try_get_final_dataset() query_points = dataset.query_points.numpy() - observations = dataset.observations.numpy() _, ax = plot_function_2d( ScaledBranin.objective, diff --git a/docs/notebooks/multifidelity_modelling.pct.py b/docs/notebooks/multifidelity_modelling.pct.py index 0858443c1d..d6fb09ff49 100644 --- a/docs/notebooks/multifidelity_modelling.pct.py +++ b/docs/notebooks/multifidelity_modelling.pct.py @@ -260,13 +260,13 @@ def __call__(self, x, add_noise=True): # Plot gpr results mean, var = gpr_predictions -ax.plot(X, mean, label=f"GPR", color="tab:blue") +ax.plot(X, mean, label="GPR", color="tab:blue") ax.plot(X, mean + 1.96 * tf.math.sqrt(var), alpha=0.2, color="tab:blue") ax.plot(X, mean - 1.96 * tf.math.sqrt(var), alpha=0.2, color="tab:blue") # Plot gpr results mean, var = multifidelity_predictions -ax.plot(X, mean, label=f"MultifidelityAutoregressive", color="tab:orange") +ax.plot(X, mean, label="MultifidelityAutoregressive", color="tab:orange") ax.plot(X, mean + 1.96 * tf.math.sqrt(var), alpha=0.2, color="tab:orange") ax.plot(X, mean - 1.96 * tf.math.sqrt(var), alpha=0.2, color="tab:orange") @@ -275,13 +275,13 @@ def __call__(self, x, add_noise=True): ax.plot( X, observer(X_for_multifid, add_noise=False).observations, - label=f"True function", + label="True function", color="tab:green", ) # Scatter the data ax.scatter( - hf_data.query_points, hf_data.observations, label=f"Data", color="tab:green" + hf_data.query_points, hf_data.observations, label="Data", color="tab:green" ) plt.legend() plt.show() diff --git a/pyproject.toml b/pyproject.toml index 54ead61342..115292a2bb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -85,10 +85,12 @@ disable = [ "invalid-unary-operand-type", "line-too-long", "locally-disabled", + "missing-class-docstring", "missing-module-docstring", "no-else-return", "no-self-use", "no-value-for-parameter", + "protected-access", "redefined-builtin", "redundant-keyword-arg", "suppressed-message", @@ -98,10 +100,12 @@ disable = [ "too-many-instance-attributes", "too-many-public-methods", "too-many-arguments", + "too-many-branches", "too-many-locals", "too-many-statements", "too-many-boolean-expressions", "too-many-nested-blocks", + "typevar-name-incorrect-variance", "unexpected-keyword-arg", "unused-argument", "unsubscriptable-object", diff --git a/tests/integration/models/multifidelity/test_multifidelity_models.py b/tests/integration/models/multifidelity/test_multifidelity_models.py index 8832d82080..19a9ca945c 100644 --- a/tests/integration/models/multifidelity/test_multifidelity_models.py +++ b/tests/integration/models/multifidelity/test_multifidelity_models.py @@ -129,7 +129,7 @@ def test_multifidelity_nonlinear_autoregressive_results_better_than_linear() -> build_multifidelity_autoregressive_models(initial_data, n_fidelities, input_search_space) ) - mses = list() + mses = [] for model in [nonlinear_model, linear_model]: model.update(initial_data) model.optimize(initial_data) diff --git 
a/tests/integration/test_multifidelity_bayesian_optimization.py b/tests/integration/test_multifidelity_bayesian_optimization.py index 3cac2687ca..39202a6b4c 100644 --- a/tests/integration/test_multifidelity_bayesian_optimization.py +++ b/tests/integration/test_multifidelity_bayesian_optimization.py @@ -61,7 +61,7 @@ def _build_nested_multifidelity_dataset( ) -> Dataset: num_fidelities = problem.num_fidelities initial_sample_sizes = [10 + 2 * (num_fidelities - i) for i in range(num_fidelities)] - fidelity_samples = list() + fidelity_samples = [] lowest_fidelity_sample = problem.search_space.sample(initial_sample_sizes[0]) lowest_fidelity_sample = add_fidelity_column(lowest_fidelity_sample, 0) fidelity_samples.append(lowest_fidelity_sample) diff --git a/tests/unit/acquisition/function/test_active_learning.py b/tests/unit/acquisition/function/test_active_learning.py index 67c3e675f4..736437da92 100644 --- a/tests/unit/acquisition/function/test_active_learning.py +++ b/tests/unit/acquisition/function/test_active_learning.py @@ -405,7 +405,7 @@ def test_integrated_variance_reduction_builder_updates_without_retracing() -> No ], ) def test_bayesian_active_learning_by_disagreement_is_correct(at: tf.Tensor) -> None: - """ " + """ We perform an MC check as in Section 5 of Houlsby 2011 paper. We check only the 2nd, more complicated term. """ diff --git a/trieste/acquisition/combination.py b/trieste/acquisition/combination.py index c302e0fc68..81455bef1b 100644 --- a/trieste/acquisition/combination.py +++ b/trieste/acquisition/combination.py @@ -46,7 +46,8 @@ def __init__(self, *builders: AcquisitionFunctionBuilder[ProbabilisticModelType] def __repr__(self) -> str: """""" - return "{}({})".format(self.__class__.__name__, ", ".join(map(repr, self._acquisitions))) + builders = ", ".join(map(repr, self._acquisitions)) + return f"{self.__class__.__name__}({builders})" def prepare_acquisition_function( self, diff --git a/trieste/acquisition/function/active_learning.py b/trieste/acquisition/function/active_learning.py index adcd73e853..0f0d576f24 100644 --- a/trieste/acquisition/function/active_learning.py +++ b/trieste/acquisition/function/active_learning.py @@ -64,7 +64,7 @@ def prepare_acquisition_function( if not isinstance(model, SupportsPredictJoint): raise NotImplementedError( f"PredictiveVariance only works with models that support " - f"predict_joint; received {model.__repr__()}" + f"predict_joint; received {model!r}" ) return predictive_variance(model, self._jitter) @@ -282,8 +282,7 @@ def prepare_acquisition_function( """ if not isinstance(model, FastUpdateModel): raise NotImplementedError( - f"PredictiveVariance only works with FastUpdateModel models; " - f"received {model.__repr__()}" + f"IntegratedVarianceReduction only works with FastUpdateModel models; received {model!r}" ) return integrated_variance_reduction(model, self._integration_points, self._threshold) diff --git a/trieste/acquisition/function/continuous_thompson_sampling.py b/trieste/acquisition/function/continuous_thompson_sampling.py index 38b824a10c..59798e93d2 100644 --- a/trieste/acquisition/function/continuous_thompson_sampling.py +++ b/trieste/acquisition/function/continuous_thompson_sampling.py @@ -70,7 +70,7 @@ def prepare_acquisition_function( if not isinstance(model, HasTrajectorySampler): raise ValueError( f"Thompson sampling from trajectory only supports models with a trajectory_sampler " - f"method; received {model.__repr__()}" + f"method; received {model!r}" ) self._trajectory_sampler = model.trajectory_sampler() @@ -147,7
+147,7 @@ def prepare_acquisition_function( if not isinstance(model, HasTrajectorySampler): raise ValueError( f"Thompson sampling from trajectory only supports models with a trajectory_sampler " - f"method; received {model.__repr__()}" + f"method; received {model!r}" ) self._trajectory_sampler = model.trajectory_sampler() diff --git a/trieste/acquisition/function/entropy.py b/trieste/acquisition/function/entropy.py index f786417efd..3704fff9f4 100644 --- a/trieste/acquisition/function/entropy.py +++ b/trieste/acquisition/function/entropy.py @@ -221,8 +221,6 @@ class SupportsCovarianceObservationNoise( ): """A model that supports both covariance_between_points and get_observation_noise.""" - pass - class SupportsCovarianceObservationNoiseTrajectory( HasTrajectorySampler, SupportsCovarianceObservationNoise, Protocol @@ -337,7 +335,7 @@ def prepare_acquisition_function( if not isinstance(model, SupportsCovarianceObservationNoise): raise NotImplementedError( f"GIBBON only works with models that support " - f"covariance_between_points and get_observation_noise; received {model.__repr__()}" + f"covariance_between_points and get_observation_noise; received {model!r}" ) tf.debugging.Assert(dataset is not None, [tf.constant([])]) @@ -630,8 +628,6 @@ class SupportsCovarianceWithTopFidelityPredictY( ): """A model that is both multifidelity and supports predict_y.""" - pass - MUMBOModelType = TypeVar( "MUMBOModelType", bound=SupportsCovarianceWithTopFidelityPredictY, contravariant=True ) diff --git a/trieste/acquisition/function/function.py b/trieste/acquisition/function/function.py index abb0601acb..bc8268d779 100644 --- a/trieste/acquisition/function/function.py +++ b/trieste/acquisition/function/function.py @@ -249,7 +249,7 @@ def prepare_acquisition_function( if not isinstance(model, SupportsGetObservationNoise): raise NotImplementedError( f"AugmentedExpectedImprovement only works with models that support " - f"get_observation_noise; received {model.__repr__()}" + f"get_observation_noise; received {model!r}" ) tf.debugging.Assert(dataset is not None, [tf.constant([])]) dataset = cast(Dataset, dataset) @@ -318,14 +318,11 @@ def __call__(self, x: TensorType) -> TensorType: ) mean, variance = self._model.predict(tf.squeeze(x, -2)) normal = tfp.distributions.Normal(mean, tf.sqrt(variance)) - expected_improvement = (self._eta - mean) * normal.cdf(self._eta) + variance * normal.prob( - self._eta - ) - + ei = (self._eta - mean) * normal.cdf(self._eta) + variance * normal.prob(self._eta) augmentation = 1 - (tf.math.sqrt(self._noise_variance)) / ( tf.math.sqrt(self._noise_variance + variance) ) - return expected_improvement * augmentation + return ei * augmentation class NegativeLowerConfidenceBound(SingleModelAcquisitionBuilder[ProbabilisticModel]): @@ -828,7 +825,7 @@ def prepare_acquisition_function( if not isinstance(model, HasReparamSampler): raise ValueError( f"MonteCarloExpectedImprovement only supports models with a reparam_sampler method;" - f"received {model.__repr__()}" + f" received {model!r}" ) sampler = model.reparam_sampler(self._sample_size) @@ -972,7 +969,7 @@ def prepare_acquisition_function( raise ValueError( f"MonteCarloAugmentedExpectedImprovement only supports models with a " f"reparam_sampler method and that support observation noise; received " - f"{model.__repr__()}." + f"{model!r}."
) sampler = model.reparam_sampler(self._sample_size) @@ -1167,7 +1164,7 @@ def __init__(self, sample_size: int, model: HasReparamSampler, eta: TensorType, if not isinstance(model, HasReparamSampler): raise ValueError( f"The batch Monte-Carlo expected improvement acquisition function only supports " - f"models that implement a reparam_sampler method; received {model.__repr__()}" + f"models that implement a reparam_sampler method; received {model!r}" ) sampler = model.reparam_sampler(self._sample_size) @@ -1219,7 +1216,7 @@ def __init__( def __repr__(self) -> str: """""" - return f"BatchExpectedImprovement({self._sample_size!r}, " f"jitter={self._jitter!r})" + return f"BatchExpectedImprovement({self._sample_size!r}, jitter={self._jitter!r})" def prepare_acquisition_function( self, @@ -1743,9 +1740,9 @@ def _compute_batch_expected_improvement( ) # Compute outer sum - expected_improvement = tf.reduce_sum(mean_T_term + sum_term, axis=1) + ei = tf.reduce_sum(mean_T_term + sum_term, axis=1) - return expected_improvement + return ei @tf.function def __call__(self, x: TensorType) -> TensorType: diff --git a/trieste/acquisition/function/greedy_batch.py b/trieste/acquisition/function/greedy_batch.py index 55898ed954..171e8be254 100644 --- a/trieste/acquisition/function/greedy_batch.py +++ b/trieste/acquisition/function/greedy_batch.py @@ -397,8 +397,6 @@ class FantasizerModelType( ): """The model requirements for the Fantasizer acquisition function.""" - pass - class FantasizerModelStack( PredictJointModelStack, PredictYModelStack, ModelStack[FantasizerModelType] @@ -408,8 +406,6 @@ class FantasizerModelStack( and predict_y but none of the other methods. """ - pass - FantasizerModelOrStack = Union[FantasizerModelType, FantasizerModelStack] @@ -555,7 +551,7 @@ def prepare_acquisition_function( raise NotImplementedError( f"Fantasizer only works with FastUpdateModel models that also support " f"predict_joint, get_kernel and get_observation_noise, or with " - f"ModelStack stacks of such models; received {model.__repr__()}" + f"ModelStack stacks of such models; received {model!r}" ) if pending_points is None: return self._update_base_acquisition_function(models, datasets) @@ -606,9 +602,7 @@ def _generate_fantasized_data( elif fantasize_method == "sample": fantasized_obs = model.sample(pending_points, num_samples=1)[0] else: - raise NotImplementedError( f"fantasize_method must be KB or sample, " f"received {model.__repr__()}" ) + raise NotImplementedError(f"fantasize_method must be KB or sample, received {fantasize_method!r}") return Dataset(pending_points, fantasized_obs) diff --git a/trieste/acquisition/function/multi_objective.py b/trieste/acquisition/function/multi_objective.py index 86b09e2588..af12adaa7c 100644 --- a/trieste/acquisition/function/multi_objective.py +++ b/trieste/acquisition/function/multi_objective.py @@ -77,7 +77,7 @@ def __init__( def __repr__(self) -> str: """""" if callable(self._ref_point_spec): - return f"ExpectedHypervolumeImprovement(" f"{self._ref_point_spec.__name__})" + return f"ExpectedHypervolumeImprovement({self._ref_point_spec.__name__})" else: return f"ExpectedHypervolumeImprovement({self._ref_point_spec!r})" @@ -340,7 +340,7 @@ def prepare_acquisition_function( if not isinstance(model, HasReparamSampler): raise ValueError( f"The batch Monte-Carlo expected hyper-volume improvement function only supports " - f"models that implement a reparam_sampler method; received {model.__repr__()}" + f"models that implement a reparam_sampler method; received {model!r}" ) sampler =
model.reparam_sampler(self._sample_size) diff --git a/trieste/acquisition/function/utils.py b/trieste/acquisition/function/utils.py index 234075caab..fefc9121a7 100644 --- a/trieste/acquisition/function/utils.py +++ b/trieste/acquisition/function/utils.py @@ -72,8 +72,8 @@ def _standard_normal_cdf_and_inverse_cdf( loc=tf.zeros(shape=(), dtype=dtype), scale=tf.ones(shape=(), dtype=dtype), ) - Phi: Callable[[TensorType], TensorType] = lambda x: normal.cdf(x) - iPhi: Callable[[TensorType], TensorType] = lambda x: normal.quantile(x) + Phi: Callable[[TensorType], TensorType] = normal.cdf + iPhi: Callable[[TensorType], TensorType] = normal.quantile return Phi, iPhi diff --git a/trieste/acquisition/multi_objective/pareto.py b/trieste/acquisition/multi_objective/pareto.py index 0373843575..86fa3dc1b1 100644 --- a/trieste/acquisition/multi_objective/pareto.py +++ b/trieste/acquisition/multi_objective/pareto.py @@ -111,9 +111,9 @@ def sample_diverse_subset( f" got {bounds_delta_scale_factor}" ) if bounds_delta_scale_factor < 0: - raise ValueError("bounds_delta_min should be non-negative," f" got {bounds_min_delta}") + raise ValueError(f"bounds_min_delta should be non-negative, got {bounds_min_delta}") - front_size, front_dims = self.front.shape + front_size, _ = self.front.shape if (front_size < sample_size) and allow_repeats is False: raise ValueError( @@ -169,7 +169,7 @@ def _choose_batch_with_repeats( n_times_sampled[idx] += 1 # Create a list of the sample indices - sample_ids = list() + sample_ids = [] for idx, repeats in enumerate(n_times_sampled): for _ in range(int(repeats)): sample_ids.append(idx) diff --git a/trieste/acquisition/multi_objective/partition.py b/trieste/acquisition/multi_objective/partition.py index 4afa6ebabb..4bc90c2616 100644 --- a/trieste/acquisition/multi_objective/partition.py +++ b/trieste/acquisition/multi_objective/partition.py @@ -189,7 +189,7 @@ def __init__(self, front: TensorType): tf.debugging.assert_equal( tf.reduce_all(non_dominated(front)[1]), True, - message=f"\ninput {front} " f"contains dominated points", + message=f"\ninput {front} contains dominated points", ) self.front = tf.gather_nd(front, tf.argsort(front[:, :1], axis=0)) # sort input front self._bounds = self._get_bound_index() @@ -235,7 +235,7 @@ def __init__(self, front: TensorType, threshold: TensorType | float = 0): tf.debugging.assert_equal( tf.reduce_all(non_dominated(front)[1]), True, - message=f"\ninput {front} " f"contains dominated points", + message=f"\ninput {front} contains dominated points", ) self.front = tf.gather_nd(front, tf.argsort(front[:, :1], axis=0)) # sort self.front = front diff --git a/trieste/acquisition/optimizer.py b/trieste/acquisition/optimizer.py index c6c818428a..d1d4d8c008 100644 --- a/trieste/acquisition/optimizer.py +++ b/trieste/acquisition/optimizer.py @@ -605,6 +605,7 @@ def run( constraints: Sequence[Constraint], optimizer_args: Optional[dict[str, Any]] = None, ) -> spo.OptimizeResult: + """Greenlet run method.""" cache_x = start + 1 # Any value different from `start`.
cache_y: Optional["np.ndarray[Any, Any]"] = None cache_dy_dx: Optional["np.ndarray[Any, Any]"] = None @@ -626,7 +627,7 @@ def value_and_gradient( method = "trust-constr" if len(constraints) else "l-bfgs-b" optimizer_args = dict( - dict(method=method, constraints=constraints), **(optimizer_args or {}) + {"method": method, "constraints": constraints}, **(optimizer_args or {}) ) return spo.minimize( lambda x: value_and_gradient(x)[0], diff --git a/trieste/acquisition/rule.py b/trieste/acquisition/rule.py index 352f446736..ae1a1f468f 100644 --- a/trieste/acquisition/rule.py +++ b/trieste/acquisition/rule.py @@ -1007,7 +1007,6 @@ def initialize( :param models: The model for each tag. :param datasets: The dataset for each tag. """ - ... @abstractmethod def update( @@ -1021,7 +1020,6 @@ def update( :param models: The model for each tag. :param datasets: The dataset for each tag. """ - ... def _get_tags(self, tags: Set[Tag]) -> Tuple[Set[Tag], Set[Tag]]: # Separate tags into local (matching index) and global tags (without matching @@ -1155,7 +1153,7 @@ def __init__( self._subspaces = tuple(init_subspaces) for index, subspace in enumerate(self._subspaces): subspace.region_index = index # Override the index. - self._tags = tuple([str(index) for index in range(len(init_subspaces))]) + self._tags = tuple(str(index) for index in range(len(init_subspaces))) self._rule = rule # The rules for each subspace. These are only used when we want to run the base rule @@ -1345,7 +1343,6 @@ def get_initialize_subspaces_mask( :param datasets: The dataset for each tag. :return: A boolean mask of length V, where V is the number of subspaces. """ - ... def filter_datasets( self, models: Mapping[Tag, ProbabilisticModelType], datasets: Mapping[Tag, Dataset] @@ -1456,6 +1453,7 @@ def __init__( self._initialized = False self._step_is_success = False + self.eps = 0.0 self._init_eps() self._update_bounds() self._y_min = np.inf @@ -1574,12 +1572,12 @@ def acquire( num_query_points = 1 init_subspaces: Tuple[UpdatableTrustRegionBox, ...] = tuple( - [SingleObjectiveTrustRegionBox(search_space) for _ in range(num_query_points)] + SingleObjectiveTrustRegionBox(search_space) for _ in range(num_query_points) ) self._subspaces = init_subspaces for index, subspace in enumerate(self._subspaces): subspace.region_index = index # Override the index. - self._tags = tuple([str(index) for index in range(self.num_local_datasets)]) + self._tags = tuple(str(index) for index in range(self.num_local_datasets)) # Ensure passed in global search space is always the same as the search space passed to # the subspaces. @@ -1769,11 +1767,11 @@ def __init__( self.success_counter = 0 self.failure_counter = 0 - if not self.success_tolerance > 0: + if self.success_tolerance <= 0: raise ValueError( f"success tolerance must be an integer greater than 0, got {self.success_tolerance}" ) - if not self.failure_tolerance > 0: + if self.failure_tolerance <= 0: raise ValueError( f"success tolerance must be an integer greater than 0, got {self.failure_tolerance}" ) @@ -1917,14 +1915,14 @@ def __init__( points from the Sharpe ratio optimisation. Defaults to 0.1. :param noisy_observations: Whether the observations have noise. Defaults to True. 
""" - if not num_query_points > 0: + if num_query_points <= 0: raise ValueError(f"Num query points must be greater than 0, got {num_query_points}") - if not ga_population_size >= num_query_points: + if ga_population_size < num_query_points: raise ValueError( "Population size must be greater or equal to num query points size, got num" f" query points as {num_query_points} and population size as {ga_population_size}" ) - if not ga_n_generations > 0: + if ga_n_generations <= 0: raise ValueError(f"Number of generation must be greater than 0, got {ga_n_generations}") if not 0.0 <= filter_threshold < 1.0: raise ValueError(f"Filter threshold must be in range [0.0,1.0), got {filter_threshold}") diff --git a/trieste/acquisition/sampler.py b/trieste/acquisition/sampler.py index d45bbf07ab..27e82051d7 100644 --- a/trieste/acquisition/sampler.py +++ b/trieste/acquisition/sampler.py @@ -46,6 +46,8 @@ def __init__(self, sample_min_value: bool = False): @property def sample_min_value(self) -> bool: + """Whether this samples from the minimum value of the function + (as opposed to the function's minimiser).""" return self._sample_min_value def __repr__(self) -> str: @@ -245,7 +247,7 @@ def sample( if not isinstance(model, HasTrajectorySampler): raise ValueError( f"Thompson sampling from trajectory only supports models with a trajectory_sampler " - f"method; received {model.__repr__()}" + f"method; received {model!r}" ) trajectory_sampler = model.trajectory_sampler() diff --git a/trieste/ask_tell_optimization.py b/trieste/ask_tell_optimization.py index 78f336e91b..46dd6a8b6a 100644 --- a/trieste/ask_tell_optimization.py +++ b/trieste/ask_tell_optimization.py @@ -317,7 +317,7 @@ def model(self, model: TrainableProbabilisticModelType) -> None: """Update the current model, using the OBJECTIVE tag.""" if len(self.models) != 1: raise ValueError(f"Expected a single model, found {len(self.models)}") - elif self.models.keys() != {OBJECTIVE}: + if self.models.keys() != {OBJECTIVE}: raise ValueError( f"Expected a single model tagged OBJECTIVE, found {self.models.keys()}. " "To update this, pass in a dictionary to the models property instead." diff --git a/trieste/experimental/plotting/plotting.py b/trieste/experimental/plotting/plotting.py index 43bdb17479..04bfc52745 100644 --- a/trieste/experimental/plotting/plotting.py +++ b/trieste/experimental/plotting/plotting.py @@ -358,7 +358,7 @@ def plot_mobo_points_in_obj_space( """ obj_num = obs_values.shape[-1] tf.debugging.assert_shapes([]) - assert obj_num == 2 or obj_num == 3, NotImplementedError( + assert obj_num in (2, 3), NotImplementedError( f"Only support 2/3-objective functions but found: {obj_num}" ) diff --git a/trieste/logging.py b/trieste/logging.py index 74e95de674..4966fe2c78 100644 --- a/trieste/logging.py +++ b/trieste/logging.py @@ -75,15 +75,15 @@ def tensorboard_writer(summary_writer: Optional[tf.summary.SummaryWriter]) -> It set_tensorboard_writer(old_writer) -def set_step_number(step_number: int) -> None: +def set_step_number(new_step_number: int) -> None: """ Set an optimization step number to use for logging purposes. 
- :param step_number: current step number + :param new_step_number: new current step number :raise ValueError: if step_number < 0 """ global _STEP_NUMBER - _STEP_NUMBER = step_number + _STEP_NUMBER = new_step_number def get_step_number() -> int: @@ -96,14 +96,14 @@ def get_step_number() -> int: @contextmanager -def step_number(step_number: int) -> Iterator[None]: +def step_number(new_step_number: int) -> Iterator[None]: """ A context manager for setting or overriding the optimization step number inside a code block. - :param step_number: current step number + :param new_step_number: new current step number """ old_step_number = get_step_number() - set_step_number(step_number) + set_step_number(new_step_number) yield set_step_number(old_step_number) diff --git a/trieste/models/gpflow/builders.py b/trieste/models/gpflow/builders.py index c86b1104db..99c66abb01 100644 --- a/trieste/models/gpflow/builders.py +++ b/trieste/models/gpflow/builders.py @@ -119,12 +119,12 @@ def build_gpr( """ empirical_mean, empirical_variance, _ = _get_data_stats(data) - if kernel is None and search_space is None: - raise ValueError( - "'build_gpr' function requires one of 'search_space' or 'kernel' arguments," - " but got neither" - ) - elif kernel is None and search_space is not None: + if kernel is None: + if search_space is None: + raise ValueError( + "'build_gpr' function requires one of 'search_space' or 'kernel' arguments," + " but got neither" + ) kernel = _get_kernel(empirical_variance, search_space, kernel_priors, kernel_priors) mean = _get_mean_function(empirical_mean) @@ -589,7 +589,7 @@ def _create_multifidelity_nonlinear_autoregressive_kernels( scale_lengthscale = 1.0 kernels = [kernel_base_class(lengthscales=lengthscales)] - for i in range(1, n_fidelities): + for _ in range(1, n_fidelities): interaction_kernel = kernel_base_class(lengthscales=lengthscales, active_dims=dims[:-1]) scale_kernel = kernel_base_class(lengthscales=scale_lengthscale, active_dims=[dims[-1]]) bias_kernel = kernel_base_class(lengthscales=lengthscales, active_dims=dims[:-1]) diff --git a/trieste/models/gpflow/models.py b/trieste/models/gpflow/models.py index e748d048d2..235325e399 100644 --- a/trieste/models/gpflow/models.py +++ b/trieste/models/gpflow/models.py @@ -1358,7 +1358,7 @@ def covariance_between_points( (L being the number of latent GPs = number of output dimensions) """ - inducing_points, _, q_sqrt, whiten = self.get_inducing_variables() + _, _, q_sqrt, whiten = self.get_inducing_variables() return _covariance_between_points_for_variational_models( kernel=self.get_kernel(), diff --git a/trieste/models/gpflow/sampler.py b/trieste/models/gpflow/sampler.py index fcf09b6c95..4528ff0cda 100644 --- a/trieste/models/gpflow/sampler.py +++ b/trieste/models/gpflow/sampler.py @@ -192,7 +192,7 @@ def __init__( if not isinstance(model, SupportsPredictJoint): raise NotImplementedError( f"BatchReparametrizationSampler only works with models that support " - f"predict_joint; received {model.__repr__()}" + f"predict_joint; received {model!r}" ) self._eps: Optional[tf.Variable] = None self._qmc = qmc @@ -293,8 +293,6 @@ class FeatureDecompositionInternalDataModel( and get_internal_data methods. """ - pass - @runtime_checkable class FeatureDecompositionInducingPointModel( @@ -305,8 +303,6 @@ class FeatureDecompositionInducingPointModel( and get_inducing_point methods. 
""" - pass - FeatureDecompositionTrajectorySamplerModel = Union[ FeatureDecompositionInducingPointModel, @@ -496,7 +492,7 @@ def __init__( raise NotImplementedError( f"RandomFourierFeatureTrajectorySampler only works with models with " f"get_kernel, get_observation_noise and get_internal_data methods; " - f"but received {model.__repr__()}." + f"but received {model!r}." ) tf.debugging.assert_positive(num_features) @@ -645,7 +641,7 @@ def __init__( raise NotImplementedError( f"DecoupledTrajectorySampler only works with models that either support " f"get_kernel, get_observation_noise and get_internal_data or support get_kernel " - f"and get_inducing_variables; but received {model.__repr__()}." + f"and get_inducing_variables; but received {model!r}." ) tf.debugging.assert_positive(num_features) @@ -772,7 +768,7 @@ def __init__( f"ResampleableRandomFourierFeatureFunctions only work with models that either" f"support get_kernel, get_observation_noise and get_internal_data or support " f"get_kernel and get_inducing_variables;" - f"but received {model.__repr__()}." + f"but received {model!r}." ) super().__init__(model.get_kernel(), n_components, dtype=tf.float64) @@ -794,12 +790,12 @@ def resample(self) -> None: self.b.assign(self._bias_init(tf.shape(self.b), dtype=self._dtype)) self.W.assign(self._weights_init(tf.shape(self.W), dtype=self._dtype)) - def call(self, x: TensorType) -> TensorType: # [N, D] -> [N, F] or [L, N, F] + def call(self, inputs: TensorType) -> TensorType: # [N, D] -> [N, F] or [L, N, F] """ - Evaluate the basis functions at ``x`` + Evaluate the basis functions at ``inputs`` """ - x = self.kernel.slice(x, None)[0] # Keep only the active dims from the kernel. - return super().call(x) # [N, F] or [L, N, F] + inputs = self.kernel.slice(inputs, None)[0] # Keep only active dims from the kernel + return super().call(inputs) # [N, F] or [L, N, F] class ResampleableDecoupledFeatureFunctions(ResampleableRandomFourierFeatureFunctions): @@ -839,12 +835,12 @@ def __init__( kernel_K(self._inducing_points, x) ) - def call(self, x: TensorType) -> TensorType: # [N, D] -> [N, F + M] or [L, N, F + M] + def call(self, inputs: TensorType) -> TensorType: # [N, D] -> [N, F + M] or [L, N, F + M] """ combine prior basis functions with canonical basis functions """ - fourier_feature_eval = super().call(x) # [N, F] or [L, N, F] - canonical_feature_eval = self._canonical_feature_functions(x) # [1, N, M] or [L, N, M] + fourier_feature_eval = super().call(inputs) # [N, F] or [L, N, F] + canonical_feature_eval = self._canonical_feature_functions(inputs) # [1, N, M] or [L, N, M] # ensure matching rank between features, i.e. drop the leading 1 dimension matched_shape = tf.shape(canonical_feature_eval)[-tf.rank(fourier_feature_eval) :] canonical_feature_eval = tf.reshape(canonical_feature_eval, matched_shape) @@ -892,16 +888,16 @@ def __init__( ) # dummy init to be updated before trajectory evaluation @tf.function - def __call__(self, x: TensorType) -> TensorType: # [N, B, D] -> [N, B, L] + def __call__(self, inputs: TensorType) -> TensorType: # [N, B, D] -> [N, B, L] """Call trajectory function.""" if not self._initialized: # work out desired batch size from input - self._batch_size.assign(tf.shape(x)[-2]) # B + self._batch_size.assign(tf.shape(inputs)[-2]) # B self.resample() # sample B feature weights self._initialized.assign(True) tf.debugging.assert_equal( - tf.shape(x)[-2], + tf.shape(inputs)[-2], self._batch_size.value(), message=f""" This trajectory only supports batch sizes of {self._batch_size}. 
@@ -910,9 +906,9 @@ def __call__(self, x: TensorType) -> TensorType: # [N, B, D] -> [N, B, L] """, ) - flat_x, unflatten = flatten_leading_dims(x) # [N*B, D] + flat_inputs, unflatten = flatten_leading_dims(inputs) # [N*B, D] flattened_feature_evaluations = self._feature_functions( - flat_x + flat_inputs ) # [N*B, F + M] or [L, N*B, F + M] # ensure tensor is always rank 3 rank3_shape = tf.concat([[1], tf.shape(flattened_feature_evaluations)], axis=0)[-3:] @@ -922,7 +918,7 @@ def __call__(self, x: TensorType) -> TensorType: # [N, B, D] -> [N, B, L] ) # [N*B, F + M, L] feature_evaluations = unflatten(flattened_feature_evaluations) # [N, B, F + M, L] - mean = self._mean_function(x) # account for the model's mean function + mean = self._mean_function(inputs) # account for the model's mean function return tf.reduce_sum(feature_evaluations * self._weights_sample, -2) + mean # [N, B, L] def resample(self) -> None: diff --git a/trieste/models/gpflow/utils.py b/trieste/models/gpflow/utils.py index fda3c1991b..fc6534b067 100644 --- a/trieste/models/gpflow/utils.py +++ b/trieste/models/gpflow/utils.py @@ -98,10 +98,10 @@ def squeeze_hyperparameters( :raise ValueError: If ``alpha`` is not in (0,1) or epsilon <= 0 """ - if not (0 < alpha < 1): + if not 0 < alpha < 1: raise ValueError(f"squeeze factor alpha must be in (0, 1), found {alpha}") - if not (0 < epsilon): + if epsilon <= 0: raise ValueError(f"offset factor epsilon must be > 0, found {epsilon}") for param in object.trainable_parameters: @@ -308,7 +308,7 @@ def _compute_kernel_blocks( K12 = tf.repeat(tf.expand_dims(K12, -3), num_latent, axis=-3) elif len(tf.shape(K)) > 3: raise NotImplementedError( - "Covariance between points is not supported " "for kernels of type " f"{type(kernel)}." + f"Covariance between points is not supported for kernels of type {type(kernel)}."
) tf.debugging.assert_shapes( diff --git a/trieste/models/interfaces.py b/trieste/models/interfaces.py index eeac37c22d..ae265d4f85 100644 --- a/trieste/models/interfaces.py +++ b/trieste/models/interfaces.py @@ -179,8 +179,6 @@ def get_kernel(self) -> gpflow.kernels.Kernel: class TrainableSupportsGetKernel(TrainableProbabilisticModel, SupportsGetKernel, Protocol): """A trainable probabilistic model that supports get_kernel.""" - pass - @runtime_checkable class SupportsGetObservationNoise(ProbabilisticModel, Protocol): @@ -334,8 +332,6 @@ class SupportsReparamSamplerObservationNoise( ): """A model that supports both reparam_sampler and get_observation_noise.""" - pass - class ModelStack(ProbabilisticModel, Generic[ProbabilisticModelType]): r""" @@ -526,52 +522,38 @@ def predict_y(self, query_points: TensorType) -> tuple[TensorType, TensorType]: class TrainableSupportsPredictJoint(TrainableProbabilisticModel, SupportsPredictJoint, Protocol): """A model that is both trainable and supports predict_joint.""" - pass - class TrainablePredictJointModelStack( TrainableModelStack, PredictJointModelStack, ModelStack[TrainableSupportsPredictJoint] ): """A stack of models that are both trainable and support predict_joint.""" - pass - class TrainableSupportsPredictY(TrainableProbabilisticModel, SupportsPredictY, Protocol): """A model that is both trainable and supports predict_y.""" - pass - class TrainablePredictYModelStack( TrainableModelStack, PredictYModelStack, ModelStack[TrainableSupportsPredictY] ): """A stack of models that are both trainable and support predict_y.""" - pass - class SupportsPredictJointPredictY(SupportsPredictJoint, SupportsPredictY, Protocol): """A model that supports both predict_joint and predict_y.""" - pass - class PredictJointPredictYModelStack( PredictJointModelStack, PredictYModelStack, ModelStack[SupportsPredictJointPredictY] ): """A stack of models that support both predict_joint and predict_y.""" - pass - class TrainableSupportsPredictJointHasReparamSampler( TrainableSupportsPredictJoint, HasReparamSampler, Protocol ): """A model that is trainable, supports predict_joint and has a reparameterization sampler.""" - pass - class TrainablePredictJointReparamModelStack( TrainablePredictJointModelStack, @@ -580,8 +562,6 @@ class TrainablePredictJointReparamModelStack( ): """A stack of models that are both trainable and support predict_joint.""" - pass - class ReparametrizationSampler(ABC, Generic[ProbabilisticModelType]): r""" diff --git a/trieste/models/keras/builders.py b/trieste/models/keras/builders.py index a84e1ef567..8cd608e317 100644 --- a/trieste/models/keras/builders.py +++ b/trieste/models/keras/builders.py @@ -65,7 +65,7 @@ def build_keras_ensemble( input_tensor_spec, output_tensor_spec = get_tensor_spec_from_data(data) hidden_layer_args = [] - for i in range(num_hidden_layers): + for _ in range(num_hidden_layers): hidden_layer_args.append({"units": units, "activation": activation}) networks = [ diff --git a/trieste/models/keras/models.py b/trieste/models/keras/models.py index f0e488ea2f..e434185988 100644 --- a/trieste/models/keras/models.py +++ b/trieste/models/keras/models.py @@ -159,7 +159,7 @@ def __repr__(self) -> str: @property def model(self) -> tf.keras.Model: - """ " Returns compiled Keras ensemble model.""" + """Returns compiled Keras ensemble model.""" return self._model.model @property @@ -437,16 +437,15 @@ def log(self, dataset: Optional[Dataset] = None) -> None: else: # unrecognised history key; ignore continue - if "model" in post and not 
logging.include_summary("_ensemble"): - break - else: - if "model" in post: - pre = pre + "/_ensemble" - logging.histogram(f"{pre}/epoch{post}", lambda: v) - logging.scalar(f"{pre}/final{post}", lambda: v[-1]) - logging.scalar(f"{pre}/diff{post}", lambda: v[0] - v[-1]) - logging.scalar(f"{pre}/min{post}", lambda: tf.reduce_min(v)) - logging.scalar(f"{pre}/max{post}", lambda: tf.reduce_max(v)) + if "model" in post: + if not logging.include_summary("_ensemble"): + break + pre = pre + "/_ensemble" + logging.histogram(f"{pre}/epoch{post}", lambda: v) + logging.scalar(f"{pre}/final{post}", lambda: v[-1]) + logging.scalar(f"{pre}/diff{post}", lambda: v[0] - v[-1]) + logging.scalar(f"{pre}/min{post}", lambda: tf.reduce_min(v)) + logging.scalar(f"{pre}/max{post}", lambda: tf.reduce_max(v)) if dataset: write_summary_data_based_metrics( dataset=dataset, model=self, prefix="training_" diff --git a/trieste/models/keras/sampler.py b/trieste/models/keras/sampler.py index 891065f115..654df713f5 100644 --- a/trieste/models/keras/sampler.py +++ b/trieste/models/keras/sampler.py @@ -57,7 +57,7 @@ def __init__( raise NotImplementedError( f"EnsembleTrajectorySampler only works with DeepEnsembleModel models, that support " f"ensemble_size and ensemble_distributions methods; " - f"received {model.__repr__()}" + f"received {model!r}" ) super().__init__(model) diff --git a/trieste/space.py b/trieste/space.py index 4a3a400246..ae28253215 100644 --- a/trieste/space.py +++ b/trieste/space.py @@ -1020,9 +1020,7 @@ def __init__(self, spaces: Sequence[SearchSpace], tags: Optional[Sequence[str]] super().__init__(spaces, tags) subspace_sizes = self.subspace_dimension - self._subspace_sizes_by_tag = { - tag: subspace_size for tag, subspace_size in zip(self._tags, subspace_sizes) - } + self._subspace_sizes_by_tag = dict(zip(self._tags, subspace_sizes)) self._subspace_starting_indices = dict( zip(self._tags, tf.cumsum(subspace_sizes, exclusive=True)) @@ -1179,7 +1177,7 @@ def __init__(self, spaces: Sequence[SearchSpace], tags: Optional[Sequence[str]] ) tf.debugging.assert_equal( - len(set([int(space.dimension) for space in spaces])), + len({int(space.dimension) for space in spaces}), 1, message=f""" All subspaces must have the same dimension but received diff --git a/trieste/version.py b/trieste/version.py index a87e61f014..a4b7be24a0 100644 --- a/trieste/version.py +++ b/trieste/version.py @@ -17,4 +17,4 @@ BASE_PATH = Path(__file__).parents[0] VERSION = BASE_PATH / "VERSION" -__version__ = Path(VERSION).read_text().strip() +__version__ = Path(VERSION).read_text(encoding="utf8").strip()
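
Note on the recurring `{model!r}` change: in an f-string, the `!r` conversion formats the value with repr(), so every `f"... {model.__repr__()}"` rewritten above renders exactly the same message in idiomatic form. A minimal standalone sketch (the `DummyModel` class is hypothetical, not part of this diff):

class DummyModel:
    def __repr__(self) -> str:
        return "DummyModel()"

model = DummyModel()
# `!r` and the explicit __repr__() call produce identical strings.
assert f"received {model!r}" == f"received {model.__repr__()}" == "received DummyModel()"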
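
Note on the `except Exception:` change in asynchronous_greedy_multiprocessing.pct.py: a bare `except:` also catches BaseException subclasses such as KeyboardInterrupt and SystemExit, so the polling loop could not be interrupted cleanly. A minimal sketch of the narrowed pattern (the `poll` helper is illustrative, not from the notebook):

import queue
from typing import Any, Optional

def poll(q: "queue.Queue[Any]") -> Optional[Any]:
    try:
        return q.get_nowait()
    except Exception:  # catches queue.Empty and other errors, but not Ctrl-C
        return None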
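
Note on the guard rewrites in rule.py and gpflow/utils.py: each negated comparison is replaced by its direct equivalent (e.g. `if not self.success_tolerance > 0:` becomes `if self.success_tolerance <= 0:`), which is behaviour-preserving for ordered numeric values:

# Quick check that `not (n > 0)` and `n <= 0` agree for ordered numbers.
for n in (-1, 0, 0.5, 1):
    assert (not n > 0) == (n <= 0)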