Various linting suggestions (#807)
uri-granta authored Jan 16, 2024
1 parent 741df6b commit 5478896
Showing 35 changed files with 104 additions and 145 deletions.
@@ -10,7 +10,6 @@
import trieste
from trieste.acquisition.function import BayesianActiveLearningByDisagreement
from trieste.acquisition.rule import OBJECTIVE
-from trieste.models.gpflow.models import VariationalGaussianProcess
from trieste.objectives.utils import mk_observer

np.random.seed(1793)
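
Note: the dropped import was never used in the notebook, which is what linters flag as an unused import (pyflakes F401, pylint W0611). A minimal self-contained illustration:

    import os  # F401/W0611: "os" is imported but never used below

    print("this module never touches os")
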
2 changes: 1 addition & 1 deletion docs/notebooks/asynchronous_greedy_multiprocessing.pct.py
@@ -211,7 +211,7 @@ def terminate_processes(processes):
f"Process {pid}: Main : received data {new_data}",
flush=True,
)
-except:
+except Exception:
continue

# new_data is a tuple of (point, observation value)
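
Note: replacing the bare except matters beyond style. A bare `except:` also swallows SystemExit and KeyboardInterrupt, which can make a polling loop like this one impossible to interrupt, whereas `except Exception:` lets those propagate. A minimal sketch (read_queue is a hypothetical stand-in for the queue read above):

    def read_queue() -> str:
        raise ValueError("queue empty")  # stand-in for a failed read

    try:
        read_queue()
    except Exception:  # ordinary errors only; Ctrl-C still interrupts the loop
        print("no data yet, retrying")
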
4 changes: 0 additions & 4 deletions docs/notebooks/deep_gaussian_processes.pct.py
@@ -18,7 +18,6 @@
# The Michalewicz functions are highly non-stationary and have a global minimum that's hard to find, so DGPs might be more suitable than standard GPs, which may struggle because they typically have stationary kernels that cannot easily model non-stationarities.

# %%
-import gpflow
from trieste.objectives import Michalewicz2, Michalewicz5
from trieste.objectives.utils import mk_observer
from trieste.experimental.plotting import plot_function_plotly
@@ -167,9 +166,6 @@ def build_dgp_model(data, search_space):
# We now compare to a GP model with priors over the hyperparameters. We do not expect this to do as well because GP models cannot deal with non-stationary functions well.

# %%
-import gpflow
-import tensorflow_probability as tfp
-
from trieste.models.gpflow import GaussianProcessRegression, build_gpr

gpflow_model = build_gpr(initial_data, search_space, likelihood_variance=1e-7)
1 change: 0 additions & 1 deletion docs/notebooks/explicit_constraints.pct.py
@@ -204,7 +204,6 @@ def constraint(input_data):
def plot_bo_results():
dataset = result.try_get_final_dataset()
query_points = dataset.query_points.numpy()
-observations = dataset.observations.numpy()

_, ax = plot_function_2d(
ScaledBranin.objective,
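
Note: same idea as the unused import earlier, but for a local: `observations` was assigned and never read, which pylint reports as W0612 (unused-variable). For example:

    def summarize(xs: list) -> int:
        total = sum(xs)
        unused = max(xs)  # W0612: assigned but never used
        return total
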
8 changes: 4 additions & 4 deletions docs/notebooks/multifidelity_modelling.pct.py
@@ -260,13 +260,13 @@ def __call__(self, x, add_noise=True):

# Plot gpr results
mean, var = gpr_predictions
-ax.plot(X, mean, label=f"GPR", color="tab:blue")
+ax.plot(X, mean, label="GPR", color="tab:blue")
ax.plot(X, mean + 1.96 * tf.math.sqrt(var), alpha=0.2, color="tab:blue")
ax.plot(X, mean - 1.96 * tf.math.sqrt(var), alpha=0.2, color="tab:blue")

# Plot gpr results
mean, var = multifidelity_predictions
-ax.plot(X, mean, label=f"MultifidelityAutoregressive", color="tab:orange")
+ax.plot(X, mean, label="MultifidelityAutoregressive", color="tab:orange")
ax.plot(X, mean + 1.96 * tf.math.sqrt(var), alpha=0.2, color="tab:orange")
ax.plot(X, mean - 1.96 * tf.math.sqrt(var), alpha=0.2, color="tab:orange")

@@ -275,13 +275,13 @@ def __call__(self, x, add_noise=True):
ax.plot(
X,
observer(X_for_multifid, add_noise=False).observations,
-label=f"True function",
+label="True function",
color="tab:green",
)

# Scatter the data
ax.scatter(
-hf_data.query_points, hf_data.observations, label=f"Data", color="tab:green"
+hf_data.query_points, hf_data.observations, label="Data", color="tab:green"
)
plt.legend()
plt.show()
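
Note: the f prefixes removed in this file had no {...} placeholders to fill, so they were inert; pylint reports this as W1309 (f-string-without-interpolation). For example:

    label = f"GPR"  # W1309: no placeholder, the f prefix does nothing
    label = "GPR"   # equivalent plain literal
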
4 changes: 4 additions & 0 deletions pyproject.toml
@@ -85,10 +85,12 @@ disable = [
"invalid-unary-operand-type",
"line-too-long",
"locally-disabled",
"missing-class-docstring",
"missing-module-docstring",
"no-else-return",
"no-self-use",
"no-value-for-parameter",
"protected-access",
"redefined-builtin",
"redundant-keyword-arg",
"suppressed-message",
@@ -98,10 +100,12 @@
"too-many-instance-attributes",
"too-many-public-methods",
"too-many-arguments",
"too-many-branches",
"too-many-locals",
"too-many-statements",
"too-many-boolean-expressions",
"too-many-nested-blocks",
"typevar-name-incorrect-variance",
"unexpected-keyword-arg",
"unused-argument",
"unsubscriptable-object",
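
Note: entries in the pyproject.toml disable list silence a pylint check project-wide. When a check is only noisy at a single site, a scoped inline pragma is the usual alternative, e.g. (hypothetical function):

    def dispatch(event):  # pylint: disable=too-many-branches
        ...  # many elif arms here
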
@@ -129,7 +129,7 @@ def test_multifidelity_nonlinear_autoregressive_results_better_than_linear() -> None
build_multifidelity_autoregressive_models(initial_data, n_fidelities, input_search_space)
)

-mses = list()
+mses = []
for model in [nonlinear_model, linear_model]:
model.update(initial_data)
model.optimize(initial_data)
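
Note: `mses = []` over `mses = list()` is pylint R1734 (use-list-literal): the literal is the idiomatic spelling and skips a global name lookup plus a constructor call. A quick comparison:

    mses = []          # preferred literal form
    mses_alt = list()  # R1734: same empty list via a constructor call
    assert mses == mses_alt
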
@@ -61,7 +61,7 @@ def _build_nested_multifidelity_dataset(
) -> Dataset:
num_fidelities = problem.num_fidelities
initial_sample_sizes = [10 + 2 * (num_fidelities - i) for i in range(num_fidelities)]
-fidelity_samples = list()
+fidelity_samples = []
lowest_fidelity_sample = problem.search_space.sample(initial_sample_sizes[0])
lowest_fidelity_sample = add_fidelity_column(lowest_fidelity_sample, 0)
fidelity_samples.append(lowest_fidelity_sample)
2 changes: 1 addition & 1 deletion tests/unit/acquisition/function/test_active_learning.py
@@ -405,7 +405,7 @@ def test_integrated_variance_reduction_builder_updates_without_retracing() -> None
],
)
def test_bayesian_active_learning_by_disagreement_is_correct(at: tf.Tensor) -> None:
""" "
"""
We perform an MC check as in Section 5 of Houlsby 2011 paper. We check only the
2nd, more complicated term.
"""
3 changes: 2 additions & 1 deletion trieste/acquisition/combination.py
@@ -46,7 +46,8 @@ def __init__(self, *builders: AcquisitionFunctionBuilder[ProbabilisticModelType]

def __repr__(self) -> str:
""""""
-return "{}({})".format(self.__class__.__name__, ", ".join(map(repr, self._acquisitions)))
+builders = ", ".join(map(repr, self._acquisitions))
+return f"{self.__class__.__name__}({builders})"

def prepare_acquisition_function(
self,
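
Note: this rewrite moves from str.format to an f-string (pylint C0209, consider-using-f-string), pulling the join into a local so the line stays short. A self-contained sketch with a hypothetical builder class:

    class Sum:
        """Hypothetical stand-in for the combined acquisition builder."""

        def __init__(self, *builders: object) -> None:
            self._acquisitions = builders

        def __repr__(self) -> str:
            builders = ", ".join(map(repr, self._acquisitions))
            return f"{self.__class__.__name__}({builders})"

    print(repr(Sum(1, "a")))  # prints: Sum(1, 'a')
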
5 changes: 2 additions & 3 deletions trieste/acquisition/function/active_learning.py
@@ -64,7 +64,7 @@ def prepare_acquisition_function(
if not isinstance(model, SupportsPredictJoint):
raise NotImplementedError(
f"PredictiveVariance only works with models that support "
f"predict_joint; received {model.__repr__()}"
f"predict_joint; received {model!r}"
)

return predictive_variance(model, self._jitter)
@@ -282,8 +282,7 @@ def prepare_acquisition_function(
"""
if not isinstance(model, FastUpdateModel):
raise NotImplementedError(
f"PredictiveVariance only works with FastUpdateModel models; "
f"received {model.__repr__()}"
f"PredictiveVariance only works with FastUpdateModel models; received {model!r}"
)

return integrated_variance_reduction(model, self._integration_points, self._threshold)
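
Note: the recurring `{model.__repr__()}` to `{model!r}` edit uses the f-string conversion flag: `!r` applies repr() to the value, avoiding the direct dunder call that pylint discourages (C2801, unnecessary-dunder-call). The two spellings are equivalent:

    model = "a GP"
    assert f"received {model!r}" == f"received {model.__repr__()}"
    print(f"received {model!r}")  # prints: received 'a GP'
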
4 changes: 2 additions & 2 deletions trieste/acquisition/function/continuous_thompson_sampling.py
@@ -70,7 +70,7 @@ def prepare_acquisition_function(
if not isinstance(model, HasTrajectorySampler):
raise ValueError(
f"Thompson sampling from trajectory only supports models with a trajectory_sampler "
f"method; received {model.__repr__()}"
f"method; received {model!r}"
)

self._trajectory_sampler = model.trajectory_sampler()
@@ -147,7 +147,7 @@ def prepare_acquisition_function(
if not isinstance(model, HasTrajectorySampler):
raise ValueError(
f"Thompson sampling from trajectory only supports models with a trajectory_sampler "
f"method; received {model.__repr__()}"
f"method; received {model!r}"
)

self._trajectory_sampler = model.trajectory_sampler()
6 changes: 1 addition & 5 deletions trieste/acquisition/function/entropy.py
@@ -221,8 +221,6 @@ class SupportsCovarianceObservationNoise(
):
"""A model that supports both covariance_between_points and get_observation_noise."""
-
-pass


class SupportsCovarianceObservationNoiseTrajectory(
HasTrajectorySampler, SupportsCovarianceObservationNoise, Protocol
@@ -337,7 +335,7 @@ def prepare_acquisition_function(
if not isinstance(model, SupportsCovarianceObservationNoise):
raise NotImplementedError(
f"GIBBON only works with models that support "
f"covariance_between_points and get_observation_noise; received {model.__repr__()}"
f"covariance_between_points and get_observation_noise; received {model!r}"
)

tf.debugging.Assert(dataset is not None, [tf.constant([])])
@@ -630,8 +628,6 @@ class SupportsCovarianceWithTopFidelityPredictY(
):
"""A model that is both multifidelity and supports predict_y."""
-
-pass


MUMBOModelType = TypeVar(
"MUMBOModelType", bound=SupportsCovarianceWithTopFidelityPredictY, contravariant=True
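
Note: the deleted pass statements were redundant because a docstring is already a complete class body; pylint flags the leftover statement as W0107 (unnecessary-pass). Sketch:

    class Marker:  # hypothetical minimal class
        """The docstring alone is a valid body; no pass is needed."""

    print(Marker.__doc__)
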
21 changes: 9 additions & 12 deletions trieste/acquisition/function/function.py
@@ -249,7 +249,7 @@ def prepare_acquisition_function(
if not isinstance(model, SupportsGetObservationNoise):
raise NotImplementedError(
f"AugmentedExpectedImprovement only works with models that support "
f"get_observation_noise; received {model.__repr__()}"
f"get_observation_noise; received {model!r}"
)
tf.debugging.Assert(dataset is not None, [tf.constant([])])
dataset = cast(Dataset, dataset)
@@ -318,14 +318,11 @@ def __call__(self, x: TensorType) -> TensorType:
)
mean, variance = self._model.predict(tf.squeeze(x, -2))
normal = tfp.distributions.Normal(mean, tf.sqrt(variance))
-expected_improvement = (self._eta - mean) * normal.cdf(self._eta) + variance * normal.prob(
-self._eta
-)
-
+ei = (self._eta - mean) * normal.cdf(self._eta) + variance * normal.prob(self._eta)
augmentation = 1 - (tf.math.sqrt(self._noise_variance)) / (
tf.math.sqrt(self._noise_variance + variance)
)
-return expected_improvement * augmentation
+return ei * augmentation


class NegativeLowerConfidenceBound(SingleModelAcquisitionBuilder[ProbabilisticModel]):
@@ -828,7 +825,7 @@ def prepare_acquisition_function(
if not isinstance(model, HasReparamSampler):
raise ValueError(
f"MonteCarloExpectedImprovement only supports models with a reparam_sampler method;"
f"received {model.__repr__()}"
f"received {model!r}"
)

sampler = model.reparam_sampler(self._sample_size)
@@ -972,7 +969,7 @@ def prepare_acquisition_function(
raise ValueError(
f"MonteCarloAugmentedExpectedImprovement only supports models with a "
f"reparam_sampler method and that support observation noise; received "
f"{model.__repr__()}."
f"{model!r}."
)

sampler = model.reparam_sampler(self._sample_size)
@@ -1167,7 +1164,7 @@ def __init__(self, sample_size: int, model: HasReparamSampler, eta: TensorType,
if not isinstance(model, HasReparamSampler):
raise ValueError(
f"The batch Monte-Carlo expected improvement acquisition function only supports "
f"models that implement a reparam_sampler method; received {model.__repr__()}"
f"models that implement a reparam_sampler method; received {model!r}"
)

sampler = model.reparam_sampler(self._sample_size)
@@ -1219,7 +1216,7 @@ def __init__(
def __repr__(self) -> str:
""""""

return f"BatchExpectedImprovement({self._sample_size!r}, " f"jitter={self._jitter!r})"
return f"BatchExpectedImprovement({self._sample_size!r}, jitter={self._jitter!r})"

def prepare_acquisition_function(
self,
@@ -1743,9 +1740,9 @@ def _compute_batch_expected_improvement(
)

# Compute outer sum
-expected_improvement = tf.reduce_sum(mean_T_term + sum_term, axis=1)
+ei = tf.reduce_sum(mean_T_term + sum_term, axis=1)

-return expected_improvement
+return ei

@tf.function
def __call__(self, x: TensorType) -> TensorType:
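
Note: several edits in this file merge implicitly concatenated f-string pieces (left over from earlier line wrapping) into one literal. Adjacent string literals are joined at compile time, so the behaviour is identical:

    jitter = 1e-6
    old = f"BatchExpectedImprovement(100, " f"jitter={jitter!r})"
    new = f"BatchExpectedImprovement(100, jitter={jitter!r})"
    assert old == new
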
10 changes: 2 additions & 8 deletions trieste/acquisition/function/greedy_batch.py
@@ -397,8 +397,6 @@ class FantasizerModelType(
):
"""The model requirements for the Fantasizer acquisition function."""
-
-pass


class FantasizerModelStack(
PredictJointModelStack, PredictYModelStack, ModelStack[FantasizerModelType]
@@ -408,8 +406,6 @@ class FantasizerModelStack(
and predict_y but none of the other methods.
"""
-
-pass


FantasizerModelOrStack = Union[FantasizerModelType, FantasizerModelStack]

@@ -555,7 +551,7 @@ def prepare_acquisition_function(
raise NotImplementedError(
f"Fantasizer only works with FastUpdateModel models that also support "
f"predict_joint, get_kernel and get_observation_noise, or with "
f"ModelStack stacks of such models; received {model.__repr__()}"
f"ModelStack stacks of such models; received {model!r}"
)
if pending_points is None:
return self._update_base_acquisition_function(models, datasets)
@@ -606,9 +602,7 @@ def _generate_fantasized_data(
elif fantasize_method == "sample":
fantasized_obs = model.sample(pending_points, num_samples=1)[0]
else:
-raise NotImplementedError(
-f"fantasize_method must be KB or sample, " f"received {model.__repr__()}"
-)
+raise NotImplementedError(f"fantasize_method must be KB or sample, received {model!r}")

return Dataset(pending_points, fantasized_obs)

4 changes: 2 additions & 2 deletions trieste/acquisition/function/multi_objective.py
@@ -77,7 +77,7 @@ def __init__(
def __repr__(self) -> str:
""""""
if callable(self._ref_point_spec):
return f"ExpectedHypervolumeImprovement(" f"{self._ref_point_spec.__name__})"
return f"ExpectedHypervolumeImprovement({self._ref_point_spec.__name__})"
else:
return f"ExpectedHypervolumeImprovement({self._ref_point_spec!r})"

@@ -340,7 +340,7 @@ def prepare_acquisition_function(
if not isinstance(model, HasReparamSampler):
raise ValueError(
f"The batch Monte-Carlo expected hyper-volume improvement function only supports "
f"models that implement a reparam_sampler method; received {model.__repr__()}"
f"models that implement a reparam_sampler method; received {model!r}"
)

sampler = model.reparam_sampler(self._sample_size)
4 changes: 2 additions & 2 deletions trieste/acquisition/function/utils.py
@@ -72,8 +72,8 @@ def _standard_normal_cdf_and_inverse_cdf(
loc=tf.zeros(shape=(), dtype=dtype),
scale=tf.ones(shape=(), dtype=dtype),
)
-Phi: Callable[[TensorType], TensorType] = lambda x: normal.cdf(x)
-iPhi: Callable[[TensorType], TensorType] = lambda x: normal.quantile(x)
+Phi: Callable[[TensorType], TensorType] = normal.cdf
+iPhi: Callable[[TensorType], TensorType] = normal.quantile

return Phi, iPhi

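
Note: the lambdas here only forwarded their argument to normal.cdf and normal.quantile, which pylint flags as W0108 (unnecessary-lambda); binding the method directly is equivalent and drops a call frame. A simplified version of the same setup:

    import tensorflow_probability as tfp

    normal = tfp.distributions.Normal(loc=0.0, scale=1.0)
    Phi = normal.cdf  # bound method, no lambda wrapper
    assert float(Phi(0.0)) == float(normal.cdf(0.0))  # both 0.5
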
6 changes: 3 additions & 3 deletions trieste/acquisition/multi_objective/pareto.py
@@ -111,9 +111,9 @@ def sample_diverse_subset(
f" got {bounds_delta_scale_factor}"
)
if bounds_delta_scale_factor < 0:
raise ValueError("bounds_delta_min should be non-negative," f" got {bounds_min_delta}")
raise ValueError("bounds_delta_min should be non-negative, got {bounds_min_delta}")

-front_size, front_dims = self.front.shape
+front_size, _ = self.front.shape

if (front_size < sample_size) and allow_repeats is False:
raise ValueError(
@@ -169,7 +169,7 @@ def _choose_batch_with_repeats(
n_times_sampled[idx] += 1

# Create a list of the sample indices
-sample_ids = list()
+sample_ids = []
for idx, repeats in enumerate(n_times_sampled):
for _ in range(int(repeats)):
sample_ids.append(idx)
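
Note: one pitfall when collapsing implicitly concatenated pieces, as in the error message above: if only the tail fragment carried the f prefix, the merged literal must keep it, otherwise the placeholder is printed verbatim:

    bounds_min_delta = -0.5
    print("should be non-negative, got {bounds_min_delta}")   # braces printed literally
    print(f"should be non-negative, got {bounds_min_delta}")  # ...got -0.5
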
4 changes: 2 additions & 2 deletions trieste/acquisition/multi_objective/partition.py
@@ -189,7 +189,7 @@ def __init__(self, front: TensorType):
tf.debugging.assert_equal(
tf.reduce_all(non_dominated(front)[1]),
True,
message=f"\ninput {front} " f"contains dominated points",
message=f"\ninput {front} contains dominated points",
)
self.front = tf.gather_nd(front, tf.argsort(front[:, :1], axis=0)) # sort input front
self._bounds = self._get_bound_index()
@@ -235,7 +235,7 @@ def __init__(self, front: TensorType, threshold: TensorType | float = 0):
tf.debugging.assert_equal(
tf.reduce_all(non_dominated(front)[1]),
True,
message=f"\ninput {front} " f"contains dominated points",
message=f"\ninput {front} contains dominated points",
)
self.front = tf.gather_nd(front, tf.argsort(front[:, :1], axis=0)) # sort
self.front = front
3 changes: 2 additions & 1 deletion trieste/acquisition/optimizer.py
@@ -605,6 +605,7 @@ def run(
constraints: Sequence[Constraint],
optimizer_args: Optional[dict[str, Any]] = None,
) -> spo.OptimizeResult:
"""Greenlet run method."""
cache_x = start + 1 # Any value different from `start`.
cache_y: Optional["np.ndarray[Any, Any]"] = None
cache_dy_dx: Optional["np.ndarray[Any, Any]"] = None
Expand All @@ -626,7 +627,7 @@ def value_and_gradient(

method = "trust-constr" if len(constraints) else "l-bfgs-b"
optimizer_args = dict(
-dict(method=method, constraints=constraints), **(optimizer_args or {})
+{"method": method, "constraints": constraints}, **(optimizer_args or {})
)
return spo.minimize(
lambda x: value_and_gradient(x)[0],
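
Note: the inner dict(...) becomes a literal per pylint R1735 (use-dict-literal), while the outer dict(defaults, **overrides) merge is kept, so caller-supplied optimizer_args still override the defaults on key collisions. A small sketch of that merge:

    defaults = {"method": "l-bfgs-b", "constraints": []}
    overrides = {"method": "trust-constr"}
    merged = dict(defaults, **overrides)  # later keys win
    assert merged["method"] == "trust-constr"
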