diff --git a/src/ert/run_models/everest_run_model.py b/src/ert/run_models/everest_run_model.py
index 487aa634f3f..a92d8cf10a3 100644
--- a/src/ert/run_models/everest_run_model.py
+++ b/src/ert/run_models/everest_run_model.py
@@ -501,6 +501,7 @@ def _create_optimizer(self, simulator: Simulator) -> BasicOptimizer:
         )
 
         ropt_output_folder = Path(self.everest_config.optimization_output_dir)
+        ropt_evaluator_fn = simulator.create_forward_model_evaluator_function()
 
         # Initialize the optimizer with output tables. `min_header_len` is set
         # to ensure that all tables have the same number of header lines,
@@ -508,7 +509,7 @@
         # set because ropt reports minimization results, while everest wants
         # maximization results, necessitating a conversion step.
         optimizer = (
-            BasicOptimizer(enopt_config=self.ropt_config, evaluator=simulator)
+            BasicOptimizer(enopt_config=self.ropt_config, evaluator=ropt_evaluator_fn)
             .add_table(
                 columns=RESULT_COLUMNS,
                 path=ropt_output_folder / "results.txt",
diff --git a/src/everest/simulator/simulator.py b/src/everest/simulator/simulator.py
index 84179f8cfb4..d66ca5d00b2 100644
--- a/src/everest/simulator/simulator.py
+++ b/src/everest/simulator/simulator.py
@@ -7,7 +7,7 @@
 import numpy as np
 from numpy import float64
 from numpy._typing import NDArray
-from ropt.evaluator import EvaluatorContext, EvaluatorResult
+from ropt.evaluator import Evaluator, EvaluatorContext, EvaluatorResult
 
 from ert import BatchSimulator, WorkflowRunner
 from ert.config import ErtConfig, HookRuntime
@@ -90,7 +90,18 @@ def _get_aliases(self, ever_config: EverestConfig) -> Dict[str, str]:
                 aliases[f"{constraint.name}:upper"] = constraint.name
         return aliases
 
-    def __call__(
+    def create_forward_model_evaluator_function(
+        self,
+    ) -> Evaluator:
+        """Return a ropt ``Evaluator`` closure delegating to ``_run_forward_model``."""
+        def run_forward_model(
+            control_values: NDArray[np.float64], metadata: EvaluatorContext
+        ) -> EvaluatorResult:
+            return self._run_forward_model(control_values, metadata)
+
+        return run_forward_model
+
+    def _run_forward_model(
         self, control_values: NDArray[np.float64], metadata: EvaluatorContext
     ) -> EvaluatorResult:
         active = (
diff --git a/tests/everest/test_simulator_cache.py b/tests/everest/test_simulator_cache.py
index 1eb152a73a6..d2348b4f01e 100644
--- a/tests/everest/test_simulator_cache.py
+++ b/tests/everest/test_simulator_cache.py
@@ -12,7 +12,7 @@
 def test_simulator_cache(monkeypatch, copy_math_func_test_data_to_tmp):
     n_evals = 0
 
-    original_call = Simulator.__call__
+    original_call = Simulator._run_forward_model
 
     def new_call(*args):
         nonlocal n_evals
@@ -20,7 +20,7 @@ def new_call(*args):
         n_evals += (result.evaluation_ids >= 0).sum()
         return result
 
-    monkeypatch.setattr(Simulator, "__call__", new_call)
+    monkeypatch.setattr(Simulator, "_run_forward_model", new_call)
 
     config = EverestConfig.load_file(CONFIG_FILE)
     config.simulator = SimulatorConfig(enable_cache=True)
@@ -33,7 +33,10 @@
 
     # Run once, populating the cache of the simulator:
     variables1 = (
-        BasicOptimizer(enopt_config=ropt_config, evaluator=simulator)
+        BasicOptimizer(
+            enopt_config=ropt_config,
+            evaluator=simulator.create_forward_model_evaluator_function(),
+        )
         .run()
         .variables
     )
@@ -44,7 +47,10 @@
     # Run again with the same simulator:
     n_evals = 0
     variables2 = (
-        BasicOptimizer(enopt_config=ropt_config, evaluator=simulator)
+        BasicOptimizer(
+            enopt_config=ropt_config,
+            evaluator=simulator.create_forward_model_evaluator_function(),
+        )
         .run()
         .variables
     )