Update ropt dependency to 0.9
verveerpj committed Nov 4, 2024
1 parent 90d11ec commit 8c679c1
Showing 4 changed files with 44 additions and 57 deletions.
4 changes: 2 additions & 2 deletions pyproject.toml
@@ -153,8 +153,8 @@ everest = [
     "decorator",
     "resdata",
     "colorama",
-    "ropt[pandas]>=0.8,<0.9",
-    "ropt-dakota>=0.7,<0.8",
+    "ropt[pandas]>=0.9,<0.10",
+    "ropt-dakota>=0.9,<0.10",
     "seba-sqlite",
 ]
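For context, the new bounds can be checked against an installed environment with a small script; the sketch below is illustrative and not part of this commit, and it assumes the packaging library is available.

# Minimal sketch: verify that the installed ropt and ropt-dakota releases
# satisfy the bounds declared above in pyproject.toml.
from importlib.metadata import version

from packaging.specifiers import SpecifierSet

BOUNDS = {
    "ropt": SpecifierSet(">=0.9,<0.10"),
    "ropt-dakota": SpecifierSet(">=0.9,<0.10"),
}

for name, spec in BOUNDS.items():
    installed = version(name)
    if installed not in spec:  # SpecifierSet accepts a version string here
        raise RuntimeError(f"{name} {installed} does not satisfy {spec}")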
81 changes: 37 additions & 44 deletions src/ert/run_models/everest_run_model.py
@@ -29,8 +29,7 @@

 import seba_sqlite.sqlite_storage
 from ropt.enums import EventType, OptimizerExitCode
-from ropt.optimization import Event
-from ropt.plan import OptimizationPlanRunner
+from ropt.plan import BasicOptimizer, Event
 from seba_sqlite import SqliteStorage

 from ert.config import ErtConfig
@@ -294,7 +293,6 @@ def __call__(self) -> str | None: ...
 class EverestRunModel(BaseRunModel):
     def __init__(
         self,
-        random_seed: Optional[int],
         config: ErtConfig,
         everest_config: EverestConfig,
         simulation_callback: SimulationCallback,
@@ -335,7 +333,6 @@ def __init__(
             config.queue_config,
             status_queue,
             active_realizations=[True] * config.model_config.num_realizations,
-            random_seed=random_seed,
             minimum_required_realizations=config.model_config.num_realizations,  # OK?

@@ -372,7 +369,6 @@ def create(
         ever_config: EverestConfig,
         simulation_callback: Optional[SimulationCallback] = None,
         optimization_callback: Optional[OptimizerCallback] = None,
-        random_seed: Optional[int] = None,
     ) -> EverestRunModel:
         def default_simulation_callback(
             simulation_status: SimulationStatus | None, event: str
@@ -384,7 +380,6 @@ def default_optimization_callback() -> str | None:

         ert_config = everest_to_ert_config(cls._add_defaults(ever_config))
         return cls(
-            random_seed=random_seed,
             config=ert_config,
             everest_config=ever_config,
             simulation_callback=simulation_callback or default_simulation_callback,
@@ -485,7 +480,7 @@ def _simulation_callback(self, ctx: BatchContext | None) -> None:
         self._monitor_thread.start()

     def _ropt_callback(
-        self, _: Event, optimizer: OptimizationPlanRunner, simulator: Simulator
+        self, _: Event, optimizer: BasicOptimizer, simulator: Simulator
     ) -> None:
         logging.getLogger(EVEREST).debug("Optimization callback called")

@@ -507,50 +502,48 @@ def _ropt_callback(
             logging.getLogger(EVEREST).info("User abort requested.")
             optimizer.abort_optimization()

-    def _configure_optimizer(self, simulator: Simulator) -> OptimizationPlanRunner:
+    def _configure_optimizer(self, simulator: Simulator) -> BasicOptimizer:
         assert (
             self.everest_config.environment is not None
             and self.everest_config.environment is not None
         )
-        optimizer = OptimizationPlanRunner(
-            enopt_config=self.ropt_config,
-            evaluator=simulator,
-            seed=self.everest_config.environment.random_seed,
-        )

-        # Initialize output tables. `min_header_len` is set to ensure that all
-        # tables have the same number of header lines, simplifying code that
-        # reads them as fixed width tables. `maximize` is set because ropt
-        # reports minimization results, while everest wants maximization
-        # results, necessitating a conversion step.
         ropt_output_folder = Path(self.everest_config.optimization_output_dir)
-        optimizer.add_table(
-            columns=RESULT_COLUMNS,
-            path=ropt_output_folder / "results.txt",
-            min_header_len=MIN_HEADER_LEN,
-            maximize=True,
-        )
-        optimizer.add_table(
-            columns=GRADIENT_COLUMNS,
-            path=ropt_output_folder / "gradients.txt",
-            table_type="gradients",
-            min_header_len=MIN_HEADER_LEN,
-            maximize=True,
-        )
-        optimizer.add_table(
-            columns=SIMULATION_COLUMNS,
-            path=ropt_output_folder / "simulations.txt",
-            min_header_len=MIN_HEADER_LEN,
-            maximize=True,
-        )
-        optimizer.add_table(
-            columns=PERTURBATIONS_COLUMNS,
-            path=ropt_output_folder / "perturbations.txt",
-            table_type="gradients",
-            min_header_len=MIN_HEADER_LEN,
-            maximize=True,
-        )
-        return optimizer
+
+        # Initialize the optimizer with output tables. `min_header_len` is set
+        # to ensure that all tables have the same number of header lines,
+        # simplifying code that reads them as fixed width tables. `maximize` is
+        # set because ropt reports minimization results, while everest wants
+        # maximization results, necessitating a conversion step.
+        return (
+            BasicOptimizer(enopt_config=self.ropt_config, evaluator=simulator)
+            .add_table(
+                columns=RESULT_COLUMNS,
+                path=ropt_output_folder / "results.txt",
+                min_header_len=MIN_HEADER_LEN,
+                maximize=True,
+            )
+            .add_table(
+                columns=GRADIENT_COLUMNS,
+                path=ropt_output_folder / "gradients.txt",
+                table_type="gradients",
+                min_header_len=MIN_HEADER_LEN,
+                maximize=True,
+            )
+            .add_table(
+                columns=SIMULATION_COLUMNS,
+                path=ropt_output_folder / "simulations.txt",
+                min_header_len=MIN_HEADER_LEN,
+                maximize=True,
+            )
+            .add_table(
+                columns=PERTURBATIONS_COLUMNS,
+                path=ropt_output_folder / "perturbations.txt",
+                table_type="gradients",
+                min_header_len=MIN_HEADER_LEN,
+                maximize=True,
+            )
+        )

     @classmethod
     def name(cls) -> str:
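The change above moves Everest from ropt's OptimizationPlanRunner to BasicOptimizer: the seed no longer goes to the constructor, and add_table returns the optimizer itself, so the output tables can be chained onto the constructor call before run(). A minimal sketch of that pattern follows; the config, evaluator, and column constants are hypothetical stand-ins, not values from this repository.

# Sketch of the chained BasicOptimizer pattern used in _configure_optimizer.
# `build_enopt_config`, `evaluate`, RESULT_COLUMNS and MIN_HEADER_LEN are
# placeholders for objects Everest constructs elsewhere.
from pathlib import Path

from ropt.plan import BasicOptimizer

enopt_config = build_enopt_config()  # hypothetical helper returning a ropt config dict
output_folder = Path("optimization_output")

variables = (
    BasicOptimizer(enopt_config=enopt_config, evaluator=evaluate)
    .add_table(
        columns=RESULT_COLUMNS,  # mapping of ropt result fields to table headers
        path=output_folder / "results.txt",
        min_header_len=MIN_HEADER_LEN,
        maximize=True,  # report maximization results although ropt minimizes
    )
    .run()
    .variables
)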
2 changes: 2 additions & 0 deletions src/everest/optimizer/everest2ropt.py
@@ -500,6 +500,8 @@ def _parse_environment(ever_config: EverestConfig, ropt_config):
     ropt_config["optimizer"]["output_dir"] = os.path.abspath(
         ever_config.optimization_output_dir
     )
+    if ever_config.environment.random_seed is not None:
+        ropt_config["gradient"]["seed"] = ever_config.environment.random_seed


 def everest2ropt(ever_config: EverestConfig) -> Dict[str, Any]:
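This is the other half of the seed change: instead of passing seed= to the optimizer constructor, Everest now writes it into the gradient section of the ropt config before the optimizer is built. A minimal, self-contained sketch of that translation step, with an illustrative seed value:

# Sketch of the seed translation performed by _parse_environment above.
from typing import Any, Dict


def add_seed(ropt_config: Dict[str, Any], random_seed: int | None) -> None:
    # With ropt 0.9 the seed is read from the "gradient" section of the
    # enopt config rather than from a constructor argument.
    if random_seed is not None:
        ropt_config.setdefault("gradient", {})["seed"] = random_seed


config: Dict[str, Any] = {}
add_seed(config, random_seed=123)  # illustrative value
assert config == {"gradient": {"seed": 123}}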
14 changes: 3 additions & 11 deletions tests/everest/test_simulator_cache.py
@@ -1,5 +1,5 @@
 import numpy as np
-from ropt.plan import OptimizationPlanRunner
+from ropt.plan import BasicOptimizer

 from ert.storage import open_storage
 from everest.config import EverestConfig, SimulatorConfig
@@ -33,11 +33,7 @@ def new_call(*args):

     # Run once, populating the cache of the simulator:
     variables1 = (
-        OptimizationPlanRunner(
-            enopt_config=ropt_config,
-            evaluator=simulator,
-            seed=config.environment.random_seed,
-        )
+        BasicOptimizer(enopt_config=ropt_config, evaluator=simulator)
         .run()
         .variables
     )
@@ -48,11 +44,7 @@ def new_call(*args):
     # Run again with the same simulator:
     n_evals = 0
     variables2 = (
-        OptimizationPlanRunner(
-            enopt_config=ropt_config,
-            evaluator=simulator,
-            seed=config.environment.random_seed,
-        )
+        BasicOptimizer(enopt_config=ropt_config, evaluator=simulator)
         .run()
         .variables
     )
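The test keeps exercising the simulator cache the same way as before: it wraps the evaluator to count forward-model evaluations, runs the optimizer twice with the same simulator, and then inspects the evaluation count to detect cache hits. A hypothetical sketch of such a counting wrapper, not the repository's actual new_call helper:

# Hypothetical counting wrapper illustrating the test's caching check.
n_evals = 0


def make_counting_evaluator(evaluator):
    def counting_call(*args, **kwargs):
        global n_evals
        n_evals += 1  # counts calls that reach the wrapped evaluator
        return evaluator(*args, **kwargs)

    return counting_call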
