From 02e1646fbc76bb0cc521b2a6c4f4227128c737eb Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Tue, 5 Nov 2024 09:03:53 +0100 Subject: [PATCH 1/8] Cast stop_long_running to bool It is not really a subclass of bool so pydantic validation fails, but its context is also never used so casting is OK. --- src/ert/config/queue_config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ert/config/queue_config.py b/src/ert/config/queue_config.py index 6fcb8713715..e8c21a938a2 100644 --- a/src/ert/config/queue_config.py +++ b/src/ert/config/queue_config.py @@ -349,7 +349,7 @@ def from_dict(cls, config_dict: ConfigDict) -> QueueConfig: selected_queue_system, queue_options, queue_options_test_run, - stop_long_running=stop_long_running, + stop_long_running=bool(stop_long_running), ) def create_local_copy(self) -> QueueConfig: @@ -360,7 +360,7 @@ def create_local_copy(self) -> QueueConfig: QueueSystem.LOCAL, self.queue_options_test_run, self.queue_options_test_run, - stop_long_running=self.stop_long_running, + stop_long_running=bool(self.stop_long_running), ) @property From 1739e49ddad68fb5583561524b5893d39660871e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98yvind=20Eide?= Date: Thu, 31 Oct 2024 09:34:29 +0100 Subject: [PATCH 2/8] Pass in arguments instead of unpacking inside function --- src/everest/bin/everest_script.py | 2 +- src/everest/bin/kill_script.py | 2 +- src/everest/bin/monitor_script.py | 2 +- src/everest/config/everest_config.py | 30 ++++++++++++++++++---------- src/everest/detached/__init__.py | 17 ++++++++-------- tests/everest/test_detached.py | 4 ++-- 6 files changed, 33 insertions(+), 24 deletions(-) diff --git a/src/everest/bin/everest_script.py b/src/everest/bin/everest_script.py index cf5c04e086d..d24a4f11603 100755 --- a/src/everest/bin/everest_script.py +++ b/src/everest/bin/everest_script.py @@ -94,7 +94,7 @@ def run_everest(options): logger = logging.getLogger("everest_main") server_state = everserver_status(options.config) - if server_is_running(options.config): + if server_is_running(*options.config.server_context): config_file = options.config.config_file print( "An optimization is currently running.\n" diff --git a/src/everest/bin/kill_script.py b/src/everest/bin/kill_script.py index 42fdbe82ac6..c9d0c6453e7 100755 --- a/src/everest/bin/kill_script.py +++ b/src/everest/bin/kill_script.py @@ -70,7 +70,7 @@ def _handle_keyboard_interrupt(signal, frame, after=False): def kill_everest(options): - if not server_is_running(options.config): + if not server_is_running(*options.config.server_context): print("Server is not running.") return diff --git a/src/everest/bin/monitor_script.py b/src/everest/bin/monitor_script.py index 672968b9dc0..310f4bd80ca 100755 --- a/src/everest/bin/monitor_script.py +++ b/src/everest/bin/monitor_script.py @@ -63,7 +63,7 @@ def monitor_everest(options): config: EverestConfig = options.config server_state = everserver_status(options.config) - if server_is_running(config): + if server_is_running(*config.server_context): run_detached_monitor(config, show_all_jobs=options.show_all_jobs) server_state = everserver_status(config) if server_state["status"] == ServerStatus.failed: diff --git a/src/everest/config/everest_config.py b/src/everest/config/everest_config.py index 4f6f3a8c7cf..f8c30b74797 100644 --- a/src/everest/config/everest_config.py +++ b/src/everest/config/everest_config.py @@ -5,7 +5,15 @@ from argparse import ArgumentParser from io import StringIO from pathlib import Path -from typing import TYPE_CHECKING, 
List, Literal, Optional, Protocol, no_type_check +from typing import ( + TYPE_CHECKING, + List, + Literal, + Optional, + Protocol, + Tuple, + no_type_check, +) from pydantic import ( AfterValidator, @@ -684,18 +692,20 @@ def hostfile_path(self): def server_info(self): """Load server information from the hostfile""" host_file_path = self.hostfile_path + try: + with open(host_file_path, "r", encoding="utf-8") as f: + json_string = f.read() - with open(host_file_path, "r", encoding="utf-8") as f: - json_string = f.read() - - data = json.loads(json_string) - if set(data.keys()) != {"host", "port", "cert", "auth"}: - raise RuntimeError("Malformed hostfile") - - return data + data = json.loads(json_string) + if set(data.keys()) != {"host", "port", "cert", "auth"}: + raise RuntimeError("Malformed hostfile") + return data + except FileNotFoundError: + # No host file + return {"host": None, "port": None, "cert": None, "auth": None} @property - def server_context(self): + def server_context(self) -> Tuple[str, str, Tuple[str, str]]: """Returns a tuple with - url of the server - path to the .cert file diff --git a/src/everest/detached/__init__.py b/src/everest/detached/__init__.py index d6bed829a38..9a984a51478 100644 --- a/src/everest/detached/__init__.py +++ b/src/everest/detached/__init__.py @@ -60,7 +60,7 @@ def start_server(config: EverestConfig, ert_config: ErtConfig, storage): """ Start an Everest server running the optimization defined in the config """ - if server_is_running(config): # better safe than sorry + if server_is_running(*config.server_context): # better safe than sorry return log_dir = config.log_dir @@ -181,7 +181,7 @@ def wait_for_server( Raise an exception when the timeout is reached. """ - if not server_is_running(config): + if not server_is_running(*config.server_context): sleep_time_increment = float(timeout) / (2**_HTTP_REQUEST_RETRY - 1) for retry_count in range(_HTTP_REQUEST_RETRY): # Failure may occur before contact with the server is established: @@ -225,11 +225,11 @@ def wait_for_server( sleep_time = sleep_time_increment * (2**retry_count) time.sleep(sleep_time) - if server_is_running(config): + if server_is_running(*config.server_context): return # If number of retries reached and server is not running - throw exception - if not server_is_running(config): + if not server_is_running(*config.server_context): raise RuntimeError("Failed to start server within configured timeout.") @@ -330,22 +330,21 @@ def wait_for_server_to_stop(config: EverestConfig, timeout): Raise an exception when the timeout is reached. 
""" - if server_is_running(config): + if server_is_running(*config.server_context): sleep_time_increment = float(timeout) / (2**_HTTP_REQUEST_RETRY - 1) for retry_count in range(_HTTP_REQUEST_RETRY): sleep_time = sleep_time_increment * (2**retry_count) time.sleep(sleep_time) - if not server_is_running(config): + if not server_is_running(*config.server_context): return # If number of retries reached and server still running - throw exception - if server_is_running(config): + if server_is_running(*config.server_context): raise Exception("Failed to stop server within configured timeout.") -def server_is_running(config: EverestConfig): +def server_is_running(url: str, cert: bool, auth: Tuple[str, str]): try: - url, cert, auth = config.server_context response = requests.get( url, verify=cert, diff --git a/tests/everest/test_detached.py b/tests/everest/test_detached.py index a8ecf4227b8..5955ce8ac04 100644 --- a/tests/everest/test_detached.py +++ b/tests/everest/test_detached.py @@ -103,7 +103,7 @@ def test_https_requests(copy_math_func_test_data_to_tmp): result.raise_for_status() # Test stopping server - assert server_is_running(everest_config) + assert server_is_running(*everest_config.server_context) if stop_server(everest_config): wait_for_server_to_stop(everest_config, 60) @@ -115,7 +115,7 @@ def test_https_requests(copy_math_func_test_data_to_tmp): ServerStatus.stopped, ServerStatus.completed, ] - assert not server_is_running(everest_config) + assert not server_is_running(*everest_config.server_context) else: context_stop_and_wait() server_status = everserver_status(everest_config) From 30037b84a452ae5e72e4127a96b90788363c97af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98yvind=20Eide?= Date: Fri, 1 Nov 2024 09:05:56 +0100 Subject: [PATCH 3/8] Inline _run_everest function --- src/everest/bin/everest_script.py | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/src/everest/bin/everest_script.py b/src/everest/bin/everest_script.py index d24a4f11603..f00137e69a5 100755 --- a/src/everest/bin/everest_script.py +++ b/src/everest/bin/everest_script.py @@ -80,16 +80,6 @@ def _build_args_parser(): return arg_parser -def _run_everest(options, ert_config, storage): - with PluginSiteConfigEnv(): - context = start_server(options.config, ert_config, storage) - print("Waiting for server ...") - wait_for_server(options.config, timeout=600, context=context) - print("Everest server found!") - run_detached_monitor(options.config, show_all_jobs=options.show_all_jobs) - wait_for_context() - - def run_everest(options): logger = logging.getLogger("everest_main") server_state = everserver_status(options.config) @@ -119,8 +109,13 @@ def run_everest(options): makedirs_if_needed(options.config.output_dir, roll_if_exists=True) - with open_storage(ert_config.ens_path, "w") as storage: - _run_everest(options, ert_config, storage) + with open_storage(ert_config.ens_path, "w") as storage, PluginSiteConfigEnv(): + context = start_server(options.config, ert_config, storage) + print("Waiting for server ...") + wait_for_server(options.config, timeout=600, context=context) + print("Everest server found!") + run_detached_monitor(options.config, show_all_jobs=options.show_all_jobs) + wait_for_context() server_state = everserver_status(options.config) server_state_info = server_state["message"] From 400850206a69c1b0d2621e98b712b6369c090505 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98yvind=20Eide?= Date: Fri, 1 Nov 2024 14:58:20 +0100 Subject: [PATCH 4/8] Remove unused function --- 
src/everest/detached/__init__.py | 60 -------------------------------- 1 file changed, 60 deletions(-) diff --git a/src/everest/detached/__init__.py b/src/everest/detached/__init__.py index 9a984a51478..beb26313f73 100644 --- a/src/everest/detached/__init__.py +++ b/src/everest/detached/__init__.py @@ -18,7 +18,6 @@ from ert.config import ErtConfig, QueueSystem from everest.config import EverestConfig from everest.config_keys import ConfigKeys as CK -from everest.simulator import JOB_FAILURE, JOB_SUCCESS, Status from everest.strings import ( EVEREST, EVEREST_SERVER_CONFIG, @@ -233,65 +232,6 @@ def wait_for_server( raise RuntimeError("Failed to start server within configured timeout.") -def get_sim_status(config: EverestConfig): - """Retrieve a seba database snapshot and return a list of simulation - information objects for each of the available batches in the database - - Example: [{progress: [[{'start_time': u'Thu, 16 May 2019 16:53:20 UTC', - 'end_time': u'Thu, 16 May 2019 16:53:20 UTC', - 'status': JOB_SUCCESS}]], - 'batch_number': 0, - 'event': 'update'}, ..] - """ - - seba_snapshot = SebaSnapshot(config.optimization_output_dir) - snapshot = seba_snapshot.get_snapshot() - - def timestamp2str(timestamp): - if timestamp: - return "{} UTC".format( - datetime.fromtimestamp(timestamp).strftime("%a, %d %b %Y %H:%M:%S %Z") - ) - else: - return None - - sim_progress: dict = {} - for sim in snapshot.simulation_data: - sim_metadata = { - "start_time": timestamp2str(sim.start_time), - "end_time": timestamp2str(sim.end_time), - "realization": sim.realization, - "simulation": sim.simulation, - "status": JOB_SUCCESS if sim.success else JOB_FAILURE, - } - if sim.batch in sim_progress: - sim_progress[sim.batch]["progress"].append([sim_metadata]) - else: - sim_progress[sim.batch] = { - "progress": [[sim_metadata]], - "batch_number": sim.batch, - "event": "update", - } - for status in sim_progress.values(): - fm_runs = len(status["progress"]) - failed = sum( - fm_run[0]["status"] == JOB_FAILURE for fm_run in status["progress"] - ) - status.update( - { - "status": Status( - waiting=0, - pending=0, - running=0, - failed=failed, - complete=fm_runs - failed, - ) - } - ) - - return list(sim_progress.values()) - - def get_opt_status(output_folder): """Retrieve a seba database snapshot and return a dictionary with optimization information.""" From 5eec39cd1584009946b28020c11fd9000201e79a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98yvind=20Eide?= Date: Fri, 4 Oct 2024 10:18:42 +0200 Subject: [PATCH 5/8] Differentiate on name --- src/ert/config/queue_config.py | 21 +++++++++++++++---- .../config/config_dict_generator.py | 1 + 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/src/ert/config/queue_config.py b/src/ert/config/queue_config.py index e8c21a938a2..2df612440b2 100644 --- a/src/ert/config/queue_config.py +++ b/src/ert/config/queue_config.py @@ -4,10 +4,12 @@ import re import shutil from abc import abstractmethod -from dataclasses import asdict, dataclass, field, fields -from typing import Any, Dict, List, Mapping, Optional, no_type_check +from dataclasses import asdict, field, fields +from typing import Any, Dict, List, Literal, Mapping, Optional, Union, no_type_check import pydantic +from pydantic import Field +from pydantic.dataclasses import dataclass from typing_extensions import Annotated from .parsing import ( @@ -27,6 +29,7 @@ @pydantic.dataclasses.dataclass(config={"extra": "forbid", "validate_assignment": True}) class QueueOptions: + name: str max_running: pydantic.NonNegativeInt 
= 0 submit_sleep: pydantic.NonNegativeFloat = 0.0 project_code: Optional[str] = None @@ -79,6 +82,8 @@ def driver_options(self) -> Dict[str, Any]: @pydantic.dataclasses.dataclass class LocalQueueOptions(QueueOptions): + name: Literal[QueueSystem.LOCAL] = QueueSystem.LOCAL + @property def driver_options(self) -> Dict[str, Any]: return {} @@ -86,6 +91,7 @@ def driver_options(self) -> Dict[str, Any]: @pydantic.dataclasses.dataclass class LsfQueueOptions(QueueOptions): + name: Literal[QueueSystem.LSF] = QueueSystem.LSF bhist_cmd: Optional[NonEmptyString] = None bjobs_cmd: Optional[NonEmptyString] = None bkill_cmd: Optional[NonEmptyString] = None @@ -97,6 +103,7 @@ class LsfQueueOptions(QueueOptions): @property def driver_options(self) -> Dict[str, Any]: driver_dict = asdict(self) + driver_dict.pop("name") driver_dict["exclude_hosts"] = driver_dict.pop("exclude_host") driver_dict["queue_name"] = driver_dict.pop("lsf_queue") driver_dict["resource_requirement"] = driver_dict.pop("lsf_resource") @@ -107,6 +114,7 @@ def driver_options(self) -> Dict[str, Any]: @pydantic.dataclasses.dataclass class TorqueQueueOptions(QueueOptions): + name: Literal[QueueSystem.TORQUE] = QueueSystem.TORQUE qsub_cmd: Optional[NonEmptyString] = None qstat_cmd: Optional[NonEmptyString] = None qdel_cmd: Optional[NonEmptyString] = None @@ -124,6 +132,7 @@ class TorqueQueueOptions(QueueOptions): @property def driver_options(self) -> Dict[str, Any]: driver_dict = asdict(self) + driver_dict.pop("name") driver_dict["queue_name"] = driver_dict.pop("queue") driver_dict.pop("max_running") driver_dict.pop("submit_sleep") @@ -141,6 +150,7 @@ def check_memory_per_job(cls, value: str) -> str: @pydantic.dataclasses.dataclass class SlurmQueueOptions(QueueOptions): + name: Literal[QueueSystem.SLURM] = QueueSystem.SLURM sbatch: NonEmptyString = "sbatch" scancel: NonEmptyString = "scancel" scontrol: NonEmptyString = "scontrol" @@ -156,6 +166,7 @@ class SlurmQueueOptions(QueueOptions): @property def driver_options(self) -> Dict[str, Any]: driver_dict = asdict(self) + driver_dict.pop("name") driver_dict["sbatch_cmd"] = driver_dict.pop("sbatch") driver_dict["scancel_cmd"] = driver_dict.pop("scancel") driver_dict["scontrol_cmd"] = driver_dict.pop("scontrol") @@ -255,8 +266,10 @@ class QueueConfig: realization_memory: int = 0 max_submit: int = 1 queue_system: QueueSystem = QueueSystem.LOCAL - queue_options: QueueOptions = field(default_factory=QueueOptions) - queue_options_test_run: QueueOptions = field(default_factory=LocalQueueOptions) + queue_options: Union[ + LsfQueueOptions, TorqueQueueOptions, SlurmQueueOptions, LocalQueueOptions + ] = Field(default_factory=LocalQueueOptions, discriminator="name") + queue_options_test_run: LocalQueueOptions = field(default_factory=LocalQueueOptions) stop_long_running: bool = False @no_type_check diff --git a/tests/ert/unit_tests/config/config_dict_generator.py b/tests/ert/unit_tests/config/config_dict_generator.py index 441c20dc557..d747afbddf9 100644 --- a/tests/ert/unit_tests/config/config_dict_generator.py +++ b/tests/ert/unit_tests/config/config_dict_generator.py @@ -138,6 +138,7 @@ def valid_queue_options(queue_system: str): for field in fields( queue_systems_and_options[QueueSystemWithGeneric(queue_system)] ) + if field.name != "name" ] From 9c30ce62d50b447fc6d487f3d45da1acd02f0925 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98yvind=20Eide?= Date: Wed, 4 Sep 2024 13:58:36 +0200 Subject: [PATCH 6/8] Fix bug where optional property could not be None --- src/ert/config/queue_config.py | 8 +++++--- 1 
file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/ert/config/queue_config.py b/src/ert/config/queue_config.py index 2df612440b2..9e0769dc4f0 100644 --- a/src/ert/config/queue_config.py +++ b/src/ert/config/queue_config.py @@ -142,7 +142,7 @@ def driver_options(self) -> Dict[str, Any]: @pydantic.field_validator("memory_per_job") @classmethod - def check_memory_per_job(cls, value: str) -> str: + def check_memory_per_job(cls, value: Optional[str]) -> Optional[str]: if not queue_memory_usage_formats[QueueSystem.TORQUE].validate(value): raise ValueError("wrong memory format") return value @@ -180,7 +180,7 @@ def driver_options(self) -> Dict[str, Any]: @pydantic.field_validator("memory", "memory_per_cpu") @classmethod - def check_memory_per_job(cls, value: str) -> str: + def check_memory_per_job(cls, value: Optional[str]) -> Optional[str]: if not queue_memory_usage_formats[QueueSystem.SLURM].validate(value): raise ValueError("wrong memory format") return value @@ -190,7 +190,9 @@ def check_memory_per_job(cls, value: str) -> str: class QueueMemoryStringFormat: suffixes: List[str] - def validate(self, mem_str_format: str) -> bool: + def validate(self, mem_str_format: Optional[str]) -> bool: + if mem_str_format is None: + return True return ( re.match( r"\d+(" + "|".join(self.suffixes) + ")$", From aba7e5cfb74846e47b25de1a23eb331e8365b400 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98yvind=20Eide?= Date: Wed, 4 Sep 2024 14:04:53 +0200 Subject: [PATCH 7/8] Fix bug where optional property was not marked as optional --- src/ert/config/queue_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ert/config/queue_config.py b/src/ert/config/queue_config.py index 9e0769dc4f0..9ed6653b263 100644 --- a/src/ert/config/queue_config.py +++ b/src/ert/config/queue_config.py @@ -157,7 +157,7 @@ class SlurmQueueOptions(QueueOptions): squeue: NonEmptyString = "squeue" exclude_host: str = "" include_host: str = "" - memory: str = "" + memory: Optional[NonEmptyString] = None memory_per_cpu: Optional[NonEmptyString] = None partition: Optional[NonEmptyString] = None # aka queue_name squeue_timeout: pydantic.PositiveFloat = 2 From 1146983fb0036bcc6a32be0f39f4b11203318b1c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98yvind=20Eide?= Date: Mon, 4 Nov 2024 15:02:14 +0100 Subject: [PATCH 8/8] Create driver from QueueOptions instead of QueueConfig --- src/ert/ensemble_evaluator/_ensemble.py | 2 +- src/ert/scheduler/__init__.py | 20 +++++++++---------- src/ert/simulator/batch_simulator_context.py | 2 +- .../test_async_queue_execution.py | 2 +- .../unit_tests/scheduler/test_lsf_driver.py | 2 +- .../unit_tests/scheduler/test_scheduler.py | 4 ++-- 6 files changed, 16 insertions(+), 16 deletions(-) diff --git a/src/ert/ensemble_evaluator/_ensemble.py b/src/ert/ensemble_evaluator/_ensemble.py index 14138b1ab42..ecc1d5c81d5 100644 --- a/src/ert/ensemble_evaluator/_ensemble.py +++ b/src/ert/ensemble_evaluator/_ensemble.py @@ -272,7 +272,7 @@ async def _evaluate_inner( # pylint: disable=too-many-branches raise ValueError("no config") # mypy try: - driver = create_driver(self._queue_config) + driver = create_driver(self._queue_config.queue_options) self._scheduler = Scheduler( driver, self.active_reals, diff --git a/src/ert/scheduler/__init__.py b/src/ert/scheduler/__init__.py index b71c7591a1d..349e1d3838a 100644 --- a/src/ert/scheduler/__init__.py +++ b/src/ert/scheduler/__init__.py @@ -15,21 +15,21 @@ from .slurm_driver import SlurmDriver if TYPE_CHECKING: - from ert.config.queue_config import 
QueueConfig + from ert.config.queue_config import QueueOptions -def create_driver(config: QueueConfig) -> Driver: - if config.queue_system == QueueSystem.LOCAL: - return LocalDriver(**config.queue_options.driver_options) - elif config.queue_system == QueueSystem.TORQUE: - return OpenPBSDriver(**config.queue_options.driver_options) - elif config.queue_system == QueueSystem.LSF: - return LsfDriver(**config.queue_options.driver_options) - elif config.queue_system == QueueSystem.SLURM: +def create_driver(queue_options: QueueOptions) -> Driver: + if queue_options.name == QueueSystem.LOCAL: + return LocalDriver() + elif queue_options.name == QueueSystem.TORQUE: + return OpenPBSDriver(**queue_options.driver_options) + elif queue_options.name == QueueSystem.LSF: + return LsfDriver(**queue_options.driver_options) + elif queue_options.name == QueueSystem.SLURM: return SlurmDriver( **dict( {"user": getpwuid(getuid()).pw_name}, - **config.queue_options.driver_options, + **queue_options.driver_options, ) ) else: diff --git a/src/ert/simulator/batch_simulator_context.py b/src/ert/simulator/batch_simulator_context.py index 23147381836..41126c60561 100644 --- a/src/ert/simulator/batch_simulator_context.py +++ b/src/ert/simulator/batch_simulator_context.py @@ -145,7 +145,7 @@ def __post_init__(self) -> None: """ Handle which can be used to query status and results for batch simulation. """ - driver = create_driver(self.queue_config) + driver = create_driver(self.queue_config.queue_options) self._scheduler = Scheduler(driver, max_running=self.queue_config.max_running) # fill in the missing geo_id data diff --git a/tests/ert/unit_tests/ensemble_evaluator/test_async_queue_execution.py b/tests/ert/unit_tests/ensemble_evaluator/test_async_queue_execution.py index b61601317f2..72845b3f753 100644 --- a/tests/ert/unit_tests/ensemble_evaluator/test_async_queue_execution.py +++ b/tests/ert/unit_tests/ensemble_evaluator/test_async_queue_execution.py @@ -14,7 +14,7 @@ async def test_happy_path( ensemble = make_ensemble(monkeypatch, tmpdir, 1, 1) queue = Scheduler( - driver=create_driver(queue_config), + driver=create_driver(queue_config.queue_options), realizations=ensemble.reals, ens_id="ee_0", ) diff --git a/tests/ert/unit_tests/scheduler/test_lsf_driver.py b/tests/ert/unit_tests/scheduler/test_lsf_driver.py index 3132d2965eb..411e5649807 100644 --- a/tests/ert/unit_tests/scheduler/test_lsf_driver.py +++ b/tests/ert/unit_tests/scheduler/test_lsf_driver.py @@ -194,7 +194,7 @@ async def test_submit_with_project_code(): "FORWARD_MODEL": [("FLOW",), ("ECLIPSE",), ("RMS",)], } queue_config = QueueConfig.from_dict(queue_config_dict) - driver: LsfDriver = create_driver(queue_config) + driver: LsfDriver = create_driver(queue_config.queue_options) await driver.submit(0, "sleep") assert f"-P {queue_config.queue_options.project_code}" in Path( "captured_bsub_args" diff --git a/tests/ert/unit_tests/scheduler/test_scheduler.py b/tests/ert/unit_tests/scheduler/test_scheduler.py index 3b5de6903d8..d6d77a4c5c1 100644 --- a/tests/ert/unit_tests/scheduler/test_scheduler.py +++ b/tests/ert/unit_tests/scheduler/test_scheduler.py @@ -638,7 +638,7 @@ def test_scheduler_create_lsf_driver(): ], } queue_config = QueueConfig.from_dict(queue_config_dict) - driver = create_driver(queue_config) + driver = create_driver(queue_config.queue_options) assert isinstance(driver, LsfDriver) assert str(driver._bsub_cmd) == bsub_cmd assert str(driver._bkill_cmd) == bkill_cmd @@ -678,7 +678,7 @@ def test_scheduler_create_openpbs_driver(): ], } 
queue_config = QueueConfig.from_dict(queue_config_dict) - driver = create_driver(queue_config) + driver = create_driver(queue_config.queue_options) assert isinstance(driver, OpenPBSDriver) assert driver._queue_name == queue_name assert driver._keep_qsub_output == True if keep_qsub_output == "True" else False
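Illustration for [PATCH 1/8]: a minimal, self-contained sketch of why the cast is needed. BoolLike below is a hypothetical stand-in for the bool-like, context-carrying value found in the parsed config dict (not the real ert class), and pydantic v2 is assumed. Lax bool validation accepts real bools and a few coercible scalars, but not arbitrary objects, so only the explicit bool() cast passes.

    import pydantic


    @pydantic.dataclasses.dataclass(config={"extra": "forbid", "validate_assignment": True})
    class Cfg:
        stop_long_running: bool = False


    class BoolLike:
        # hypothetical stand-in for the context value carried in the config dict
        def __bool__(self) -> bool:
            return True


    try:
        Cfg(stop_long_running=BoolLike())  # rejected: not a bool, nor coercible
    except pydantic.ValidationError as err:
        print(err.errors()[0]["type"])  # e.g. "bool_type"

    print(Cfg(stop_long_running=bool(BoolLike())))  # the cast applied in the patch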
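Illustration for [PATCH 2/8]: server_is_running now receives the already-unpacked context, so call sites splat EverestConfig.server_context. The spelled-out equivalent of the new call, assuming the tuple layout documented in this series (url, path to the .cert file, auth tuple):

    url, cert, auth = config.server_context
    running = server_is_running(url, cert, auth)  # same as server_is_running(*config.server_context)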
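Illustration for [PATCH 5/8]: the new name: Literal[...] field is a pydantic discriminator, which is what lets QueueConfig.queue_options validate parsed data straight into the right options subclass. A reduced sketch of the mechanism, using simplified stand-in classes and string discriminators instead of the QueueSystem enum (pydantic v2 assumed):

    from typing import Literal, Optional, Union

    import pydantic
    from pydantic import Field, TypeAdapter
    from typing_extensions import Annotated


    @pydantic.dataclasses.dataclass
    class LocalOptions:
        name: Literal["LOCAL"] = "LOCAL"
        max_running: pydantic.NonNegativeInt = 0


    @pydantic.dataclasses.dataclass
    class LsfOptions:
        name: Literal["LSF"] = "LSF"
        max_running: pydantic.NonNegativeInt = 0
        lsf_queue: Optional[str] = None


    AnyOptions = Annotated[Union[LocalOptions, LsfOptions], Field(discriminator="name")]

    opts = TypeAdapter(AnyOptions).validate_python({"name": "LSF", "lsf_queue": "mr"})
    assert isinstance(opts, LsfOptions)  # resolved via the "name" discriminator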
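Illustration for [PATCH 6/8] and [PATCH 7/8]: together these make an unset memory request legal: the Slurm memory field now defaults to None and the format check short-circuits on None. A reduced sketch of the validator behaviour, with a plain dataclass and a simplified suffix list rather than ert's actual table:

    import re
    from dataclasses import dataclass
    from typing import List, Optional


    @dataclass
    class QueueMemoryStringFormat:
        suffixes: List[str]

        def validate(self, mem_str_format: Optional[str]) -> bool:
            if mem_str_format is None:
                return True  # an unset memory request is always valid
            return (
                re.match(r"\d+(" + "|".join(self.suffixes) + ")$", mem_str_format)
                is not None
            )


    slurm_memory = QueueMemoryStringFormat(suffixes=["", "K", "M", "G", "T"])
    assert slurm_memory.validate(None)  # allowed after these patches
    assert slurm_memory.validate("16G")
    assert not slurm_memory.validate("16GB")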
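Illustration for [PATCH 8/8]: with the driver factory keyed on the options' name instead of QueueConfig.queue_system, callers only need the options object. A hedged usage sketch, assuming the LsfQueueOptions and create_driver definitions exactly as they appear in this series:

    from ert.config.queue_config import LsfQueueOptions
    from ert.scheduler import create_driver

    options = LsfQueueOptions(lsf_queue="mr", max_running=8)
    driver = create_driver(options)  # picks LsfDriver from options.name; no full QueueConfig needed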