Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Backport test fixes to version-11.0 #9134

Open
wants to merge 6 commits into
base: version-11.0
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/test_ert.yml
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ jobs:
- name: CLI Test
if: inputs.test-type == 'cli-tests'
run: |
pytest --cov=ert --cov-report=xml:cov1.xml --junit-xml=junit.xml -n logical -v --benchmark-disable --dist loadgroup tests/ui_tests/cli
pytest --cov=ert --cov-report=xml:cov1.xml --junit-xml=junit.xml -n logical --maxprocesses=2 -v --benchmark-disable --dist loadgroup tests/ui_tests/cli

- name: Unit Test
if: inputs.test-type == 'unit-tests'
Expand Down
13 changes: 6 additions & 7 deletions ci/testkomodo.sh
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
copy_test_files () {
copy_test_files() {
cp -r ${CI_SOURCE_ROOT}/tests ${CI_TEST_ROOT}
ln -s ${CI_SOURCE_ROOT}/test-data ${CI_TEST_ROOT}/test-data

Expand All @@ -11,11 +11,11 @@ copy_test_files () {
ln -s ${CI_SOURCE_ROOT}/pyproject.toml ${CI_TEST_ROOT}/pyproject.toml
}

install_test_dependencies () {
install_test_dependencies() {
pip install ".[dev]"
}

run_ert_with_opm () {
run_ert_with_opm() {
pushd "${CI_TEST_ROOT}"

cp -r "${CI_SOURCE_ROOT}/test-data/flow_example" ert_with_opm
Expand All @@ -24,7 +24,7 @@ run_ert_with_opm () {
ert test_run flow.ert ||
(
# In case ert fails, print log files if they are there:
cat spe1_out/realization-0/iter-0/STATUS || true
cat spe1_out/realization-0/iter-0/STATUS || true
cat spe1_out/realization-0/iter-0/ERROR || true
cat spe1_out/realization-0/iter-0/FLOW.stderr.0 || true
cat spe1_out/realization-0/iter-0/FLOW.stdout.0 || true
Expand All @@ -33,15 +33,15 @@ run_ert_with_opm () {
popd
}

start_tests () {
start_tests() {
export NO_PROXY=localhost,127.0.0.1

export ECL_SKIP_SIGNAL=ON

pushd ${CI_TEST_ROOT}/tests

# Run all ert tests except tests evaluating memory consumption and tests requiring windows manager (GUI tests)
pytest --eclipse-simulator -n logical --show-capture=stderr -v --max-worker-restart 0 \
pytest --eclipse-simulator -n auto --show-capture=stderr -v --max-worker-restart 0 \
-m "not limit_memory and not requires_window_manager" --benchmark-disable --dist loadgroup
return_code_ert_main_tests=$?

Expand Down Expand Up @@ -72,7 +72,6 @@ start_tests () {

set -e


return_code_combined_tests=0
# We error if one or more returncodes are nonzero
if [ "$return_code_ert_main_tests" -ne 0 ]; then
Expand Down
2 changes: 1 addition & 1 deletion src/ert/ensemble_evaluator/evaluator.py
Original file line number Diff line number Diff line change
Expand Up @@ -260,7 +260,7 @@ async def handle_dispatch(self, websocket: WebSocketServerProtocol) -> None:
f"closing connection to dispatcher: {ex}"
)
await websocket.close(
code=1011, reason=f"failed handling {event}"
code=1011, reason=f"failed handling message {raw_msg!r}"
)
return

Expand Down
1 change: 1 addition & 0 deletions src/ert/scheduler/scheduler.py
Original file line number Diff line number Diff line change
Expand Up @@ -276,6 +276,7 @@ async def execute(
# does internalization at a time
forward_model_ok_lock = asyncio.Lock()
for iens, job in self._jobs.items():
await asyncio.sleep(0)
if job.state != JobState.ABORTED:
self._job_tasks[iens] = asyncio.create_task(
job.run(sem, forward_model_ok_lock, self._max_submit),
Expand Down
9 changes: 8 additions & 1 deletion tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -187,16 +187,22 @@ def _copy_case(path):
@pytest.fixture()
def copy_poly_case(copy_case):
    """Copy the poly_example test case into the cwd and cap local-queue concurrency."""
    copy_case("poly_example")
    # Append a queue option limiting concurrently running realizations to 12 —
    # presumably to keep CI hosts from being oversubscribed; confirm with test owners.
    with open("poly.ert", "a", encoding="utf-8") as fh:
        fh.write("QUEUE_OPTION LOCAL MAX_RUNNING 12\n")


@pytest.fixture()
def copy_snake_oil_field(copy_case):
    """Copy the snake_oil_field test case into the cwd and cap local-queue concurrency."""
    copy_case("snake_oil_field")
    # Append a queue option limiting concurrently running realizations to 12 —
    # presumably to keep CI hosts from being oversubscribed; confirm with test owners.
    with open("snake_oil_field.ert", "a", encoding="utf-8") as fh:
        fh.write("QUEUE_OPTION LOCAL MAX_RUNNING 12\n")


@pytest.fixture()
def copy_snake_oil_case(copy_case):
    """Copy the snake_oil test case into the cwd and cap local-queue concurrency."""
    copy_case("snake_oil")
    # Append a queue option limiting concurrently running realizations to 12 —
    # presumably to keep CI hosts from being oversubscribed; confirm with test owners.
    with open("snake_oil.ert", "a", encoding="utf-8") as fh:
        fh.write("QUEUE_OPTION LOCAL MAX_RUNNING 12\n")


@pytest.fixture(
Expand Down Expand Up @@ -374,7 +380,8 @@ def _run_heat_equation(source_root):
os.path.join(source_root, "test-data", "heat_equation"), "test_data"
)
os.chdir("test_data")

with open("config.ert", "a", encoding="utf-8") as fh:
fh.write("QUEUE_OPTION LOCAL MAX_RUNNING 12\n")
parser = ArgumentParser(prog="test_main")
parsed = ert_parser(
parser,
Expand Down
2 changes: 1 addition & 1 deletion tests/ui_tests/cli/analysis/test_adaptive_localization.py
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,7 @@ def _evaluate(coeffs, x):
f.write(
"""
QUEUE_SYSTEM LOCAL
QUEUE_OPTION LOCAL MAX_RUNNING 50
QUEUE_OPTION LOCAL MAX_RUNNING 12

RUNPATH poly_out/realization-<IENS>/iter-<ITER>

Expand Down
16 changes: 16 additions & 0 deletions tests/ui_tests/cli/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
import os

import pytest


@pytest.fixture(autouse=True)
def reduce_omp_num_threads_count():
    """Pin OMP_NUM_THREADS to 1 for the duration of each test.

    The previous value (or its absence) is restored during teardown, so the
    environment seen by later code is exactly what it was before the test.
    """
    saved_value = os.environ.get("OMP_NUM_THREADS")
    os.environ["OMP_NUM_THREADS"] = "1"

    yield

    # Undo the override: drop the variable entirely if it was unset before.
    if saved_value is None:
        del os.environ["OMP_NUM_THREADS"]
    else:
        os.environ["OMP_NUM_THREADS"] = saved_value
15 changes: 15 additions & 0 deletions tests/unit_tests/ensemble_evaluator/test_ensemble_evaluator.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,12 @@
import datetime
from functools import partial
from typing import cast
from unittest.mock import MagicMock

import pytest
from hypothesis import given
from hypothesis import strategies as st
from websockets.server import WebSocketServerProtocol

from _ert.events import (
EESnapshot,
Expand Down Expand Up @@ -62,6 +64,19 @@ async def mock_failure(message, *args, **kwargs):
await evaluator.run_and_get_successful_realizations()


async def test_when_dispatch_is_given_invalid_event_the_socket_is_closed(
    make_ee_config,
):
    """A dispatcher that sends a non-JSON message gets its websocket closed.

    The close must carry code 1011 and a reason quoting the offending raw
    message, matching EnsembleEvaluator.handle_dispatch's failure path.
    """
    evaluator = EnsembleEvaluator(TestEnsemble(0, 2, 2, id_="0"), make_ee_config())

    websocket = MagicMock(spec=WebSocketServerProtocol)
    # Async iteration over the mocked socket yields a single malformed payload.
    websocket.__aiter__.return_value = ["invalid_json"]
    await evaluator.handle_dispatch(websocket)
    websocket.close.assert_called_once_with(
        code=1011, reason="failed handling message 'invalid_json'"
    )


async def test_no_config_raises_valueerror_when_running():
evaluator = EnsembleEvaluator(TestEnsemble(0, 2, 2, id_="0"), None)
with pytest.raises(ValueError, match="no config for evaluator"):
Expand Down
23 changes: 20 additions & 3 deletions tests/unit_tests/forward_model_runner/test_job.py
Original file line number Diff line number Diff line change
Expand Up @@ -144,7 +144,8 @@ def max_memory_per_subprocess_layer(layers: int) -> int:
assert max_seens[1] + memory_per_numbers_list < max_seens[2]


@pytest.mark.flaky(reruns=3)
@pytest.mark.integration_test
@pytest.mark.flaky(reruns=5)
@pytest.mark.usefixtures("use_tmpdir")
def test_memory_profile_in_running_events():
scriptname = "increasing_memory.py"
Expand Down Expand Up @@ -190,10 +191,26 @@ def test_memory_profile_in_running_events():
# Avoid the tail of the array, then the process is tearing down
).all(), f"Emitted memory usage not increasing, got {emitted_rss_values[:-3]=}"

memory_deltas = np.diff(np.array(emitted_rss_values[7:]))
if not len(memory_deltas):
# This can happen if memory profiling is lagging behind the process
# we are trying to track.
memory_deltas = np.diff(np.array(emitted_rss_values[2:]))

lenience_factor = 4
# Ideally this is 1 which corresponds to being able to track every memory
# allocation perfectly. But on loaded hardware, some of the allocations can be
# missed due to process scheduling. Bump as needed.

assert (
np.diff(np.array(emitted_rss_values[7:])).max() < 3 * 1024 * 1024
max(memory_deltas) < lenience_factor * 1024 * 1024
# Avoid the first steps, which includes the Python interpreters memory usage
), f"Memory increased too sharply, missing a measurement? Got {emitted_rss_values[7:]=}"
), (
"Memory increased too sharply, missing a measurement? "
f"Got {emitted_rss_values=} with selected diffs {memory_deltas}. "
"If the maximal number is at the beginning, it is probably the Python process "
"startup that is tracked."
)

if sys.platform.startswith("darwin"):
# No oom_score on MacOS
Expand Down
Loading