Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

core: Add ruff rules PT (pytest) #29381

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion libs/core/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ python = ">=3.12.4"
[tool.poetry.extras]

[tool.ruff.lint]
select = [ "ASYNC", "B", "C4", "COM", "DJ", "E", "EM", "EXE", "F", "FLY", "FURB", "I", "ICN", "INT", "LOG", "N", "NPY", "PD", "PIE", "Q", "RSE", "S", "SIM", "SLOT", "T10", "T201", "TID", "TRY", "UP", "W", "YTT",]
select = [ "ASYNC", "B", "C4", "COM", "DJ", "E", "EM", "EXE", "F", "FLY", "FURB", "I", "ICN", "INT", "LOG", "N", "NPY", "PD", "PIE", "PT", "Q", "RSE", "S", "SIM", "SLOT", "T10", "T201", "TID", "TRY", "UP", "W", "YTT",]
ignore = [ "COM812", "UP007", "S110", "S112",]

[tool.coverage.run]
Expand Down
2 changes: 1 addition & 1 deletion libs/core/tests/unit_tests/_api/test_beta_decorator.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@


@pytest.mark.parametrize(
"kwargs, expected_message",
("kwargs", "expected_message"),
[
(
{
Expand Down
6 changes: 4 additions & 2 deletions libs/core/tests/unit_tests/_api/test_deprecation.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@


@pytest.mark.parametrize(
"kwargs, expected_message",
("kwargs", "expected_message"),
[
(
{
Expand Down Expand Up @@ -404,7 +404,9 @@ def test_deprecated_method_pydantic() -> None:
def test_raise_error_for_bad_decorator() -> None:
"""Verify that errors raised on init rather than on use."""
# Should not specify both `alternative` and `alternative_import`
with pytest.raises(ValueError):
with pytest.raises(
ValueError, match="Cannot specify both alternative and alternative_import"
):

@deprecated(since="2.0.0", alternative="NewClass", alternative_import="hello")
def deprecated_function() -> str:
Expand Down
2 changes: 1 addition & 1 deletion libs/core/tests/unit_tests/caches/test_in_memory_cache.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ def test_initialization() -> None:
assert cache_with_maxsize._cache == {}
assert cache_with_maxsize._maxsize == 2

with pytest.raises(ValueError):
with pytest.raises(ValueError, match="maxsize must be greater than 0"):
InMemoryCache(maxsize=0)


Expand Down
9 changes: 5 additions & 4 deletions libs/core/tests/unit_tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,10 @@
from uuid import UUID

import pytest
from pytest import Config, Function, Parser
from pytest_mock import MockerFixture


def pytest_addoption(parser: Parser) -> None:
def pytest_addoption(parser: pytest.Parser) -> None:
"""Add custom command line options to pytest."""
parser.addoption(
"--only-extended",
Expand All @@ -23,7 +22,9 @@ def pytest_addoption(parser: Parser) -> None:
)


def pytest_collection_modifyitems(config: Config, items: Sequence[Function]) -> None:
def pytest_collection_modifyitems(
config: pytest.Config, items: Sequence[pytest.Function]
) -> None:
"""Add implementations for handling custom markers.

At the moment, this adds support for a custom `requires` marker.
Expand Down Expand Up @@ -91,7 +92,7 @@ def test_something():
)


@pytest.fixture()
@pytest.fixture
def deterministic_uuids(mocker: MockerFixture) -> MockerFixture:
side_effect = (
UUID(f"00000000-0000-4000-8000-{i:012}", version=4) for i in range(10000)
Expand Down
8 changes: 4 additions & 4 deletions libs/core/tests/unit_tests/indexing/test_in_memory_indexer.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,16 +16,16 @@


class TestDocumentIndexerTestSuite(DocumentIndexerTestSuite):
@pytest.fixture()
@pytest.fixture
def index(self) -> Generator[DocumentIndex, None, None]:
yield InMemoryDocumentIndex()
yield InMemoryDocumentIndex() # noqa: PT022


class TestAsyncDocumentIndexerTestSuite(AsyncDocumentIndexTestSuite):
# Something funky is going on with mypy and async pytest fixture
@pytest.fixture()
@pytest.fixture
async def index(self) -> AsyncGenerator[DocumentIndex, None]: # type: ignore
yield InMemoryDocumentIndex()
yield InMemoryDocumentIndex() # noqa: PT022


def test_sync_retriever() -> None:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
from langchain_core.indexing import InMemoryRecordManager


@pytest.fixture()
@pytest.fixture
def manager() -> InMemoryRecordManager:
"""Initialize the test database and yield the TimestampedSet instance."""
# Initialize and yield the TimestampedSet instance
Expand Down
48 changes: 40 additions & 8 deletions libs/core/tests/unit_tests/indexing/test_indexing.py
Original file line number Diff line number Diff line change
Expand Up @@ -466,11 +466,19 @@ def test_incremental_fails_with_bad_source_ids(
]
)

with pytest.raises(ValueError):
with pytest.raises(
ValueError,
match="Source id key is required when cleanup mode is "
"incremental or scoped_full.",
):
# Should raise an error because no source id function was specified
index(loader, record_manager, vector_store, cleanup="incremental")

with pytest.raises(ValueError):
with pytest.raises(
ValueError,
match="Source ids are required when cleanup mode "
"is incremental or scoped_full.",
):
# Should raise an error because no source id function was specified
index(
loader,
Expand Down Expand Up @@ -502,7 +510,11 @@ async def test_aincremental_fails_with_bad_source_ids(
]
)

with pytest.raises(ValueError):
with pytest.raises(
ValueError,
match="Source id key is required when cleanup mode "
"is incremental or scoped_full.",
):
# Should raise an error because no source id function was specified
await aindex(
loader,
Expand All @@ -511,7 +523,11 @@ async def test_aincremental_fails_with_bad_source_ids(
cleanup="incremental",
)

with pytest.raises(ValueError):
with pytest.raises(
ValueError,
match="Source ids are required when cleanup mode "
"is incremental or scoped_full.",
):
# Should raise an error because no source id function was specified
await aindex(
loader,
Expand Down Expand Up @@ -771,11 +787,19 @@ def test_scoped_full_fails_with_bad_source_ids(
]
)

with pytest.raises(ValueError):
with pytest.raises(
ValueError,
match="Source id key is required when cleanup mode "
"is incremental or scoped_full.",
):
# Should raise an error because no source id function was specified
index(loader, record_manager, vector_store, cleanup="scoped_full")

with pytest.raises(ValueError):
with pytest.raises(
ValueError,
match="Source ids are required when cleanup mode "
"is incremental or scoped_full.",
):
# Should raise an error because no source id function was specified
index(
loader,
Expand Down Expand Up @@ -807,11 +831,19 @@ async def test_ascoped_full_fails_with_bad_source_ids(
]
)

with pytest.raises(ValueError):
with pytest.raises(
ValueError,
match="Source id key is required when cleanup mode "
"is incremental or scoped_full.",
):
# Should raise an error because no source id function was specified
await aindex(loader, arecord_manager, vector_store, cleanup="scoped_full")

with pytest.raises(ValueError):
with pytest.raises(
ValueError,
match="Source ids are required when cleanup mode "
"is incremental or scoped_full.",
):
# Should raise an error because no source id function was specified
await aindex(
loader,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,7 @@ async def test_async_batch_size(messages: list, messages_2: list) -> None:
assert (cb.traced_runs[0].extra or {}).get("batch_size") == 1


@pytest.mark.xfail(reason="This test is failing due to a bug in the testing code")
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The linter found an issue in the test code here.
It was not detected before because `eval_response` was never actually executed.

async def test_stream_error_callback() -> None:
message = "test"

Expand All @@ -112,17 +113,15 @@ def eval_response(callback: BaseFakeCallbackHandler, i: int) -> None:
responses=[message],
error_on_chunk_number=i,
)
cb_async = FakeAsyncCallbackHandler()
with pytest.raises(FakeListChatModelError):
cb_async = FakeAsyncCallbackHandler()
async for _ in llm.astream("Dummy message", callbacks=[cb_async]):
pass
eval_response(cb_async, i)
_ = [_ async for _ in llm.astream("Dummy message", callbacks=[cb_async])]
eval_response(cb_async, i)

cb_sync = FakeCallbackHandler()
for _ in llm.stream("Dumy message", callbacks=[cb_sync]):
pass

eval_response(cb_sync, i)
cb_sync = FakeCallbackHandler()
with pytest.raises(FakeListChatModelError):
_ = list(llm.stream("Dummy message", callbacks=[cb_sync]))
eval_response(cb_sync, i)


async def test_astream_fallback_to_ainvoke() -> None:
Expand Down
24 changes: 14 additions & 10 deletions libs/core/tests/unit_tests/language_models/llms/test_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,7 @@ async def test_async_batch_size() -> None:
assert (cb.traced_runs[0].extra or {}).get("batch_size") == 1


@pytest.mark.xfail(reason="This test is failing due to a bug in the testing code")
Copy link
Collaborator Author

@cbornet cbornet Jan 23, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The linter found an issue in the test code here.
It was not detected before because `eval_response` was never actually executed.

async def test_stream_error_callback() -> None:
message = "test"

Expand All @@ -110,17 +111,20 @@ def eval_response(callback: BaseFakeCallbackHandler, i: int) -> None:
responses=[message],
error_on_chunk_number=i,
)
cb_async = FakeAsyncCallbackHandler()
with pytest.raises(FakeListLLMError):
cb_async = FakeAsyncCallbackHandler()
async for _ in llm.astream("Dummy message", callbacks=[cb_async]):
pass
eval_response(cb_async, i)

cb_sync = FakeCallbackHandler()
for _ in llm.stream("Dumy message", callbacks=[cb_sync]):
pass

eval_response(cb_sync, i)
_ = [
_
async for _ in llm.astream(
"Dummy message", config={"callbacks": [cb_async]}
)
]
eval_response(cb_async, i)

cb_sync = FakeCallbackHandler()
with pytest.raises(FakeListLLMError):
_ = list(llm.stream("Dummy message", config={"callbacks": [cb_sync]}))
eval_response(cb_sync, i)


async def test_astream_fallback_to_ainvoke() -> None:
Expand Down
14 changes: 12 additions & 2 deletions libs/core/tests/unit_tests/messages/test_utils.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import base64
import json
import re
import typing
from collections.abc import Sequence
from typing import Any, Callable, Optional, Union
Expand Down Expand Up @@ -424,7 +425,14 @@ def test_trim_messages_bound_model_token_counter() -> None:

def test_trim_messages_bad_token_counter() -> None:
trimmer = trim_messages(max_tokens=10, token_counter={})
with pytest.raises(ValueError):
with pytest.raises(
ValueError,
match=re.escape(
"'token_counter' expected to be a model that implements "
"'get_num_tokens_from_messages()' or a function. "
"Received object of type <class 'dict'>."
),
):
trimmer.invoke([HumanMessage("foobar")])


Expand Down Expand Up @@ -653,7 +661,9 @@ def test_convert_to_messages_openai_refusal() -> None:
assert actual == expected

# Raises error if content is missing.
with pytest.raises(ValueError):
with pytest.raises(
ValueError, match="Message dict must contain 'role' and 'content' keys"
):
convert_to_messages([{"role": "assistant", "refusal": "9.1"}])


Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -157,9 +157,10 @@ def test_pydantic_output_parser_fail() -> None:
pydantic_object=TestModel
)

with pytest.raises(OutputParserException) as e:
with pytest.raises(
OutputParserException, match="Failed to parse TestModel from completion"
):
pydantic_parser.parse(DEF_RESULT_FAIL)
assert "Failed to parse TestModel from completion" in str(e)


def test_pydantic_output_parser_type_inference() -> None:
Expand Down
Loading
Loading