Bump ruff version to 0.9
cbornet committed Jan 14, 2025
1 parent c55af44 commit e0ed709
Showing 46 changed files with 473 additions and 355 deletions.
2 changes: 1 addition & 1 deletion libs/core/langchain_core/_api/beta_decorator.py
@@ -215,7 +215,7 @@ def finalize(wrapper: Callable[..., Any], new_doc: str) -> T:
old_doc = inspect.cleandoc(old_doc or "").strip("\n") or ""
components = [message, addendum]
details = " ".join([component.strip() for component in components if component])
-new_doc = f".. beta::\n" f" {details}\n\n" f"{old_doc}\n"
+new_doc = f".. beta::\n {details}\n\n{old_doc}\n"

if inspect.iscoroutinefunction(obj):
finalized = finalize(awarning_emitting_wrapper, new_doc)
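Many hunks in this commit come from the same ruff 0.9 formatter change: implicitly concatenated string literals are joined into a single literal when the result fits on one line. A minimal sketch of the before/after, with illustrative values rather than the library's:

    # Adjacent (f-)string literals are implicitly concatenated, so both
    # forms build the same string; ruff 0.9 merges the fragments whenever
    # the joined literal fits within the line length.
    details = "now in beta"
    old_doc = "Original docstring."

    fragmented = f".. beta::\n" f"   {details}\n\n" f"{old_doc}\n"  # pre-0.9
    joined = f".. beta::\n   {details}\n\n{old_doc}\n"  # 0.9

    assert fragmented == joined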
1 change: 1 addition & 0 deletions libs/core/langchain_core/_api/deprecation.py
@@ -240,6 +240,7 @@ def finalize(wrapper: Callable[..., Any], new_doc: str) -> T:
exclude=obj.exclude,
),
)

elif isinstance(obj, FieldInfoV2):
wrapped = None
if not _obj_type:
3 changes: 1 addition & 2 deletions libs/core/langchain_core/callbacks/manager.py
@@ -385,8 +385,7 @@ async def _ahandle_event_for_handler(
)
except Exception as e:
logger.warning(
-    f"Error in {handler.__class__.__name__}.{event_name} callback:"
-    f" {repr(e)}"
+    f"Error in {handler.__class__.__name__}.{event_name} callback: {repr(e)}"
)
if handler.raise_error:
raise e
3 changes: 2 additions & 1 deletion libs/core/langchain_core/indexing/in_memory.py
@@ -1,3 +1,4 @@
+import operator
import uuid
from collections.abc import Sequence
from typing import Any, Optional, cast
@@ -80,5 +81,5 @@ def _get_relevant_documents(
count = document.page_content.count(query)
counts_by_doc.append((document, count))

-counts_by_doc.sort(key=lambda x: x[1], reverse=True)
+counts_by_doc.sort(key=operator.itemgetter(1), reverse=True)
return [doc.model_copy() for doc, count in counts_by_doc[: self.top_k]]
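Swapping the sorting lambda for operator.itemgetter follows ruff's reimplemented-operator rule (FURB118): itemgetter(1) is an equivalent, named, picklable callable. A small sketch with stand-in documents:

    import operator

    counts_by_doc = [("doc-a", 3), ("doc-b", 7), ("doc-c", 1)]

    # The two key functions are interchangeable; FURB118 prefers the
    # operator form over a throwaway lambda.
    by_lambda = sorted(counts_by_doc, key=lambda x: x[1], reverse=True)
    by_getter = sorted(counts_by_doc, key=operator.itemgetter(1), reverse=True)

    assert by_lambda == by_getter == [("doc-b", 7), ("doc-a", 3), ("doc-c", 1)]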
2 changes: 1 addition & 1 deletion libs/core/langchain_core/language_models/base.py
@@ -390,7 +390,7 @@ def get_num_tokens_from_messages(
"Counting tokens in tool schemas is not yet supported. Ignoring tools.",
stacklevel=2,
)
-return sum([self.get_num_tokens(get_buffer_string([m])) for m in messages])
+return sum(self.get_num_tokens(get_buffer_string([m])) for m in messages)

@classmethod
def _all_required_field_names(cls) -> set:
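Dropping the square brackets passes a generator expression to sum() instead of materializing an intermediate list first, a flake8-comprehensions-style cleanup (likely ruff's C419). Sketch with stand-in messages:

    messages = ["hello", "world", "!"]

    total_list = sum([len(m) for m in messages])  # builds a throwaway list
    total_gen = sum(len(m) for m in messages)     # feeds sum() lazily

    assert total_list == total_gen == 11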
3 changes: 1 addition & 2 deletions libs/core/langchain_core/language_models/llms.py
@@ -349,8 +349,7 @@ def _get_ls_params(

# get default provider from class name
default_provider = self.__class__.__name__
-if default_provider.endswith("LLM"):
-    default_provider = default_provider[:-3]
+default_provider = default_provider.removesuffix("LLM")
default_provider = default_provider.lower()

ls_params = LangSmithParams(ls_provider=default_provider, ls_model_type="llm")
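str.removesuffix (Python 3.9+) replaces the endswith-then-slice idiom; ruff flags the old pattern as FURB188 (slice-to-remove-prefix-or-suffix). Sketch:

    name = "OpenAILLM"

    # Pre-3.9 idiom: guard on the suffix, then slice it off by length.
    legacy = name[:-3] if name.endswith("LLM") else name

    # 3.9+ idiom: a no-op when the suffix is absent, so no guard is needed.
    modern = name.removesuffix("LLM")

    assert legacy == modern == "OpenAI"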
10 changes: 8 additions & 2 deletions libs/core/langchain_core/messages/utils.py
@@ -1009,7 +1009,10 @@ def convert_to_openai_messages(
)
raise ValueError(err)
content.append(
-    {"type": "image_url", "image_url": block["image_url"]}
+    {
+        "type": "image_url",
+        "image_url": block["image_url"],
+    }
)
# Anthropic and Bedrock converse format
elif (block.get("type") == "image") or "image" in block:
@@ -1128,7 +1131,10 @@ def convert_to_openai_messages(
)
raise ValueError(msg)
content.append(
-    {"type": "text", "text": json.dumps(block["json"])}
+    {
+        "type": "text",
+        "text": json.dumps(block["json"]),
+    }
)
elif (
block.get("type") == "guard_content"
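The two hunks above look like pure formatter restyling: once a call argument such as a dict literal exceeds the line budget (or carries a magic trailing comma), ruff's formatter breaks it one item per line. Both forms are equivalent; a sketch with a stand-in block:

    block = {"image_url": {"url": "https://example.com/x.png"}}
    content = []

    # Collapsed form, used while the call fits on one line:
    content.append({"type": "image_url", "image_url": block["image_url"]})

    # Exploded form the formatter produces otherwise:
    content.append(
        {
            "type": "image_url",
            "image_url": block["image_url"],
        }
    )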
2 changes: 1 addition & 1 deletion libs/core/langchain_core/output_parsers/list.py
@@ -225,7 +225,7 @@ class MarkdownListOutputParser(ListOutputParser):

def get_format_instructions(self) -> str:
"""Return the format instructions for the Markdown list output."""
-return "Your response should be a markdown list, " "eg: `- foo\n- bar\n- baz`"
+return "Your response should be a markdown list, eg: `- foo\n- bar\n- baz`"

def parse(self, text: str) -> list[str]:
"""Parse the output of an LLM call.
4 changes: 1 addition & 3 deletions libs/core/langchain_core/prompts/chat.py
@@ -1402,9 +1402,7 @@ def _create_template_from_message_type(
elif len(template) == 2 and isinstance(template[1], bool):
var_name_wrapped, is_optional = template
if not isinstance(var_name_wrapped, str):
-msg = (
-    "Expected variable name to be a string." f" Got: {var_name_wrapped}"
-)
+msg = f"Expected variable name to be a string. Got: {var_name_wrapped}"
raise ValueError(msg)
if var_name_wrapped[0] != "{" or var_name_wrapped[-1] != "}":
msg = (
2 changes: 1 addition & 1 deletion libs/core/langchain_core/pydantic_v1/__init__.py
@@ -2,7 +2,7 @@

from langchain_core._api.deprecation import warn_deprecated

-## Create namespaces for pydantic v1 and v2.
+# Create namespaces for pydantic v1 and v2.
# This code must stay at the top of the file before other modules may
# attempt to import pydantic since it adds pydantic_v1 and pydantic_v2 to sys.modules.
#
4 changes: 2 additions & 2 deletions libs/core/langchain_core/rate_limiters.py
@@ -248,14 +248,14 @@ async def aacquire(self, *, blocking: bool = True) -> bool:
if not blocking:
return self._consume()

-while not self._consume():
+while not self._consume():  # noqa: ASYNC110
# This code ignores the ASYNC110 warning which is a false positive in this
# case.
# There is no external actor that can mark that the Event is done
# since the tokens are managed by the rate limiter itself.
# It needs to wake up to re-fill the tokens.
# https://docs.astral.sh/ruff/rules/async-busy-wait/
-await asyncio.sleep(self.check_every_n_seconds)  # ruff: noqa: ASYNC110
+await asyncio.sleep(self.check_every_n_seconds)
return True


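The suppression moves because ruff reports ASYNC110 (async-busy-wait) on the while statement itself, and because # ruff: noqa is a file-level directive rather than an inline one, so the old trailing comment on the sleep() line suppressed nothing. A minimal self-contained sketch of the pattern (an assumed token-bucket shape, not the library class):

    import asyncio
    import time

    class Limiter:
        def __init__(self, rate: float = 50.0) -> None:
            self.check_every_n_seconds = 0.01
            self._rate = rate
            self._tokens = 0.0
            self._last = time.monotonic()

        def _consume(self) -> bool:
            # Refill the bucket from elapsed time, then try to take a token.
            now = time.monotonic()
            self._tokens = min(1.0, self._tokens + (now - self._last) * self._rate)
            self._last = now
            if self._tokens >= 1.0:
                self._tokens -= 1.0
                return True
            return False

        async def aacquire(self) -> bool:
            # ASYNC110 is reported on the `while` loop, so the inline
            # suppression must sit on this line to take effect.
            while not self._consume():  # noqa: ASYNC110
                await asyncio.sleep(self.check_every_n_seconds)
            return True

    asyncio.run(Limiter().aacquire())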
30 changes: 15 additions & 15 deletions libs/core/langchain_core/runnables/base.py
@@ -2868,7 +2868,7 @@ def config_specs(self) -> list[ConfigurableFieldSpec]:
# calculate context dependencies
specs_by_pos = groupby(
[tup for tup in all_specs if tup[0].id.startswith(CONTEXT_CONFIG_PREFIX)],
-lambda x: x[1],
+itemgetter(1),
)
next_deps: set[str] = set()
deps_by_pos: dict[int, set[str]] = {}
@@ -3012,7 +3012,7 @@ def invoke(
for i, step in enumerate(self.steps):
# mark each step as a child run
config = patch_config(
-config, callbacks=run_manager.get_child(f"seq:step:{i+1}")
+config, callbacks=run_manager.get_child(f"seq:step:{i + 1}")
)
context = copy_context()
context.run(_set_config_context, config)
@@ -3052,7 +3052,7 @@ async def ainvoke(
for i, step in enumerate(self.steps):
# mark each step as a child run
config = patch_config(
-config, callbacks=run_manager.get_child(f"seq:step:{i+1}")
+config, callbacks=run_manager.get_child(f"seq:step:{i + 1}")
)
context = copy_context()
context.run(_set_config_context, config)
@@ -3137,7 +3137,8 @@ def batch(
[
# each step a child run of the corresponding root run
patch_config(
-config, callbacks=rm.get_child(f"seq:step:{stepidx+1}")
+config,
+callbacks=rm.get_child(f"seq:step:{stepidx + 1}"),
)
for i, (rm, config) in enumerate(zip(run_managers, configs))
if i not in failed_inputs_map
@@ -3169,7 +3170,7 @@ def batch(
[
# each step a child run of the corresponding root run
patch_config(
-config, callbacks=rm.get_child(f"seq:step:{i+1}")
+config, callbacks=rm.get_child(f"seq:step:{i + 1}")
)
for rm, config in zip(run_managers, configs)
],
@@ -3266,7 +3267,8 @@ async def abatch(
[
# each step a child run of the corresponding root run
patch_config(
-config, callbacks=rm.get_child(f"seq:step:{stepidx+1}")
+config,
+callbacks=rm.get_child(f"seq:step:{stepidx + 1}"),
)
for i, (rm, config) in enumerate(zip(run_managers, configs))
if i not in failed_inputs_map
@@ -3298,7 +3300,7 @@ async def abatch(
[
# each step a child run of the corresponding root run
patch_config(
-config, callbacks=rm.get_child(f"seq:step:{i+1}")
+config, callbacks=rm.get_child(f"seq:step:{i + 1}")
)
for rm, config in zip(run_managers, configs)
],
@@ -3345,7 +3347,7 @@ def _transform(
final_pipeline = cast(Iterator[Output], input)
for idx, step in enumerate(steps):
config = patch_config(
-config, callbacks=run_manager.get_child(f"seq:step:{idx+1}")
+config, callbacks=run_manager.get_child(f"seq:step:{idx + 1}")
)
if idx == 0:
final_pipeline = step.transform(final_pipeline, config, **kwargs)
@@ -3374,7 +3376,7 @@ async def _atransform(
for idx, step in enumerate(steps):
config = patch_config(
config,
-callbacks=run_manager.get_child(f"seq:step:{idx+1}"),
+callbacks=run_manager.get_child(f"seq:step:{idx + 1}"),
)
if idx == 0:
final_pipeline = step.atransform(final_pipeline, config, **kwargs)
@@ -4406,7 +4408,7 @@ def get_input_schema(
if dict_keys := get_function_first_arg_dict_keys(func):
return create_model_v2(
self.get_name("Input"),
-field_definitions={key: (Any, ...) for key in dict_keys},
+field_definitions=dict.fromkeys(dict_keys, (Any, ...)),
)

return super().get_input_schema(config)
@@ -4530,7 +4532,7 @@ def __eq__(self, other: Any) -> bool:
def __repr__(self) -> str:
"""A string representation of this Runnable."""
if hasattr(self, "func") and isinstance(self.func, itemgetter):
-return f"RunnableLambda({str(self.func)[len('operator.'):]})"
+return f"RunnableLambda({str(self.func)[len('operator.') :]})"
elif hasattr(self, "func"):
return f"RunnableLambda({get_lambda_source(self.func) or '...'})"
elif hasattr(self, "afunc"):
@@ -4791,8 +4793,7 @@ def _transform(
recursion_limit = config["recursion_limit"]
if recursion_limit <= 0:
msg = (
-    f"Recursion limit reached when invoking "
-    f"{self} with input {final}."
+    f"Recursion limit reached when invoking {self} with input {final}."
)
raise RecursionError(msg)
for chunk in output.stream(
@@ -4915,8 +4916,7 @@ async def f(*args, **kwargs):  # type: ignore[no-untyped-def]
recursion_limit = config["recursion_limit"]
if recursion_limit <= 0:
msg = (
-    f"Recursion limit reached when invoking "
-    f"{self} with input {final}."
+    f"Recursion limit reached when invoking {self} with input {final}."
)
raise RecursionError(msg)
async for chunk in output.astream(
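Most of this file reflects the headline formatter change in ruff 0.9: expressions inside f-string replacement fields are now formatted like ordinary code, so binary operators gain spaces ({i+1} becomes {i + 1}) and slices with computed bounds gain a space before the colon. The spellings are equivalent at runtime:

    i = 0
    assert f"seq:step:{i+1}" == f"seq:step:{i + 1}"

    s = "operator.itemgetter(1)"
    # PEP 8 treats a colon with a complex bound like a binary operator,
    # omitting the space on the side where the bound is omitted:
    assert s[len("operator."):] == s[len("operator.") :] == "itemgetter(1)"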
4 changes: 2 additions & 2 deletions libs/core/langchain_core/runnables/config.py
@@ -110,8 +110,8 @@ class RunnableConfig(TypedDict, total=False):
DEFAULT_RECURSION_LIMIT = 25


-var_child_runnable_config = ContextVar(
-    "child_runnable_config", default=RunnableConfig()
+var_child_runnable_config: ContextVar[RunnableConfig | None] = ContextVar(
+    "child_runnable_config", default=None
)


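The old default baked one shared RunnableConfig dict into the ContextVar, so every context that never set the variable saw (and could mutate) the same instance; ruff 0.9 flags this as B039 (mutable-contextvar-default). The fix stores None and widens the annotation. A sketch of the safe pattern:

    from contextvars import ContextVar
    from typing import Optional

    # B039: ContextVar("cfg", default={}) would alias one dict everywhere.
    var_config: ContextVar[Optional[dict]] = ContextVar("cfg", default=None)

    # Materialize a fresh dict at the point of use instead:
    config = var_config.get() or {}
    config["tags"] = ["local"]  # mutates the fresh copy, not a shared default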
15 changes: 9 additions & 6 deletions libs/core/langchain_core/runnables/graph_ascii.py
@@ -236,17 +236,20 @@ def draw_ascii(vertices: Mapping[str, str], edges: Sequence[LangEdge]) -> str:

# NOTE: coordinates might be negative, so we need to shift
# everything to the positive plane before we actually draw it.
-xlist = []
-ylist = []
+xlist: list[float] = []
+ylist: list[float] = []

sug = _build_sugiyama_layout(vertices, edges)

for vertex in sug.g.sV:
# NOTE: moving boxes w/2 to the left
-xlist.append(vertex.view.xy[0] - vertex.view.w / 2.0)
-xlist.append(vertex.view.xy[0] + vertex.view.w / 2.0)
-ylist.append(vertex.view.xy[1])
-ylist.append(vertex.view.xy[1] + vertex.view.h)
+xlist.extend(
+    (
+        vertex.view.xy[0] - vertex.view.w / 2.0,
+        vertex.view.xy[0] + vertex.view.w / 2.0,
+    )
+)
+ylist.extend((vertex.view.xy[1], vertex.view.xy[1] + vertex.view.h))

for edge in sug.g.sE:
for x, y in edge.view._pts:
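Consecutive .append() calls on the same list collapse into one .extend() with a tuple, ruff's repeated-append rule (FURB113). Equivalent under stand-in coordinates:

    x, w = 10.0, 2.0

    xs_append: list[float] = []
    xs_append.append(x - w / 2.0)
    xs_append.append(x + w / 2.0)

    xs_extend: list[float] = []
    xs_extend.extend((x - w / 2.0, x + w / 2.0))  # one call, same elements

    assert xs_append == xs_extend == [9.0, 11.0]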
4 changes: 1 addition & 3 deletions libs/core/langchain_core/runnables/history.py
@@ -590,9 +590,7 @@ def _merge_configs(self, *configs: Optional[RunnableConfig]) -> RunnableConfig:

if missing_keys and parameter_names:
example_input = {self.input_messages_key: "foo"}
-example_configurable = {
-    missing_key: "[your-value-here]" for missing_key in missing_keys
-}
+example_configurable = dict.fromkeys(missing_keys, "[your-value-here]")
example_config = {"configurable": example_configurable}
msg = (
f"Missing keys {sorted(missing_keys)} in config['configurable'] "
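A comprehension that maps every key to the same constant becomes dict.fromkeys (ruff's unnecessary-dict-comprehension-for-iterable rule, C420). One caveat: the value object is shared across keys, so the rewrite is only safe for immutable values like the placeholder string here.

    missing_keys = {"session_id", "user_id"}

    by_comprehension = {key: "[your-value-here]" for key in missing_keys}
    by_fromkeys = dict.fromkeys(missing_keys, "[your-value-here]")

    assert by_comprehension == by_fromkeys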
30 changes: 15 additions & 15 deletions libs/core/langchain_core/runnables/passthrough.py
@@ -472,9 +472,9 @@ def _invoke(
config: RunnableConfig,
**kwargs: Any,
) -> dict[str, Any]:
-assert isinstance(
-    input, dict
-), "The input to RunnablePassthrough.assign() must be a dict."
+assert isinstance(input, dict), (
+    "The input to RunnablePassthrough.assign() must be a dict."
+)

return {
**input,
@@ -500,9 +500,9 @@ async def _ainvoke(
config: RunnableConfig,
**kwargs: Any,
) -> dict[str, Any]:
-assert isinstance(
-    input, dict
-), "The input to RunnablePassthrough.assign() must be a dict."
+assert isinstance(input, dict), (
+    "The input to RunnablePassthrough.assign() must be a dict."
+)

return {
**input,
@@ -553,9 +553,9 @@ def _transform(
)
# consume passthrough stream
for chunk in for_passthrough:
-assert isinstance(
-    chunk, dict
-), "The input to RunnablePassthrough.assign() must be a dict."
+assert isinstance(chunk, dict), (
+    "The input to RunnablePassthrough.assign() must be a dict."
+)
# remove mapper keys from passthrough chunk, to be overwritten by map
filtered = AddableDict(
{k: v for k, v in chunk.items() if k not in mapper_keys}
@@ -603,9 +603,9 @@ async def _atransform(
)
# consume passthrough stream
async for chunk in for_passthrough:
-assert isinstance(
-    chunk, dict
-), "The input to RunnablePassthrough.assign() must be a dict."
+assert isinstance(chunk, dict), (
+    "The input to RunnablePassthrough.assign() must be a dict."
+)
# remove mapper keys from passthrough chunk, to be overwritten by map output
filtered = AddableDict(
{k: v for k, v in chunk.items() if k not in mapper_keys}
@@ -705,9 +705,9 @@ def get_name(
return super().get_name(suffix, name=name)

def _pick(self, input: dict[str, Any]) -> Any:
-assert isinstance(
-    input, dict
-), "The input to RunnablePassthrough.assign() must be a dict."
+assert isinstance(input, dict), (
+    "The input to RunnablePassthrough.assign() must be a dict."
+)

if isinstance(self.keys, str):
return input.get(self.keys)
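These five identical hunks show ruff 0.9's new layout for a long assert: keep the test intact and parenthesize the message, rather than splitting the isinstance() call. Sketch (illustrative signature, not the library's):

    def _pick(input):
        # Pre-0.9 the formatter wrapped the test itself:
        #   assert isinstance(
        #       input, dict
        #   ), "The input to RunnablePassthrough.assign() must be a dict."
        # 0.9 parenthesizes the message instead:
        assert isinstance(input, dict), (
            "The input to RunnablePassthrough.assign() must be a dict."
        )
        return input

    _pick({"a": 1})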
4 changes: 2 additions & 2 deletions libs/core/langchain_core/runnables/retry.py
@@ -249,7 +249,7 @@ def pending(iterable: list[U]) -> list[U]:
result = cast(list[Output], [e] * len(inputs))

outputs: list[Union[Output, Exception]] = []
-for idx, _ in enumerate(inputs):
+for idx in range(len(inputs)):
if idx in results_map:
outputs.append(results_map[idx])
else:
@@ -315,7 +315,7 @@ def pending(iterable: list[U]) -> list[U]:
result = cast(list[Output], [e] * len(inputs))

outputs: list[Union[Output, Exception]] = []
-for idx, _ in enumerate(inputs):
+for idx in range(len(inputs)):
if idx in results_map:
outputs.append(results_map[idx])
else:
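When a loop body only needs the index, enumerate() with a discarded element is replaced by range(len(...)) (ruff FURB148, unnecessary-enumerate). Sketch with stand-in results:

    inputs = ["a", "b", "c"]
    results_map = {0: "A", 2: "C"}

    outputs = []
    for idx in range(len(inputs)):  # was: for idx, _ in enumerate(inputs)
        outputs.append(results_map.get(idx, "pending"))

    assert outputs == ["A", "pending", "C"]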