diff --git a/libs/community/langchain_community/agent_toolkits/nla/tool.py b/libs/community/langchain_community/agent_toolkits/nla/tool.py
index 47f25d13b687b8..f097edaa1184d0 100644
--- a/libs/community/langchain_community/agent_toolkits/nla/tool.py
+++ b/libs/community/langchain_community/agent_toolkits/nla/tool.py
@@ -30,7 +30,7 @@ def from_open_api_endpoint_chain(
             The API endpoint tool.
         """
         expanded_name = (
-            f'{api_title.replace(" ", "_")}.{chain.api_operation.operation_id}'
+            f"{api_title.replace(' ', '_')}.{chain.api_operation.operation_id}"
         )
         description = (
             f"I'm an AI from {api_title}. Instruct what you want,"
diff --git a/libs/community/langchain_community/callbacks/fiddler_callback.py b/libs/community/langchain_community/callbacks/fiddler_callback.py
index 95df0851d55d44..0ff6ed894d0b65 100644
--- a/libs/community/langchain_community/callbacks/fiddler_callback.py
+++ b/libs/community/langchain_community/callbacks/fiddler_callback.py
@@ -100,7 +100,7 @@ def __init__(
         if self.project not in self.fiddler_client.get_project_names():
             print(  # noqa: T201
-                f"adding project {self.project}." "This only has to be done once."
+                f"adding project {self.project}. This only has to be done once."
             )
             try:
                 self.fiddler_client.add_project(self.project)
diff --git a/libs/community/langchain_community/callbacks/manager.py b/libs/community/langchain_community/callbacks/manager.py
index ba942084953f6c..8e8d0525606020 100644
--- a/libs/community/langchain_community/callbacks/manager.py
+++ b/libs/community/langchain_community/callbacks/manager.py
@@ -61,9 +61,9 @@ def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:


 @contextmanager
-def get_bedrock_anthropic_callback() -> (
-    Generator[BedrockAnthropicTokenUsageCallbackHandler, None, None]
-):
+def get_bedrock_anthropic_callback() -> Generator[
+    BedrockAnthropicTokenUsageCallbackHandler, None, None
+]:
     """Get the Bedrock anthropic callback handler in a context manager.

     which conveniently exposes token and cost information.
diff --git a/libs/community/langchain_community/callbacks/streamlit/streamlit_callback_handler.py b/libs/community/langchain_community/callbacks/streamlit/streamlit_callback_handler.py
index 5d5985c039cb75..4747bfc2f690d6 100644
--- a/libs/community/langchain_community/callbacks/streamlit/streamlit_callback_handler.py
+++ b/libs/community/langchain_community/callbacks/streamlit/streamlit_callback_handler.py
@@ -211,9 +211,9 @@ def on_agent_action(
     def complete(self, final_label: Optional[str] = None) -> None:
         """Finish the thought."""
         if final_label is None and self._state == LLMThoughtState.RUNNING_TOOL:
-            assert (
-                self._last_tool is not None
-            ), "_last_tool should never be null when _state == RUNNING_TOOL"
+            assert self._last_tool is not None, (
+                "_last_tool should never be null when _state == RUNNING_TOOL"
+            )
             final_label = self._labeler.get_tool_label(
                 self._last_tool, is_complete=True
             )
diff --git a/libs/community/langchain_community/chains/pebblo_retrieval/utilities.py b/libs/community/langchain_community/chains/pebblo_retrieval/utilities.py
index e6e36a505a9479..0ad9ff3656036b 100644
--- a/libs/community/langchain_community/chains/pebblo_retrieval/utilities.py
+++ b/libs/community/langchain_community/chains/pebblo_retrieval/utilities.py
@@ -467,7 +467,7 @@ async def amake_request(
                 logger.warning(f"Pebblo Server: Error {response.status}")
             elif response.status >= HTTPStatus.BAD_REQUEST:
                 logger.warning(
-                    f"Pebblo received an invalid payload: " f"{response.text}"
+                    f"Pebblo received an invalid payload: {response.text}"
                 )
             elif response.status != HTTPStatus.OK:
                 logger.warning(
diff --git a/libs/community/langchain_community/chat_loaders/facebook_messenger.py b/libs/community/langchain_community/chat_loaders/facebook_messenger.py
index 2bf883b0f0fe98..44900e0f84eefc 100644
--- a/libs/community/langchain_community/chat_loaders/facebook_messenger.py
+++ b/libs/community/langchain_community/chat_loaders/facebook_messenger.py
@@ -37,7 +37,7 @@ def lazy_load(self) -> Iterator[ChatSession]:
                 if "content" not in m:
                     logger.info(
                         f"""Skipping Message No.
-                        {index+1} as no content is present in the message"""
+                        {index + 1} as no content is present in the message"""
                     )
                     continue
                 messages.append(
diff --git a/libs/community/langchain_community/chat_message_histories/neo4j.py b/libs/community/langchain_community/chat_message_histories/neo4j.py
index 5a054c706de258..9d2cb317874cc0 100644
--- a/libs/community/langchain_community/chat_message_histories/neo4j.py
+++ b/libs/community/langchain_community/chat_message_histories/neo4j.py
@@ -87,7 +87,7 @@ def messages(self) -> List[BaseMessage]:
         query = (
             f"MATCH (s:`{self._node_label}`)-[:LAST_MESSAGE]->(last_message) "
             "WHERE s.id = $session_id MATCH p=(last_message)<-[:NEXT*0.."
-            f"{self._window*2}]-() WITH p, length(p) AS length "
+            f"{self._window * 2}]-() WITH p, length(p) AS length "
             "ORDER BY length DESC LIMIT 1 UNWIND reverse(nodes(p)) AS node "
             "RETURN {data:{content: node.content}, type:node.type} AS result"
         )
diff --git a/libs/community/langchain_community/chat_message_histories/sql.py b/libs/community/langchain_community/chat_message_histories/sql.py
index 2c3b2351c471d0..8c0706b7ee0d91 100644
--- a/libs/community/langchain_community/chat_message_histories/sql.py
+++ b/libs/community/langchain_community/chat_message_histories/sql.py
@@ -177,9 +177,9 @@ def __init__(
             engine_args: Additional configuration for creating database engines.
             async_mode: Whether it is an asynchronous connection.
""" - assert not ( - connection_string and connection - ), "connection_string and connection are mutually exclusive" + assert not (connection_string and connection), ( + "connection_string and connection are mutually exclusive" + ) if connection_string: global _warned_once_already if not _warned_once_already: diff --git a/libs/community/langchain_community/chat_models/bedrock.py b/libs/community/langchain_community/chat_models/bedrock.py index 6b362083903791..086a4d461301fc 100644 --- a/libs/community/langchain_community/chat_models/bedrock.py +++ b/libs/community/langchain_community/chat_models/bedrock.py @@ -110,9 +110,9 @@ def _format_anthropic_messages( if not isinstance(message.content, str): # parse as dict - assert isinstance( - message.content, list - ), "Anthropic message content must be str or list of dicts" + assert isinstance(message.content, list), ( + "Anthropic message content must be str or list of dicts" + ) # populate content content = [] diff --git a/libs/community/langchain_community/chat_models/deepinfra.py b/libs/community/langchain_community/chat_models/deepinfra.py index b23402cdc06f05..546171f8762492 100644 --- a/libs/community/langchain_community/chat_models/deepinfra.py +++ b/libs/community/langchain_community/chat_models/deepinfra.py @@ -468,8 +468,7 @@ def _handle_status(self, code: int, text: Any) -> None: raise ValueError(f"DeepInfra received an invalid payload: {text}") elif code != 200: raise Exception( - f"DeepInfra returned an unexpected response with status " - f"{code}: {text}" + f"DeepInfra returned an unexpected response with status {code}: {text}" ) def _url(self) -> str: diff --git a/libs/community/langchain_community/chat_models/konko.py b/libs/community/langchain_community/chat_models/konko.py index 3aec395cf53040..a1164d6d17804e 100644 --- a/libs/community/langchain_community/chat_models/konko.py +++ b/libs/community/langchain_community/chat_models/konko.py @@ -179,8 +179,7 @@ def get_available_models( if models_response.status_code != 200: raise ValueError( - f"Error getting models from {models_url}: " - f"{models_response.status_code}" + f"Error getting models from {models_url}: {models_response.status_code}" ) return {model["id"] for model in models_response.json()["data"]} diff --git a/libs/community/langchain_community/chat_models/zhipuai.py b/libs/community/langchain_community/chat_models/zhipuai.py index 99b58697f1a01c..18dc9dd196046a 100644 --- a/libs/community/langchain_community/chat_models/zhipuai.py +++ b/libs/community/langchain_community/chat_models/zhipuai.py @@ -121,7 +121,7 @@ def _get_jwt_token(api_key: str) -> str: import jwt except ImportError: raise ImportError( - "jwt package not found, please install it with" "`pip install pyjwt`" + "jwt package not found, please install it with`pip install pyjwt`" ) try: diff --git a/libs/community/langchain_community/document_loaders/blob_loaders/youtube_audio.py b/libs/community/langchain_community/document_loaders/blob_loaders/youtube_audio.py index 8397f32fab89bf..b0b2dc6daa8f4e 100644 --- a/libs/community/langchain_community/document_loaders/blob_loaders/youtube_audio.py +++ b/libs/community/langchain_community/document_loaders/blob_loaders/youtube_audio.py @@ -21,8 +21,7 @@ def yield_blobs(self) -> Iterable[Blob]: import yt_dlp except ImportError: raise ImportError( - "yt_dlp package not found, please install it with " - "`pip install yt_dlp`" + "yt_dlp package not found, please install it with `pip install yt_dlp`" ) # Use yt_dlp to download audio given a YouTube url diff --git 
diff --git a/libs/community/langchain_community/document_loaders/confluence.py b/libs/community/langchain_community/document_loaders/confluence.py
index 225488c9ceb556..8b4f55078febe9 100644
--- a/libs/community/langchain_community/document_loaders/confluence.py
+++ b/libs/community/langchain_community/document_loaders/confluence.py
@@ -649,7 +649,7 @@ def process_attachment(
             from PIL import Image  # noqa: F401
         except ImportError:
             raise ImportError(
-                "`Pillow` package not found, " "please run `pip install Pillow`"
+                "`Pillow` package not found, please run `pip install Pillow`"
             )

         # depending on setup you may also need to set the correct path for
diff --git a/libs/community/langchain_community/document_loaders/csv_loader.py b/libs/community/langchain_community/document_loaders/csv_loader.py
index 1e126709d4add5..59927920569b0a 100644
--- a/libs/community/langchain_community/document_loaders/csv_loader.py
+++ b/libs/community/langchain_community/document_loaders/csv_loader.py
@@ -164,9 +164,13 @@ def __read_file(self, csvfile: TextIOWrapper) -> Iterator[Document]:
                     f"Source column '{self.source_column}' not found in CSV file."
                 )
             content = "\n".join(
-                f"""{k.strip() if k is not None else k}: {v.strip()
-                if isinstance(v, str) else ','.join(map(str.strip, v))
-                if isinstance(v, list) else v}"""
+                f"""{k.strip() if k is not None else k}: {
+                    v.strip()
+                    if isinstance(v, str)
+                    else ",".join(map(str.strip, v))
+                    if isinstance(v, list)
+                    else v
+                }"""
                 for k, v in row.items()
                 if (
                     k in self.content_columns
diff --git a/libs/community/langchain_community/document_loaders/dropbox.py b/libs/community/langchain_community/document_loaders/dropbox.py
index c689e3a455633c..a0350914536f5d 100644
--- a/libs/community/langchain_community/document_loaders/dropbox.py
+++ b/libs/community/langchain_community/document_loaders/dropbox.py
@@ -54,7 +54,7 @@ def _create_dropbox_client(self) -> Any:
         try:
             from dropbox import Dropbox, exceptions
         except ImportError:
-            raise ImportError("You must run " "`pip install dropbox")
+            raise ImportError("You must run `pip install dropbox`")

         try:
             dbx = Dropbox(self.dropbox_access_token)
@@ -73,7 +73,7 @@ def _load_documents_from_folder(self, folder_path: str) -> List[Document]:
             from dropbox import exceptions
             from dropbox.files import FileMetadata
         except ImportError:
-            raise ImportError("You must run " "`pip install dropbox")
+            raise ImportError("You must run `pip install dropbox`")

         try:
             results = dbx.files_list_folder(folder_path, recursive=self.recursive)
@@ -98,7 +98,7 @@ def _load_file_from_path(self, file_path: str) -> Optional[Document]:
         try:
             from dropbox import exceptions
         except ImportError:
-            raise ImportError("You must run " "`pip install dropbox")
+            raise ImportError("You must run `pip install dropbox`")

         try:
             file_metadata = dbx.files_get_metadata(file_path)
diff --git a/libs/community/langchain_community/document_loaders/mediawikidump.py b/libs/community/langchain_community/document_loaders/mediawikidump.py
index 288312d6c9c61c..a778335646b8eb 100644
--- a/libs/community/langchain_community/document_loaders/mediawikidump.py
+++ b/libs/community/langchain_community/document_loaders/mediawikidump.py
@@ -65,7 +65,7 @@ def _load_dump_file(self):  # type: ignore[no-untyped-def]
             import mwxml
         except ImportError as e:
             raise ImportError(
-                "Unable to import 'mwxml'. Please install with" " `pip install mwxml`."
+                "Unable to import 'mwxml'. Please install with `pip install mwxml`."
            ) from e

        return mwxml.Dump.from_file(open(self.file_path, encoding=self.encoding))
diff --git a/libs/community/langchain_community/document_loaders/notiondb.py b/libs/community/langchain_community/document_loaders/notiondb.py
index 3e43a3fd811a12..37c367dcb486d7 100644
--- a/libs/community/langchain_community/document_loaders/notiondb.py
+++ b/libs/community/langchain_community/document_loaders/notiondb.py
@@ -126,7 +126,7 @@ def load_page(self, page_summary: Dict[str, Any]) -> Document:
                 value = prop_data["url"]
             elif prop_type == "unique_id":
                 value = (
-                    f'{prop_data["unique_id"]["prefix"]}-{prop_data["unique_id"]["number"]}'
+                    f"{prop_data['unique_id']['prefix']}-{prop_data['unique_id']['number']}"
                     if prop_data["unique_id"]
                     else None
                 )
diff --git a/libs/community/langchain_community/document_loaders/oracleadb_loader.py b/libs/community/langchain_community/document_loaders/oracleadb_loader.py
index 35da49c9affe9b..ebf0c446c44852 100644
--- a/libs/community/langchain_community/document_loaders/oracleadb_loader.py
+++ b/libs/community/langchain_community/document_loaders/oracleadb_loader.py
@@ -82,8 +82,7 @@ def _run_query(self) -> List[Dict[str, Any]]:
             import oracledb
         except ImportError as e:
             raise ImportError(
-                "Could not import oracledb, "
-                "please install with 'pip install oracledb'"
+                "Could not import oracledb, please install with 'pip install oracledb'"
             ) from e
         connect_param = {"user": self.user, "password": self.password, "dsn": self.dsn}
         if self.dsn == self.tns_name:
diff --git a/libs/community/langchain_community/document_loaders/parsers/audio.py b/libs/community/langchain_community/document_loaders/parsers/audio.py
index 32ced082601879..6d98e9667ce40c 100644
--- a/libs/community/langchain_community/document_loaders/parsers/audio.py
+++ b/libs/community/langchain_community/document_loaders/parsers/audio.py
@@ -148,8 +148,7 @@ def __init__(
             import openai
         except ImportError:
             raise ImportError(
-                "openai package not found, please install it with "
-                "`pip install openai`"
+                "openai package not found, please install it with `pip install openai`"
             )

         if is_openai_v1():
@@ -278,14 +277,13 @@ def lazy_parse(self, blob: Blob) -> Iterator[Document]:
             import openai
         except ImportError:
             raise ImportError(
-                "openai package not found, please install it with "
-                "`pip install openai`"
+                "openai package not found, please install it with `pip install openai`"
             )
         try:
             from pydub import AudioSegment
         except ImportError:
             raise ImportError(
-                "pydub package not found, please install it with " "`pip install pydub`"
+                "pydub package not found, please install it with `pip install pydub`"
             )

         if is_openai_v1():
@@ -402,7 +400,7 @@ def __init__(
             import torch
         except ImportError:
             raise ImportError(
-                "torch package not found, please install it with " "`pip install torch`"
+                "torch package not found, please install it with `pip install torch`"
             )

         # Determine the device to use
@@ -533,7 +531,7 @@ def lazy_parse(self, blob: Blob) -> Iterator[Document]:
             from pydub import AudioSegment
         except ImportError:
             raise ImportError(
-                "pydub package not found, please install it with " "`pip install pydub`"
+                "pydub package not found, please install it with `pip install pydub`"
             )

         if self.api_key:
diff --git a/libs/community/langchain_community/document_loaders/parsers/docai.py b/libs/community/langchain_community/document_loaders/parsers/docai.py
index 517ea0140aacf0..74b80f6af35cc5 100644
--- a/libs/community/langchain_community/document_loaders/parsers/docai.py
+++ b/libs/community/langchain_community/document_loaders/parsers/docai.py
@@ -230,7 +230,7 @@ def batch_parse(
             time_elapsed += check_in_interval_sec
             if time_elapsed > timeout_sec:
                 raise TimeoutError(
-                    "Timeout exceeded! Check operations " f"{operation_names} later!"
+                    f"Timeout exceeded! Check operations {operation_names} later!"
                 )
             logger.debug(".")
diff --git a/libs/community/langchain_community/document_loaders/parsers/grobid.py b/libs/community/langchain_community/document_loaders/parsers/grobid.py
index 2ffe2998fa37f7..ee287cade6e710 100644
--- a/libs/community/langchain_community/document_loaders/parsers/grobid.py
+++ b/libs/community/langchain_community/document_loaders/parsers/grobid.py
@@ -44,7 +44,7 @@ def process_xml(
             from bs4 import BeautifulSoup
         except ImportError:
             raise ImportError(
-                "`bs4` package not found, please install it with " "`pip install bs4`"
+                "`bs4` package not found, please install it with `pip install bs4`"
             )
         soup = BeautifulSoup(xml_data, "xml")
         sections = soup.find_all("div")
diff --git a/libs/community/langchain_community/document_loaders/parsers/pdf.py b/libs/community/langchain_community/document_loaders/parsers/pdf.py
index 00b4510ee660d1..632b3a09801a26 100644
--- a/libs/community/langchain_community/document_loaders/parsers/pdf.py
+++ b/libs/community/langchain_community/document_loaders/parsers/pdf.py
@@ -100,8 +100,7 @@ def lazy_parse(self, blob: Blob) -> Iterator[Document]:  # type: ignore[valid-ty
             import pypdf
         except ImportError:
             raise ImportError(
-                "`pypdf` package not found, please install it with "
-                "`pip install pypdf`"
+                "`pypdf` package not found, please install it with `pip install pypdf`"
             )

         def _extract_text_from_page(page: pypdf.PageObject) -> str:
@@ -425,8 +424,7 @@ def __init__(
             import PIL  # noqa:F401
         except ImportError:
             raise ImportError(
-                "pillow package not found, please install it with"
-                " `pip install pillow`"
+                "pillow package not found, please install it with `pip install pillow`"
             )
         self.text_kwargs = text_kwargs or {}
         self.dedupe = dedupe
diff --git a/libs/community/langchain_community/document_loaders/pyspark_dataframe.py b/libs/community/langchain_community/document_loaders/pyspark_dataframe.py
index 410b7d07afc20c..ff1c7fc7ee8555 100644
--- a/libs/community/langchain_community/document_loaders/pyspark_dataframe.py
+++ b/libs/community/langchain_community/document_loaders/pyspark_dataframe.py
@@ -36,8 +36,7 @@ def __init__(
             from pyspark.sql import DataFrame, SparkSession
         except ImportError:
             raise ImportError(
-                "pyspark is not installed. "
-                "Please install it with `pip install pyspark`"
+                "pyspark is not installed. Please install it with `pip install pyspark`"
            )

        self.spark = (
diff --git a/libs/community/langchain_community/document_loaders/quip.py b/libs/community/langchain_community/document_loaders/quip.py
index 540ef8f945c797..bb3a002912bd64 100644
--- a/libs/community/langchain_community/document_loaders/quip.py
+++ b/libs/community/langchain_community/document_loaders/quip.py
@@ -40,7 +40,7 @@ def __init__(
             from quip_api.quip import QuipClient
         except ImportError:
             raise ImportError(
-                "`quip_api` package not found, please run " "`pip install quip_api`"
+                "`quip_api` package not found, please run `pip install quip_api`"
             )

         self.quip_client = QuipClient(
diff --git a/libs/community/langchain_community/document_loaders/rspace.py b/libs/community/langchain_community/document_loaders/rspace.py
index cb12c6d6c848bd..244b92bb4f1599 100644
--- a/libs/community/langchain_community/document_loaders/rspace.py
+++ b/libs/community/langchain_community/document_loaders/rspace.py
@@ -58,7 +58,7 @@ def _create_rspace_client(self) -> Any:
             from rspace_client.eln import eln, field_content

         except ImportError:
-            raise ImportError("You must run " "`pip install rspace_client`")
+            raise ImportError("You must run `pip install rspace_client`")

         try:
             eln = eln.ELNClient(self.url, self.api_key)
@@ -66,8 +66,7 @@ def _create_rspace_client(self) -> Any:

         except Exception:
             raise Exception(
-                f"Unable to initialize client - is url {self.url} or "
-                f"api key correct?"
+                f"Unable to initialize client - is url {self.url} or api key correct?"
             )

         return eln, field_content.FieldContent
diff --git a/libs/community/langchain_community/embeddings/ascend.py b/libs/community/langchain_community/embeddings/ascend.py
index d01f7967491d85..c8cb059177b193 100644
--- a/libs/community/langchain_community/embeddings/ascend.py
+++ b/libs/community/langchain_community/embeddings/ascend.py
@@ -89,7 +89,7 @@ def encode(self, sentences: Any) -> Any:
             import torch
         except ImportError as e:
             raise ImportError(
-                "Unable to import torch, please install with " "`pip install -U torch`."
+                "Unable to import torch, please install with `pip install -U torch`."
             ) from e
         last_hidden_state = self.model(
             inputs.input_ids.npu(), inputs.attention_mask.npu(), return_dict=True
@@ -103,7 +103,7 @@ def pooling(self, last_hidden_state: Any, attention_mask: Any = None) -> Any:
             import torch
         except ImportError as e:
             raise ImportError(
-                "Unable to import torch, please install with " "`pip install -U torch`."
+                "Unable to import torch, please install with `pip install -U torch`."
             ) from e
         if self.pooling_method == "cls":
             return last_hidden_state[:, 0]
diff --git a/libs/community/langchain_community/embeddings/openvino.py b/libs/community/langchain_community/embeddings/openvino.py
index 3dbbdef45f1a6f..930453247c3491 100644
--- a/libs/community/langchain_community/embeddings/openvino.py
+++ b/libs/community/langchain_community/embeddings/openvino.py
@@ -166,19 +166,19 @@ def encode(
             import numpy as np
         except ImportError as e:
             raise ImportError(
-                "Unable to import numpy, please install with " "`pip install -U numpy`."
+                "Unable to import numpy, please install with `pip install -U numpy`."
             ) from e
         try:
             from tqdm import trange
         except ImportError as e:
             raise ImportError(
-                "Unable to import tqdm, please install with " "`pip install -U tqdm`."
+                "Unable to import tqdm, please install with `pip install -U tqdm`."
             ) from e
         try:
             import torch
         except ImportError as e:
             raise ImportError(
-                "Unable to import torch, please install with " "`pip install -U torch`."
+ "Unable to import torch, please install with `pip install -U torch`." ) from e def run_mean_pooling(model_output: Any, attention_mask: Any) -> Any: diff --git a/libs/community/langchain_community/embeddings/spacy_embeddings.py b/libs/community/langchain_community/embeddings/spacy_embeddings.py index 1d7c6ed4e95adf..cd862d3ba901f0 100644 --- a/libs/community/langchain_community/embeddings/spacy_embeddings.py +++ b/libs/community/langchain_community/embeddings/spacy_embeddings.py @@ -48,8 +48,7 @@ def validate_environment(cls, values: Dict) -> Any: # Check if the spaCy package is installed if importlib.util.find_spec("spacy") is None: raise ValueError( - "SpaCy package not found. " - "Please install it with `pip install spacy`." + "SpaCy package not found. Please install it with `pip install spacy`." ) try: # Try to load the spaCy model diff --git a/libs/community/langchain_community/graphs/falkordb_graph.py b/libs/community/langchain_community/graphs/falkordb_graph.py index 99e3e4592b3e95..56ce03c1f9a090 100644 --- a/libs/community/langchain_community/graphs/falkordb_graph.py +++ b/libs/community/langchain_community/graphs/falkordb_graph.py @@ -167,7 +167,7 @@ def query(self, query: str, params: dict = {}) -> List[Dict[str, Any]]: data = self._graph.query(query, params) return data.result_set except Exception as e: - raise ValueError("Generated Cypher Statement is not valid\n" f"{e}") + raise ValueError(f"Generated Cypher Statement is not valid\n{e}") def add_graph_documents( self, graph_documents: List[GraphDocument], include_source: bool = False diff --git a/libs/community/langchain_community/graphs/gremlin_graph.py b/libs/community/langchain_community/graphs/gremlin_graph.py index 934ccb3f53c990..d2c1a1725dce2e 100644 --- a/libs/community/langchain_community/graphs/gremlin_graph.py +++ b/libs/community/langchain_community/graphs/gremlin_graph.py @@ -57,7 +57,7 @@ def __init__( asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) except ImportError: raise ImportError( - "Please install gremlin-python first: " "`pip3 install gremlinpython" + "Please install gremlin-python first: `pip3 install gremlinpython" ) self.client = client.Client( diff --git a/libs/community/langchain_community/graphs/memgraph_graph.py b/libs/community/langchain_community/graphs/memgraph_graph.py index fa829a0e5db813..4180b49ce3d49e 100644 --- a/libs/community/langchain_community/graphs/memgraph_graph.py +++ b/libs/community/langchain_community/graphs/memgraph_graph.py @@ -488,8 +488,7 @@ def add_graph_documents( if baseEntityLabel: self.query( - f"CREATE CONSTRAINT ON (b:{BASE_ENTITY_LABEL}) " - "ASSERT b.id IS UNIQUE;" + f"CREATE CONSTRAINT ON (b:{BASE_ENTITY_LABEL}) ASSERT b.id IS UNIQUE;" ) self.query(f"CREATE INDEX ON :{BASE_ENTITY_LABEL}(id);") self.query(f"CREATE INDEX ON :{BASE_ENTITY_LABEL};") diff --git a/libs/community/langchain_community/graphs/neo4j_graph.py b/libs/community/langchain_community/graphs/neo4j_graph.py index dd2a7937f7f819..f96cd276967d15 100644 --- a/libs/community/langchain_community/graphs/neo4j_graph.py +++ b/libs/community/langchain_community/graphs/neo4j_graph.py @@ -204,7 +204,7 @@ def _format_schema(schema: Dict, is_enhanced: bool) -> str: example = ( ( "Available options: " - f'{[clean_string_values(el) for el in prop["values"]]}' + f"{[clean_string_values(el) for el in prop['values']]}" ) if prop["values"] else "" @@ -218,7 +218,7 @@ def _format_schema(schema: Dict, is_enhanced: bool) -> str: "LOCAL_DATE_TIME", ]: if prop.get("min") is not None: - example = 
f'Min: {prop["min"]}, Max: {prop["max"]}' + example = f"Min: {prop['min']}, Max: {prop['max']}" else: example = ( f'Example: "{prop["values"][0]}"' @@ -230,7 +230,7 @@ def _format_schema(schema: Dict, is_enhanced: bool) -> str: if not prop.get("min_size") or prop["min_size"] > LIST_LIMIT: continue example = ( - f'Min Size: {prop["min_size"]}, Max Size: {prop["max_size"]}' + f"Min Size: {prop['min_size']}, Max Size: {prop['max_size']}" ) formatted_node_props.append( f" - `{prop['property']}`: {prop['type']} {example}" @@ -252,7 +252,7 @@ def _format_schema(schema: Dict, is_enhanced: bool) -> str: example = ( ( "Available options: " - f'{[clean_string_values(el) for el in prop["values"]]}' + f"{[clean_string_values(el) for el in prop['values']]}" ) if prop["values"] else "" @@ -265,7 +265,7 @@ def _format_schema(schema: Dict, is_enhanced: bool) -> str: "LOCAL_DATE_TIME", ]: if prop.get("min"): # If we have min/max - example = f'Min: {prop["min"]}, Max: {prop["max"]}' + example = f"Min: {prop['min']}, Max: {prop['max']}" else: # return a single value example = ( f'Example: "{prop["values"][0]}"' if prop["values"] else "" @@ -275,7 +275,7 @@ def _format_schema(schema: Dict, is_enhanced: bool) -> str: if not prop.get("min_size") or prop["min_size"] > LIST_LIMIT: continue example = ( - f'Min Size: {prop["min_size"]}, Max Size: {prop["max_size"]}' + f"Min Size: {prop['min_size']}, Max Size: {prop['max_size']}" ) formatted_rel_props.append( f" - `{prop['property']}: {prop['type']}` {example}" diff --git a/libs/community/langchain_community/graphs/neptune_rdf_graph.py b/libs/community/langchain_community/graphs/neptune_rdf_graph.py index f560768a15ccc0..7f2aefac96ed21 100644 --- a/libs/community/langchain_community/graphs/neptune_rdf_graph.py +++ b/libs/community/langchain_community/graphs/neptune_rdf_graph.py @@ -249,7 +249,7 @@ def _get_local_name(self, iri: str) -> Sequence[str]: return [f"{tokens[0]}#", tokens[-1]] elif "/" in iri: tokens = iri.split("/") - return [f"{'/'.join(tokens[0:len(tokens)-1])}/", tokens[-1]] + return [f"{'/'.join(tokens[0 : len(tokens) - 1])}/", tokens[-1]] else: raise ValueError(f"Unexpected IRI '{iri}', contains neither '#' nor '/'.") diff --git a/libs/community/langchain_community/graphs/rdf_graph.py b/libs/community/langchain_community/graphs/rdf_graph.py index edc46d5d18d6d1..ca8595c1620ff1 100644 --- a/libs/community/langchain_community/graphs/rdf_graph.py +++ b/libs/community/langchain_community/graphs/rdf_graph.py @@ -217,7 +217,7 @@ def query( try: res = self.graph.query(query) except ParserError as e: - raise ValueError("Generated SPARQL statement is invalid\n" f"{e}") + raise ValueError(f"Generated SPARQL statement is invalid\n{e}") return [r for r in res if isinstance(r, ResultRow)] def update( @@ -232,7 +232,7 @@ def update( try: self.graph.update(query) except ParserError as e: - raise ValueError("Generated SPARQL statement is invalid\n" f"{e}") + raise ValueError(f"Generated SPARQL statement is invalid\n{e}") if self.local_copy: self.graph.serialize( destination=self.local_copy, format=self.local_copy.split(".")[-1] @@ -274,9 +274,9 @@ def _rdf_s_schema( f"In the following, each IRI is followed by the local name and " f"optionally its description in parentheses. 
\n" f"The RDF graph supports the following node types:\n" - f'{", ".join([self._res_to_str(r, "cls") for r in classes])}\n' + f"{', '.join([self._res_to_str(r, 'cls') for r in classes])}\n" f"The RDF graph supports the following relationships:\n" - f'{", ".join([self._res_to_str(r, "rel") for r in relationships])}\n' + f"{', '.join([self._res_to_str(r, 'rel') for r in relationships])}\n" ) if self.standard == "rdf": @@ -295,13 +295,13 @@ def _rdf_s_schema( f"In the following, each IRI is followed by the local name and " f"optionally its description in parentheses. \n" f"The OWL graph supports the following node types:\n" - f'{", ".join([self._res_to_str(r, "cls") for r in clss])}\n' + f"{', '.join([self._res_to_str(r, 'cls') for r in clss])}\n" f"The OWL graph supports the following object properties, " f"i.e., relationships between objects:\n" - f'{", ".join([self._res_to_str(r, "op") for r in ops])}\n' + f"{', '.join([self._res_to_str(r, 'op') for r in ops])}\n" f"The OWL graph supports the following data properties, " f"i.e., relationships between objects and literals:\n" - f'{", ".join([self._res_to_str(r, "dp") for r in dps])}\n' + f"{', '.join([self._res_to_str(r, 'dp') for r in dps])}\n" ) else: raise ValueError(f"Mode '{self.standard}' is currently not supported.") diff --git a/libs/community/langchain_community/indexes/_document_manager.py b/libs/community/langchain_community/indexes/_document_manager.py index 64fbe1b94616e7..e36f3d832b3598 100644 --- a/libs/community/langchain_community/indexes/_document_manager.py +++ b/libs/community/langchain_community/indexes/_document_manager.py @@ -27,7 +27,7 @@ def _get_pymongo_client(mongodb_url: str, **kwargs: Any) -> Any: client = pymongo(mongodb_url, **kwargs) except ValueError as e: raise ImportError( - f"MongoClient string provided is not in proper format. " f"Got error: {e} " + f"MongoClient string provided is not in proper format. 
        )
    return client
diff --git a/libs/community/langchain_community/llms/bedrock.py b/libs/community/langchain_community/llms/bedrock.py
index 4c5fbdc476d665..e1b6570cf30835 100644
--- a/libs/community/langchain_community/llms/bedrock.py
+++ b/libs/community/langchain_community/llms/bedrock.py
@@ -454,8 +454,7 @@ def _get_provider(self) -> str:
             return self.provider
         if self.model_id.startswith("arn"):
             raise ValueError(
-                "Model provider should be supplied when passing a model ARN as "
-                "model_id"
+                "Model provider should be supplied when passing a model ARN as model_id"
             )

         return self.model_id.split(".")[0]
diff --git a/libs/community/langchain_community/llms/databricks.py b/libs/community/langchain_community/llms/databricks.py
index 3f7aa42f2fda68..4b656192e2b51c 100644
--- a/libs/community/langchain_community/llms/databricks.py
+++ b/libs/community/langchain_community/llms/databricks.py
@@ -457,12 +457,12 @@ def set_cluster_id(cls, values: Dict[str, Any]) -> dict:
                 pass

         if model_kwargs := values.get("model_kwargs"):
-            assert (
-                "prompt" not in model_kwargs
-            ), "model_kwargs must not contain key 'prompt'"
-            assert (
-                "stop" not in model_kwargs
-            ), "model_kwargs must not contain key 'stop'"
+            assert "prompt" not in model_kwargs, (
+                "model_kwargs must not contain key 'prompt'"
+            )
+            assert "stop" not in model_kwargs, (
+                "model_kwargs must not contain key 'stop'"
+            )
         return values

     def __init__(self, **data: Any):
diff --git a/libs/community/langchain_community/llms/deepinfra.py b/libs/community/langchain_community/llms/deepinfra.py
index 47551dd4e65205..bd0e21df5260b7 100644
--- a/libs/community/langchain_community/llms/deepinfra.py
+++ b/libs/community/langchain_community/llms/deepinfra.py
@@ -97,8 +97,7 @@ def _handle_status(self, code: int, text: Any) -> None:
             raise ValueError(f"DeepInfra received an invalid payload: {text}")
         elif code != 200:
             raise Exception(
-                f"DeepInfra returned an unexpected response with status "
-                f"{code}: {text}"
+                f"DeepInfra returned an unexpected response with status {code}: {text}"
             )

     def _call(
diff --git a/libs/community/langchain_community/llms/oci_generative_ai.py b/libs/community/langchain_community/llms/oci_generative_ai.py
index a9f48a97528bab..ee355867e9631a 100644
--- a/libs/community/langchain_community/llms/oci_generative_ai.py
+++ b/libs/community/langchain_community/llms/oci_generative_ai.py
@@ -274,8 +274,7 @@ def _prepare_invocation_object(

         if self.model_id is None:
             raise ValueError(
-                "model_id is required to call the model, "
-                "please provide the model_id."
+                "model_id is required to call the model, please provide the model_id."
            )

        if self.model_id.startswith(CUSTOM_ENDPOINT_PREFIX):
diff --git a/libs/community/langchain_community/llms/sambanova.py b/libs/community/langchain_community/llms/sambanova.py
index 994fb7888c8753..18f2810262a21f 100644
--- a/libs/community/langchain_community/llms/sambanova.py
+++ b/libs/community/langchain_community/llms/sambanova.py
@@ -711,7 +711,7 @@ def _handle_request(
         }
         data = {key: value for key, value in data.items() if value is not None}
         headers = {
-            "Authorization": f"Bearer " f"{self.sambanova_api_key.get_secret_value()}",
+            "Authorization": f"Bearer {self.sambanova_api_key.get_secret_value()}",
             "Content-Type": "application/json",
         }
diff --git a/libs/community/langchain_community/llms/self_hosted_hugging_face.py b/libs/community/langchain_community/llms/self_hosted_hugging_face.py
index 016da2e48f9be9..e43ca9e312454e 100644
--- a/libs/community/langchain_community/llms/self_hosted_hugging_face.py
+++ b/libs/community/langchain_community/llms/self_hosted_hugging_face.py
@@ -69,8 +69,7 @@ def _load_transformer(
                 model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **_model_kwargs)
             else:
                 raise ValueError(
-                    f"Got invalid task {task}, "
-                    f"currently only {VALID_TASKS} are supported"
+                    f"Got invalid task {task}, currently only {VALID_TASKS} are supported"
                 )
     except ImportError as e:
         raise ImportError(
diff --git a/libs/community/langchain_community/retrievers/nanopq.py b/libs/community/langchain_community/retrievers/nanopq.py
index ca4162240d0980..274ad4b42e15f9 100644
--- a/libs/community/langchain_community/retrievers/nanopq.py
+++ b/libs/community/langchain_community/retrievers/nanopq.py
@@ -86,7 +86,7 @@ def _get_relevant_documents(
             from nanopq import PQ
         except ImportError:
             raise ImportError(
-                "Could not import nanopq, please install with `pip install " "nanopq`."
+                "Could not import nanopq, please install with `pip install nanopq`."
            )

        query_embeds = np.array(self.embeddings.embed_query(query))
diff --git a/libs/community/langchain_community/tools/azure_ai_services/text_analytics_for_health.py b/libs/community/langchain_community/tools/azure_ai_services/text_analytics_for_health.py
index b47b29210fde64..b7fcd310e9cdf1 100644
--- a/libs/community/langchain_community/tools/azure_ai_services/text_analytics_for_health.py
+++ b/libs/community/langchain_community/tools/azure_ai_services/text_analytics_for_health.py
@@ -83,7 +83,7 @@ def _format_text_analysis_result(self, text_analysis_result: Dict) -> str:
         if "entities" in text_analysis_result:
             formatted_result.append(
                 f"""The text contains the following healthcare entities: {
-                    ', '.join(text_analysis_result['entities'])
+                    ", ".join(text_analysis_result["entities"])
                 }""".replace("\n", " ")
             )
diff --git a/libs/community/langchain_community/tools/azure_cognitive_services/text_analytics_health.py b/libs/community/langchain_community/tools/azure_cognitive_services/text_analytics_health.py
index 0827cb9b93c188..53463f4d592abc 100644
--- a/libs/community/langchain_community/tools/azure_cognitive_services/text_analytics_health.py
+++ b/libs/community/langchain_community/tools/azure_cognitive_services/text_analytics_health.py
@@ -83,7 +83,7 @@ def _format_text_analysis_result(self, text_analysis_result: Dict) -> str:
         if "entities" in text_analysis_result:
             formatted_result.append(
                 f"""The text contains the following healthcare entities: {
-                    ', '.join(text_analysis_result['entities'])
+                    ", ".join(text_analysis_result["entities"])
                 }""".replace("\n", " ")
             )
diff --git a/libs/community/langchain_community/tools/databricks/_execution.py b/libs/community/langchain_community/tools/databricks/_execution.py
index 62e8414fe49a37..09693e55107541 100644
--- a/libs/community/langchain_community/tools/databricks/_execution.py
+++ b/libs/community/langchain_community/tools/databricks/_execution.py
@@ -66,9 +66,9 @@ def get_execute_function_sql_stmt(
     else:
         parts.append(f"SELECT * FROM {function.full_name}(")
     if function.input_params is None or function.input_params.parameters is None:
-        assert (
-            not json_params
-        ), "Function has no parameters but parameters were provided."
+        assert not json_params, (
+            "Function has no parameters but parameters were provided."
+        )
     else:
         args = []
         use_named_args = False
@@ -213,17 +213,17 @@ def execute_function(
     assert response.status is not None, f"Statement execution failed: {response}"
     if response.status.state != StatementState.SUCCEEDED:
         error = response.status.error
-        assert (
-            error is not None
-        ), f"Statement execution failed but no error message was provided: {response}"
+        assert error is not None, (
+            f"Statement execution failed but no error message was provided: {response}"
+        )
         return FunctionExecutionResult(error=f"{error.error_code}: {error.message}")
     manifest = response.manifest
     assert manifest is not None
     truncated = manifest.truncated
     result = response.result
-    assert (
-        result is not None
-    ), "Statement execution succeeded but no result was provided."
+    assert result is not None, (
+        "Statement execution succeeded but no result was provided."
+    )
     data_array = result.data_array
     if is_scalar(function):
         value = None
@@ -234,9 +234,9 @@ def execute_function(
         )
     else:
         schema = manifest.schema
-        assert (
-            schema is not None and schema.columns is not None
-        ), "Statement execution succeeded but no schema was provided."
+        assert schema is not None and schema.columns is not None, (
+            "Statement execution succeeded but no schema was provided."
+        )
         columns = [c.name for c in schema.columns]
         if data_array is None:
             data_array = []
diff --git a/libs/community/langchain_community/tools/edenai/audio_speech_to_text.py b/libs/community/langchain_community/tools/edenai/audio_speech_to_text.py
index b4a218dc3daa69..b3a70d4e6424a2 100644
--- a/libs/community/langchain_community/tools/edenai/audio_speech_to_text.py
+++ b/libs/community/langchain_community/tools/edenai/audio_speech_to_text.py
@@ -70,7 +70,7 @@ def _wait_processing(self, url: str) -> requests.Response:
                     if temp["results"][self.providers[0]]["error"] is not None:
                         raise Exception(
                             f"""EdenAI returned an unexpected response
-                            {temp['results'][self.providers[0]]['error']}"""
+                            {temp["results"][self.providers[0]]["error"]}"""
                         )
                     else:
                         return audio_analysis_result
diff --git a/libs/community/langchain_community/tools/financial_datasets/balance_sheets.py b/libs/community/langchain_community/tools/financial_datasets/balance_sheets.py
index 0b02b549ad6c21..4be8125cb5a444 100644
--- a/libs/community/langchain_community/tools/financial_datasets/balance_sheets.py
+++ b/libs/community/langchain_community/tools/financial_datasets/balance_sheets.py
@@ -20,7 +20,7 @@ class BalanceSheetsSchema(BaseModel):
         "Default is 'annual'.",
     )
     limit: int = Field(
-        description="The number of balance sheets to return. " "Default is 10.",
+        description="The number of balance sheets to return. Default is 10.",
     )
diff --git a/libs/community/langchain_community/tools/financial_datasets/cash_flow_statements.py b/libs/community/langchain_community/tools/financial_datasets/cash_flow_statements.py
index a627300a3a4339..5f621085a1838c 100644
--- a/libs/community/langchain_community/tools/financial_datasets/cash_flow_statements.py
+++ b/libs/community/langchain_community/tools/financial_datasets/cash_flow_statements.py
@@ -20,7 +20,7 @@ class CashFlowStatementsSchema(BaseModel):
         "Default is 'annual'.",
     )
     limit: int = Field(
-        description="The number of cash flow statements to return. " "Default is 10.",
+        description="The number of cash flow statements to return. Default is 10.",
     )
diff --git a/libs/community/langchain_community/tools/financial_datasets/income_statements.py b/libs/community/langchain_community/tools/financial_datasets/income_statements.py
index dc06d358df1178..d181ef7a1c1f6d 100644
--- a/libs/community/langchain_community/tools/financial_datasets/income_statements.py
+++ b/libs/community/langchain_community/tools/financial_datasets/income_statements.py
@@ -20,7 +20,7 @@ class IncomeStatementsSchema(BaseModel):
         "Default is 'annual'.",
     )
     limit: int = Field(
-        description="The number of income statements to return. " "Default is 10.",
+        description="The number of income statements to return. Default is 10.",
     )
diff --git a/libs/community/langchain_community/tools/gmail/create_draft.py b/libs/community/langchain_community/tools/gmail/create_draft.py
index b35b0f8758025e..a1cb44cd3cf9a7 100644
--- a/libs/community/langchain_community/tools/gmail/create_draft.py
+++ b/libs/community/langchain_community/tools/gmail/create_draft.py
@@ -81,7 +81,7 @@ def _run(
                 .create(userId="me", body=create_message)
                 .execute()
             )
-            output = f'Draft created. Draft Id: {draft["id"]}'
+            output = f"Draft created. Draft Id: {draft['id']}"
Draft Id: {draft['id']}" return output except Exception as e: raise Exception(f"An error occurred: {e}") diff --git a/libs/community/langchain_community/tools/gmail/send_message.py b/libs/community/langchain_community/tools/gmail/send_message.py index 0d9ab4634993b3..fadab49014434d 100644 --- a/libs/community/langchain_community/tools/gmail/send_message.py +++ b/libs/community/langchain_community/tools/gmail/send_message.py @@ -41,7 +41,7 @@ class GmailSendMessage(GmailBaseTool): # type: ignore[override, override] name: str = "send_gmail_message" description: str = ( - "Use this tool to send email messages." " The input is the message, recipients" + "Use this tool to send email messages. The input is the message, recipients" ) args_schema: Type[SendMessageSchema] = SendMessageSchema @@ -86,6 +86,6 @@ def _run( .send(userId="me", body=create_message) ) sent_message = send_message.execute() - return f'Message sent. Message Id: {sent_message["id"]}' + return f"Message sent. Message Id: {sent_message['id']}" except Exception as error: raise Exception(f"An error occurred: {error}") diff --git a/libs/community/langchain_community/tools/nuclia/tool.py b/libs/community/langchain_community/tools/nuclia/tool.py index 02cb335367a532..8bae0929a744c5 100644 --- a/libs/community/langchain_community/tools/nuclia/tool.py +++ b/libs/community/langchain_community/tools/nuclia/tool.py @@ -161,7 +161,7 @@ def _pushField(self, id: str, field: Any) -> str: ) if response.status_code != 200: logger.info( - f"Error pushing field {id}:" f"{response.status_code} {response.text}" + f"Error pushing field {id}:{response.status_code} {response.text}" ) raise ValueError("Error pushing field") else: @@ -177,7 +177,7 @@ def _pull(self, id: str) -> str: logger.info(f"{id} not in queue") return "" elif result["status"] == "pending": - logger.info(f'Waiting for {result["uuid"]} to be processed') + logger.info(f"Waiting for {result['uuid']} to be processed") return "" else: return result["data"] diff --git a/libs/community/langchain_community/tools/office365/send_event.py b/libs/community/langchain_community/tools/office365/send_event.py index e7513025ac1f41..052fc19c0e3d2b 100644 --- a/libs/community/langchain_community/tools/office365/send_event.py +++ b/libs/community/langchain_community/tools/office365/send_event.py @@ -6,10 +6,10 @@ from datetime import datetime as dt from typing import List, Optional, Type +from zoneinfo import ZoneInfo from langchain_core.callbacks import CallbackManagerForToolRun from pydantic import BaseModel, Field -from zoneinfo import ZoneInfo from langchain_community.tools.office365.base import O365BaseTool from langchain_community.tools.office365.utils import UTC_FORMAT diff --git a/libs/community/langchain_community/tools/openapi/utils/api_models.py b/libs/community/langchain_community/tools/openapi/utils/api_models.py index fef0b849ae37dc..8358305464d7f1 100644 --- a/libs/community/langchain_community/tools/openapi/utils/api_models.py +++ b/libs/community/langchain_community/tools/openapi/utils/api_models.py @@ -576,8 +576,7 @@ def _format_nested_properties( prop_type = f"{{\n{nested_props}\n{' ' * indent}}}" formatted_props.append( - f"{prop_desc}\n{' ' * indent}{prop_name}" - f"{prop_required}: {prop_type}," + f"{prop_desc}\n{' ' * indent}{prop_name}{prop_required}: {prop_type}," ) return "\n".join(formatted_props) diff --git a/libs/community/langchain_community/utilities/bibtex.py b/libs/community/langchain_community/utilities/bibtex.py index a3bf82ab738896..050b3b61025090 100644 --- 
--- a/libs/community/langchain_community/utilities/bibtex.py
+++ b/libs/community/langchain_community/utilities/bibtex.py
@@ -70,7 +70,7 @@ def get_metadata(
     if "url" in entry:
         url = entry["url"]
     elif "doi" in entry:
-        url = f'https://doi.org/{entry["doi"]}'
+        url = f"https://doi.org/{entry['doi']}"
     else:
         url = None
     meta = {
diff --git a/libs/community/langchain_community/utilities/cassandra_database.py b/libs/community/langchain_community/utilities/cassandra_database.py
index 4ec1973b1668ed..cd1c0f01964380 100644
--- a/libs/community/langchain_community/utilities/cassandra_database.py
+++ b/libs/community/langchain_community/utilities/cassandra_database.py
@@ -433,7 +433,7 @@ def _resolve_session(
             import cassio.config
         except ImportError:
             raise ValueError(
-                "cassio package not found, please install with" " `pip install cassio`"
+                "cassio package not found, please install with `pip install cassio`"
             )

         # Use pre-existing session on cassio
diff --git a/libs/community/langchain_community/utilities/clickup.py b/libs/community/langchain_community/utilities/clickup.py
index f5f00b22a0884c..ef4d6140998bad 100644
--- a/libs/community/langchain_community/utilities/clickup.py
+++ b/libs/community/langchain_community/utilities/clickup.py
@@ -460,7 +460,7 @@ def get_task_attribute(self, query: str) -> Dict:

         if params["attribute_name"] not in task:
             return {
-                "Error": f"""attribute_name = {params['attribute_name']} was not
+                "Error": f"""attribute_name = {params["attribute_name"]} was not
             found in task keys {task.keys()}. Please call again with one of the key names."""
             }
diff --git a/libs/community/langchain_community/utilities/github.py b/libs/community/langchain_community/utilities/github.py
index 2674b749de8f10..fab79012ed8568 100644
--- a/libs/community/langchain_community/utilities/github.py
+++ b/libs/community/langchain_community/utilities/github.py
@@ -20,8 +20,7 @@ def _import_tiktoken() -> Any:
         import tiktoken
     except ImportError:
         raise ImportError(
-            "tiktoken is not installed. "
-            "Please install it with `pip install tiktoken`"
+            "tiktoken is not installed. Please install it with `pip install tiktoken`"
Please install it with `pip install tiktoken`" ) return tiktoken @@ -90,8 +89,7 @@ def validate_environment(cls, values: Dict) -> Any: installation = installation[0] except ValueError as e: raise ValueError( - "Please make sure to give correct github parameters " - f"Error message: {e}" + f"Please make sure to give correct github parameters Error message: {e}" ) # create a GitHub instance: g = installation.get_github_for_installation() @@ -257,8 +255,7 @@ def list_branches_in_repo(self) -> str: if branches: branches_str = "\n".join(branches) return ( - f"Found {len(branches)} branches in the repository:" - f"\n{branches_str}" + f"Found {len(branches)} branches in the repository:\n{branches_str}" ) else: return "No branches found in the repository" @@ -774,8 +771,7 @@ def search_code(self, query: str) -> str: code.path, ref=self.active_branch ).decoded_content.decode() results.append( - f"Filepath: `{code.path}`\nFile contents: " - f"{file_content}\n<END OF FILE>" + f"Filepath: `{code.path}`\nFile contents: {file_content}\n<END OF FILE>" ) count += 1 return "\n".join(results) diff --git a/libs/community/langchain_community/utilities/google_places_api.py b/libs/community/langchain_community/utilities/google_places_api.py index b0c6f152bd26b4..423aeee6ec02fc 100644 --- a/libs/community/langchain_community/utilities/google_places_api.py +++ b/libs/community/langchain_community/utilities/google_places_api.py @@ -84,7 +84,7 @@ def run(self, query: str) -> str: if details is not None: places.append(details) - return "\n".join([f"{i+1}. {item}" for i, item in enumerate(places)]) + return "\n".join([f"{i + 1}. {item}" for i, item in enumerate(places)]) def fetch_place_details(self, place_id: str) -> Optional[str]: try: diff --git a/libs/community/langchain_community/utilities/google_scholar.py b/libs/community/langchain_community/utilities/google_scholar.py index ffc94848a61a56..2285f086891c99 100644 --- a/libs/community/langchain_community/utilities/google_scholar.py +++ b/libs/community/langchain_community/utilities/google_scholar.py @@ -121,10 +121,10 @@ def run(self, query: str) -> str: if not total_results: return "No good Google Scholar Result was found" docs = [ - f"Title: {result.get('title','')}\n" - f"Authors: {','.join([author.get('name') for author in result.get('publication_info',{}).get('authors',[])])}\n" # noqa: E501 - f"Summary: {result.get('publication_info',{}).get('summary','')}\n" - f"Total-Citations: {result.get('inline_links',{}).get('cited_by',{}).get('total','')}" # noqa: E501 + f"Title: {result.get('title', '')}\n" + f"Authors: {','.join([author.get('name') for author in result.get('publication_info', {}).get('authors', [])])}\n" # noqa: E501 + f"Summary: {result.get('publication_info', {}).get('summary', '')}\n" + f"Total-Citations: {result.get('inline_links', {}).get('cited_by', {}).get('total', '')}" # noqa: E501 for result in total_results ] return "\n\n".join(docs) diff --git a/libs/community/langchain_community/utilities/merriam_webster.py b/libs/community/langchain_community/utilities/merriam_webster.py index 8cf9e18a107e80..94268556d02b56 100644 --- a/libs/community/langchain_community/utilities/merriam_webster.py +++ b/libs/community/langchain_community/utilities/merriam_webster.py @@ -84,7 +84,7 @@ def _format_definitions(self, query: str, definitions: List[Dict]) -> str: formatted_definitions.extend(self._format_definition(definition)) if len(formatted_definitions) == 1: - return f"Definition of '{query}':\n" f"{formatted_definitions[0]}" + return f"Definition 
of '{query}':\n{formatted_definitions[0]}" result = f"Definitions of '{query}':\n\n" for i, formatted_definition in enumerate(formatted_definitions, 1): diff --git a/libs/community/langchain_community/utilities/openapi.py b/libs/community/langchain_community/utilities/openapi.py index 1d99f7e182301c..9e6ff44cf12a25 100644 --- a/libs/community/langchain_community/utilities/openapi.py +++ b/libs/community/langchain_community/utilities/openapi.py @@ -211,8 +211,7 @@ def _alert_unsupported_spec(obj: dict) -> None: ) else: raise ValueError( - "Attempting to load an unsupported spec:" - f"\n\n{obj}\n{warning_message}" + f"Attempting to load an unsupported spec:\n\n{obj}\n{warning_message}" ) @classmethod diff --git a/libs/community/langchain_community/utilities/portkey.py b/libs/community/langchain_community/utilities/portkey.py index dbaf41840f072e..5eb16f7af518b0 100644 --- a/libs/community/langchain_community/utilities/portkey.py +++ b/libs/community/langchain_community/utilities/portkey.py @@ -26,9 +26,9 @@ def Config( cache_force_refresh: Optional[str] = None, cache_age: Optional[int] = None, ) -> Dict[str, str]: - assert retry_count is None or retry_count in range( - 1, 6 - ), "retry_count must be an integer and in range [1, 2, 3, 4, 5]" + assert retry_count is None or retry_count in range(1, 6), ( + "retry_count must be an integer and in range [1, 2, 3, 4, 5]" + ) assert cache is None or cache in [ "simple", "semantic", @@ -37,9 +37,9 @@ def Config( isinstance(cache_force_refresh, str) and cache_force_refresh in ["True", "False"] ), "cache_force_refresh must be 'True' or 'False'" - assert cache_age is None or isinstance( - cache_age, int - ), "cache_age must be an integer" + assert cache_age is None or isinstance(cache_age, int), ( + "cache_age must be an integer" + ) os.environ["OPENAI_API_BASE"] = Portkey.base diff --git a/libs/community/langchain_community/utilities/sql_database.py b/libs/community/langchain_community/utilities/sql_database.py index 85f0a1076770d9..62eec9e472b48e 100644 --- a/libs/community/langchain_community/utilities/sql_database.py +++ b/libs/community/langchain_community/utilities/sql_database.py @@ -24,8 +24,8 @@ def _format_index(index: sqlalchemy.engine.interfaces.ReflectedIndex) -> str: return ( - f'Name: {index["name"]}, Unique: {index["unique"]},' - f' Columns: {str(index["column_names"])}' + f"Name: {index['name']}, Unique: {index['unique']}," + f" Columns: {str(index['column_names'])}" ) diff --git a/libs/community/langchain_community/vectorstores/aerospike.py b/libs/community/langchain_community/vectorstores/aerospike.py index 997646a00f98af..96ef3c659eb68c 100644 --- a/libs/community/langchain_community/vectorstores/aerospike.py +++ b/libs/community/langchain_community/vectorstores/aerospike.py @@ -149,7 +149,7 @@ def convert_distance_strategy( return DistanceStrategy.EUCLIDEAN_DISTANCE raise ValueError( - "Unknown distance strategy, must be cosine, dot_product" ", or euclidean" + "Unknown distance strategy, must be cosine, dot_product, or euclidean" ) def add_texts( @@ -437,8 +437,7 @@ def _select_relevance_score_fn(self) -> Callable[[float], float]: return self._euclidean_relevance_score_fn else: raise ValueError( - "Unknown distance strategy, must be cosine, dot_product" - ", or euclidean" + "Unknown distance strategy, must be cosine, dot_product, or euclidean" ) @staticmethod diff --git a/libs/community/langchain_community/vectorstores/apache_doris.py b/libs/community/langchain_community/vectorstores/apache_doris.py index 
index 4d25f0a0aeb762..56ee6c0f64ffd8 100644
--- a/libs/community/langchain_community/vectorstores/apache_doris.py
+++ b/libs/community/langchain_community/vectorstores/apache_doris.py
@@ -123,10 +123,10 @@ def __init__(

         self.schema = f"""\
 CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}(
-    {self.config.column_map['id']} varchar(50),
-    {self.config.column_map['document']} string,
-    {self.config.column_map['embedding']} array<float>,
-    {self.config.column_map['metadata']} string
+    {self.config.column_map["id"]} varchar(50),
+    {self.config.column_map["document"]} string,
+    {self.config.column_map["embedding"]} array<float>,
+    {self.config.column_map["metadata"]} string
 ) ENGINE = OLAP UNIQUE KEY(id) DISTRIBUTED BY HASH(id) \
   PROPERTIES ("replication_allocation" = "tag.location.default: 1")\
 """
@@ -179,7 +179,7 @@ def _build_insert_sql(self, transac: Iterable, column_names: Iterable[str]) -> s
                 INSERT INTO
                     {self.config.database}.{self.config.table}({ks})
                 VALUES
-                {','.join(_data)}
+                {",".join(_data)}
                 """
         return i_str

@@ -310,10 +310,10 @@ def _build_query_sql(
             where_str = ""

         q_str = f"""
-            SELECT {self.config.column_map['document']},
-                {self.config.column_map['metadata']},
+            SELECT {self.config.column_map["document"]},
+                {self.config.column_map["metadata"]},
                 cosine_distance(array<float>[{q_emb_str}],
-                {self.config.column_map['embedding']}) as dist
+                {self.config.column_map["embedding"]}) as dist
             FROM {self.config.database}.{self.config.table}
             {where_str}
             ORDER BY dist {self.dist_order}
diff --git a/libs/community/langchain_community/vectorstores/azure_cosmos_db_no_sql.py b/libs/community/langchain_community/vectorstores/azure_cosmos_db_no_sql.py
index 8d0d90dd92e9c7..a2d6327d09dcbf 100644
--- a/libs/community/langchain_community/vectorstores/azure_cosmos_db_no_sql.py
+++ b/libs/community/langchain_community/vectorstores/azure_cosmos_db_no_sql.py
@@ -617,7 +617,7 @@ def _construct_query(
         ):
             query = f"SELECT {'TOP ' + str(k) + ' ' if not offset_limit else ''}"
         else:
-            query = f"""SELECT {'TOP @limit ' if not offset_limit else ''}"""
+            query = f"""SELECT {"TOP @limit " if not offset_limit else ""}"""
         query += self._generate_projection_fields(
             projection_mapping, query_type, embeddings
         )
@@ -790,7 +790,7 @@ def _build_where_clause(self, pre_filter: PreFilter) -> str:
                 # e.g., for IN clauses
                 value = f"({', '.join(map(str, condition.value))})"
                 clauses.append(f"c.{condition.property} {sql_operator} {value}")
-        return f""" WHERE {' {} '.format(sql_logical_operator).join(clauses)}""".strip()
+        return f""" WHERE {" {} ".format(sql_logical_operator).join(clauses)}""".strip()

     def _execute_query(
         self,
diff --git a/libs/community/langchain_community/vectorstores/bigquery_vector_search.py b/libs/community/langchain_community/vectorstores/bigquery_vector_search.py
index 07114924dca6e1..be4596cd148c94 100644
--- a/libs/community/langchain_community/vectorstores/bigquery_vector_search.py
+++ b/libs/community/langchain_community/vectorstores/bigquery_vector_search.py
@@ -122,9 +122,7 @@ def __init__(
         self.text_embedding_field = text_embedding_field
         self.doc_id_field = doc_id_field
         self.distance_strategy = distance_strategy
-        self._full_table_id = (
-            f"{self.project_id}." f"{self.dataset_name}." f"{self.table_name}"
f"{self.table_name}" - ) + self._full_table_id = f"{self.project_id}.{self.dataset_name}.{self.table_name}" self._logger.debug("Using table `%s`", self.full_table_id) with _vector_table_lock: self.vectors_table = self._initialize_table() @@ -149,7 +147,7 @@ def _initialize_table(self) -> Any: columns[self.doc_id_field].field_type != "STRING" or columns[self.doc_id_field].mode == "REPEATED" ): - raise ValueError(f"Column {self.doc_id_field} must be of " "STRING type") + raise ValueError(f"Column {self.doc_id_field} must be of STRING type") if self.metadata_field not in columns: changed_schema = True schema.append( @@ -171,7 +169,7 @@ def _initialize_table(self) -> Any: columns[self.content_field].field_type != "STRING" or columns[self.content_field].mode == "REPEATED" ): - raise ValueError(f"Column {self.content_field} must be of " "STRING type") + raise ValueError(f"Column {self.content_field} must be of STRING type") if self.text_embedding_field not in columns: changed_schema = True schema.append( @@ -186,7 +184,7 @@ def _initialize_table(self) -> Any: or columns[self.text_embedding_field].mode != "REPEATED" ): raise ValueError( - f"Column {self.text_embedding_field} must be of " "ARRAY<FLOAT64> type" + f"Column {self.text_embedding_field} must be of ARRAY<FLOAT64> type" ) if changed_schema: self._logger.debug("Updated table `%s` schema.", self.full_table_id) @@ -389,9 +387,7 @@ def get_documents( ) else: val = str(i[1]).replace('"', '\\"') - expr = ( - f"JSON_VALUE(`{self.metadata_field}`,'$.{i[0]}')" f' = "{val}"' - ) + expr = f"JSON_VALUE(`{self.metadata_field}`,'$.{i[0]}') = \"{val}\"" filter_expressions.append(expr) filter_expression_str = " AND ".join(filter_expressions) where_filter_expr = f" AND ({filter_expression_str})" @@ -520,7 +516,7 @@ def _search_with_score_and_embeddings_by_vector( elif fraction_lists_to_search: if fraction_lists_to_search == 0 or fraction_lists_to_search >= 1.0: raise ValueError( - "`fraction_lists_to_search` must be between " "0.0 and 1.0" + "`fraction_lists_to_search` must be between 0.0 and 1.0" ) options_string = ( ',options => \'{"fraction_lists_to_search":' diff --git a/libs/community/langchain_community/vectorstores/chroma.py b/libs/community/langchain_community/vectorstores/chroma.py index 1ab7ddad140d6d..ebf91f718af3a2 100644 --- a/libs/community/langchain_community/vectorstores/chroma.py +++ b/libs/community/langchain_community/vectorstores/chroma.py @@ -643,7 +643,7 @@ def max_marginal_relevance_search( """ if self._embedding_function is None: raise ValueError( - "For MMR search, you must specify an embedding function on" "creation." + "For MMR search, you must specify an embedding function oncreation." ) embedding = self._embedding_function.embed_query(query) diff --git a/libs/community/langchain_community/vectorstores/clarifai.py b/libs/community/langchain_community/vectorstores/clarifai.py index b37b99f02a7064..4cae4bf7cf976e 100644 --- a/libs/community/langchain_community/vectorstores/clarifai.py +++ b/libs/community/langchain_community/vectorstores/clarifai.py @@ -115,14 +115,14 @@ def add_texts( assert length > 0, "No texts provided to add to the vectorstore." if metadatas is not None: - assert length == len( - metadatas - ), "Number of texts and metadatas should be the same." + assert length == len(metadatas), ( + "Number of texts and metadatas should be the same." + ) if ids is not None: - assert len(ltexts) == len( - ids - ), "Number of text inputs and input ids should be the same." 
+ assert len(ltexts) == len(ids), ( + "Number of text inputs and input ids should be the same." + ) input_obj = Inputs.from_auth_helper(auth=self._auth) batch_size = 32 diff --git a/libs/community/langchain_community/vectorstores/clickhouse.py b/libs/community/langchain_community/vectorstores/clickhouse.py index 4246abfc0ddde9..b05898d55cceff 100644 --- a/libs/community/langchain_community/vectorstores/clickhouse.py +++ b/libs/community/langchain_community/vectorstores/clickhouse.py @@ -341,27 +341,28 @@ def _schema(self, dim: int, index_params: Optional[str] = "") -> str: if self.config.index_type: return f"""\ CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}( - {self.config.column_map['id']} Nullable(String), - {self.config.column_map['document']} Nullable(String), - {self.config.column_map['embedding']} Array(Float32), - {self.config.column_map['metadata']} JSON, - {self.config.column_map['uuid']} UUID DEFAULT generateUUIDv4(), + {self.config.column_map["id"]} Nullable(String), + {self.config.column_map["document"]} Nullable(String), + {self.config.column_map["embedding"]} Array(Float32), + {self.config.column_map["metadata"]} JSON, + {self.config.column_map["uuid"]} UUID DEFAULT generateUUIDv4(), CONSTRAINT cons_vec_len CHECK length( - {self.config.column_map['embedding']}) = {dim}, - INDEX vec_idx {self.config.column_map['embedding']} TYPE \ + {self.config.column_map["embedding"]}) = {dim}, + INDEX vec_idx {self.config.column_map["embedding"]} TYPE \ {self.config.index_type}({index_params}) GRANULARITY 1000 ) ENGINE = MergeTree ORDER BY uuid SETTINGS index_granularity = 8192\ """ else: return f"""\ CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}( - {self.config.column_map['id']} Nullable(String), - {self.config.column_map['document']} Nullable(String), - {self.config.column_map['embedding']} Array(Float32), - {self.config.column_map['metadata']} JSON, - {self.config.column_map['uuid']} UUID DEFAULT generateUUIDv4(), + {self.config.column_map["id"]} Nullable(String), + {self.config.column_map["document"]} Nullable(String), + {self.config.column_map["embedding"]} Array(Float32), + {self.config.column_map["metadata"]} JSON, + {self.config.column_map["uuid"]} UUID DEFAULT generateUUIDv4(), CONSTRAINT cons_vec_len CHECK length({ - self.config.column_map['embedding']}) = {dim} + self.config.column_map["embedding"] + }) = {dim} ) ENGINE = MergeTree ORDER BY uuid """ @@ -418,7 +419,7 @@ def _build_insert_sql(self, transac: Iterable, column_names: Iterable[str]) -> s INSERT INTO TABLE {self.config.database}.{self.config.table}({ks}) VALUES - {','.join(_data)} + {",".join(_data)} """ return i_str @@ -574,13 +575,13 @@ def _build_query_sql( for k in self.config.index_query_params: settings_strs.append(f"SETTING {k}={self.config.index_query_params[k]}") q_str = f""" - SELECT {self.config.column_map['document']}, - {self.config.column_map['metadata']}, dist + SELECT {self.config.column_map["document"]}, + {self.config.column_map["metadata"]}, dist FROM {self.config.database}.{self.config.table} {where_str} - ORDER BY L2Distance({self.config.column_map['embedding']}, [{q_emb_str}]) + ORDER BY L2Distance({self.config.column_map["embedding"]}, [{q_emb_str}]) AS dist {self.dist_order} - LIMIT {topk} {' '.join(settings_strs)} + LIMIT {topk} {" ".join(settings_strs)} """ return q_str diff --git a/libs/community/langchain_community/vectorstores/dashvector.py b/libs/community/langchain_community/vectorstores/dashvector.py index 64f73630de6935..639856fcbce996 
100644 --- a/libs/community/langchain_community/vectorstores/dashvector.py +++ b/libs/community/langchain_community/vectorstores/dashvector.py @@ -392,9 +392,7 @@ def from_texts( if resp: collection = dashvector_client.get(collection_name) else: - raise ValueError( - "Fail to create collection. " f"Error: {resp.message}." - ) + raise ValueError(f"Fail to create collection. Error: {resp.message}.") dashvector_vector_db = cls(collection, embedding, text_field) dashvector_vector_db.add_texts(texts, metadatas, ids, batch_size) diff --git a/libs/community/langchain_community/vectorstores/deeplake.py b/libs/community/langchain_community/vectorstores/deeplake.py index 0de4c6032a708f..c0163560e31707 100644 --- a/libs/community/langchain_community/vectorstores/deeplake.py +++ b/libs/community/langchain_community/vectorstores/deeplake.py @@ -425,8 +425,7 @@ def _search( if embedding is None: if _embedding_function is None: raise ValueError( - "Either `embedding` or `embedding_function` needs to be" - " specified." + "Either `embedding` or `embedding_function` needs to be specified." ) embedding = _embedding_function(query) if query else None diff --git a/libs/community/langchain_community/vectorstores/hanavector.py b/libs/community/langchain_community/vectorstores/hanavector.py index 6c0a8040b255cd..9212c53f5a4caf 100644 --- a/libs/community/langchain_community/vectorstores/hanavector.py +++ b/libs/community/langchain_community/vectorstores/hanavector.py @@ -669,7 +669,7 @@ def _process_filter_object(self, filter): # type: ignore[no-untyped-def] if key in self.specific_metadata_columns else f"JSON_VALUE({self.metadata_column}, '$.{key}')" ) - where_str += f"{selector} " f"{operator} {sql_param}" + where_str += f"{selector} {operator} {sql_param}" return where_str, query_tuple diff --git a/libs/community/langchain_community/vectorstores/hippo.py b/libs/community/langchain_community/vectorstores/hippo.py index 328a2f1e992266..373f6a0e06b154 100644 --- a/libs/community/langchain_community/vectorstores/hippo.py +++ b/libs/community/langchain_community/vectorstores/hippo.py @@ -118,7 +118,7 @@ def __init__( self.hc.delete_table(self.table_name, self.database_name) except Exception as e: logging.error( - f"An error occurred while deleting the table " f"{self.table_name}: {e}" + f"An error occurred while deleting the table {self.table_name}: {e}" ) raise @@ -127,7 +127,7 @@ def __init__( self.col = self.hc.get_table(self.table_name, self.database_name) except Exception as e: logging.error( - f"An error occurred while getting the table " f"{self.table_name}: {e}" + f"An error occurred while getting the table {self.table_name}: {e}" ) raise diff --git a/libs/community/langchain_community/vectorstores/infinispanvs.py b/libs/community/langchain_community/vectorstores/infinispanvs.py index 3f7c34158df15d..c9012e17fbef1f 100644 --- a/libs/community/langchain_community/vectorstores/infinispanvs.py +++ b/libs/community/langchain_community/vectorstores/infinispanvs.py @@ -90,7 +90,7 @@ def __init__( self._textfield = self._configuration.get("text_field", "text") else: warnings.warn( - "`textfield` is deprecated. Please use `text_field` " "param.", + "`textfield` is deprecated. Please use `text_field` param.", DeprecationWarning, ) self._vectorfield = self._configuration.get("vectorfield", "") @@ -98,7 +98,7 @@ def __init__( self._vectorfield = self._configuration.get("vector_field", "vector") else: warnings.warn( - "`vectorfield` is deprecated. Please use `vector_field` " "param.", + "`vectorfield` is deprecated. 
Please use `vector_field` param.", DeprecationWarning, ) self._to_content = self._configuration.get( @@ -361,16 +361,16 @@ def _query_result_to_docs( def configure(self, metadata: dict, dimension: int) -> None: schema = self.schema_builder(metadata, dimension) output = self.schema_create(schema) - assert ( - output.status_code == self.ispn.Codes.OK - ), "Unable to create schema. Already exists? " - "Consider using clear_old=True" + assert output.status_code == self.ispn.Codes.OK, ( + "Unable to create schema. Already exists? " + "Consider using clear_old=True" + ) assert json.loads(output.text)["error"] is None if not self.cache_exists(): output = self.cache_create() - assert ( - output.status_code == self.ispn.Codes.OK - ), "Unable to create cache. Already exists? " - "Consider using clear_old=True" + assert output.status_code == self.ispn.Codes.OK, ( + "Unable to create cache. Already exists? " + "Consider using clear_old=True" + ) # Ensure index is clean self.cache_index_clear() diff --git a/libs/community/langchain_community/vectorstores/lancedb.py b/libs/community/langchain_community/vectorstores/lancedb.py index f08e4380481b1f..11cc955cac40ae 100644 --- a/libs/community/langchain_community/vectorstores/lancedb.py +++ b/libs/community/langchain_community/vectorstores/lancedb.py @@ -562,7 +562,7 @@ def max_marginal_relevance_search( if self._embedding is None: raise ValueError( - "For MMR search, you must specify an embedding function on" "creation." + "For MMR search, you must specify an embedding function on creation." ) embedding = self._embedding.embed_query(query) diff --git a/libs/community/langchain_community/vectorstores/manticore_search.py b/libs/community/langchain_community/vectorstores/manticore_search.py index 3743e603da7976..027d4f6adc8c17 100644 --- a/libs/community/langchain_community/vectorstores/manticore_search.py +++ b/libs/community/langchain_community/vectorstores/manticore_search.py @@ -150,16 +150,16 @@ def __init__( # Initialize the schema self.schema = f"""\ CREATE TABLE IF NOT EXISTS {self.config.table}( - {self.config.column_map['id']} bigint, - {self.config.column_map['document']} text indexed stored, - {self.config.column_map['embedding']} \ + {self.config.column_map["id"]} bigint, + {self.config.column_map["document"]} text indexed stored, + {self.config.column_map["embedding"]} \ float_vector knn_type='{self.config.knn_type}' \ knn_dims='{self.dim}' \ hnsw_similarity='{self.config.hnsw_similarity}' \ hnsw_m='{self.config.hnsw_m}' \ hnsw_ef_construction='{self.config.hnsw_ef_construction}', - {self.config.column_map['metadata']} json, - {self.config.column_map['uuid']} text indexed stored + {self.config.column_map["metadata"]} json, + {self.config.column_map["uuid"]} text indexed stored )\ """ diff --git a/libs/community/langchain_community/vectorstores/meilisearch.py b/libs/community/langchain_community/vectorstores/meilisearch.py index 885d4d5cff4417..c80301f3714bee 100644 --- a/libs/community/langchain_community/vectorstores/meilisearch.py +++ b/libs/community/langchain_community/vectorstores/meilisearch.py @@ -33,8 +33,7 @@ def _create_client( client = meilisearch.Client(url=url, api_key=api_key) elif not isinstance(client, meilisearch.Client): raise ValueError( - f"client should be an instance of meilisearch.Client, " - f"got {type(client)}" + f"client should be an instance of meilisearch.Client, got {type(client)}" ) try: client.version() diff --git a/libs/community/langchain_community/vectorstores/milvus.py b/libs/community/langchain_community/vectorstores/milvus.py index 04bff999913dbd..7dd404999683c8 100644 ---
a/libs/community/langchain_community/vectorstores/milvus.py +++ b/libs/community/langchain_community/vectorstores/milvus.py @@ -550,15 +550,15 @@ def add_texts( texts = list(texts) if not self.auto_id: - assert isinstance( - ids, list - ), "A list of valid ids are required when auto_id is False." - assert len(set(ids)) == len( - texts - ), "Different lengths of texts and unique ids are provided." - assert all( - len(x.encode()) <= 65_535 for x in ids - ), "Each id should be a string less than 65535 bytes." + assert isinstance(ids, list), ( + "A list of valid ids are required when auto_id is False." + ) + assert len(set(ids)) == len(texts), ( + "Different lengths of texts and unique ids are provided." + ) + assert all(len(x.encode()) <= 65_535 for x in ids), ( + "Each id should be a string less than 65535 bytes." + ) try: embeddings = self.embedding_func.embed_documents(texts) @@ -953,13 +953,13 @@ def delete( # type: ignore[no-untyped-def] if isinstance(ids, list) and len(ids) > 0: if expr is not None: logger.warning( - "Both ids and expr are provided. " "Ignore expr and delete by ids." + "Both ids and expr are provided. Ignore expr and delete by ids." ) expr = f"{self._primary_field} in {ids}" else: - assert isinstance( - expr, str - ), "Either ids list or expr string must be provided." + assert isinstance(expr, str), ( + "Either ids list or expr string must be provided." + ) return self.col.delete(expr=expr, **kwargs) # type: ignore[union-attr] @classmethod diff --git a/libs/community/langchain_community/vectorstores/myscale.py b/libs/community/langchain_community/vectorstores/myscale.py index d3dec65c8b71e4..711b525e40606f 100644 --- a/libs/community/langchain_community/vectorstores/myscale.py +++ b/libs/community/langchain_community/vectorstores/myscale.py @@ -166,16 +166,16 @@ def __init__( ) schema_ = f""" CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}( - {self.config.column_map['id']} String, - {self.config.column_map['text']} String, - {self.config.column_map['vector']} Array(Float32), - {self.config.column_map['metadata']} JSON, + {self.config.column_map["id"]} String, + {self.config.column_map["text"]} String, + {self.config.column_map["vector"]} Array(Float32), + {self.config.column_map["metadata"]} JSON, CONSTRAINT cons_vec_len CHECK length(\ - {self.config.column_map['vector']}) = {dim}, - VECTOR INDEX vidx {self.config.column_map['vector']} \ + {self.config.column_map["vector"]}) = {dim}, + VECTOR INDEX vidx {self.config.column_map["vector"]} \ TYPE {self.config.index_type}(\ 'metric_type={self.config.metric}'{index_params}) - ) ENGINE = MergeTree ORDER BY {self.config.column_map['id']} + ) ENGINE = MergeTree ORDER BY {self.config.column_map["id"]} """ self.dim = dim self.BS = "\\" @@ -220,7 +220,7 @@ def _build_istr(self, transac: Iterable, column_names: Iterable[str]) -> str: INSERT INTO TABLE {self.config.database}.{self.config.table}({ks}) VALUES - {','.join(_data)} + {",".join(_data)} """ return i_str @@ -345,11 +345,11 @@ def _build_qstr( where_str = "" q_str = f""" - SELECT {self.config.column_map['text']}, - {self.config.column_map['metadata']}, dist + SELECT {self.config.column_map["text"]}, + {self.config.column_map["metadata"]}, dist FROM {self.config.database}.{self.config.table} {where_str} - ORDER BY distance({self.config.column_map['vector']}, [{q_emb_str}]) + ORDER BY distance({self.config.column_map["vector"]}, [{q_emb_str}]) AS dist {self.dist_order} LIMIT {topk} """ @@ -475,9 +475,9 @@ def delete( Optional[bool]: True if deletion is 
successful, False otherwise, None if not implemented. """ - assert not ( - ids is None and where_str is None - ), "You need to specify where to be deleted! Either with `ids` or `where_str`" + assert not (ids is None and where_str is None), ( + "You need to specify where to be deleted! Either with `ids` or `where_str`" + ) conds = [] if ids and len(ids) > 0: id_list = ", ".join([f"'{id}'" for id in ids]) @@ -536,11 +536,11 @@ def _build_qstr( where_str = "" q_str = f""" - SELECT {self.config.column_map['text']}, dist, - {','.join(self.must_have_cols)} + SELECT {self.config.column_map["text"]}, dist, + {",".join(self.must_have_cols)} FROM {self.config.database}.{self.config.table} {where_str} - ORDER BY distance({self.config.column_map['vector']}, [{q_emb_str}]) + ORDER BY distance({self.config.column_map["vector"]}, [{q_emb_str}]) AS dist {self.dist_order} LIMIT {topk} """ diff --git a/libs/community/langchain_community/vectorstores/neo4j_vector.py b/libs/community/langchain_community/vectorstores/neo4j_vector.py index 03d97a5a9d034e..b65f165abad750 100644 --- a/libs/community/langchain_community/vectorstores/neo4j_vector.py +++ b/libs/community/langchain_community/vectorstores/neo4j_vector.py @@ -323,8 +323,7 @@ def _handle_field_filter( if field.startswith("$"): raise ValueError( - f"Invalid filter condition. Expected a field but got an operator: " - f"{field}" + f"Invalid filter condition. Expected a field but got an operator: {field}" ) # Allow [a-zA-Z0-9_], disallow $ for now until we support escape characters @@ -344,8 +343,7 @@ def _handle_field_filter( # Verify that that operator is an operator if operator not in SUPPORTED_OPERATORS: raise ValueError( - f"Invalid operator: {operator}. " - f"Expected one of {SUPPORTED_OPERATORS}" + f"Invalid operator: {operator}. Expected one of {SUPPORTED_OPERATORS}" ) else: # Then we assume an equality operator operator = "$eq" @@ -423,8 +421,7 @@ def construct_metadata_filter(filter: Dict[str, Any]) -> Tuple[str, Dict]: # Then it's an operator if key.lower() not in ["$and", "$or"]: raise ValueError( - f"Invalid filter condition. Expected $and or $or " - f"but got: {key}" + f"Invalid filter condition. Expected $and or $or but got: {key}" ) else: # Then it's a field @@ -459,7 +456,7 @@ def construct_metadata_filter(filter: Dict[str, Any]) -> Tuple[str, Dict]: ) else: raise ValueError( - f"Invalid filter condition. Expected $and or $or " f"but got: {key}" + f"Invalid filter condition. Expected $and or $or but got: {key}" ) elif len(filter) > 1: # Then all keys have to be fields (they cannot be operators) @@ -1336,8 +1333,7 @@ def from_existing_index( if search_type == SearchType.HYBRID and not keyword_index_name: raise ValueError( - "keyword_index name has to be specified " - "when using hybrid search option" + "keyword_index name has to be specified when using hybrid search option" ) store = cls( diff --git a/libs/community/langchain_community/vectorstores/oraclevs.py b/libs/community/langchain_community/vectorstores/oraclevs.py index 2ca39e9f6f1b50..6aa767927f5e6b 100644 --- a/libs/community/langchain_community/vectorstores/oraclevs.py +++ b/libs/community/langchain_community/vectorstores/oraclevs.py @@ -75,8 +75,7 @@ def _table_exists(client: Connection, table_name: str) -> bool: import oracledb except ImportError as e: raise ImportError( - "Unable to import oracledb, please install with " - "`pip install -U oracledb`." + "Unable to import oracledb, please install with `pip install -U oracledb`." 
) from e try: @@ -775,8 +774,9 @@ def similarity_search_by_vector_returning_embeddings( SELECT id, text, metadata, - vector_distance(embedding, :embedding, {_get_distance_function( - self.distance_strategy)}) as distance, + vector_distance(embedding, :embedding, { + _get_distance_function(self.distance_strategy) + }) as distance, embedding FROM {self.table_name} ORDER BY distance @@ -1010,7 +1010,7 @@ def from_texts( ) if not isinstance(distance_strategy, DistanceStrategy): raise TypeError( - f"Expected DistanceStrategy got " f"{type(distance_strategy).__name__} " + f"Expected DistanceStrategy got {type(distance_strategy).__name__} " ) query = kwargs.get("query", "What is a Oracle database") diff --git a/libs/community/langchain_community/vectorstores/pgvector.py b/libs/community/langchain_community/vectorstores/pgvector.py index 85814bef99488f..111b8da0bba055 100644 --- a/libs/community/langchain_community/vectorstores/pgvector.py +++ b/libs/community/langchain_community/vectorstores/pgvector.py @@ -900,8 +900,7 @@ def _create_filter_clause(self, filters: Any) -> Any: ) else: raise ValueError( - f"Invalid filter condition. Expected $and or $or " - f"but got: {key}" + f"Invalid filter condition. Expected $and or $or but got: {key}" ) elif len(filters) > 1: # Then all keys have to be fields (they cannot be operators) diff --git a/libs/community/langchain_community/vectorstores/pinecone.py b/libs/community/langchain_community/vectorstores/pinecone.py index 7d960409e6b0fc..7ad40b2c0c889f 100644 --- a/libs/community/langchain_community/vectorstores/pinecone.py +++ b/libs/community/langchain_community/vectorstores/pinecone.py @@ -71,7 +71,7 @@ def __init__( ) if not isinstance(index, pinecone.Index): raise ValueError( - f"client should be an instance of pinecone.Index, " f"got {type(index)}" + f"client should be an instance of pinecone.Index, got {type(index)}" ) self._index = index self._embedding = embedding diff --git a/libs/community/langchain_community/vectorstores/rocksetdb.py b/libs/community/langchain_community/vectorstores/rocksetdb.py index 263a211fba3249..0f5345eda5a8fc 100644 --- a/libs/community/langchain_community/vectorstores/rocksetdb.py +++ b/libs/community/langchain_community/vectorstores/rocksetdb.py @@ -271,10 +271,10 @@ def similarity_search_by_vector_with_relevance_scores( finalResult: list[Tuple[Document, float]] = [] for document in query_response.results: metadata = {} - assert isinstance( - document, dict - ), "document should be of type `dict[str,Any]`. But found: `{}`".format( - type(document) + assert isinstance(document, dict), ( + "document should be of type `dict[str,Any]`. But found: `{}`".format( + type(document) + ) ) for k, v in document.items(): if k == self._text_key: diff --git a/libs/community/langchain_community/vectorstores/sklearn.py b/libs/community/langchain_community/vectorstores/sklearn.py index 96953f69ff4529..4c83543276c276 100644 --- a/libs/community/langchain_community/vectorstores/sklearn.py +++ b/libs/community/langchain_community/vectorstores/sklearn.py @@ -171,8 +171,7 @@ def embeddings(self) -> Embeddings: def persist(self) -> None: if self._serializer is None: raise SKLearnVectorStoreException( - "You must specify a persist_path on creation to persist the " - "collection." + "You must specify a persist_path on creation to persist the collection." 
) data = { "ids": self._ids, @@ -185,7 +184,7 @@ def persist(self) -> None: def _load(self) -> None: if self._serializer is None: raise SKLearnVectorStoreException( - "You must specify a persist_path on creation to load the " "collection." + "You must specify a persist_path on creation to load the collection." ) data = self._serializer.load() self._embeddings = data["embeddings"] diff --git a/libs/community/langchain_community/vectorstores/sqlitevec.py b/libs/community/langchain_community/vectorstores/sqlitevec.py index 13a2d5ee9208c0..52da1942f5adb8 100644 --- a/libs/community/langchain_community/vectorstores/sqlitevec.py +++ b/libs/community/langchain_community/vectorstores/sqlitevec.py @@ -132,8 +132,7 @@ def add_texts( for text, metadata, embed in zip(texts, metadatas, embeds) ] self._connection.executemany( - f"INSERT INTO {self._table}(text, metadata, text_embedding) " - f"VALUES (?,?,?)", + f"INSERT INTO {self._table}(text, metadata, text_embedding) VALUES (?,?,?)", data_input, ) self._connection.commit() diff --git a/libs/community/langchain_community/vectorstores/sqlitevss.py b/libs/community/langchain_community/vectorstores/sqlitevss.py index 3ea9f427700b14..7bc394fddefcc0 100644 --- a/libs/community/langchain_community/vectorstores/sqlitevss.py +++ b/libs/community/langchain_community/vectorstores/sqlitevss.py @@ -121,8 +121,7 @@ def add_texts( for text, metadata, embed in zip(texts, metadatas, embeds) ] self._connection.executemany( - f"INSERT INTO {self._table}(text, metadata, text_embedding) " - f"VALUES (?,?,?)", + f"INSERT INTO {self._table}(text, metadata, text_embedding) VALUES (?,?,?)", data_input, ) self._connection.commit() diff --git a/libs/community/langchain_community/vectorstores/starrocks.py b/libs/community/langchain_community/vectorstores/starrocks.py index 80debc09f92cd7..9298f12a78ff38 100644 --- a/libs/community/langchain_community/vectorstores/starrocks.py +++ b/libs/community/langchain_community/vectorstores/starrocks.py @@ -176,10 +176,10 @@ def __init__( self.schema = f"""\ CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}( - {self.config.column_map['id']} string, - {self.config.column_map['document']} string, - {self.config.column_map['embedding']} array<float>, - {self.config.column_map['metadata']} string + {self.config.column_map["id"]} string, + {self.config.column_map["document"]} string, + {self.config.column_map["embedding"]} array<float>, + {self.config.column_map["metadata"]} string ) ENGINE = OLAP PRIMARY KEY(id) DISTRIBUTED BY HASH(id) \ PROPERTIES ("replication_num" = "1")\ """ @@ -232,7 +232,7 @@ def _build_insert_sql(self, transac: Iterable, column_names: Iterable[str]) -> s INSERT INTO {self.config.database}.{self.config.table}({ks}) VALUES - {','.join(_data)} + {",".join(_data)} """ return i_str @@ -363,10 +363,10 @@ def _build_query_sql( where_str = "" q_str = f""" - SELECT {self.config.column_map['document']}, - {self.config.column_map['metadata']}, + SELECT {self.config.column_map["document"]}, + {self.config.column_map["metadata"]}, cosine_similarity_norm(array<float>[{q_emb_str}], - {self.config.column_map['embedding']}) as dist + {self.config.column_map["embedding"]}) as dist FROM {self.config.database}.{self.config.table} {where_str} ORDER BY dist {self.dist_order} diff --git a/libs/community/langchain_community/vectorstores/surrealdb.py b/libs/community/langchain_community/vectorstores/surrealdb.py index ce05abdc930ce1..3157f48e33b271 100644 --- a/libs/community/langchain_community/vectorstores/surrealdb.py +++ 
b/libs/community/langchain_community/vectorstores/surrealdb.py @@ -323,9 +323,9 @@ def similarity_search_with_relevance_scores( List of Documents most similar along with relevance scores """ - async def _similarity_search_with_relevance_scores() -> ( - List[Tuple[Document, float]] - ): + async def _similarity_search_with_relevance_scores() -> List[ + Tuple[Document, float] + ]: await self.initialize() return await self.asimilarity_search_with_relevance_scores( query, k, filter=filter, **kwargs diff --git a/libs/community/langchain_community/vectorstores/typesense.py b/libs/community/langchain_community/vectorstores/typesense.py index 88d73992d0eea0..19f3499699a68c 100644 --- a/libs/community/langchain_community/vectorstores/typesense.py +++ b/libs/community/langchain_community/vectorstores/typesense.py @@ -158,7 +158,7 @@ def similarity_search_with_score( embedded_query = [str(x) for x in self._embedding.embed_query(query)] query_obj = { "q": "*", - "vector_query": f'vec:([{",".join(embedded_query)}], k:{k})', + "vector_query": f"vec:([{','.join(embedded_query)}], k:{k})", "filter_by": filter, "collection": self._typesense_collection_name, } diff --git a/libs/community/langchain_community/vectorstores/vdms.py b/libs/community/langchain_community/vectorstores/vdms.py index ed50d014626230..811c79fccc80b0 100644 --- a/libs/community/langchain_community/vectorstores/vdms.py +++ b/libs/community/langchain_community/vectorstores/vdms.py @@ -232,8 +232,7 @@ def _embed_query(self, text: str) -> List[float]: return self.embedding.embed_query(text) else: raise ValueError( - "Must provide `embedding` which is expected" - " to be an Embeddings object" + "Must provide `embedding` which is expected to be an Embeddings object" ) def _select_relevance_score_fn(self) -> Callable[[float], float]: @@ -1099,7 +1098,7 @@ def max_marginal_relevance_search( """ if self.embedding is None: raise ValueError( - "For MMR search, you must specify an embedding function on" "creation." + "For MMR search, you must specify an embedding function on creation." ) # embedding_vector: List[float] = self._embed_query(query) @@ -1208,7 +1207,7 @@ def max_marginal_relevance_search_with_score( """ if self.embedding is None: raise ValueError( - "For MMR search, you must specify an embedding function on" "creation." + "For MMR search, you must specify an embedding function on creation." ) if not os.path.isfile(query) and hasattr(self.embedding, "embed_query"): diff --git a/libs/community/langchain_community/vectorstores/vespa.py b/libs/community/langchain_community/vectorstores/vespa.py index 6fe6585cd86bea..bc9f3736406c74 100644 --- a/libs/community/langchain_community/vectorstores/vespa.py +++ b/libs/community/langchain_community/vectorstores/vespa.py @@ -96,7 +96,7 @@ def add_texts( embeddings = self._embedding_function.embed_documents(list(texts)) if ids is None: - ids = [str(f"{i+1}") for i, _ in enumerate(texts)] + ids = [str(f"{i + 1}") for i, _ in enumerate(texts)] batch = [] for i, text in enumerate(texts): diff --git a/libs/community/langchain_community/vectorstores/vikingdb.py b/libs/community/langchain_community/vectorstores/vikingdb.py index 002db6485be2ba..2e0a9b0c57fc2e 100644 --- a/libs/community/langchain_community/vectorstores/vikingdb.py +++ b/libs/community/langchain_community/vectorstores/vikingdb.py @@ -131,8 +131,7 @@ def _create_collection( fields.append(Field(key, FieldType.Text)) else: raise ValueError( - "metadatas value is invalid" - "please change the type of metadatas."
+ "metadatas value is invalidplease change the type of metadatas." ) # fields.append(Field("text", FieldType.String)) fields.append(Field("text", FieldType.Text)) diff --git a/libs/community/poetry.lock b/libs/community/poetry.lock index f9573f4b69e972..0db0bd745f9819 100644 --- a/libs/community/poetry.lock +++ b/libs/community/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. [[package]] name = "aiohappyeyeballs" @@ -1883,7 +1883,7 @@ url = "../core" [[package]] name = "langchain-tests" -version = "0.3.7" +version = "0.3.8" description = "Standard tests for LangChain implementations" optional = false python-versions = ">=3.9,<4.0" @@ -1908,7 +1908,7 @@ url = "../standard-tests" [[package]] name = "langchain-text-splitters" -version = "0.3.4" +version = "0.3.5" description = "LangChain text splitting utilities" optional = false python-versions = ">=3.9,<4.0" @@ -1916,7 +1916,7 @@ files = [] develop = true [package.dependencies] -langchain-core = "^0.3.26" +langchain-core = "^0.3.29" [package.source] type = "directory" @@ -2618,30 +2618,41 @@ files = [ {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"}, {file = "pandas-2.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d9c45366def9a3dd85a6454c0e7908f2b3b8e9c138f5dc38fed7ce720d8453ed"}, {file = "pandas-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b8661b0238a69d7aafe156b7fa86c44b881387509653fdf857bebc5e4008ad42"}, {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37e0aced3e8f539eccf2e099f65cdb9c8aa85109b0be6e93e2baff94264bdc6f"}, {file = "pandas-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:56534ce0746a58afaf7942ba4863e0ef81c9c50d3f0ae93e9497d6a41a057645"}, {file = "pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039"}, {file = "pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698"}, {file = "pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3"}, {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32"}, {file = "pandas-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5"}, {file = "pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9"}, {file = "pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = 
"sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3"}, {file = "pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8"}, {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a"}, {file = "pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13"}, {file = "pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015"}, {file = "pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0"}, {file = "pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659"}, {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb"}, {file = "pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d"}, {file = "pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468"}, {file = "pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18"}, + {file = "pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2"}, {file = "pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d"}, {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a"}, {file = "pandas-2.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc6b93f9b966093cb0fd62ff1a7e4c09e6d546ad7c1de191767baffc57628f39"}, {file = "pandas-2.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5dbca4c1acd72e8eeef4753eeca07de9b1db4f398669d5994086f788a5d7cc30"}, + {file = "pandas-2.2.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8cd6d7cc958a3910f934ea8dbdf17b2364827bb4dafc38ce6eef6bb3d65ff09c"}, {file = "pandas-2.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99df71520d25fade9db7c1076ac94eb994f4d2673ef2aa2e86ee039b6746d20c"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:31d0ced62d4ea3e231a9f228366919a5ea0b07440d9d4dac345376fd8e1477ea"}, {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7eee9e7cea6adf3e3d24e304ac6b8300646e2a5d1cd3a3c2abed9101b0846761"}, {file = 
"pandas-2.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:4850ba03528b6dd51d6c5d273c46f183f39a9baf3f0143e566b89450965b105e"}, {file = "pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667"}, @@ -3784,29 +3795,29 @@ files = [ [[package]] name = "ruff" -version = "0.5.7" +version = "0.9.1" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.5.7-py3-none-linux_armv6l.whl", hash = "sha256:548992d342fc404ee2e15a242cdbea4f8e39a52f2e7752d0e4cbe88d2d2f416a"}, - {file = "ruff-0.5.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00cc8872331055ee017c4f1071a8a31ca0809ccc0657da1d154a1d2abac5c0be"}, - {file = "ruff-0.5.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:eaf3d86a1fdac1aec8a3417a63587d93f906c678bb9ed0b796da7b59c1114a1e"}, - {file = "ruff-0.5.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a01c34400097b06cf8a6e61b35d6d456d5bd1ae6961542de18ec81eaf33b4cb8"}, - {file = "ruff-0.5.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcc8054f1a717e2213500edaddcf1dbb0abad40d98e1bd9d0ad364f75c763eea"}, - {file = "ruff-0.5.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f70284e73f36558ef51602254451e50dd6cc479f8b6f8413a95fcb5db4a55fc"}, - {file = "ruff-0.5.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:a78ad870ae3c460394fc95437d43deb5c04b5c29297815a2a1de028903f19692"}, - {file = "ruff-0.5.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ccd078c66a8e419475174bfe60a69adb36ce04f8d4e91b006f1329d5cd44bcf"}, - {file = "ruff-0.5.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e31c9bad4ebf8fdb77b59cae75814440731060a09a0e0077d559a556453acbb"}, - {file = "ruff-0.5.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d796327eed8e168164346b769dd9a27a70e0298d667b4ecee6877ce8095ec8e"}, - {file = "ruff-0.5.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:4a09ea2c3f7778cc635e7f6edf57d566a8ee8f485f3c4454db7771efb692c499"}, - {file = "ruff-0.5.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a36d8dcf55b3a3bc353270d544fb170d75d2dff41eba5df57b4e0b67a95bb64e"}, - {file = "ruff-0.5.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9369c218f789eefbd1b8d82a8cf25017b523ac47d96b2f531eba73770971c9e5"}, - {file = "ruff-0.5.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b88ca3db7eb377eb24fb7c82840546fb7acef75af4a74bd36e9ceb37a890257e"}, - {file = "ruff-0.5.7-py3-none-win32.whl", hash = "sha256:33d61fc0e902198a3e55719f4be6b375b28f860b09c281e4bdbf783c0566576a"}, - {file = "ruff-0.5.7-py3-none-win_amd64.whl", hash = "sha256:083bbcbe6fadb93cd86709037acc510f86eed5a314203079df174c40bbbca6b3"}, - {file = "ruff-0.5.7-py3-none-win_arm64.whl", hash = "sha256:2dca26154ff9571995107221d0aeaad0e75a77b5a682d6236cf89a58c70b76f4"}, - {file = "ruff-0.5.7.tar.gz", hash = "sha256:8dfc0a458797f5d9fb622dd0efc52d796f23f0a1493a9527f4e49a550ae9a7e5"}, + {file = "ruff-0.9.1-py3-none-linux_armv6l.whl", hash = "sha256:84330dda7abcc270e6055551aca93fdde1b0685fc4fd358f26410f9349cf1743"}, + {file = "ruff-0.9.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:3cae39ba5d137054b0e5b472aee3b78a7c884e61591b100aeb544bcd1fc38d4f"}, + {file = "ruff-0.9.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:50c647ff96f4ba288db0ad87048257753733763b409b2faf2ea78b45c8bb7fcb"}, + {file = 
"ruff-0.9.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0c8b149e9c7353cace7d698e1656ffcf1e36e50f8ea3b5d5f7f87ff9986a7ca"}, + {file = "ruff-0.9.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:beb3298604540c884d8b282fe7625651378e1986c25df51dec5b2f60cafc31ce"}, + {file = "ruff-0.9.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:39d0174ccc45c439093971cc06ed3ac4dc545f5e8bdacf9f067adf879544d969"}, + {file = "ruff-0.9.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:69572926c0f0c9912288915214ca9b2809525ea263603370b9e00bed2ba56dbd"}, + {file = "ruff-0.9.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:937267afce0c9170d6d29f01fcd1f4378172dec6760a9f4dface48cdabf9610a"}, + {file = "ruff-0.9.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:186c2313de946f2c22bdf5954b8dd083e124bcfb685732cfb0beae0c47233d9b"}, + {file = "ruff-0.9.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f94942a3bb767675d9a051867c036655fe9f6c8a491539156a6f7e6b5f31831"}, + {file = "ruff-0.9.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:728d791b769cc28c05f12c280f99e8896932e9833fef1dd8756a6af2261fd1ab"}, + {file = "ruff-0.9.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:2f312c86fb40c5c02b44a29a750ee3b21002bd813b5233facdaf63a51d9a85e1"}, + {file = "ruff-0.9.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:ae017c3a29bee341ba584f3823f805abbe5fe9cd97f87ed07ecbf533c4c88366"}, + {file = "ruff-0.9.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5dc40a378a0e21b4cfe2b8a0f1812a6572fc7b230ef12cd9fac9161aa91d807f"}, + {file = "ruff-0.9.1-py3-none-win32.whl", hash = "sha256:46ebf5cc106cf7e7378ca3c28ce4293b61b449cd121b98699be727d40b79ba72"}, + {file = "ruff-0.9.1-py3-none-win_amd64.whl", hash = "sha256:342a824b46ddbcdddd3abfbb332fa7fcaac5488bf18073e841236aadf4ad5c19"}, + {file = "ruff-0.9.1-py3-none-win_arm64.whl", hash = "sha256:1cd76c7f9c679e6e8f2af8f778367dca82b95009bc7b1a85a47f1521ae524fa7"}, + {file = "ruff-0.9.1.tar.gz", hash = "sha256:fd2b25ecaf907d6458fa842675382c8597b3c746a2dde6717fe3415425df0c17"}, ] [[package]] @@ -4704,4 +4715,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4.0" -content-hash = "285e19251cdc78fc8dbde4ccf887f0ca35a0800e757223bb4abefa471a04a33a" +content-hash = "d4510bf9b5219bf4f11cee251e0856a228dcefa1a9e3cc90452fb36d0ef7bd29" diff --git a/libs/community/pyproject.toml b/libs/community/pyproject.toml index dde6e9a1bbb890..343fccbf79109f 100644 --- a/libs/community/pyproject.toml +++ b/libs/community/pyproject.toml @@ -12,6 +12,7 @@ readme = "README.md" repository = "https://github.com/langchain-ai/langchain" [tool.ruff] +target-version = "py39" exclude = [ "tests/examples/non-utf8-encoding.py", "tests/integration_tests/examples/non-utf8-encoding.py", @@ -119,7 +120,7 @@ pytest-vcr = "^1.0.2" vcrpy = "^6" [tool.poetry.group.lint.dependencies] -ruff = "^0.5" +ruff = "~0.9.1" [[tool.poetry.group.lint.dependencies.cffi]] version = "<1.17.1" python = "<3.10" diff --git a/libs/community/tests/integration_tests/callbacks/test_langchain_tracer.py b/libs/community/tests/integration_tests/callbacks/test_langchain_tracer.py index 51b2552139be4f..a9e128a7dde0a3 100644 --- a/libs/community/tests/integration_tests/callbacks/test_langchain_tracer.py +++ b/libs/community/tests/integration_tests/callbacks/test_langchain_tracer.py @@ -28,7 +28,7 @@ "Who won the US Open women's final in 2019? 
" "What is her age raised to the 0.34 power?" ), - ("Who is Beyonce's husband? " "What is his age raised to the 0.19 power?"), + ("Who is Beyonce's husband? What is his age raised to the 0.19 power?"), ] diff --git a/libs/community/tests/integration_tests/callbacks/test_wandb_tracer.py b/libs/community/tests/integration_tests/callbacks/test_wandb_tracer.py index 45c5287b8ec5bc..15152bf075b14c 100644 --- a/libs/community/tests/integration_tests/callbacks/test_wandb_tracer.py +++ b/libs/community/tests/integration_tests/callbacks/test_wandb_tracer.py @@ -25,7 +25,7 @@ "Who won the US Open women's final in 2019? " "What is her age raised to the 0.34 power?" ), - ("Who is Beyonce's husband? " "What is his age raised to the 0.19 power?"), + ("Who is Beyonce's husband? What is his age raised to the 0.19 power?"), ] diff --git a/libs/community/tests/integration_tests/chat_models/test_outlines.py b/libs/community/tests/integration_tests/chat_models/test_outlines.py index 8f293e6532c512..b0c22981ff53b4 100644 --- a/libs/community/tests/integration_tests/chat_models/test_outlines.py +++ b/libs/community/tests/integration_tests/chat_models/test_outlines.py @@ -80,9 +80,9 @@ def test_chat_outlines_regex(chat_model: ChatOutlines) -> None: output = chat_model.invoke(messages) assert isinstance(output, AIMessage) - assert re.match( - ip_regex, str(output.content) - ), f"Generated output '{output.content}' is not a valid IP address" + assert re.match(ip_regex, str(output.content)), ( + f"Generated output '{output.content}' is not a valid IP address" + ) def test_chat_outlines_type_constraints(chat_model: ChatOutlines) -> None: @@ -129,14 +129,14 @@ def test_chat_outlines_grammar(chat_model: ChatOutlines) -> None: output = chat_model.invoke(messages) # Validate the output is a non-empty string - assert ( - isinstance(output.content, str) and output.content.strip() - ), "Output should be a non-empty string" + assert isinstance(output.content, str) and output.content.strip(), ( + "Output should be a non-empty string" + ) # Use a simple regex to check if the output contains basic arithmetic operations and numbers - assert re.search( - r"[\d\+\-\*/\(\)]+", output.content - ), f"Generated output '{output.content}' does not appear to be a valid arithmetic expression" + assert re.search(r"[\d\+\-\*/\(\)]+", output.content), ( + f"Generated output '{output.content}' does not appear to be a valid arithmetic expression" + ) def test_chat_outlines_with_structured_output(chat_model: ChatOutlines) -> None: diff --git a/libs/community/tests/integration_tests/chat_models/test_vertexai.py b/libs/community/tests/integration_tests/chat_models/test_vertexai.py index 5a10353bc2f3b1..37c1a8ecfde4ad 100644 --- a/libs/community/tests/integration_tests/chat_models/test_vertexai.py +++ b/libs/community/tests/integration_tests/chat_models/test_vertexai.py @@ -112,8 +112,7 @@ def test_vertexai_single_call_with_context() -> None: def test_multimodal() -> None: llm = ChatVertexAI(model_name="gemini-ultra-vision") gcs_url = ( - "gs://cloud-samples-data/generative-ai/image/" - "320px-Felis_catus-cat_on_snow.jpg" + "gs://cloud-samples-data/generative-ai/image/320px-Felis_catus-cat_on_snow.jpg" ) image_message = { "type": "image_url", @@ -131,8 +130,7 @@ def test_multimodal() -> None: def test_multimodal_history() -> None: llm = ChatVertexAI(model_name="gemini-ultra-vision") gcs_url = ( - "gs://cloud-samples-data/generative-ai/image/" - "320px-Felis_catus-cat_on_snow.jpg" + 
"gs://cloud-samples-data/generative-ai/image/320px-Felis_catus-cat_on_snow.jpg" ) image_message = { "type": "image_url", diff --git a/libs/community/tests/integration_tests/graphs/test_neo4j.py b/libs/community/tests/integration_tests/graphs/test_neo4j.py index 761cbc95e4b8ee..578875a81139b0 100644 --- a/libs/community/tests/integration_tests/graphs/test_neo4j.py +++ b/libs/community/tests/integration_tests/graphs/test_neo4j.py @@ -367,7 +367,7 @@ def test_enhanced_schema_exception() -> None: url=url, username=username, password=password, enhanced_schema=True ) graph.query("MATCH (n) DETACH DELETE n") - graph.query("CREATE (:Node {foo:'bar'})," "(:Node {foo: 1}), (:Node {foo: [1,2]})") + graph.query("CREATE (:Node {foo:'bar'}),(:Node {foo: 1}), (:Node {foo: [1,2]})") graph.refresh_schema() expected_output = { "node_props": {"Node": [{"property": "foo", "type": "STRING"}]}, diff --git a/libs/community/tests/integration_tests/llms/test_outlines.py b/libs/community/tests/integration_tests/llms/test_outlines.py index db0e043723442c..42828db7142cb1 100644 --- a/libs/community/tests/integration_tests/llms/test_outlines.py +++ b/libs/community/tests/integration_tests/llms/test_outlines.py @@ -72,9 +72,9 @@ def test_outlines_regex(llm: Outlines) -> None: assert isinstance(output, str) - assert re.match( - ip_regex, output - ), f"Generated output '{output}' is not a valid IP address" + assert re.match(ip_regex, output), ( + f"Generated output '{output}' is not a valid IP address" + ) def test_outlines_type_constraints(llm: Outlines) -> None: @@ -113,11 +113,11 @@ def test_outlines_grammar(llm: Outlines) -> None: output = llm.invoke("Here is a complex arithmetic expression: ") # Validate the output is a non-empty string - assert ( - isinstance(output, str) and output.strip() - ), "Output should be a non-empty string" + assert isinstance(output, str) and output.strip(), ( + "Output should be a non-empty string" + ) # Use a simple regex to check if the output contains basic arithmetic operations and numbers - assert re.search( - r"[\d\+\-\*/\(\)]+", output - ), f"Generated output '{output}' does not appear to be a valid arithmetic expression" + assert re.search(r"[\d\+\-\*/\(\)]+", output), ( + f"Generated output '{output}' does not appear to be a valid arithmetic expression" + ) diff --git a/libs/community/tests/integration_tests/retrievers/docarray/fixtures.py b/libs/community/tests/integration_tests/retrievers/docarray/fixtures.py index ea3a5d4815a3c4..4bfa7b7937fb28 100644 --- a/libs/community/tests/integration_tests/retrievers/docarray/fixtures.py +++ b/libs/community/tests/integration_tests/retrievers/docarray/fixtures.py @@ -22,13 +22,11 @@ @pytest.fixture -def init_weaviate() -> ( - Generator[ - Tuple[WeaviateDocumentIndex, Dict[str, Any], FakeEmbeddings], - None, - None, - ] -): +def init_weaviate() -> Generator[ + Tuple[WeaviateDocumentIndex, Dict[str, Any], FakeEmbeddings], + None, + None, +]: """ cd tests/integration_tests/vectorstores/docker-compose docker compose -f weaviate.yml up @@ -75,9 +73,9 @@ class WeaviateDoc(BaseDoc): @pytest.fixture -def init_elastic() -> ( - Generator[Tuple[ElasticDocIndex, Dict[str, Any], FakeEmbeddings], None, None] -): +def init_elastic() -> Generator[ + Tuple[ElasticDocIndex, Dict[str, Any], FakeEmbeddings], None, None +]: """ cd tests/integration_tests/vectorstores/docker-compose docker-compose -f elasticsearch.yml up diff --git a/libs/community/tests/integration_tests/retrievers/test_dria_index.py 
b/libs/community/tests/integration_tests/retrievers/test_dria_index.py index f3e3423bec5222..50104b65b3f667 100644 --- a/libs/community/tests/integration_tests/retrievers/test_dria_index.py +++ b/libs/community/tests/integration_tests/retrievers/test_dria_index.py @@ -34,8 +34,8 @@ def test_dria_retriever(dria_retriever: DriaRetriever) -> None: doc = docs[0] assert isinstance(doc, Document), "Expected a Document instance" assert isinstance(doc.page_content, str), ( - "Expected document content type " "to be string" + "Expected document content type to be string" + ) + assert isinstance(doc.metadata, dict), ( + "Expected document metadata content to be a dictionary" ) - assert isinstance( - doc.metadata, dict - ), "Expected document metadata content to be a dictionary" diff --git a/libs/community/tests/integration_tests/retrievers/test_kay.py b/libs/community/tests/integration_tests/retrievers/test_kay.py index ac6202d89c77db..4dc142769e9863 100644 --- a/libs/community/tests/integration_tests/retrievers/test_kay.py +++ b/libs/community/tests/integration_tests/retrievers/test_kay.py @@ -14,8 +14,7 @@ def test_kay_retriever() -> None: num_contexts=3, ) docs = retriever.invoke( - "What were the biggest strategy changes and partnerships made by Roku " - "in 2023?", + "What were the biggest strategy changes and partnerships made by Roku in 2023?", ) assert len(docs) == 3 for doc in docs: diff --git a/libs/community/tests/integration_tests/tools/zenguard/test_zenguard.py b/libs/community/tests/integration_tests/tools/zenguard/test_zenguard.py index 238ff93b0bb4d3..c68bc34436f9bc 100644 --- a/libs/community/tests/integration_tests/tools/zenguard/test_zenguard.py +++ b/libs/community/tests/integration_tests/tools/zenguard/test_zenguard.py @@ -32,12 +32,12 @@ def assert_detectors_response( if resp["detector"] == detector.value ) ) - assert ( - "err" not in common_response - ), f"API returned an error: {common_response.get('err')}" # noqa: E501 - assert ( - common_response.get("is_detected") is False - ), f"Prompt was detected: {common_response}" # noqa: E501 + assert "err" not in common_response, ( + f"API returned an error: {common_response.get('err')}" + ) # noqa: E501 + assert common_response.get("is_detected") is False, ( + f"Prompt was detected: {common_response}" + ) # noqa: E501 def test_prompt_injection(zenguard_tool: ZenGuardTool) -> None: diff --git a/libs/community/tests/integration_tests/utilities/test_arxiv.py b/libs/community/tests/integration_tests/utilities/test_arxiv.py index e4fa9e4aa567ad..787724bec0c19c 100644 --- a/libs/community/tests/integration_tests/utilities/test_arxiv.py +++ b/libs/community/tests/integration_tests/utilities/test_arxiv.py @@ -152,9 +152,9 @@ def _load_arxiv_from_universal_entry(**kwargs: Any) -> BaseTool: def test_load_arxiv_from_universal_entry() -> None: arxiv_tool = _load_arxiv_from_universal_entry() output = arxiv_tool.invoke("Caprice Stanley") - assert ( - "On Mixing Behavior of a Family of Random Walks" in output - ), "failed to fetch a valid result" + assert "On Mixing Behavior of a Family of Random Walks" in output, ( + "failed to fetch a valid result" + ) def test_load_arxiv_from_universal_entry_with_params() -> None: @@ -168,6 +168,6 @@ def test_load_arxiv_from_universal_entry_with_params() -> None: wp = arxiv_tool.api_wrapper assert wp.top_k_results == 1, "failed to assert top_k_results" assert wp.load_max_docs == 10, "failed to assert load_max_docs" - assert ( - wp.load_all_available_meta is True - ), "failed to assert load_all_available_meta" 
+ assert wp.load_all_available_meta is True, ( + "failed to assert load_all_available_meta" + ) diff --git a/libs/community/tests/integration_tests/vectorstores/test_azure_cosmos_db_no_sql.py b/libs/community/tests/integration_tests/vectorstores/test_azure_cosmos_db_no_sql.py index bbaca0775be7c0..784465d4639b30 100644 --- a/libs/community/tests/integration_tests/vectorstores/test_azure_cosmos_db_no_sql.py +++ b/libs/community/tests/integration_tests/vectorstores/test_azure_cosmos_db_no_sql.py @@ -418,8 +418,7 @@ def _get_texts_and_metadata(self) -> Tuple[List[str], List[Dict[str, Any]]]: "energetic herders skilled in outdoor activities.", "Golden Retrievers are friendly, " "loyal companions with excellent retrieving skills.", - "Labrador Retrievers are playful, " - "eager learners and skilled retrievers.", + "Labrador Retrievers are playful, eager learners and skilled retrievers.", "Australian Shepherds are agile, " "energetic herders excelling in outdoor tasks.", "German Shepherds are brave, " diff --git a/libs/community/tests/integration_tests/vectorstores/test_duckdb.py b/libs/community/tests/integration_tests/vectorstores/test_duckdb.py index b724dcf0542d90..25ef054812eb60 100644 --- a/libs/community/tests/integration_tests/vectorstores/test_duckdb.py +++ b/libs/community/tests/integration_tests/vectorstores/test_duckdb.py @@ -93,18 +93,18 @@ def test_duckdb_add_texts_with_metadata( # Check if the metadata is correctly associated with the texts assert len(result) == 2, "Should return two results" - assert ( - result[0].metadata.get("author") == "Author 1" - ), "Metadata for Author 1 should be correctly retrieved" - assert ( - result[0].metadata.get("date") == "2021-01-01" - ), "Date for Author 1 should be correctly retrieved" - assert ( - result[1].metadata.get("author") == "Author 2" - ), "Metadata for Author 2 should be correctly retrieved" - assert ( - result[1].metadata.get("date") == "2021-02-01" - ), "Date for Author 2 should be correctly retrieved" + assert result[0].metadata.get("author") == "Author 1", ( + "Metadata for Author 1 should be correctly retrieved" + ) + assert result[0].metadata.get("date") == "2021-01-01", ( + "Date for Author 1 should be correctly retrieved" + ) + assert result[1].metadata.get("author") == "Author 2", ( + "Metadata for Author 2 should be correctly retrieved" + ) + assert result[1].metadata.get("date") == "2021-02-01", ( + "Date for Author 2 should be correctly retrieved" + ) @pytest.mark.requires("duckdb") @@ -127,9 +127,9 @@ def test_duckdb_add_texts_with_predefined_ids( result = store.similarity_search(text) found_texts = [doc.page_content for doc in result] - assert ( - text in found_texts - ), f"Text '{text}' was not found in the search results." + assert text in found_texts, ( + f"Text '{text}' was not found in the search results." + ) @pytest.mark.requires("duckdb") diff --git a/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py b/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py index 2842938986c3d8..ecde8eb54748a8 100644 --- a/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py +++ b/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py @@ -896,9 +896,9 @@ def test_elasticsearch_with_user_agent( pattern = r"^langchain-py-vs/\d+\.\d+\.\d+$" match = re.match(pattern, user_agent) - assert ( - match is not None - ), f"The string '{user_agent}' does not match the expected pattern." 
diff --git a/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py b/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py
index 2842938986c3d8..ecde8eb54748a8 100644
--- a/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py
+++ b/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py
@@ -896,9 +896,9 @@ def test_elasticsearch_with_user_agent(
         pattern = r"^langchain-py-vs/\d+\.\d+\.\d+$"
         match = re.match(pattern, user_agent)
-        assert (
-            match is not None
-        ), f"The string '{user_agent}' does not match the expected pattern."
+        assert match is not None, (
+            f"The string '{user_agent}' does not match the expected pattern."
+        )

     def test_elasticsearch_with_internal_user_agent(
         self, elasticsearch_connection: Dict, index_name: str
@@ -917,9 +917,9 @@ def test_elasticsearch_with_internal_user_agent(
         pattern = r"^langchain-py-vs/\d+\.\d+\.\d+$"
         match = re.match(pattern, user_agent)
-        assert (
-            match is not None
-        ), f"The string '{user_agent}' does not match the expected pattern."
+        assert match is not None, (
+            f"The string '{user_agent}' does not match the expected pattern."
+        )

     def test_bulk_args(self, es_client: Any, index_name: str) -> None:
         """Test to make sure the user-agent is set correctly."""
diff --git a/libs/community/tests/integration_tests/vectorstores/test_hanavector.py b/libs/community/tests/integration_tests/vectorstores/test_hanavector.py
index e1ffcd7af0eac2..370fe9b43bec3f 100644
--- a/libs/community/tests/integration_tests/vectorstores/test_hanavector.py
+++ b/libs/community/tests/integration_tests/vectorstores/test_hanavector.py
@@ -1133,7 +1133,7 @@ def test_preexisting_specific_columns_for_metadata_fill(
     c = 0
     try:
-        sql_str = f'SELECT COUNT(*) FROM {table_name} WHERE "quality"=' f"'ugly'"
+        sql_str = f"SELECT COUNT(*) FROM {table_name} WHERE \"quality\"='ugly'"
         cur = test_setup.conn.cursor()
         cur.execute(sql_str)
         if cur.has_result_set():
@@ -1195,7 +1195,7 @@ def test_preexisting_specific_columns_for_metadata_via_array(
     c = 0
     try:
-        sql_str = f'SELECT COUNT(*) FROM {table_name} WHERE "quality"=' f"'ugly'"
+        sql_str = f"SELECT COUNT(*) FROM {table_name} WHERE \"quality\"='ugly'"
         cur = test_setup.conn.cursor()
         cur.execute(sql_str)
         if cur.has_result_set():
@@ -1206,7 +1206,7 @@ def test_preexisting_specific_columns_for_metadata_via_array(
     assert c == 3
     try:
-        sql_str = f'SELECT COUNT(*) FROM {table_name} WHERE "Owner"=' f"'Steve'"
+        sql_str = f"SELECT COUNT(*) FROM {table_name} WHERE \"Owner\"='Steve'"
         cur = test_setup.conn.cursor()
         cur.execute(sql_str)
         if cur.has_result_set():
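# --- Editor's aside (not part of the patch): the hanavector hunks swap a pair
# of split f-strings for a single f-string with escaped double quotes. Both
# spell exactly the same SQL text; a sketch with an assumed placeholder table
# name (hypothetical, for illustration only):
table_name = "EMBEDDINGS_TEST"
old_sql = f'SELECT COUNT(*) FROM {table_name} WHERE "quality"=' f"'ugly'"
new_sql = f"SELECT COUNT(*) FROM {table_name} WHERE \"quality\"='ugly'"
# both evaluate to: SELECT COUNT(*) FROM EMBEDDINGS_TEST WHERE "quality"='ugly'
assert old_sql == new_sql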
graph.query("MATCH (n) DETACH DELETE n") - graph.query("CREATE (:Test {name:'Foo'})," "(:Test {name:'Bar'})") + graph.query("CREATE (:Test {name:'Foo'}),(:Test {name:'Bar'})") existing = Neo4jVector.from_existing_graph( embedding=FakeEmbeddingsWithOsDimension(), @@ -558,7 +558,7 @@ def test_neo4jvector_from_existing_graph_hybrid() -> None: graph.query("MATCH (n) DETACH DELETE n") - graph.query("CREATE (:Test {name:'foo'})," "(:Test {name:'Bar'})") + graph.query("CREATE (:Test {name:'foo'}),(:Test {name:'Bar'})") existing = Neo4jVector.from_existing_graph( embedding=FakeEmbeddingsWithOsDimension(), @@ -594,7 +594,7 @@ def test_neo4jvector_from_existing_graph_multiple_properties() -> None: ) graph.query("MATCH (n) DETACH DELETE n") - graph.query("CREATE (:Test {name:'Foo', name2: 'Fooz'})," "(:Test {name:'Bar'})") + graph.query("CREATE (:Test {name:'Foo', name2: 'Fooz'}),(:Test {name:'Bar'})") existing = Neo4jVector.from_existing_graph( embedding=FakeEmbeddingsWithOsDimension(), @@ -629,7 +629,7 @@ def test_neo4jvector_from_existing_graph_multiple_properties_hybrid() -> None: ) graph.query("MATCH (n) DETACH DELETE n") - graph.query("CREATE (:Test {name:'Foo', name2: 'Fooz'})," "(:Test {name:'Bar'})") + graph.query("CREATE (:Test {name:'Foo', name2: 'Fooz'}),(:Test {name:'Bar'})") existing = Neo4jVector.from_existing_graph( embedding=FakeEmbeddingsWithOsDimension(), diff --git a/libs/community/tests/unit_tests/chat_loaders/test_imessage.py b/libs/community/tests/unit_tests/chat_loaders/test_imessage.py index 4f6bc171730023..2711f64343aed8 100644 --- a/libs/community/tests/unit_tests/chat_loaders/test_imessage.py +++ b/libs/community/tests/unit_tests/chat_loaders/test_imessage.py @@ -23,9 +23,9 @@ def test_imessage_chat_loader_upgrade_osx11() -> None: # time parsed correctly expected_message_time = 720845450393148160 - assert ( - first_message.additional_kwargs["message_time"] == expected_message_time - ), "unexpected time" + assert first_message.additional_kwargs["message_time"] == expected_message_time, ( + "unexpected time" + ) expected_parsed_time = datetime.datetime(2023, 11, 5, 2, 50, 50, 393148) assert ( @@ -34,9 +34,9 @@ def test_imessage_chat_loader_upgrade_osx11() -> None: ), "date failed to parse" # is_from_me parsed correctly - assert ( - first_message.additional_kwargs["is_from_me"] is False - ), "is_from_me failed to parse" + assert first_message.additional_kwargs["is_from_me"] is False, ( + "is_from_me failed to parse" + ) def test_imessage_chat_loader() -> None: @@ -57,9 +57,9 @@ def test_imessage_chat_loader() -> None: # time parsed correctly expected_message_time = 720845450393148160 - assert ( - first_message.additional_kwargs["message_time"] == expected_message_time - ), "unexpected time" + assert first_message.additional_kwargs["message_time"] == expected_message_time, ( + "unexpected time" + ) expected_parsed_time = datetime.datetime(2023, 11, 5, 2, 50, 50, 393148) assert ( @@ -68,14 +68,14 @@ def test_imessage_chat_loader() -> None: ), "date failed to parse" # is_from_me parsed correctly - assert ( - first_message.additional_kwargs["is_from_me"] is False - ), "is_from_me failed to parse" + assert first_message.additional_kwargs["is_from_me"] is False, ( + "is_from_me failed to parse" + ) # short message content in attributedBody field - assert ( - "John is the almighty" in chat_sessions[0]["messages"][16].content - ), "Chat content mismatch" + assert "John is the almighty" in chat_sessions[0]["messages"][16].content, ( + "Chat content mismatch" + ) # long message content 
diff --git a/libs/community/tests/unit_tests/chat_loaders/test_imessage.py b/libs/community/tests/unit_tests/chat_loaders/test_imessage.py
index 4f6bc171730023..2711f64343aed8 100644
--- a/libs/community/tests/unit_tests/chat_loaders/test_imessage.py
+++ b/libs/community/tests/unit_tests/chat_loaders/test_imessage.py
@@ -23,9 +23,9 @@ def test_imessage_chat_loader_upgrade_osx11() -> None:
     # time parsed correctly
     expected_message_time = 720845450393148160
-    assert (
-        first_message.additional_kwargs["message_time"] == expected_message_time
-    ), "unexpected time"
+    assert first_message.additional_kwargs["message_time"] == expected_message_time, (
+        "unexpected time"
+    )

     expected_parsed_time = datetime.datetime(2023, 11, 5, 2, 50, 50, 393148)
     assert (
@@ -34,9 +34,9 @@ def test_imessage_chat_loader_upgrade_osx11() -> None:
     ), "date failed to parse"

     # is_from_me parsed correctly
-    assert (
-        first_message.additional_kwargs["is_from_me"] is False
-    ), "is_from_me failed to parse"
+    assert first_message.additional_kwargs["is_from_me"] is False, (
+        "is_from_me failed to parse"
+    )


 def test_imessage_chat_loader() -> None:
@@ -57,9 +57,9 @@ def test_imessage_chat_loader() -> None:
     # time parsed correctly
     expected_message_time = 720845450393148160
-    assert (
-        first_message.additional_kwargs["message_time"] == expected_message_time
-    ), "unexpected time"
+    assert first_message.additional_kwargs["message_time"] == expected_message_time, (
+        "unexpected time"
+    )

     expected_parsed_time = datetime.datetime(2023, 11, 5, 2, 50, 50, 393148)
     assert (
@@ -68,14 +68,14 @@ def test_imessage_chat_loader() -> None:
     ), "date failed to parse"

     # is_from_me parsed correctly
-    assert (
-        first_message.additional_kwargs["is_from_me"] is False
-    ), "is_from_me failed to parse"
+    assert first_message.additional_kwargs["is_from_me"] is False, (
+        "is_from_me failed to parse"
+    )

     # short message content in attributedBody field
-    assert (
-        "John is the almighty" in chat_sessions[0]["messages"][16].content
-    ), "Chat content mismatch"
+    assert "John is the almighty" in chat_sessions[0]["messages"][16].content, (
+        "Chat content mismatch"
+    )

     # long message content in attributedBody field
     long_msg = "aaaaabbbbbaaaaabbbbbaaaaabbbbbaaaaabbbbbaaaaabbbbbaaaaabbbbbaaaaabbbbba"
diff --git a/libs/community/tests/unit_tests/chat_loaders/test_slack.py b/libs/community/tests/unit_tests/chat_loaders/test_slack.py
index 0ab08b679bf88c..62ef8f9085a2f3 100644
--- a/libs/community/tests/unit_tests/chat_loaders/test_slack.py
+++ b/libs/community/tests/unit_tests/chat_loaders/test_slack.py
@@ -14,6 +14,6 @@ def test_slack_chat_loader() -> None:
     assert chat_sessions[1]["messages"], "Chat messages should not be empty"

-    assert (
-        "Example message" in chat_sessions[1]["messages"][0].content
-    ), "Chat content mismatch"
+    assert "Example message" in chat_sessions[1]["messages"][0].content, (
+        "Chat content mismatch"
+    )
diff --git a/libs/community/tests/unit_tests/document_loaders/parsers/language/test_python.py b/libs/community/tests/unit_tests/document_loaders/parsers/language/test_python.py
index c14154a2842ded..d7142b1091ff5d 100644
--- a/libs/community/tests/unit_tests/document_loaders/parsers/language/test_python.py
+++ b/libs/community/tests/unit_tests/document_loaders/parsers/language/test_python.py
@@ -25,8 +25,8 @@ def __init__(self):
 hello("Hello!")"""

         self.expected_extracted_code = [
-            "def hello(text):\n" "    print(text)",
-            "class Simple:\n" "    def __init__(self):\n" "        self.a = 1",
+            "def hello(text):\n    print(text)",
+            "class Simple:\n    def __init__(self):\n        self.a = 1",
         ]

     def test_extract_functions_classes(self) -> None:
diff --git a/libs/community/tests/unit_tests/document_loaders/test_csv_loader.py b/libs/community/tests/unit_tests/document_loaders/test_csv_loader.py
index 75a3b08bf79d96..9fd8192335d729 100644
--- a/libs/community/tests/unit_tests/document_loaders/test_csv_loader.py
+++ b/libs/community/tests/unit_tests/document_loaders/test_csv_loader.py
@@ -113,11 +113,11 @@ def test_csv_loader_content_columns(self) -> None:
         file_path = self._get_csv_file_path("test_none_col.csv")
         expected_docs = [
             Document(
-                page_content="column1: value1\n" "column3: value3",
+                page_content="column1: value1\ncolumn3: value3",
                 metadata={"source": file_path, "row": 0},
             ),
             Document(
-                page_content="column1: value6\n" "column3: value8",
+                page_content="column1: value6\ncolumn3: value8",
                 metadata={"source": file_path, "row": 1},
             ),
         ]
diff --git a/libs/community/tests/unit_tests/document_loaders/test_mongodb.py b/libs/community/tests/unit_tests/document_loaders/test_mongodb.py
index 72ed08905f745b..75ae18e6b773e2 100644
--- a/libs/community/tests/unit_tests/document_loaders/test_mongodb.py
+++ b/libs/community/tests/unit_tests/document_loaders/test_mongodb.py
@@ -50,11 +50,12 @@ async def test_load_mocked_with_filters(expected_documents: List[Document]) -> N
     mock_collection.find = mock_find
     mock_collection.count_documents = mock_count_documents

-    with patch(
-        "motor.motor_asyncio.AsyncIOMotorClient", return_value=MagicMock()
-    ), patch(
-        "langchain_community.document_loaders.mongodb.MongodbLoader.aload",
-        new=mock_async_load,
+    with (
+        patch("motor.motor_asyncio.AsyncIOMotorClient", return_value=MagicMock()),
+        patch(
+            "langchain_community.document_loaders.mongodb.MongodbLoader.aload",
+            new=mock_async_load,
+        ),
     ):
         loader = MongodbLoader(
             "mongodb://localhost:27017",
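# --- Editor's aside (not part of the patch): the mongodb hunk above rewrites
# a chain of context managers as a parenthesized `with` group, which is
# official grammar from Python 3.10 on. A minimal runnable sketch using only
# the standard library (the patch targets here are illustrative, not from the
# diff):
from unittest.mock import MagicMock, patch

with (
    patch("os.getcwd", return_value="/tmp"),  # one context manager per line
    patch("os.cpu_count", new=MagicMock(return_value=1)),
):
    import os

    assert os.getcwd() == "/tmp"
    assert os.cpu_count() == 1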
diff --git a/libs/community/tests/unit_tests/document_transformers/test_html2text_transformer.py b/libs/community/tests/unit_tests/document_transformers/test_html2text_transformer.py
index f66fee6c0bd4c8..3eea5c48e70552 100644
--- a/libs/community/tests/unit_tests/document_transformers/test_html2text_transformer.py
+++ b/libs/community/tests/unit_tests/document_transformers/test_html2text_transformer.py
@@ -25,10 +25,7 @@ def test_extract_paragraphs() -> None:
     documents = [Document(page_content=paragraphs_html)]
     docs_transformed = html2text_transformer.transform_documents(documents)
     assert docs_transformed[0].page_content == (
-        "# Header\n\n"
-        "First paragraph.\n\n"
-        "Second paragraph.\n\n"
-        "# Ignore at end\n\n"
+        "# Header\n\nFirst paragraph.\n\nSecond paragraph.\n\n# Ignore at end\n\n"
     )


@@ -78,14 +75,13 @@ def test_ignore_links() -> None:
     docs_transformed = html2text_transformer.transform_documents(documents)

     assert docs_transformed[0].page_content == (
-        "# First heading.\n\n"
-        "First paragraph with an [example](http://example.com)\n\n"
+        "# First heading.\n\nFirst paragraph with an [example](http://example.com)\n\n"
     )

     html2text_transformer = Html2TextTransformer(ignore_links=True)
     docs_transformed = html2text_transformer.transform_documents(documents)

     assert docs_transformed[0].page_content == (
-        "# First heading.\n\n" "First paragraph with an example\n\n"
+        "# First heading.\n\nFirst paragraph with an example\n\n"
     )


@@ -101,12 +97,11 @@ def test_ignore_images() -> None:
     docs_transformed = html2text_transformer.transform_documents(documents)

     assert docs_transformed[0].page_content == (
-        "# First heading.\n\n"
-        "First paragraph with an ![Example image](example.jpg)\n\n"
+        "# First heading.\n\nFirst paragraph with an ![Example image](example.jpg)\n\n"
     )

     html2text_transformer = Html2TextTransformer(ignore_images=True)
     docs_transformed = html2text_transformer.transform_documents(documents)

     assert docs_transformed[0].page_content == (
-        "# First heading.\n\n" "First paragraph with an\n\n"
+        "# First heading.\n\nFirst paragraph with an\n\n"
     )
diff --git a/libs/community/tests/unit_tests/document_transformers/test_markdownify.py b/libs/community/tests/unit_tests/document_transformers/test_markdownify.py
index 1ce407289dd4a2..4b10d94371c131 100644
--- a/libs/community/tests/unit_tests/document_transformers/test_markdownify.py
+++ b/libs/community/tests/unit_tests/document_transformers/test_markdownify.py
@@ -25,7 +25,7 @@ def test_extract_paragraphs() -> None:
     documents = [Document(page_content=paragraphs_html)]
     docs_transformed = markdownify.transform_documents(documents)
     assert docs_transformed[0].page_content == (
-        "# Header\n\n" "First paragraph.\n\n" "Second paragraph.\n\n" "# Ignore at end"
+        "# Header\n\nFirst paragraph.\n\nSecond paragraph.\n\n# Ignore at end"
     )


@@ -115,10 +115,7 @@ def test_convert_tags() -> None:
     documents = [Document(page_content=paragraphs_html)]
     docs_transformed = markdownify.transform_documents(documents)
     assert docs_transformed[0].page_content == (
-        "Header "
-        "1st paragraph.\n\n "
-        "2nd paragraph. Here is link\n\n "
-        "Ignore at end"
+        "Header 1st paragraph.\n\n 2nd paragraph. Here is link\n\n Ignore at end"
     )


@@ -161,7 +158,7 @@ async def test_extract_paragraphs_async() -> None:
     documents = [Document(page_content=paragraphs_html)]
     docs_transformed = await markdownify.atransform_documents(documents)
     assert docs_transformed[0].page_content == (
-        "# Header\n\n" "First paragraph.\n\n" "Second paragraph.\n\n" "# Ignore at end"
+        "# Header\n\nFirst paragraph.\n\nSecond paragraph.\n\n# Ignore at end"
     )


@@ -251,10 +248,7 @@ async def test_convert_tags_async() -> None:
     documents = [Document(page_content=paragraphs_html)]
     docs_transformed = await markdownify.atransform_documents(documents)
     assert docs_transformed[0].page_content == (
-        "Header "
-        "1st paragraph.\n\n "
-        "2nd paragraph. Here is link\n\n "
-        "Ignore at end"
+        "Header 1st paragraph.\n\n 2nd paragraph. Here is link\n\n Ignore at end"
    )
diff --git a/libs/community/tests/unit_tests/query_constructors/test_milvus.py b/libs/community/tests/unit_tests/query_constructors/test_milvus.py
index 70035628adbe8b..5369e1e04f505b 100644
--- a/libs/community/tests/unit_tests/query_constructors/test_milvus.py
+++ b/libs/community/tests/unit_tests/query_constructors/test_milvus.py
@@ -45,7 +45,7 @@ def test_visit_operation() -> None:
         ],
     )

-    expected = '(( foo < 2 ) and ( bar == "baz" ) ' 'and ( abc < "4" ))'
+    expected = '(( foo < 2 ) and ( bar == "baz" ) and ( abc < "4" ))'
     actual = DEFAULT_TRANSLATOR.visit_operation(op)
     assert expected == actual

@@ -122,7 +122,7 @@ def test_visit_structured_query() -> None:

     expected = (
         query,
-        {"expr": "(( foo < 2 ) " 'and ( bar == "baz" ) ' "and ( abc < 50 ))"},
+        {"expr": '(( foo < 2 ) and ( bar == "baz" ) and ( abc < 50 ))'},
     )
     actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
diff --git a/libs/community/tests/unit_tests/test_imports.py b/libs/community/tests/unit_tests/test_imports.py
index 59152b3b0468ed..8d510f7d4928f7 100644
--- a/libs/community/tests/unit_tests/test_imports.py
+++ b/libs/community/tests/unit_tests/test_imports.py
@@ -165,6 +165,6 @@ def test_init_files_properly_defined() -> None:
         missing_imports = set(module.__all__) - set(names)

-        assert (
-            not missing_imports
-        ), f"Missing imports: {missing_imports} in file path: {path}"
+        assert not missing_imports, (
+            f"Missing imports: {missing_imports} in file path: {path}"
+        )
diff --git a/libs/community/tests/unit_tests/tools/audio/test_tools.py b/libs/community/tests/unit_tests/tools/audio/test_tools.py
index 30cacb7b7b0b59..d5bb6256d45673 100644
--- a/libs/community/tests/unit_tests/tools/audio/test_tools.py
+++ b/libs/community/tests/unit_tests/tools/audio/test_tools.py
@@ -44,11 +44,12 @@ def test_huggingface_tts_constructor() -> None:
 def test_huggingface_tts_run_with_requests_mock() -> None:
     os.environ["HUGGINGFACE_API_KEY"] = "foo"

-    with tempfile.TemporaryDirectory() as tmp_dir, patch(
-        "uuid.uuid4"
-    ) as mock_uuid, patch("requests.post") as mock_inference, patch(
-        "builtins.open", mock_open()
-    ) as mock_file:
+    with (
+        tempfile.TemporaryDirectory() as tmp_dir,
+        patch("uuid.uuid4") as mock_uuid,
+        patch("requests.post") as mock_inference,
+        patch("builtins.open", mock_open()) as mock_file,
+    ):
         input_query = "Dummy input"

         mock_uuid_value = uuid.UUID("00000000-0000-0000-0000-000000000000")
diff --git a/libs/community/tests/unit_tests/vectorstores/test_azure_search.py b/libs/community/tests/unit_tests/vectorstores/test_azure_search.py
index 0f1ae7356973fc..e7aa37004c4612 100644
--- a/libs/community/tests/unit_tests/vectorstores/test_azure_search.py
+++ b/libs/community/tests/unit_tests/vectorstores/test_azure_search.py
@@ -220,9 +220,10 @@ def mock_upload_documents(self, documents: List[object]) -> List[Response]:  # t
     ]
     ids_provided = [i.metadata.get("id") for i in documents]

-    with patch.object(
-        SearchClient, "upload_documents", mock_upload_documents
-    ), patch.object(SearchIndexClient, "get_index", mock_default_index):
+    with (
+        patch.object(SearchClient, "upload_documents", mock_upload_documents),
+        patch.object(SearchIndexClient, "get_index", mock_default_index),
+    ):
         vector_store = create_vector_store()
         ids_used_at_upload = vector_store.add_documents(documents, ids=ids_provided)
         assert len(ids_provided) == len(ids_used_at_upload)
diff --git a/libs/community/tests/unit_tests/vectorstores/test_faiss.py b/libs/community/tests/unit_tests/vectorstores/test_faiss.py
index 739bd243f0eb03..2e749edc49effa 100644
--- a/libs/community/tests/unit_tests/vectorstores/test_faiss.py
+++ b/libs/community/tests/unit_tests/vectorstores/test_faiss.py
@@ -1714,9 +1714,9 @@ def test_ip_score() -> None:
     scores = db.similarity_search_with_relevance_scores("sundays", k=1)
     assert len(scores) == 1, "only one vector should be in db"
     _, score = scores[0]
-    assert (
-        score == 1
-    ), f"expected inner product of equivalent vectors to be 1, not {score}"
+    assert score == 1, (
+        f"expected inner product of equivalent vectors to be 1, not {score}"
+    )


 @pytest.mark.requires("faiss")
diff --git a/libs/community/tests/unit_tests/vectorstores/test_tencentvectordb.py b/libs/community/tests/unit_tests/vectorstores/test_tencentvectordb.py
index 36a4e959346bc0..5af23b3719defb 100644
--- a/libs/community/tests/unit_tests/vectorstores/test_tencentvectordb.py
+++ b/libs/community/tests/unit_tests/vectorstores/test_tencentvectordb.py
@@ -20,7 +20,7 @@ def test_translate_filter() -> None:
         assert False
     else:
         result = translate_filter(raw_filter)
-        expr = '(artist = "Taylor Swift" or artist = "Katy Perry") ' "and length < 180"
+        expr = '(artist = "Taylor Swift" or artist = "Katy Perry") and length < 180'
         assert expr == result