From 0d7e14c1e2d801b155bed64cbe135655a9374b2c Mon Sep 17 00:00:00 2001
From: Nelson Auner
Date: Mon, 29 Jan 2024 19:03:09 -0800
Subject: [PATCH] Linting

---
 .../vectorstores/pinecone.py                  |  4 +--
 .../vectorstores/test_pinecone.py             | 28 ++++++++++++-------
 2 files changed, 20 insertions(+), 12 deletions(-)

diff --git a/libs/community/langchain_community/vectorstores/pinecone.py b/libs/community/langchain_community/vectorstores/pinecone.py
index b18c1757ca368..6405fb050d199 100644
--- a/libs/community/langchain_community/vectorstores/pinecone.py
+++ b/libs/community/langchain_community/vectorstores/pinecone.py
@@ -145,8 +145,8 @@ def add_texts(
             metadata[self._text_key] = text
 
         if async_req:
-            # For loops to avoid memory issues and optimize when using HTTP based embeddings
-            # The first loop runs the embeddings, it benefits when using OpenAI embeddings
+            # For loops to avoid memory issues when using HTTP-based embeddings
+            # First loop runs embeddings, benefits when using OpenAI embeddings
             # The second loops runs the pinecone upsert asynchronously.
             for i in range(0, len(texts), embedding_chunk_size):
                 chunk_texts = texts[i : i + embedding_chunk_size]
diff --git a/libs/community/tests/integration_tests/vectorstores/test_pinecone.py b/libs/community/tests/integration_tests/vectorstores/test_pinecone.py
index e7bf55231d8ac..195528911bdaa 100644
--- a/libs/community/tests/integration_tests/vectorstores/test_pinecone.py
+++ b/libs/community/tests/integration_tests/vectorstores/test_pinecone.py
@@ -2,7 +2,7 @@
 import os
 import time
 import uuid
-from typing import TYPE_CHECKING, List, Generator
+from typing import TYPE_CHECKING, Generator, List
 
 import numpy as np
 import pytest
@@ -36,13 +36,18 @@ def texts() -> Generator[List[str], None, None]:
     yield [doc.page_content for doc in documents]
 
 
+@pytest.fixture
 def mock_pool_not_supported(mocker):
     """
    This is the error thrown when multiprocessing is not supported.

    See https://github.com/langchain-ai/langchain/issues/11168
     """
-    mocker.patch('multiprocessing.synchronize.SemLock.__init__', side_effect=OSError('OSError: [Errno 38] Function not implemented'))
+    mocker.patch(
+        "multiprocessing.synchronize.SemLock.__init__",
+        side_effect=OSError("OSError: [Errno 38] Function not implemented"),
+    )
+
 
 def reset_pinecone() -> None:
     assert os.environ.get("PINECONE_API_KEY") is not None
@@ -311,9 +316,10 @@ def test_from_texts_with_metadatas_benchmark(
         query = "What did the president say about Ketanji Brown Jackson"
         _ = docsearch.similarity_search(query, k=1, namespace=namespace_name)
 
-
-    @pytest.mark.usefixtures('mock_pool_not_supported')
-    def test_that_async_freq_uses_multiprocessing(self, embedding_openai: OpenAIEmbeddings) -> None:
+    @pytest.mark.usefixtures("mock_pool_not_supported")
+    def test_that_async_freq_uses_multiprocessing(
+        self, embedding_openai: OpenAIEmbeddings
+    ) -> None:
         with pytest.raises(OSError):
             Pinecone.from_texts(
                 texts=["foo", "bar", "baz"] * 32,
@@ -321,12 +327,14 @@ def test_that_async_freq_uses_multiprocessing(self, embedding_openai: OpenAIEmbe
                 index_name=index_name,
                 async_req=True,
             )
-
-    @pytest.mark.usefixtures('mock_pool_not_supported')
-    def test_that_async_freq_false_enabled_singlethreading(self, embedding_openai: OpenAIEmbeddings) -> None:
+
+    @pytest.mark.usefixtures("mock_pool_not_supported")
+    def test_that_async_freq_false_enabled_singlethreading(
+        self, embedding_openai: OpenAIEmbeddings
+    ) -> None:
         Pinecone.from_texts(
             texts=["foo", "bar", "baz"],
             embedding=embedding_openai,
             index_name=index_name,
-            async_req=False
-        )
\ No newline at end of file
+            async_req=False,
+        )