diff --git a/cookbook/basic_critique_revise.ipynb b/cookbook/basic_critique_revise.ipynb index 65d26ec4ef63..ceded916b214 100644 --- a/cookbook/basic_critique_revise.ipynb +++ b/cookbook/basic_critique_revise.ipynb @@ -20,8 +20,8 @@ "outputs": [], "source": [ "Deno.env.set(\"OPENAI_API_KEY\", \"\");\n", - "Deno.env.set(\"LANGCHAIN_API_KEY\", \"\");\n", - "Deno.env.set(\"LANGCHAIN_TRACING_V2\", \"true\");\n", + "Deno.env.set(\"LANGSMITH_API_KEY\", \"\");\n", + "Deno.env.set(\"LANGSMITH_TRACING\", \"true\");\n", "\n", "import { z } from \"npm:zod\";\n", "\n", diff --git a/deno.json b/deno.json index 4c6004fa02b2..e3054ca67afb 100644 --- a/deno.json +++ b/deno.json @@ -28,7 +28,6 @@ "readline": "https://deno.land/x/readline@v1.1.0/mod.ts", "uuid": "npm:/uuid", "youtubei.js": "npm:/youtubei.js", - "youtube-transcript": "npm:/youtube-transcript", "neo4j-driver": "npm:/neo4j-driver", "axios": "npm:/axios", "@mendable/firecrawl-js": "npm:/@mendable/firecrawl-js", @@ -40,4 +39,4 @@ "@smithy/util-utf8": "npm:/@smithy/util-utf8", "@aws-sdk/types": "npm:/@aws-sdk/types" } -} \ No newline at end of file +} diff --git a/docs/core_docs/docs/how_to/agent_executor.ipynb b/docs/core_docs/docs/how_to/agent_executor.ipynb index d9d566fb81db..73880fbef354 100644 --- a/docs/core_docs/docs/how_to/agent_executor.ipynb +++ b/docs/core_docs/docs/how_to/agent_executor.ipynb @@ -65,8 +65,8 @@ "After you sign up at the link above, make sure to set your environment variables to start logging traces:\n", "\n", "```shell\n", - "export LANGCHAIN_TRACING_V2=\"true\"\n", - "export LANGCHAIN_API_KEY=\"...\"\n", + "export LANGSMITH_TRACING=\"true\"\n", + "export LANGSMITH_API_KEY=\"...\"\n", "\n", "# Reduce tracing latency if you are not in a serverless environment\n", "# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/docs/how_to/chat_model_caching.mdx b/docs/core_docs/docs/how_to/chat_model_caching.mdx index f8a8ad3caa87..dab184154a98 100644 --- 
a/docs/core_docs/docs/how_to/chat_model_caching.mdx +++ b/docs/core_docs/docs/how_to/chat_model_caching.mdx @@ -98,6 +98,64 @@ import RedisCacheExample from "@examples/cache/chat_models/redis.ts"; {RedisCacheExample} +## Caching with Upstash Redis + +LangChain provides an Upstash Redis-based cache. Like the Redis-based cache, this cache is useful if you want to share the cache across multiple processes or servers. The Upstash Redis client uses HTTP and supports edge environments. To use it, you'll need to install the `@upstash/redis` package: + +```bash npm2yarn +npm install @upstash/redis +``` + +You'll also need an [Upstash account](https://docs.upstash.com/redis#create-account) and a [Redis database](https://docs.upstash.com/redis#create-a-database) to connect to. Once you've done that, retrieve your REST URL and REST token. + +Then, you can pass a `cache` option when you instantiate the LLM. For example: + +import UpstashRedisCacheExample from "@examples/cache/chat_models/upstash_redis.ts"; + +{UpstashRedisCacheExample} + +You can also directly pass in a previously created [@upstash/redis](https://docs.upstash.com/redis/sdks/javascriptsdk/overview) client instance: + +import AdvancedUpstashRedisCacheExample from "@examples/cache/chat_models/upstash_redis_advanced.ts"; + +{AdvancedUpstashRedisCacheExample} + +## Caching with Vercel KV + +LangChain provides a Vercel KV-based cache. Like the Redis-based cache, this cache is useful if you want to share the cache across multiple processes or servers. The Vercel KV client uses HTTP and supports edge environments. To use it, you'll need to install the `@vercel/kv` package: + +```bash npm2yarn +npm install @vercel/kv +``` + +You'll also need a Vercel account and a [KV database](https://vercel.com/docs/storage/vercel-kv/kv-reference) to connect to. Once you've done that, retrieve your REST URL and REST token. + +Then, you can pass a `cache` option when you instantiate the LLM. 
For example: + +import VercelKVCacheExample from "@examples/cache/chat_models/vercel_kv.ts"; + +{VercelKVCacheExample} + +## Caching with Cloudflare KV + +:::info +This integration is only supported in Cloudflare Workers. +::: + +If you're deploying your project as a Cloudflare Worker, you can use LangChain's Cloudflare KV-powered LLM cache. + +For information on how to set up KV in Cloudflare, see [the official documentation](https://developers.cloudflare.com/kv/). + +**Note:** If you are using TypeScript, you may need to install types if they aren't already present: + +```bash npm2yarn +npm install -S @cloudflare/workers-types +``` + +import CloudflareExample from "@examples/cache/chat_models/cloudflare_kv.ts"; + +{CloudflareExample} + ## Caching on the File System :::warning diff --git a/docs/core_docs/docs/how_to/debugging.mdx b/docs/core_docs/docs/how_to/debugging.mdx index d1a3527e94a2..1bbacb85b1c8 100644 --- a/docs/core_docs/docs/how_to/debugging.mdx +++ b/docs/core_docs/docs/how_to/debugging.mdx @@ -16,8 +16,8 @@ The best way to do this is with [LangSmith](https://smith.langchain.com). After you sign up at the link above, make sure to set your environment variables to start logging traces: ```shell -export LANGCHAIN_TRACING_V2="true" -export LANGCHAIN_API_KEY="..." +export LANGSMITH_TRACING="true" +export LANGSMITH_API_KEY="..." 
# Reduce tracing latency if you are not in a serverless environment # export LANGCHAIN_CALLBACKS_BACKGROUND=true diff --git a/docs/core_docs/docs/how_to/graph_constructing.ipynb b/docs/core_docs/docs/how_to/graph_constructing.ipynb index dfee57df8191..18a0c3baf651 100644 --- a/docs/core_docs/docs/how_to/graph_constructing.ipynb +++ b/docs/core_docs/docs/how_to/graph_constructing.ipynb @@ -41,7 +41,7 @@ "\n", "# Optional, use LangSmith for best-in-class observability\n", "LANGSMITH_API_KEY=your-api-key\n", - "LANGCHAIN_TRACING_V2=true\n", + "LANGSMITH_TRACING=true\n", "\n", "# Reduce tracing latency if you are not in a serverless environment\n", "# LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/docs/how_to/graph_mapping.ipynb b/docs/core_docs/docs/how_to/graph_mapping.ipynb index 413d8f30f941..43347373d6fc 100644 --- a/docs/core_docs/docs/how_to/graph_mapping.ipynb +++ b/docs/core_docs/docs/how_to/graph_mapping.ipynb @@ -40,7 +40,7 @@ "\n", "# Optional, use LangSmith for best-in-class observability\n", "LANGSMITH_API_KEY=your-api-key\n", - "LANGCHAIN_TRACING_V2=true\n", + "LANGSMITH_TRACING=true\n", "\n", "# Reduce tracing latency if you are not in a serverless environment\n", "# LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/docs/how_to/graph_prompting.ipynb b/docs/core_docs/docs/how_to/graph_prompting.ipynb index 03ae3c0de203..faa53db35106 100644 --- a/docs/core_docs/docs/how_to/graph_prompting.ipynb +++ b/docs/core_docs/docs/how_to/graph_prompting.ipynb @@ -49,7 +49,7 @@ "\n", "# Optional, use LangSmith for best-in-class observability\n", "LANGSMITH_API_KEY=your-api-key\n", - "LANGCHAIN_TRACING_V2=true\n", + "LANGSMITH_TRACING=true\n", "\n", "# Reduce tracing latency if you are not in a serverless environment\n", "# LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/docs/how_to/graph_semantic.ipynb b/docs/core_docs/docs/how_to/graph_semantic.ipynb index 8cf963e32632..e15bb66c1b3c 100644 --- 
a/docs/core_docs/docs/how_to/graph_semantic.ipynb +++ b/docs/core_docs/docs/how_to/graph_semantic.ipynb @@ -56,7 +56,7 @@ "\n", "# Optional, use LangSmith for best-in-class observability\n", "LANGSMITH_API_KEY=your-api-key\n", - "LANGCHAIN_TRACING_V2=true\n", + "LANGSMITH_TRACING=true\n", "\n", "# Reduce tracing latency if you are not in a serverless environment\n", "# LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/docs/how_to/llm_caching.mdx b/docs/core_docs/docs/how_to/llm_caching.mdx index 05dcbb75538e..186045640542 100644 --- a/docs/core_docs/docs/how_to/llm_caching.mdx +++ b/docs/core_docs/docs/how_to/llm_caching.mdx @@ -161,6 +161,22 @@ import AdvancedUpstashRedisCacheExample from "@examples/cache/upstash_redis_adva {AdvancedUpstashRedisCacheExample} +## Caching with Vercel KV + +LangChain provides a Vercel KV-based cache. Like the Redis-based cache, this cache is useful if you want to share the cache across multiple processes or servers. The Vercel KV client uses HTTP and supports edge environments. To use it, you'll need to install the `@vercel/kv` package: + +```bash npm2yarn +npm install @vercel/kv +``` + +You'll also need a Vercel account and a [KV database](https://vercel.com/docs/storage/vercel-kv/kv-reference) to connect to. Once you've done that, retrieve your REST URL and REST token. + +Then, you can pass a `cache` option when you instantiate the LLM. 
For example: + +import VercelKVCacheExample from "@examples/cache/vercel_kv.ts"; + +{VercelKVCacheExample} + ## Caching with Cloudflare KV :::info diff --git a/docs/core_docs/docs/how_to/migrate_agent.ipynb b/docs/core_docs/docs/how_to/migrate_agent.ipynb index 816f45e40fac..5f8a6e253afc 100644 --- a/docs/core_docs/docs/how_to/migrate_agent.ipynb +++ b/docs/core_docs/docs/how_to/migrate_agent.ipynb @@ -57,10 +57,10 @@ "// process.env.OPENAI_API_KEY = \"...\";\n", "\n", "// Optional, add tracing in LangSmith\n", - "// process.env.LANGCHAIN_API_KEY = \"ls...\";\n", + "// process.env.LANGSMITH_API_KEY = \"ls...\";\n", "// process.env.LANGCHAIN_CALLBACKS_BACKGROUND = \"true\";\n", - "// process.env.LANGCHAIN_TRACING_V2 = \"true\";\n", - "// process.env.LANGCHAIN_PROJECT = \"How to migrate: LangGraphJS\";\n", + "// process.env.LANGSMITH_TRACING = \"true\";\n", + "// process.env.LANGSMITH_PROJECT = \"How to migrate: LangGraphJS\";\n", "\n", "// Reduce tracing latency if you are not in a serverless environment\n", "// process.env.LANGCHAIN_CALLBACKS_BACKGROUND = \"true\";" @@ -1337,4 +1337,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +} diff --git a/docs/core_docs/docs/how_to/qa_chat_history_how_to.ipynb b/docs/core_docs/docs/how_to/qa_chat_history_how_to.ipynb index e2cc59b8e9ba..31c684f3b761 100644 --- a/docs/core_docs/docs/how_to/qa_chat_history_how_to.ipynb +++ b/docs/core_docs/docs/how_to/qa_chat_history_how_to.ipynb @@ -63,8 +63,8 @@ "\n", "\n", "```bash\n", - "export LANGCHAIN_TRACING_V2=true\n", - "export LANGCHAIN_API_KEY=YOUR_KEY\n", + "export LANGSMITH_TRACING=true\n", + "export LANGSMITH_API_KEY=YOUR_KEY\n", "\n", "# Reduce tracing latency if you are not in a serverless environment\n", "# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/docs/how_to/qa_citations.ipynb b/docs/core_docs/docs/how_to/qa_citations.ipynb index 701ec0481652..85ee820febde 100644 --- a/docs/core_docs/docs/how_to/qa_citations.ipynb 
+++ b/docs/core_docs/docs/how_to/qa_citations.ipynb @@ -55,8 +55,8 @@ "\n", "\n", "```bash\n", - "export LANGCHAIN_TRACING_V2=true\n", - "export LANGCHAIN_API_KEY=YOUR_KEY\n", + "export LANGSMITH_TRACING=true\n", + "export LANGSMITH_API_KEY=YOUR_KEY\n", "\n", "# Reduce tracing latency if you are not in a serverless environment\n", "# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/docs/how_to/qa_per_user.ipynb b/docs/core_docs/docs/how_to/qa_per_user.ipynb index 4a4cd4eda993..12e4353a77f1 100644 --- a/docs/core_docs/docs/how_to/qa_per_user.ipynb +++ b/docs/core_docs/docs/how_to/qa_per_user.ipynb @@ -88,7 +88,7 @@ "\n", "# Optional, use LangSmith for best-in-class observability\n", "LANGSMITH_API_KEY=your-api-key\n", - "LANGCHAIN_TRACING_V2=true\n", + "LANGSMITH_TRACING=true\n", "\n", "# Reduce tracing latency if you are not in a serverless environment\n", "# LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/docs/how_to/qa_sources.ipynb b/docs/core_docs/docs/how_to/qa_sources.ipynb index c3efb34496ab..03a3ed1f350f 100644 --- a/docs/core_docs/docs/how_to/qa_sources.ipynb +++ b/docs/core_docs/docs/how_to/qa_sources.ipynb @@ -53,8 +53,8 @@ "\n", "\n", "```bash\n", - "export LANGCHAIN_TRACING_V2=true\n", - "export LANGCHAIN_API_KEY=YOUR_KEY\n", + "export LANGSMITH_TRACING=true\n", + "export LANGSMITH_API_KEY=YOUR_KEY\n", "\n", "# Reduce tracing latency if you are not in a serverless environment\n", "# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/docs/how_to/qa_streaming.ipynb b/docs/core_docs/docs/how_to/qa_streaming.ipynb index 5b2f8ce78565..4b4d4f222b03 100644 --- a/docs/core_docs/docs/how_to/qa_streaming.ipynb +++ b/docs/core_docs/docs/how_to/qa_streaming.ipynb @@ -53,8 +53,8 @@ "\n", "\n", "```bash\n", - "export LANGCHAIN_TRACING_V2=true\n", - "export LANGCHAIN_API_KEY=YOUR_KEY\n", + "export LANGSMITH_TRACING=true\n", + "export LANGSMITH_API_KEY=YOUR_KEY\n", "\n", "# Reduce tracing latency if 
you are not in a serverless environment\n", "# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/docs/how_to/query_few_shot.ipynb b/docs/core_docs/docs/how_to/query_few_shot.ipynb index ba470500e296..5690d7f563e8 100644 --- a/docs/core_docs/docs/how_to/query_few_shot.ipynb +++ b/docs/core_docs/docs/how_to/query_few_shot.ipynb @@ -45,7 +45,7 @@ "```\n", "# Optional, use LangSmith for best-in-class observability\n", "LANGSMITH_API_KEY=your-api-key\n", - "LANGCHAIN_TRACING_V2=true\n", + "LANGSMITH_TRACING=true\n", "\n", "# Reduce tracing latency if you are not in a serverless environment\n", "# LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/docs/how_to/query_high_cardinality.ipynb b/docs/core_docs/docs/how_to/query_high_cardinality.ipynb index 589f4e2cc933..d029e3c8053a 100644 --- a/docs/core_docs/docs/how_to/query_high_cardinality.ipynb +++ b/docs/core_docs/docs/how_to/query_high_cardinality.ipynb @@ -47,7 +47,7 @@ "```\n", "# Optional, use LangSmith for best-in-class observability\n", "LANGSMITH_API_KEY=your-api-key\n", - "LANGCHAIN_TRACING_V2=true\n", + "LANGSMITH_TRACING=true\n", "\n", "# Reduce tracing latency if you are not in a serverless environment\n", "# LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/docs/how_to/query_multiple_queries.ipynb b/docs/core_docs/docs/how_to/query_multiple_queries.ipynb index f8ea45eaad47..0f8cb9f2ebb5 100644 --- a/docs/core_docs/docs/how_to/query_multiple_queries.ipynb +++ b/docs/core_docs/docs/how_to/query_multiple_queries.ipynb @@ -45,7 +45,7 @@ "\n", "# Optional, use LangSmith for best-in-class observability\n", "LANGSMITH_API_KEY=your-api-key\n", - "LANGCHAIN_TRACING_V2=true\n", + "LANGSMITH_TRACING=true\n", "\n", "# Reduce tracing latency if you are not in a serverless environment\n", "# LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/docs/how_to/query_multiple_retrievers.ipynb b/docs/core_docs/docs/how_to/query_multiple_retrievers.ipynb 
index ac52581c8622..485be5b4d19c 100644 --- a/docs/core_docs/docs/how_to/query_multiple_retrievers.ipynb +++ b/docs/core_docs/docs/how_to/query_multiple_retrievers.ipynb @@ -45,7 +45,7 @@ "\n", "# Optional, use LangSmith for best-in-class observability\n", "LANGSMITH_API_KEY=your-api-key\n", - "LANGCHAIN_TRACING_V2=true\n", + "LANGSMITH_TRACING=true\n", "\n", "# Reduce tracing latency if you are not in a serverless environment\n", "# LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/docs/how_to/query_no_queries.ipynb b/docs/core_docs/docs/how_to/query_no_queries.ipynb index 53e9b041d1c6..90991b051e1f 100644 --- a/docs/core_docs/docs/how_to/query_no_queries.ipynb +++ b/docs/core_docs/docs/how_to/query_no_queries.ipynb @@ -47,7 +47,7 @@ "\n", "# Optional, use LangSmith for best-in-class observability\n", "LANGSMITH_API_KEY=your-api-key\n", - "LANGCHAIN_TRACING_V2=true\n", + "LANGSMITH_TRACING=true\n", "\n", "# Reduce tracing latency if you are not in a serverless environment\n", "# LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/docs/how_to/sql_large_db.mdx b/docs/core_docs/docs/how_to/sql_large_db.mdx index 9c4895632d0e..c36e5049c095 100644 --- a/docs/core_docs/docs/how_to/sql_large_db.mdx +++ b/docs/core_docs/docs/how_to/sql_large_db.mdx @@ -23,8 +23,8 @@ npm install langchain @langchain/community @langchain/openai typeorm sqlite3 ```bash export OPENAI_API_KEY="your api key" # Uncomment the below to use LangSmith. Not required. 
-# export LANGCHAIN_API_KEY="your api key" -# export LANGCHAIN_TRACING_V2=true +# export LANGSMITH_API_KEY="your api key" +# export LANGSMITH_TRACING=true # Reduce tracing latency if you are not in a serverless environment # export LANGCHAIN_CALLBACKS_BACKGROUND=true diff --git a/docs/core_docs/docs/how_to/sql_prompting.mdx b/docs/core_docs/docs/how_to/sql_prompting.mdx index ef5f8cdfdf27..c2bd5a78be22 100644 --- a/docs/core_docs/docs/how_to/sql_prompting.mdx +++ b/docs/core_docs/docs/how_to/sql_prompting.mdx @@ -22,8 +22,8 @@ npm install @langchain/community @langchain/openai typeorm sqlite3 ```bash export OPENAI_API_KEY="your api key" # Uncomment the below to use LangSmith. Not required. -# export LANGCHAIN_API_KEY="your api key" -# export LANGCHAIN_TRACING_V2=true +# export LANGSMITH_API_KEY="your api key" +# export LANGSMITH_TRACING=true # Reduce tracing latency if you are not in a serverless environment # export LANGCHAIN_CALLBACKS_BACKGROUND=true diff --git a/docs/core_docs/docs/how_to/sql_query_checking.mdx b/docs/core_docs/docs/how_to/sql_query_checking.mdx index 7cb5146b6e8b..90f3f6cfc7e2 100644 --- a/docs/core_docs/docs/how_to/sql_query_checking.mdx +++ b/docs/core_docs/docs/how_to/sql_query_checking.mdx @@ -26,8 +26,8 @@ npm install @langchain/community @langchain/openai typeorm sqlite3 ```bash export OPENAI_API_KEY="your api key" # Uncomment the below to use LangSmith. Not required. 
-# export LANGCHAIN_API_KEY="your api key" -# export LANGCHAIN_TRACING_V2=true +# export LANGSMITH_API_KEY="your api key" +# export LANGSMITH_TRACING=true # Reduce tracing latency if you are not in a serverless environment # export LANGCHAIN_CALLBACKS_BACKGROUND=true diff --git a/docs/core_docs/docs/how_to/tools_prompting.ipynb b/docs/core_docs/docs/how_to/tools_prompting.ipynb index 49356ba2e570..875953123ac3 100644 --- a/docs/core_docs/docs/how_to/tools_prompting.ipynb +++ b/docs/core_docs/docs/how_to/tools_prompting.ipynb @@ -52,7 +52,7 @@ "```\n", "# Optional, use LangSmith for best-in-class observability\n", "LANGSMITH_API_KEY=your-api-key\n", - "LANGCHAIN_TRACING_V2=true\n", + "LANGSMITH_TRACING=true\n", "\n", "# Reduce tracing latency if you are not in a serverless environment\n", "# LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/docs/integrations/chat/anthropic.ipynb b/docs/core_docs/docs/integrations/chat/anthropic.ipynb index b8761588d277..6b431821c4db 100644 --- a/docs/core_docs/docs/integrations/chat/anthropic.ipynb +++ b/docs/core_docs/docs/integrations/chat/anthropic.ipynb @@ -55,8 +55,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/chat/azure.ipynb b/docs/core_docs/docs/integrations/chat/azure.ipynb index 5f6ac31b19b7..1cea976d56b6 100644 --- a/docs/core_docs/docs/integrations/chat/azure.ipynb +++ b/docs/core_docs/docs/integrations/chat/azure.ipynb @@ -66,8 +66,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", 
"\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/chat/bedrock.ipynb b/docs/core_docs/docs/integrations/chat/bedrock.ipynb index dbe56fc26d1a..74b9f3ed9fd2 100644 --- a/docs/core_docs/docs/integrations/chat/bedrock.ipynb +++ b/docs/core_docs/docs/integrations/chat/bedrock.ipynb @@ -57,8 +57,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/chat/bedrock_converse.ipynb b/docs/core_docs/docs/integrations/chat/bedrock_converse.ipynb index b4b4fca65c8f..1d78af2a9c1c 100644 --- a/docs/core_docs/docs/integrations/chat/bedrock_converse.ipynb +++ b/docs/core_docs/docs/integrations/chat/bedrock_converse.ipynb @@ -51,8 +51,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/chat/cerebras.ipynb b/docs/core_docs/docs/integrations/chat/cerebras.ipynb index 2d49afcd1b79..63d562762cbd 100644 --- a/docs/core_docs/docs/integrations/chat/cerebras.ipynb +++ b/docs/core_docs/docs/integrations/chat/cerebras.ipynb @@ -31,9 +31,7 @@ "\n", "Our CS-3 
systems can be quickly and easily clustered to create the largest AI supercomputers in the world, making it simple to place and run the largest models. Leading corporations, research institutions, and governments are already using Cerebras solutions to develop proprietary models and train popular open-source models.\n", "\n", - "Want to experience the power of Cerebras? Check out our [website](https://cerebras.ai/) for more resources and explore options for accessing our technology through the Cerebras Cloud or on-premise deployments!\n", - "\n", - "For more information about Cerebras Cloud, visit [cloud.cerebras.ai](https://cloud.cerebras.ai/). Our API reference is available at [inference-docs.cerebras.ai](https://inference-docs.cerebras.ai).\n", + "This will help you getting started with ChatCerebras [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatCerebras features and configurations head to the [API reference](https://api.js.langchain.com/classes/_langchain_cerebras.ChatCerebras.html).\n", "\n", "## Overview\n", "\n", @@ -66,8 +64,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", @@ -314,7 +312,7 @@ "source": [ "## API reference\n", "\n", - "For detailed documentation of all ChatCerebras features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_cerebras.ChatCerebras.html" + "For detailed documentation of all ChatCerebras features and configurations head to the API reference: https://api.js.langchain.com/classes/_langchain_cerebras.ChatCerebras.html" ] } ], diff --git a/docs/core_docs/docs/integrations/chat/cohere.ipynb 
b/docs/core_docs/docs/integrations/chat/cohere.ipynb index 97baaf8081e0..931b5abd9011 100644 --- a/docs/core_docs/docs/integrations/chat/cohere.ipynb +++ b/docs/core_docs/docs/integrations/chat/cohere.ipynb @@ -59,8 +59,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/chat/fireworks.ipynb b/docs/core_docs/docs/integrations/chat/fireworks.ipynb index b3115487d98e..0a9b4eca639e 100644 --- a/docs/core_docs/docs/integrations/chat/fireworks.ipynb +++ b/docs/core_docs/docs/integrations/chat/fireworks.ipynb @@ -55,8 +55,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/chat/google_generativeai.ipynb b/docs/core_docs/docs/integrations/chat/google_generativeai.ipynb index 3e2332d74d4b..a7d64bedc25a 100644 --- a/docs/core_docs/docs/integrations/chat/google_generativeai.ipynb +++ b/docs/core_docs/docs/integrations/chat/google_generativeai.ipynb @@ -66,8 +66,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export 
LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/chat/google_vertex_ai.ipynb b/docs/core_docs/docs/integrations/chat/google_vertex_ai.ipynb index 8d046defbac1..7d0dca71a23a 100644 --- a/docs/core_docs/docs/integrations/chat/google_vertex_ai.ipynb +++ b/docs/core_docs/docs/integrations/chat/google_vertex_ai.ipynb @@ -67,8 +67,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/chat/groq.ipynb b/docs/core_docs/docs/integrations/chat/groq.ipynb index 279fa1a74769..cf8265dd6d57 100644 --- a/docs/core_docs/docs/integrations/chat/groq.ipynb +++ b/docs/core_docs/docs/integrations/chat/groq.ipynb @@ -56,8 +56,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/chat/ibm.ipynb b/docs/core_docs/docs/integrations/chat/ibm.ipynb index c3f60d925f99..345203410506 100644 --- a/docs/core_docs/docs/integrations/chat/ibm.ipynb +++ b/docs/core_docs/docs/integrations/chat/ibm.ipynb @@ -124,8 +124,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", 
"```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/chat/mistral.ipynb b/docs/core_docs/docs/integrations/chat/mistral.ipynb index 679bde45a925..4698c67f9f46 100644 --- a/docs/core_docs/docs/integrations/chat/mistral.ipynb +++ b/docs/core_docs/docs/integrations/chat/mistral.ipynb @@ -55,8 +55,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/chat/ollama.ipynb b/docs/core_docs/docs/integrations/chat/ollama.ipynb index 12ea4b4e35c8..23b661bb53b4 100644 --- a/docs/core_docs/docs/integrations/chat/ollama.ipynb +++ b/docs/core_docs/docs/integrations/chat/ollama.ipynb @@ -55,8 +55,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/chat/openai.ipynb b/docs/core_docs/docs/integrations/chat/openai.ipynb index 441580334fc1..800ed7d0b568 100644 --- a/docs/core_docs/docs/integrations/chat/openai.ipynb +++ b/docs/core_docs/docs/integrations/chat/openai.ipynb @@ -55,8 +55,8 @@ "If you want to get automated tracing of your model calls you can 
also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/chat/togetherai.ipynb b/docs/core_docs/docs/integrations/chat/togetherai.ipynb index 09a0271059d7..b6deed8ccaa3 100644 --- a/docs/core_docs/docs/integrations/chat/togetherai.ipynb +++ b/docs/core_docs/docs/integrations/chat/togetherai.ipynb @@ -55,8 +55,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/chat/xai.ipynb b/docs/core_docs/docs/integrations/chat/xai.ipynb index e07a8079dd0c..830eec608441 100644 --- a/docs/core_docs/docs/integrations/chat/xai.ipynb +++ b/docs/core_docs/docs/integrations/chat/xai.ipynb @@ -55,8 +55,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/document_compressors/ibm.ipynb b/docs/core_docs/docs/integrations/document_compressors/ibm.ipynb index 9645a4e126a3..4bf4e42ac475 100644 --- a/docs/core_docs/docs/integrations/document_compressors/ibm.ipynb +++ 
b/docs/core_docs/docs/integrations/document_compressors/ibm.ipynb @@ -23,13 +23,13 @@ "\n", "## Overview\n", "\n", - "This will help you getting started with the [Watsonx document compressor](/docs/concepts/#document_compressors). For detailed documentation of all Watsonx document compressor features and configurations head to the [API reference]https://api.js.langchain.com/modules/_langchain_community.document_compressors_ibm.WatsonxRerank.html).\n", + "This will help you getting started with the [Watsonx document compressor](/docs/concepts/#document_compressors). For detailed documentation of all Watsonx document compressor features and configurations head to the [API reference](https://api.js.langchain.com/modules/_langchain_community.document_compressors_ibm.html).\n", "\n", "### Integration details\n", "\n", - "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/llms/ibm_watsonx/) | Package downloads | Package latest |\n", + "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/retrievers/ibm_watsonx_ranker/) | Package downloads | Package latest |\n", "| :--- | :--- | :---: | :---: | :---: |\n", - "| [`WatsonxRerank`](https://api.js.langchain.com/modules/_langchain_community.document_compressors_ibm.WatsonxRerank.html) | [@langchain/community](https://www.npmjs.com/package/@langchain/community) | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "| [`WatsonxRerank`](https://api.js.langchain.com/classes/_langchain_community.document_compressors_ibm.WatsonxRerank.html) | [@langchain/community](https://www.npmjs.com/package/@langchain/community) | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) 
|\n", "\n", "## Setup\n", "\n", @@ -159,7 +159,7 @@ " version: \"2024-05-31\",\n", " serviceUrl: process.env.WATSONX_AI_SERVICE_URL,\n", " projectId: process.env.WATSONX_AI_PROJECT_ID,\n", - " model: \"ibm/slate-125m-english-rtrvr\",\n", + " model: \"cross-encoder/ms-marco-minilm-l-12-v2\",\n", "});" ] }, @@ -181,7 +181,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "id": "51a60dbe-9f2e-4e04-bb62-23968f17164a", "metadata": {}, "outputs": [ @@ -266,7 +266,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "id": "fad30397", "metadata": {}, "outputs": [ @@ -290,7 +290,7 @@ " version: \"2024-05-31\",\n", " serviceUrl: process.env.WATSONX_AI_SERVICE_URL,\n", " projectId: process.env.WATSONX_AI_PROJECT_ID,\n", - " model: \"ibm/slate-125m-english-rtrvr\",\n", + " model: \"cross-encoder/ms-marco-minilm-l-12-v2\",\n", "});\n", "const compressed = await reranker.rerank(result, query);\n", "console.log(compressed);" @@ -363,7 +363,7 @@ "source": [ "## API reference\n", "\n", - "For detailed documentation of all Watsonx document compressor features and configurations head to the [API reference](https://api.js.langchain.com/modules/_langchain_community.document_compressors_ibm.WatsonxRerank.html)." + "For detailed documentation of all Watsonx document compressor features and configurations head to the [API reference](https://api.js.langchain.com/modules/_langchain_community.document_compressors_ibm.html)." 
] } ], diff --git a/docs/core_docs/docs/integrations/document_loaders/web_loaders/firecrawl.ipynb b/docs/core_docs/docs/integrations/document_loaders/web_loaders/firecrawl.ipynb index 31ef76f0797c..2c4b7dc3d90c 100644 --- a/docs/core_docs/docs/integrations/document_loaders/web_loaders/firecrawl.ipynb +++ b/docs/core_docs/docs/integrations/document_loaders/web_loaders/firecrawl.ipynb @@ -52,8 +52,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/document_loaders/web_loaders/pdf.ipynb b/docs/core_docs/docs/integrations/document_loaders/web_loaders/pdf.ipynb index 1c7927de592a..322892e6268e 100644 --- a/docs/core_docs/docs/integrations/document_loaders/web_loaders/pdf.ipynb +++ b/docs/core_docs/docs/integrations/document_loaders/web_loaders/pdf.ipynb @@ -41,8 +41,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/document_loaders/web_loaders/recursive_url_loader.ipynb b/docs/core_docs/docs/integrations/document_loaders/web_loaders/recursive_url_loader.ipynb index 001cd2fe1567..8f438c688142 100644 --- a/docs/core_docs/docs/integrations/document_loaders/web_loaders/recursive_url_loader.ipynb +++ 
b/docs/core_docs/docs/integrations/document_loaders/web_loaders/recursive_url_loader.ipynb @@ -60,8 +60,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_cheerio.ipynb b/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_cheerio.ipynb index a68f976c0fd8..efbf3c9bf9c6 100644 --- a/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_cheerio.ipynb +++ b/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_cheerio.ipynb @@ -43,8 +43,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_puppeteer.ipynb b/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_puppeteer.ipynb index 746b88214d54..ca841c111372 100644 --- a/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_puppeteer.ipynb +++ b/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_puppeteer.ipynb @@ -50,8 +50,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export 
LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/document_loaders/web_loaders/youtube.mdx b/docs/core_docs/docs/integrations/document_loaders/web_loaders/youtube.mdx index 9c65c0164d24..79fb16193d38 100644 --- a/docs/core_docs/docs/integrations/document_loaders/web_loaders/youtube.mdx +++ b/docs/core_docs/docs/integrations/document_loaders/web_loaders/youtube.mdx @@ -4,15 +4,14 @@ hide_table_of_contents: true # YouTube transcripts -This covers how to load youtube transcript into LangChain documents. +This covers how to load YouTube transcripts into LangChain documents. ## Setup -You'll need to install the [youtube-transcript](https://www.npmjs.com/package/youtube-transcript) package -and [youtubei.js](https://www.npmjs.com/package/youtubei.js) to extract metadata: +You'll need to install the [youtubei.js](https://www.npmjs.com/package/youtubei.js) package to extract metadata: ```bash npm2yarn -npm install @langchain/community @langchain/core youtube-transcript youtubei.js +npm install @langchain/community @langchain/core youtubei.js ``` ## Usage diff --git a/docs/core_docs/docs/integrations/llms/azure.ipynb b/docs/core_docs/docs/integrations/llms/azure.ipynb index 3ebc20765204..4397a36ad384 100644 --- a/docs/core_docs/docs/integrations/llms/azure.ipynb +++ b/docs/core_docs/docs/integrations/llms/azure.ipynb @@ -76,8 +76,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/llms/bedrock.ipynb 
b/docs/core_docs/docs/integrations/llms/bedrock.ipynb index 2cfe7a7aaf7a..24ce3378492f 100644 --- a/docs/core_docs/docs/integrations/llms/bedrock.ipynb +++ b/docs/core_docs/docs/integrations/llms/bedrock.ipynb @@ -56,8 +56,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/llms/cohere.ipynb b/docs/core_docs/docs/integrations/llms/cohere.ipynb index 92def6e09adc..e100580b44df 100644 --- a/docs/core_docs/docs/integrations/llms/cohere.ipynb +++ b/docs/core_docs/docs/integrations/llms/cohere.ipynb @@ -62,8 +62,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/llms/fireworks.ipynb b/docs/core_docs/docs/integrations/llms/fireworks.ipynb index ef23f86b3989..fb6015c76613 100644 --- a/docs/core_docs/docs/integrations/llms/fireworks.ipynb +++ b/docs/core_docs/docs/integrations/llms/fireworks.ipynb @@ -54,8 +54,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export 
LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/llms/google_vertex_ai.ipynb b/docs/core_docs/docs/integrations/llms/google_vertex_ai.ipynb index 8ad068013ecb..a4ab8da0be8b 100644 --- a/docs/core_docs/docs/integrations/llms/google_vertex_ai.ipynb +++ b/docs/core_docs/docs/integrations/llms/google_vertex_ai.ipynb @@ -96,8 +96,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/llms/ibm.ipynb b/docs/core_docs/docs/integrations/llms/ibm.ipynb index 7c47f382e59e..4abbfa25b192 100644 --- a/docs/core_docs/docs/integrations/llms/ibm.ipynb +++ b/docs/core_docs/docs/integrations/llms/ibm.ipynb @@ -119,8 +119,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/llms/mistral.ipynb b/docs/core_docs/docs/integrations/llms/mistral.ipynb index 0e5932455291..4348376a7d5c 100644 --- a/docs/core_docs/docs/integrations/llms/mistral.ipynb +++ b/docs/core_docs/docs/integrations/llms/mistral.ipynb @@ -57,8 +57,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export 
LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/llms/ollama.ipynb b/docs/core_docs/docs/integrations/llms/ollama.ipynb index cf301f0b65f8..34e72a860af2 100644 --- a/docs/core_docs/docs/integrations/llms/ollama.ipynb +++ b/docs/core_docs/docs/integrations/llms/ollama.ipynb @@ -56,8 +56,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/llms/openai.ipynb b/docs/core_docs/docs/integrations/llms/openai.ipynb index a0ae65f8d079..aceeebf9607c 100644 --- a/docs/core_docs/docs/integrations/llms/openai.ipynb +++ b/docs/core_docs/docs/integrations/llms/openai.ipynb @@ -53,8 +53,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/llms/together.ipynb b/docs/core_docs/docs/integrations/llms/together.ipynb index ebee5f5c4682..0a5a49766d7f 100644 --- a/docs/core_docs/docs/integrations/llms/together.ipynb +++ b/docs/core_docs/docs/integrations/llms/together.ipynb @@ -53,8 +53,8 @@ "If you want to get automated tracing of your model calls you can also set your 
[LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/llms/watsonx_ai.mdx b/docs/core_docs/docs/integrations/llms/watsonx_ai.mdx deleted file mode 100644 index 090a0acb274e..000000000000 --- a/docs/core_docs/docs/integrations/llms/watsonx_ai.mdx +++ /dev/null @@ -1,40 +0,0 @@ -# WatsonX AI - -LangChain.js supports integration with IBM WatsonX AI. Checkout [WatsonX AI](https://www.ibm.com/products/watsonx-ai) for a list of available models. - -## Setup - -You will need to set the following environment variables for using the WatsonX AI API. - -1. `IBM_CLOUD_API_KEY` which can be generated via [IBM Cloud](https://cloud.ibm.com/iam/apikeys) -2. `WATSONX_PROJECT_ID` which can be found in your [project's manage tab](https://dataplatform.cloud.ibm.com/projects/?context=wx) - -Alternatively, these can be set during the WatsonxAI Class instantiation as `ibmCloudApiKey` and `projectId` respectively. 
-For example: - -```typescript -const model = new WatsonxAI({ - ibmCloudApiKey: "My secret IBM Cloud API Key" - projectId: "My secret WatsonX AI Project id" -}); -``` - -## Usage - -import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; - - - -```bash npm2yarn -npm install @langchain/community @langchain/core -``` - -import CodeBlock from "@theme/CodeBlock"; -import WatsonxAiExample from "@examples/llms/watsonx_ai.ts"; - -{WatsonxAiExample} - -## Related - -- LLM [conceptual guide](/docs/concepts/text_llms) -- LLM [how-to guides](/docs/how_to/#llms) diff --git a/docs/core_docs/docs/integrations/platforms/index.mdx b/docs/core_docs/docs/integrations/platforms/index.mdx index 546d883e736d..f824f8151917 100644 --- a/docs/core_docs/docs/integrations/platforms/index.mdx +++ b/docs/core_docs/docs/integrations/platforms/index.mdx @@ -11,13 +11,16 @@ LangChain integrates with many providers. These providers have standalone `@langchain/{provider}` packages for improved versioning, dependency management and testing. +For specifics on how to use each package, look for their pages in the appropriate component docs section (e.g. [chat models](/docs/integrations/chat/)). 
+ - [Anthropic](https://www.npmjs.com/package/@langchain/anthropic) +- [Cerebras](https://www.npmjs.com/package/@langchain/cerebras) - [Cloudflare](https://www.npmjs.com/package/@langchain/cloudflare) - [Cohere](https://www.npmjs.com/package/@langchain/cohere) - [Exa](https://www.npmjs.com/package/@langchain/exa) - [Google GenAI](https://www.npmjs.com/package/@langchain/google-genai) - [Google VertexAI](https://www.npmjs.com/package/@langchain/google-vertexai) -- [Google VertexAI Web](https://www.npmjs.com/package/@langchain/google-vertexai-web) +- [Google VertexAI (Web Environments)](https://www.npmjs.com/package/@langchain/google-vertexai-web) - [Groq](https://www.npmjs.com/package/@langchain/groq) - [MistralAI](https://www.npmjs.com/package/@langchain/mistralai) - [MongoDB](https://www.npmjs.com/package/@langchain/mongodb) diff --git a/docs/core_docs/docs/integrations/text_embedding/azure_openai.ipynb b/docs/core_docs/docs/integrations/text_embedding/azure_openai.ipynb index d270824ee0c6..a5225bd11ff8 100644 --- a/docs/core_docs/docs/integrations/text_embedding/azure_openai.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/azure_openai.ipynb @@ -71,8 +71,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/text_embedding/bedrock.ipynb b/docs/core_docs/docs/integrations/text_embedding/bedrock.ipynb index 7d597e9c37d8..36b5b04b2225 100644 --- a/docs/core_docs/docs/integrations/text_embedding/bedrock.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/bedrock.ipynb @@ -43,8 +43,8 @@ "If you want to get automated tracing of your model calls you can 
also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/text_embedding/cloudflare_ai.ipynb b/docs/core_docs/docs/integrations/text_embedding/cloudflare_ai.ipynb index 21cc3a3dbe65..0cc3c070de72 100644 --- a/docs/core_docs/docs/integrations/text_embedding/cloudflare_ai.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/cloudflare_ai.ipynb @@ -57,8 +57,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/text_embedding/cohere.ipynb b/docs/core_docs/docs/integrations/text_embedding/cohere.ipynb index 209aa6352f5d..890bf3d8708b 100644 --- a/docs/core_docs/docs/integrations/text_embedding/cohere.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/cohere.ipynb @@ -46,8 +46,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/text_embedding/fireworks.ipynb b/docs/core_docs/docs/integrations/text_embedding/fireworks.ipynb index 
bae29ba34b96..507abbf4f22e 100644 --- a/docs/core_docs/docs/integrations/text_embedding/fireworks.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/fireworks.ipynb @@ -45,8 +45,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/text_embedding/google_generativeai.ipynb b/docs/core_docs/docs/integrations/text_embedding/google_generativeai.ipynb index 91cf095a5e8a..605674d7139d 100644 --- a/docs/core_docs/docs/integrations/text_embedding/google_generativeai.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/google_generativeai.ipynb @@ -47,8 +47,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/text_embedding/google_vertex_ai.ipynb b/docs/core_docs/docs/integrations/text_embedding/google_vertex_ai.ipynb index 870be58b58bf..dd32b96b6325 100644 --- a/docs/core_docs/docs/integrations/text_embedding/google_vertex_ai.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/google_vertex_ai.ipynb @@ -56,8 +56,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export 
LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/text_embedding/ibm.ipynb b/docs/core_docs/docs/integrations/text_embedding/ibm.ipynb index bac03b424f48..e9c58968de6b 100644 --- a/docs/core_docs/docs/integrations/text_embedding/ibm.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/ibm.ipynb @@ -118,8 +118,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/text_embedding/mistralai.ipynb b/docs/core_docs/docs/integrations/text_embedding/mistralai.ipynb index b272f3208843..d7799dfd6164 100644 --- a/docs/core_docs/docs/integrations/text_embedding/mistralai.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/mistralai.ipynb @@ -45,8 +45,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/text_embedding/ollama.ipynb b/docs/core_docs/docs/integrations/text_embedding/ollama.ipynb index e1aa8cfa7771..7e4a963a16e6 100644 --- a/docs/core_docs/docs/integrations/text_embedding/ollama.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/ollama.ipynb @@ -39,8 +39,8 @@ "If you want to get 
automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/text_embedding/openai.ipynb b/docs/core_docs/docs/integrations/text_embedding/openai.ipynb index 14001b233f6c..5f7595627be1 100644 --- a/docs/core_docs/docs/integrations/text_embedding/openai.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/openai.ipynb @@ -45,8 +45,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/text_embedding/pinecone.ipynb b/docs/core_docs/docs/integrations/text_embedding/pinecone.ipynb index e3ab07565f27..7df7fb186cdb 100644 --- a/docs/core_docs/docs/integrations/text_embedding/pinecone.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/pinecone.ipynb @@ -45,8 +45,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/text_embedding/togetherai.ipynb 
b/docs/core_docs/docs/integrations/text_embedding/togetherai.ipynb index 029ab2877261..b94c4bd57820 100644 --- a/docs/core_docs/docs/integrations/text_embedding/togetherai.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/togetherai.ipynb @@ -45,8 +45,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/toolkits/openapi.ipynb b/docs/core_docs/docs/integrations/toolkits/openapi.ipynb index afbc84478784..0f5afc17ee5e 100644 --- a/docs/core_docs/docs/integrations/toolkits/openapi.ipynb +++ b/docs/core_docs/docs/integrations/toolkits/openapi.ipynb @@ -54,8 +54,8 @@ "If you want to get automated tracing from runs of individual tools, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```typescript\n", - "process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "process.env.LANGSMITH_TRACING=\"true\"\n", + "process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/toolkits/sql.ipynb b/docs/core_docs/docs/integrations/toolkits/sql.ipynb index fd8e6c949306..1e2512f7754e 100644 --- a/docs/core_docs/docs/integrations/toolkits/sql.ipynb +++ b/docs/core_docs/docs/integrations/toolkits/sql.ipynb @@ -41,8 +41,8 @@ "If you want to get automated tracing from runs of individual tools, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```typescript\n", - "process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - 
"process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "process.env.LANGSMITH_TRACING=\"true\"\n", + "process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/toolkits/vectorstore.ipynb b/docs/core_docs/docs/integrations/toolkits/vectorstore.ipynb index fb233438253d..663bba1c1c53 100644 --- a/docs/core_docs/docs/integrations/toolkits/vectorstore.ipynb +++ b/docs/core_docs/docs/integrations/toolkits/vectorstore.ipynb @@ -30,8 +30,8 @@ "If you want to get automated tracing from runs of individual tools, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```typescript\n", - "process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "process.env.LANGSMITH_TRACING=\"true\"\n", + "process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/docs/core_docs/docs/integrations/tools/duckduckgo_search.ipynb b/docs/core_docs/docs/integrations/tools/duckduckgo_search.ipynb index 574447b86877..ddb857cb0d4c 100644 --- a/docs/core_docs/docs/integrations/tools/duckduckgo_search.ipynb +++ b/docs/core_docs/docs/integrations/tools/duckduckgo_search.ipynb @@ -53,8 +53,8 @@ "It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:\n", "\n", "```typescript\n", - "process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "process.env.LANGSMITH_TRACING=\"true\"\n", + "process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", "```" ] }, diff --git a/docs/core_docs/docs/integrations/tools/exa_search.ipynb b/docs/core_docs/docs/integrations/tools/exa_search.ipynb index b48ee2d3831f..fcec4fa2a4ee 100644 --- a/docs/core_docs/docs/integrations/tools/exa_search.ipynb +++ b/docs/core_docs/docs/integrations/tools/exa_search.ipynb @@ -48,8 +48,8 @@ "It's also helpful (but not needed) 
to set up LangSmith for best-in-class observability:\n", "\n", "```typescript\n", - "process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "process.env.LANGSMITH_TRACING=\"true\"\n", + "process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", "```" ] }, diff --git a/docs/core_docs/docs/integrations/tools/google_scholar.ipynb b/docs/core_docs/docs/integrations/tools/google_scholar.ipynb index ed6539459e1d..fc02d938c9ac 100644 --- a/docs/core_docs/docs/integrations/tools/google_scholar.ipynb +++ b/docs/core_docs/docs/integrations/tools/google_scholar.ipynb @@ -56,8 +56,8 @@ "It's also helpful to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:\n", "\n", "```typescript\n", - "process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "process.env.LANGCHAIN_API_KEY=\"your-langchain-api-key\"\n", + "process.env.LANGSMITH_TRACING=\"true\"\n", + "process.env.LANGSMITH_API_KEY=\"your-langchain-api-key\"\n", "```" ] }, diff --git a/docs/core_docs/docs/integrations/tools/serpapi.ipynb b/docs/core_docs/docs/integrations/tools/serpapi.ipynb index 0172711a380e..b928d5ce89e8 100644 --- a/docs/core_docs/docs/integrations/tools/serpapi.ipynb +++ b/docs/core_docs/docs/integrations/tools/serpapi.ipynb @@ -59,8 +59,8 @@ "It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:\n", "\n", "```typescript\n", - "process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "process.env.LANGSMITH_TRACING=\"true\"\n", + "process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", "```" ] }, diff --git a/docs/core_docs/docs/integrations/tools/tavily_search.ipynb b/docs/core_docs/docs/integrations/tools/tavily_search.ipynb index 327c1f677c2c..72e12bd1fc36 100644 --- a/docs/core_docs/docs/integrations/tools/tavily_search.ipynb +++ b/docs/core_docs/docs/integrations/tools/tavily_search.ipynb @@ -59,8 +59,8 @@ "It's also helpful (but not 
needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:\n", "\n", "```typescript\n", - "process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "process.env.LANGSMITH_TRACING=\"true\"\n", + "process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", "```" ] }, diff --git a/docs/core_docs/docs/integrations/vectorstores/chroma.ipynb b/docs/core_docs/docs/integrations/vectorstores/chroma.ipynb index 567f9415ca5f..28197e2a4087 100644 --- a/docs/core_docs/docs/integrations/vectorstores/chroma.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/chroma.ipynb @@ -84,8 +84,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "// process.env.LANGSMITH_TRACING=\"true\"\n", + "// process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", "```" ] }, diff --git a/docs/core_docs/docs/integrations/vectorstores/elasticsearch.ipynb b/docs/core_docs/docs/integrations/vectorstores/elasticsearch.ipynb index 2b3789eb9a7e..a30f8bb1978a 100644 --- a/docs/core_docs/docs/integrations/vectorstores/elasticsearch.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/elasticsearch.ipynb @@ -91,8 +91,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "// process.env.LANGSMITH_TRACING=\"true\"\n", + "// process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", "```" ] }, diff --git a/docs/core_docs/docs/integrations/vectorstores/faiss.ipynb b/docs/core_docs/docs/integrations/vectorstores/faiss.ipynb index 1d3422ce34f5..95f94c8c4c04 
100644 --- a/docs/core_docs/docs/integrations/vectorstores/faiss.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/faiss.ipynb @@ -86,8 +86,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "// process.env.LANGSMITH_TRACING=\"true\"\n", + "// process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", "```" ] }, diff --git a/docs/core_docs/docs/integrations/vectorstores/hnswlib.ipynb b/docs/core_docs/docs/integrations/vectorstores/hnswlib.ipynb index 8ec5ece7ed0b..78b5a1fb2615 100644 --- a/docs/core_docs/docs/integrations/vectorstores/hnswlib.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/hnswlib.ipynb @@ -90,8 +90,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "// process.env.LANGSMITH_TRACING=\"true\"\n", + "// process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", "```" ] }, diff --git a/docs/core_docs/docs/integrations/vectorstores/memory.ipynb b/docs/core_docs/docs/integrations/vectorstores/memory.ipynb index c38d1a03e118..681d81b70078 100644 --- a/docs/core_docs/docs/integrations/vectorstores/memory.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/memory.ipynb @@ -77,8 +77,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "// process.env.LANGSMITH_TRACING=\"true\"\n", + "// 
process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", "```" ] }, diff --git a/docs/core_docs/docs/integrations/vectorstores/mongodb_atlas.ipynb b/docs/core_docs/docs/integrations/vectorstores/mongodb_atlas.ipynb index e84e4a8bf31d..f697d3629360 100644 --- a/docs/core_docs/docs/integrations/vectorstores/mongodb_atlas.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/mongodb_atlas.ipynb @@ -131,8 +131,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "// process.env.LANGSMITH_TRACING=\"true\"\n", + "// process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", "```" ] }, diff --git a/docs/core_docs/docs/integrations/vectorstores/pgvector.ipynb b/docs/core_docs/docs/integrations/vectorstores/pgvector.ipynb index 9d86461dac56..ca148923f530 100644 --- a/docs/core_docs/docs/integrations/vectorstores/pgvector.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/pgvector.ipynb @@ -79,8 +79,7 @@ "\n", "```yaml\n", "# Run this command to start the database:\n", - "# docker-compose up --build\n", - "version: \"3\"\n", + "# docker compose up\n", "services:\n", " db:\n", " hostname: 127.0.0.1\n", @@ -92,11 +91,9 @@ " - POSTGRES_DB=api\n", " - POSTGRES_USER=myuser\n", " - POSTGRES_PASSWORD=ChangeMe\n", - " volumes:\n", - " - ./init.sql:/docker-entrypoint-initdb.d/init.sql\n", "```\n", "\n", - "And then in the same directory, run docker compose up to start the container.\n", + "And then in the same directory, run `docker compose up` to start the container.\n", "\n", "You can find more information on how to setup pgvector in the [official repository](https://github.com/pgvector/pgvector/).\n", "\n", @@ -113,8 +110,8 @@ "If you want to get automated tracing of your model calls you can also set your 
[LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "// process.env.LANGSMITH_TRACING=\"true\"\n", + "// process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", "```" ] }, @@ -626,4 +623,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +} diff --git a/docs/core_docs/docs/integrations/vectorstores/pinecone.ipynb b/docs/core_docs/docs/integrations/vectorstores/pinecone.ipynb index d47880caebb0..480c710d9127 100644 --- a/docs/core_docs/docs/integrations/vectorstores/pinecone.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/pinecone.ipynb @@ -83,8 +83,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "// process.env.LANGSMITH_TRACING=\"true\"\n", + "// process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", "```" ] }, diff --git a/docs/core_docs/docs/integrations/vectorstores/qdrant.ipynb b/docs/core_docs/docs/integrations/vectorstores/qdrant.ipynb index ce91818a4ca0..870485c1a5fa 100644 --- a/docs/core_docs/docs/integrations/vectorstores/qdrant.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/qdrant.ipynb @@ -89,8 +89,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "// process.env.LANGSMITH_TRACING=\"true\"\n", + "// process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", "```" ] }, diff --git a/docs/core_docs/docs/integrations/vectorstores/redis.ipynb 
b/docs/core_docs/docs/integrations/vectorstores/redis.ipynb index 3d6a038d8940..a98fb5b2d4d7 100644 --- a/docs/core_docs/docs/integrations/vectorstores/redis.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/redis.ipynb @@ -88,8 +88,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "// process.env.LANGSMITH_TRACING=\"true\"\n", + "// process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", "```" ] }, diff --git a/docs/core_docs/docs/integrations/vectorstores/supabase.ipynb b/docs/core_docs/docs/integrations/vectorstores/supabase.ipynb index 6b6f49a09d02..3979944bd60e 100644 --- a/docs/core_docs/docs/integrations/vectorstores/supabase.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/supabase.ipynb @@ -127,8 +127,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "// process.env.LANGSMITH_TRACING=\"true\"\n", + "// process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", "```" ] }, diff --git a/docs/core_docs/docs/integrations/vectorstores/upstash.ipynb b/docs/core_docs/docs/integrations/vectorstores/upstash.ipynb index 66b0c7d4c264..882e32beb03c 100644 --- a/docs/core_docs/docs/integrations/vectorstores/upstash.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/upstash.ipynb @@ -64,6 +64,14 @@ "\n", "You can create an index from the [Upstash Console](https://console.upstash.com/login). For further reference, see [the official docs](https://upstash.com/docs/vector/overall/getstarted).\n", "\n", + "Upstash vector also has built in embedding support. 
Which means you can use it directly without the need for an additional embedding model. Check the [embedding models documentation](https://upstash.com/docs/vector/features/embeddingmodels) for more details.\n", + "\n", + "```{=mdx}\n", + ":::note\n", + "To use the built-in Upstash embeddings, you'll need to select an embedding model when creating the index.\n", + ":::\n", + "```\n", + "\n", "### Credentials\n", "\n", "Once you've set up an index, set the following environment variables:\n", @@ -82,8 +90,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "// process.env.LANGSMITH_TRACING=\"true\"\n", + "// process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", "```" ] }, @@ -127,6 +135,38 @@ "});" ] }, + { + "cell_type": "markdown", + "id": "afa53a9c", + "metadata": {}, + "source": [ + "## Usage with built-in embeddings\n", + "\n", + "To use the built-in Upstash embeddings, you can pass a `FakeEmbeddings` instance to the `UpstashVectorStore` constructor. This will make the `UpstashVectorStore` use the built-in embeddings, which you selected when creating the index." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cbabe6f7", + "metadata": {}, + "outputs": [], + "source": [ + "import { UpstashVectorStore } from \"@langchain/community/vectorstores/upstash\";\n", + "import { FakeEmbeddings } from \"@langchain/core/utils/testing\";\n", + "\n", + "import { Index } from \"@upstash/vector\";\n", + "\n", + "const indexWithEmbeddings = new Index({\n", + " url: process.env.UPSTASH_VECTOR_REST_URL,\n", + " token: process.env.UPSTASH_VECTOR_REST_TOKEN,\n", + "});\n", + "\n", + "const vectorStore = new UpstashVectorStore(new FakeEmbeddings(), {\n", + " index: indexWithEmbeddings,\n", + "});" + ] + }, { "cell_type": "markdown", "id": "ac6071d4", @@ -360,4 +400,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +} diff --git a/docs/core_docs/docs/integrations/vectorstores/weaviate.ipynb b/docs/core_docs/docs/integrations/vectorstores/weaviate.ipynb index bcee4bf2b3dd..4e10d08a802a 100644 --- a/docs/core_docs/docs/integrations/vectorstores/weaviate.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/weaviate.ipynb @@ -86,8 +86,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "// process.env.LANGSMITH_TRACING=\"true\"\n", + "// process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", "```" ] }, diff --git a/docs/core_docs/docs/tutorials/chatbot.ipynb b/docs/core_docs/docs/tutorials/chatbot.ipynb index d21d9cd92591..29d8878c2ed6 100644 --- a/docs/core_docs/docs/tutorials/chatbot.ipynb +++ b/docs/core_docs/docs/tutorials/chatbot.ipynb @@ -91,8 +91,8 @@ "After you sign up at the link above, make sure to set your environment variables to start logging traces:\n", "\n", "```typescript\n", - "process.env.LANGCHAIN_TRACING_V2 = \"true\"\n", - 
"process.env.LANGCHAIN_API_KEY = \"...\"\n", + "process.env.LANGSMITH_TRACING = \"true\"\n", + "process.env.LANGSMITH_API_KEY = \"...\"\n", "```\n", "\n", "## Quickstart\n", diff --git a/docs/core_docs/docs/tutorials/extraction.ipynb b/docs/core_docs/docs/tutorials/extraction.ipynb index 31f09ab75631..e5a23914627f 100644 --- a/docs/core_docs/docs/tutorials/extraction.ipynb +++ b/docs/core_docs/docs/tutorials/extraction.ipynb @@ -64,8 +64,8 @@ "After you sign up at the link above, make sure to set your environment variables to start logging traces:\n", "\n", "```shell\n", - "export LANGCHAIN_TRACING_V2=\"true\"\n", - "export LANGCHAIN_API_KEY=\"...\"\n", + "export LANGSMITH_TRACING=\"true\"\n", + "export LANGSMITH_API_KEY=\"...\"\n", "\n", "# Reduce tracing latency if you are not in a serverless environment\n", "# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/docs/tutorials/graph.ipynb b/docs/core_docs/docs/tutorials/graph.ipynb index 0d8ab1ba18ad..28bf22191890 100644 --- a/docs/core_docs/docs/tutorials/graph.ipynb +++ b/docs/core_docs/docs/tutorials/graph.ipynb @@ -61,7 +61,7 @@ "\n", "# Optional, use LangSmith for best-in-class observability\n", "LANGSMITH_API_KEY=your-api-key\n", - "LANGCHAIN_TRACING_V2=true\n", + "LANGSMITH_TRACING=true\n", "\n", "# Reduce tracing latency if you are not in a serverless environment\n", "# LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/docs/tutorials/llm_chain.ipynb b/docs/core_docs/docs/tutorials/llm_chain.ipynb index d108edb0d65e..1c04758b0ac0 100644 --- a/docs/core_docs/docs/tutorials/llm_chain.ipynb +++ b/docs/core_docs/docs/tutorials/llm_chain.ipynb @@ -57,8 +57,8 @@ "After you sign up at the link above, make sure to set your environment variables to start logging traces:\n", "\n", "```shell\n", - "export LANGCHAIN_TRACING_V2=\"true\"\n", - "export LANGCHAIN_API_KEY=\"...\"\n", + "export LANGSMITH_TRACING=\"true\"\n", + "export LANGSMITH_API_KEY=\"...\"\n", "\n", "# Reduce 
tracing latency if you are not in a serverless environment\n", "# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/docs/tutorials/qa_chat_history.ipynb b/docs/core_docs/docs/tutorials/qa_chat_history.ipynb index d97d7d297234..0dc020a50047 100644 --- a/docs/core_docs/docs/tutorials/qa_chat_history.ipynb +++ b/docs/core_docs/docs/tutorials/qa_chat_history.ipynb @@ -164,8 +164,8 @@ "\n", "\n", "```bash\n", - "export LANGCHAIN_TRACING_V2=true\n", - "export LANGCHAIN_API_KEY=YOUR_KEY\n", + "export LANGSMITH_TRACING=true\n", + "export LANGSMITH_API_KEY=YOUR_KEY\n", "\n", "# Reduce tracing latency if you are not in a serverless environment\n", "# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/docs/tutorials/rag.ipynb b/docs/core_docs/docs/tutorials/rag.ipynb index 5de1dd38681f..7d9472d9c00c 100644 --- a/docs/core_docs/docs/tutorials/rag.ipynb +++ b/docs/core_docs/docs/tutorials/rag.ipynb @@ -81,8 +81,8 @@ "After you sign up at the link above, make sure to set your environment variables to start logging traces:\n", "\n", "```shell\n", - "export LANGCHAIN_TRACING_V2=\"true\"\n", - "export LANGCHAIN_API_KEY=\"...\"\n", + "export LANGSMITH_TRACING=\"true\"\n", + "export LANGSMITH_API_KEY=\"...\"\n", "\n", "# Reduce tracing latency if you are not in a serverless environment\n", "# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/docs/tutorials/retrievers.ipynb b/docs/core_docs/docs/tutorials/retrievers.ipynb index 2981196652b2..12a386504417 100644 --- a/docs/core_docs/docs/tutorials/retrievers.ipynb +++ b/docs/core_docs/docs/tutorials/retrievers.ipynb @@ -51,8 +51,8 @@ "After you sign up at the link above, make sure to set your environment variables to start logging traces:\n", "\n", "```shell\n", - "export LANGCHAIN_TRACING_V2=\"true\"\n", - "export LANGCHAIN_API_KEY=\"...\"\n", + "export LANGSMITH_TRACING=\"true\"\n", + "export LANGSMITH_API_KEY=\"...\"\n", "\n", "# Reduce tracing latency if you 
are not in a serverless environment\n", "# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/docs/tutorials/sql_qa.ipynb b/docs/core_docs/docs/tutorials/sql_qa.ipynb index b072052b56f2..7bf61ea8d0d4 100644 --- a/docs/core_docs/docs/tutorials/sql_qa.ipynb +++ b/docs/core_docs/docs/tutorials/sql_qa.ipynb @@ -43,8 +43,8 @@ "\n", "```shell\n", "# Uncomment the below to use LangSmith. Not required, but recommended for debugging and observability.\n", - "# export LANGCHAIN_API_KEY=\n", - "# export LANGCHAIN_TRACING_V2=true\n", + "# export LANGSMITH_API_KEY=\n", + "# export LANGSMITH_TRACING=true\n", "\n", "# Reduce tracing latency if you are not in a serverless environment\n", "# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/docs/tutorials/summarization.ipynb b/docs/core_docs/docs/tutorials/summarization.ipynb index aacf448b7e69..c854982b829e 100644 --- a/docs/core_docs/docs/tutorials/summarization.ipynb +++ b/docs/core_docs/docs/tutorials/summarization.ipynb @@ -79,8 +79,8 @@ "After you sign up at the link above, make sure to set your environment variables to start logging traces:\n", "\n", "```shell\n", - "export LANGCHAIN_TRACING_V2=\"true\"\n", - "export LANGCHAIN_API_KEY=\"...\"\n", + "export LANGSMITH_TRACING=\"true\"\n", + "export LANGSMITH_API_KEY=\"...\"\n", "\n", "# Reduce tracing latency if you are not in a serverless environment\n", "# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n", diff --git a/docs/core_docs/vercel.json b/docs/core_docs/vercel.json index fbef2e293a49..8900b58e9c82 100644 --- a/docs/core_docs/vercel.json +++ b/docs/core_docs/vercel.json @@ -92,6 +92,10 @@ { "source": "/docs/troubleshooting/errors/MULTIPLE_SUBGRAPHS(/?)", "destination": "https://langchain-ai.github.io/langgraphjs/troubleshooting/errors/MULTIPLE_SUBGRAPHS/" + }, + { + "source": "/docs/integrations/llms/watsonx_ai(/?)", + "destination": "https://js.langchain.com/docs/integrations/llms/ibm/" } ] } \ No newline at end of file 
diff --git a/examples/package.json b/examples/package.json index 14373178863c..53faf65a96ce 100644 --- a/examples/package.json +++ b/examples/package.json @@ -76,7 +76,7 @@ "@tensorflow/tfjs-backend-cpu": "^4.4.0", "@upstash/redis": "^1.32.0", "@upstash/vector": "^1.1.1", - "@vercel/kv": "^0.2.3", + "@vercel/kv": "^3.0.0", "@xata.io/client": "^0.28.0", "@zilliz/milvus2-sdk-node": "^2.3.5", "axios": "^0.26.0", diff --git a/examples/src/cache/chat_models/vercel_kv.ts b/examples/src/cache/chat_models/vercel_kv.ts new file mode 100644 index 000000000000..c230c6380342 --- /dev/null +++ b/examples/src/cache/chat_models/vercel_kv.ts @@ -0,0 +1,17 @@ +import { ChatOpenAI } from "@langchain/openai"; +import { VercelKVCache } from "@langchain/community/caches/vercel_kv"; +import { createClient } from "@vercel/kv"; + +// See https://vercel.com/docs/storage/vercel-kv/kv-reference#createclient-example for connection options +const cache = new VercelKVCache({ + client: createClient({ + url: "VERCEL_KV_API_URL", + token: "VERCEL_KV_API_TOKEN", + }), + ttl: 3600, +}); + +const model = new ChatOpenAI({ + model: "gpt-4o-mini", + cache, +}); diff --git a/examples/src/cache/vercel_kv.ts b/examples/src/cache/vercel_kv.ts new file mode 100644 index 000000000000..da9488553784 --- /dev/null +++ b/examples/src/cache/vercel_kv.ts @@ -0,0 +1,14 @@ +import { OpenAI } from "@langchain/openai"; +import { VercelKVCache } from "@langchain/community/caches/vercel_kv"; +import { createClient } from "@vercel/kv"; + +// See https://vercel.com/docs/storage/vercel-kv/kv-reference#createclient-example for connection options +const cache = new VercelKVCache({ + client: createClient({ + url: "VERCEL_KV_API_URL", + token: "VERCEL_KV_API_TOKEN", + }), + ttl: 3600, +}); + +const model = new OpenAI({ cache }); diff --git a/examples/src/llms/watsonx_ai.ts b/examples/src/llms/watsonx_ai.ts deleted file mode 100644 index c9b700cf848b..000000000000 --- a/examples/src/llms/watsonx_ai.ts +++ /dev/null @@ -1,18 +0,0 
@@ -import { WatsonxAI } from "@langchain/community/llms/watsonx_ai"; - -// Note that modelParameters are optional -const model = new WatsonxAI({ - modelId: "meta-llama/llama-2-70b-chat", - modelParameters: { - max_new_tokens: 100, - min_new_tokens: 0, - stop_sequences: [], - repetition_penalty: 1, - }, -}); - -const res = await model.invoke( - "What would be a good company name for a company that makes colorful socks?" -); - -console.log({ res }); diff --git a/langchain-core/package.json b/langchain-core/package.json index b938bb050122..6e3ddfc22d88 100644 --- a/langchain-core/package.json +++ b/langchain-core/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/core", - "version": "0.3.27", + "version": "0.3.28", "description": "Core LangChain.js abstractions and schemas", "type": "module", "engines": { diff --git a/langchain-core/src/prompts/base.ts b/langchain-core/src/prompts/base.ts index f9e02387e261..ad5d61b32ffd 100644 --- a/langchain-core/src/prompts/base.ts +++ b/langchain-core/src/prompts/base.ts @@ -74,6 +74,14 @@ export abstract class BasePromptTemplate< partialVariables: PartialValues; + /** + * Metadata to be used for tracing. + */ + metadata?: Record; + + /** Tags to be used for tracing. */ + tags?: string[]; + constructor(input: BasePromptTemplateInput) { super(input); const { inputVariables } = input; @@ -127,10 +135,15 @@ export abstract class BasePromptTemplate< input: RunInput, options?: BaseCallbackConfig ): Promise { + const metadata = { + ...this.metadata, + ...options?.metadata, + }; + const tags = [...(this.tags ?? []), ...(options?.tags ?? 
[])]; return this._callWithConfig( (input: RunInput) => this.formatPromptValue(input), input, - { ...options, runType: "prompt" } + { ...options, tags, metadata, runType: "prompt" } ); } diff --git a/langchain/package.json b/langchain/package.json index b52c54cbad3f..207c846b478f 100644 --- a/langchain/package.json +++ b/langchain/package.json @@ -1,6 +1,6 @@ { "name": "langchain", - "version": "0.3.9", + "version": "0.3.10", "description": "Typescript bindings for langchain", "type": "module", "engines": { @@ -418,6 +418,7 @@ "@langchain/core": "workspace:*", "@langchain/google-genai": "*", "@langchain/google-vertexai": "*", + "@langchain/google-vertexai-web": "*", "@langchain/groq": "*", "@langchain/mistralai": "*", "@langchain/ollama": "*", @@ -466,6 +467,7 @@ "@langchain/core": ">=0.2.21 <0.4.0", "@langchain/google-genai": "*", "@langchain/google-vertexai": "*", + "@langchain/google-vertexai-web": "*", "@langchain/groq": "*", "@langchain/mistralai": "*", "@langchain/ollama": "*", @@ -494,6 +496,9 @@ "@langchain/google-vertexai": { "optional": true }, + "@langchain/google-vertexai-web": { + "optional": true + }, "@langchain/groq": { "optional": true }, diff --git a/langchain/src/chat_models/tests/universal.int.test.ts b/langchain/src/chat_models/tests/universal.int.test.ts index abf4f6a24bd3..0df247708146 100644 --- a/langchain/src/chat_models/tests/universal.int.test.ts +++ b/langchain/src/chat_models/tests/universal.int.test.ts @@ -422,6 +422,19 @@ describe("Works with all model providers", () => { expect(togetherResult).toBeDefined(); expect(togetherResult.content.length).toBeGreaterThan(0); }); + + it("Can invoke google-vertexai-web", async () => { + const vertexAIWeb = await initChatModel(undefined, { + modelProvider: "google-vertexai-web", + temperature: 0, + }); + + const vertexAIWebResult = await vertexAIWeb.invoke( + "what's your name? Use the 'name' tool to respond." 
+ ); + expect(vertexAIWebResult).toBeDefined(); + expect(vertexAIWebResult.content.length).toBeGreaterThan(0); + }); }); test("Is compatible with agents", async () => { diff --git a/langchain/src/chat_models/universal.ts b/langchain/src/chat_models/universal.ts index 13311c4cfea5..b6fd4af95d22 100644 --- a/langchain/src/chat_models/universal.ts +++ b/langchain/src/chat_models/universal.ts @@ -40,6 +40,8 @@ const _SUPPORTED_PROVIDERS = [ "azure_openai", "cohere", "google-vertexai", + "google-vertexai-web", + "google-genai", "google-genai", "ollama", "together", @@ -97,6 +99,10 @@ async function _initChatModelHelper( const { ChatVertexAI } = await import("@langchain/google-vertexai"); return new ChatVertexAI({ model, ...passedParams }); } + case "google-vertexai-web": { + const { ChatVertexAI } = await import("@langchain/google-vertexai-web"); + return new ChatVertexAI({ model, ...passedParams }); + } case "google-genai": { const { ChatGoogleGenerativeAI } = await import( "@langchain/google-genai" @@ -594,6 +600,7 @@ export async function initChatModel< * - anthropic (@langchain/anthropic) * - azure_openai (@langchain/openai) * - google-vertexai (@langchain/google-vertexai) + * - google-vertexai-web (@langchain/google-vertexai-web) * - google-genai (@langchain/google-genai) * - bedrock (@langchain/aws) * - cohere (@langchain/cohere) diff --git a/langchain/src/hub.ts b/langchain/src/hub.ts index 53abe8e8c1fc..1ad631030778 100644 --- a/langchain/src/hub.ts +++ b/langchain/src/hub.ts @@ -50,8 +50,23 @@ export async function pull( options?: { apiKey?: string; apiUrl?: string; includeModel?: boolean } ) { const client = new Client(options); - const result = await client._pullPrompt(ownerRepoCommit, { + + const promptObject = await client.pullPromptCommit(ownerRepoCommit, { includeModel: options?.includeModel, }); - return load(result); + + if (promptObject.manifest.kwargs?.metadata === undefined) { + promptObject.manifest.kwargs = { + ...promptObject.manifest.kwargs, + 
metadata: {}, + }; + } + + promptObject.manifest.kwargs.metadata = { + ...promptObject.manifest.kwargs.metadata, + lc_hub_owner: promptObject.owner, + lc_hub_repo: promptObject.repo, + lc_hub_commit_hash: promptObject.commit_hash, + }; + return load(JSON.stringify(promptObject.manifest)); } diff --git a/langchain/src/tests/hub.int.test.ts b/langchain/src/tests/hub.int.test.ts index 43b87f699d25..442520289705 100644 --- a/langchain/src/tests/hub.int.test.ts +++ b/langchain/src/tests/hub.int.test.ts @@ -1,10 +1,10 @@ /* eslint-disable no-process-env */ -import { PromptTemplate } from "@langchain/core/prompts"; +import { ChatPromptTemplate } from "@langchain/core/prompts"; import * as hub from "../hub.js"; test("Test LangChain Hub client pushing a new repo", async () => { - const prompt = PromptTemplate.fromTemplate( + const prompt = ChatPromptTemplate.fromTemplate( `You are a parrot. The current date is ${new Date().toISOString()}\n{input}` ); const repoName = `${ @@ -14,7 +14,7 @@ test("Test LangChain Hub client pushing a new repo", async () => { newRepoIsPublic: false, }); const pulledPrompt = await hub.pull(repoName); - expect(prompt.invoke({ input: "testing" })).toEqual( - pulledPrompt.invoke({ input: "testing" }) + expect(await prompt.invoke({ input: "testing" })).toEqual( + await pulledPrompt.invoke({ input: "testing" }) ); }); diff --git a/libs/create-langchain-integration/package.json b/libs/create-langchain-integration/package.json index 0bc7d5f23539..2792d628bfe9 100644 --- a/libs/create-langchain-integration/package.json +++ b/libs/create-langchain-integration/package.json @@ -1,6 +1,6 @@ { "name": "create-langchain-integration", - "version": "0.0.11", + "version": "0.0.12", "repository": { "type": "git", "url": "https://github.com/langchain-ai/langchainjs", @@ -9,7 +9,7 @@ "bin": "./dist/index.js", "scripts": { "dev": "ncc build ./index.ts -w -o dist/", - "build": "ncc build ./index.ts -o ./dist/ --minify --no-cache --no-source-map-register && cp 
./template/.eslintrc.cjs ./template/.prettierrc ./template/.release-it.json ./dist/template", + "build": "ncc build ./index.ts -o ./dist/ --minify --no-cache --no-source-map-register && cp ./template/.env.example ./template/.eslintrc.cjs ./template/.prettierrc ./template/.release-it.json ./dist/template", "format": "prettier --config .prettierrc --write \"./helpers\"", "format:check": "prettier --config .prettierrc --check \"./helpers\"", "lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js ./helpers", diff --git a/libs/create-langchain-integration/template/.env.example b/libs/create-langchain-integration/template/.env.example new file mode 100644 index 000000000000..29eeeaf149c3 --- /dev/null +++ b/libs/create-langchain-integration/template/.env.example @@ -0,0 +1,2 @@ +# Environment variables for testing locally go here +INTEGRATION_API_KEY="your_key" \ No newline at end of file diff --git a/libs/create-langchain-integration/template/package.json b/libs/create-langchain-integration/template/package.json index 59ea14c9fe69..5ab717048eff 100644 --- a/libs/create-langchain-integration/template/package.json +++ b/libs/create-langchain-integration/template/package.json @@ -1,7 +1,7 @@ { "name": "langchain-integration", "version": "0.0.0", - "description": "Sample integration for LangChain.js", + "description": "Sample INTEGRATION_SHORT_NAME integration for LangChain.js", "type": "module", "engines": { "node": ">=18" @@ -12,7 +12,7 @@ "type": "git", "url": "git@github.com:langchain-ai/langchainjs.git" }, - "homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-INTEGRATION_NAME/", + "homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/INTEGRATION_NAME/", "scripts": { "build": "yarn turbo:command build:internal --filter=@langchain/INTEGRATION_NAME", "build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking", @@ -31,13 +31,14 @@ }, "author": "LangChain", "license": "MIT", - 
"dependencies": { + "peerDependencies": { "@langchain/core": ">=0.3.0 <0.4.0" }, "devDependencies": { "@jest/globals": "^29.5.0", "@swc/core": "^1.3.90", "@swc/jest": "^0.2.29", + "@langchain/core": "workspace:*", "@langchain/scripts": ">=0.1.0 <0.2.0", "@tsconfig/recommended": "^1.0.3", "@typescript-eslint/eslint-plugin": "^6.12.0", @@ -63,7 +64,11 @@ }, "exports": { ".": { - "types": "./index.d.ts", + "types": { + "import": "./index.d.ts", + "require": "./index.d.cts", + "default": "./index.d.ts" + }, "import": "./index.js", "require": "./index.cjs" }, @@ -73,6 +78,7 @@ "dist/", "index.cjs", "index.js", - "index.d.ts" + "index.d.ts", + "index.d.cts" ] -} \ No newline at end of file +} diff --git a/libs/create-langchain-integration/template/src/chat_models.ts b/libs/create-langchain-integration/template/src/chat_models.ts index 7ead127849b1..752361131153 100644 --- a/libs/create-langchain-integration/template/src/chat_models.ts +++ b/libs/create-langchain-integration/template/src/chat_models.ts @@ -16,6 +16,12 @@ import { // AIMessageChunk, // } from "@langchain/core/messages"; +// Uncomment if implementing tool calling + +// import { +// type BindToolsInput, +// } from "@langchain/core/language_models/chat_models"; + /** * Input to chat model class. */ @@ -24,10 +30,10 @@ export interface ChatIntegrationInput extends BaseChatModelParams {} /** * Integration with a chat model. */ -export class ChatIntegration< - CallOptions extends BaseLanguageModelCallOptions = BaseLanguageModelCallOptions - > - extends SimpleChatModel +export class ChatIntegration + // Extend BaseLanguageModelCallOptions and pass it as the generic here + // to support typing for additional runtime parameters for your integration + extends SimpleChatModel implements ChatIntegrationInput { // Used for tracing, replace with the same name as your class @@ -98,6 +104,22 @@ export class ChatIntegration< // } // } + /** + * Implement to support tool calling. 
+ * You must also pass the bound tools into your actual chat completion call. + * See {@link ../../langchain-cerberas/src/chat_model.ts} for + * an example. + */ + // override bindTools( + // tools: BindToolsInput[], + // kwargs?: Partial + // ): Runnable { + // return this.bind({ + // tools: tools.map((tool) => convertToIntegrationFormat(tool)), + // ...kwargs, + // }); + // } + /** @ignore */ _combineLLMOutput() { return []; diff --git a/libs/langchain-community/.gitignore b/libs/langchain-community/.gitignore index 6eb6e9c7d9d3..49f87dc328fb 100644 --- a/libs/langchain-community/.gitignore +++ b/libs/langchain-community/.gitignore @@ -718,6 +718,10 @@ caches/upstash_redis.cjs caches/upstash_redis.js caches/upstash_redis.d.ts caches/upstash_redis.d.cts +caches/vercel_kv.cjs +caches/vercel_kv.js +caches/vercel_kv.d.ts +caches/vercel_kv.d.cts graphs/neo4j_graph.cjs graphs/neo4j_graph.js graphs/neo4j_graph.d.ts diff --git a/libs/langchain-community/langchain.config.js b/libs/langchain-community/langchain.config.js index 3883e56873f8..dc58963eed75 100644 --- a/libs/langchain-community/langchain.config.js +++ b/libs/langchain-community/langchain.config.js @@ -223,6 +223,7 @@ export const config = { "caches/ioredis": "caches/ioredis", "caches/momento": "caches/momento", "caches/upstash_redis": "caches/upstash_redis", + "caches/vercel_kv": "caches/vercel_kv", // graphs "graphs/neo4j_graph": "graphs/neo4j_graph", "graphs/memgraph_graph": "graphs/memgraph_graph", @@ -454,9 +455,12 @@ export const config = { "structured_query/supabase", "structured_query/vectara", "retrievers/zep_cloud", + // cache "cache/cloudflare_kv", "cache/momento", "cache/upstash_redis", + "cache/vercel_kv", + //graphs "graphs/neo4j_graph", "graphs/memgraph_graph", // document_compressors diff --git a/libs/langchain-community/package.json b/libs/langchain-community/package.json index a286ff97ff01..56af665d9fa9 100644 --- a/libs/langchain-community/package.json +++ 
b/libs/langchain-community/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/community", - "version": "0.3.21", + "version": "0.3.23", "description": "Third-party integrations for LangChain.js", "type": "module", "engines": { @@ -132,8 +132,8 @@ "@upstash/ratelimit": "^2.0.3", "@upstash/redis": "^1.32.0", "@upstash/vector": "^1.1.1", - "@vercel/kv": "^0.2.3", - "@vercel/postgres": "^0.5.0", + "@vercel/kv": "^3.0.0", + "@vercel/postgres": "^0.10.0", "@writerai/writer-sdk": "^0.40.2", "@xata.io/client": "^0.28.0", "@zilliz/milvus2-sdk-node": ">=2.3.5", @@ -219,8 +219,7 @@ "weaviate-ts-client": "^1.4.0", "web-auth-library": "^1.0.3", "word-extractor": "^1.0.4", - "youtube-transcript": "^1.0.6", - "youtubei.js": "^9.1.0" + "youtubei.js": "^12.2.0" }, "peerDependencies": { "@arcjet/redact": "^v1.0.0-alpha.23", @@ -281,8 +280,8 @@ "@upstash/ratelimit": "^1.1.3 || ^2.0.3", "@upstash/redis": "^1.20.6", "@upstash/vector": "^1.1.1", - "@vercel/kv": "^0.2.3", - "@vercel/postgres": "^0.5.0", + "@vercel/kv": "*", + "@vercel/postgres": "*", "@writerai/writer-sdk": "^0.40.2", "@xata.io/client": "^0.28.0", "@zilliz/milvus2-sdk-node": ">=2.3.5", @@ -348,8 +347,7 @@ "web-auth-library": "^1.0.3", "word-extractor": "*", "ws": "^8.14.2", - "youtube-transcript": "^1.0.6", - "youtubei.js": "^9.1.0" + "youtubei.js": "*" }, "peerDependenciesMeta": { "@arcjet/redact": { @@ -712,9 +710,6 @@ "ws": { "optional": true }, - "youtube-transcript": { - "optional": true - }, "youtubei.js": { "optional": true } @@ -2343,6 +2338,15 @@ "import": "./caches/upstash_redis.js", "require": "./caches/upstash_redis.cjs" }, + "./caches/vercel_kv": { + "types": { + "import": "./caches/vercel_kv.d.ts", + "require": "./caches/vercel_kv.d.cts", + "default": "./caches/vercel_kv.d.ts" + }, + "import": "./caches/vercel_kv.js", + "require": "./caches/vercel_kv.cjs" + }, "./graphs/neo4j_graph": { "types": { "import": "./graphs/neo4j_graph.d.ts", @@ -3913,6 +3917,10 @@ "caches/upstash_redis.js", 
"caches/upstash_redis.d.ts", "caches/upstash_redis.d.cts", + "caches/vercel_kv.cjs", + "caches/vercel_kv.js", + "caches/vercel_kv.d.ts", + "caches/vercel_kv.d.cts", "graphs/neo4j_graph.cjs", "graphs/neo4j_graph.js", "graphs/neo4j_graph.d.ts", diff --git a/libs/langchain-community/src/caches/tests/vercel_kv.int.test.ts b/libs/langchain-community/src/caches/tests/vercel_kv.int.test.ts new file mode 100644 index 000000000000..ec9176730813 --- /dev/null +++ b/libs/langchain-community/src/caches/tests/vercel_kv.int.test.ts @@ -0,0 +1,34 @@ +/* eslint-disable no-process-env */ +import { ChatOpenAI } from "@langchain/openai"; +import { createClient } from "@vercel/kv"; +import { VercelKVCache } from "../vercel_kv.js"; + +test("VercelKVCache works with ChatOpenAI", async () => { + if ( + !process.env.VERCEL_KV_API_URL || + !process.env.VERCEL_KV_API_TOKEN || + !process.env.OPENAI_API_KEY + ) { + throw new Error("Missing Vercel KV API URL, token, or OpenAI API key"); + } + + const vercelKVCache = new VercelKVCache({ + client: createClient({ + url: process.env.VERCEL_KV_API_URL, + token: process.env.VERCEL_KV_API_TOKEN, + }), + ttl: 60, + }); + + const chat = new ChatOpenAI({ + temperature: 0, + cache: vercelKVCache, + maxTokens: 10, + }); + + const prompt = "What color is the sky?"; + const result1 = await chat.invoke(prompt); + const result2 = await chat.invoke(prompt); + + expect(result1).toEqual(result2); +}); diff --git a/libs/langchain-community/src/caches/vercel_kv.ts b/libs/langchain-community/src/caches/vercel_kv.ts new file mode 100644 index 000000000000..ad641e9f5ef7 --- /dev/null +++ b/libs/langchain-community/src/caches/vercel_kv.ts @@ -0,0 +1,87 @@ +import { kv, type VercelKV } from "@vercel/kv"; + +import { Generation } from "@langchain/core/outputs"; +import { + BaseCache, + deserializeStoredGeneration, + getCacheKey, + serializeGeneration, +} from "@langchain/core/caches"; +import { StoredGeneration } from "@langchain/core/messages"; + +export type 
VercelKVCacheProps = { + /** + * An existing Vercel KV client + */ + client?: VercelKV; + /** + * Time-to-live (TTL) for cached items in seconds + */ + ttl?: number; +}; + +/** + * A cache that uses Vercel KV as the backing store. + * @example + * ```typescript + * const cache = new VercelKVCache({ + * ttl: 3600, // Optional: Cache entries will expire after 1 hour + * }); + * + * // Initialize the OpenAI model with Vercel KV cache for caching responses + * const model = new ChatOpenAI({ + * cache, + * }); + * await model.invoke("How are you today?"); + * const cachedValues = await cache.lookup("How are you today?", "llmKey"); + * ``` + */ +export class VercelKVCache extends BaseCache { + private client: VercelKV; + + private ttl?: number; + + constructor(props: VercelKVCacheProps) { + super(); + const { client, ttl } = props; + this.client = client ?? kv; + this.ttl = ttl; + } + + /** + * Lookup LLM generations in cache by prompt and associated LLM key. + */ + public async lookup(prompt: string, llmKey: string) { + let idx = 0; + let key = getCacheKey(prompt, llmKey, String(idx)); + let value = await this.client.get(key); + const generations: Generation[] = []; + + while (value) { + generations.push(deserializeStoredGeneration(value)); + idx += 1; + key = getCacheKey(prompt, llmKey, String(idx)); + value = await this.client.get(key); + } + + return generations.length > 0 ? generations : null; + } + + /** + * Update the cache with the given generations. + * + * Note this overwrites any existing generations for the given prompt and LLM key. 
+ */ + public async update(prompt: string, llmKey: string, value: Generation[]) { + for (let i = 0; i < value.length; i += 1) { + const key = getCacheKey(prompt, llmKey, String(i)); + const serializedValue = JSON.stringify(serializeGeneration(value[i])); + + if (this.ttl) { + await this.client.set(key, serializedValue, { ex: this.ttl }); + } else { + await this.client.set(key, serializedValue); + } + } + } +} diff --git a/libs/langchain-community/src/document_loaders/tests/youtube.int.test.ts b/libs/langchain-community/src/document_loaders/tests/youtube.int.test.ts new file mode 100644 index 000000000000..48d462a8a7d8 --- /dev/null +++ b/libs/langchain-community/src/document_loaders/tests/youtube.int.test.ts @@ -0,0 +1,21 @@ +import { test, expect } from "@jest/globals"; +import { YoutubeLoader } from "../web/youtube.js"; + +test("Test Youtube loader", async () => { + const videoUrl = "https://www.youtube.com/watch?v=FZhbJZEgKQ4"; + const loader = YoutubeLoader.createFromUrl(videoUrl, { + language: "en", + addVideoInfo: true, + }); + const docs = await loader.load(); + + expect(docs.length).toBe(1); + expect(docs[0].pageContent).toContain( + "One year ago, at the dawn of a new age," + ); + expect(docs[0].metadata).toMatchObject({ + author: "Microsoft", + source: "FZhbJZEgKQ4", + title: "Full Keynote: Satya Nadella at Microsoft Ignite 2023", + }); +}); diff --git a/libs/langchain-community/src/document_loaders/web/youtube.ts b/libs/langchain-community/src/document_loaders/web/youtube.ts index f9ce18abc040..d66b86e13592 100644 --- a/libs/langchain-community/src/document_loaders/web/youtube.ts +++ b/libs/langchain-community/src/document_loaders/web/youtube.ts @@ -1,4 +1,3 @@ -import { TranscriptResponse, YoutubeTranscript } from "youtube-transcript"; import { Innertube } from "youtubei.js"; import { Document } from "@langchain/core/documents"; import { BaseDocumentLoader } from "@langchain/core/document_loaders/base"; @@ -28,8 +27,7 @@ interface VideoMetadata { /** * 
A document loader for loading data from YouTube videos. It uses the - * youtube-transcript and youtubei.js libraries to fetch the transcript - * and video metadata. + * youtubei.js library to fetch the transcript and video metadata. * @example * ```typescript * const loader = new YoutubeLoader( @@ -87,29 +85,34 @@ export class YoutubeLoader extends BaseDocumentLoader { /** * Loads the transcript and video metadata from the specified YouTube - * video. It uses the youtube-transcript library to fetch the transcript - * and the youtubei.js library to fetch the video metadata. + * video. It uses the youtubei.js library to fetch the video metadata and transcripts. * @returns An array of Documents representing the retrieved data. */ async load(): Promise { - let transcript: TranscriptResponse[] | undefined; + let transcript: string | undefined; const metadata: VideoMetadata = { source: this.videoId, }; try { - transcript = await YoutubeTranscript.fetchTranscript(this.videoId, { + const youtube = await Innertube.create({ lang: this.language, + retrieve_player: false, }); + const info = await youtube.getInfo(this.videoId); + const transcriptData = await info.getTranscript(); + transcript = + transcriptData.transcript.content?.body?.initial_segments + .map((segment) => segment.snippet.text) + .join(" ") ?? 
""; if (transcript === undefined) { throw new Error("Transcription not found"); } if (this.addVideoInfo) { - const youtube = await Innertube.create(); - const info = (await youtube.getBasicInfo(this.videoId)).basic_info; - metadata.description = info.short_description; - metadata.title = info.title; - metadata.view_count = info.view_count; - metadata.author = info.author; + const basicInfo = info.basic_info; + metadata.description = basicInfo.short_description; + metadata.title = basicInfo.title; + metadata.view_count = basicInfo.view_count; + metadata.author = basicInfo.author; } } catch (e: unknown) { throw new Error( @@ -117,7 +120,7 @@ export class YoutubeLoader extends BaseDocumentLoader { ); } const document = new Document({ - pageContent: transcript.map((item) => item.text).join(" "), + pageContent: transcript, metadata, }); diff --git a/libs/langchain-community/src/llms/togetherai.ts b/libs/langchain-community/src/llms/togetherai.ts index d03d44a5c12b..e388246c883c 100644 --- a/libs/langchain-community/src/llms/togetherai.ts +++ b/libs/langchain-community/src/llms/togetherai.ts @@ -27,7 +27,7 @@ interface TogetherAIInferenceResult { }; // eslint-disable-next-line @typescript-eslint/no-explicit-any subjobs: Array; - output: { + output?: { choices: Array<{ finish_reason: string; index: number; @@ -36,6 +36,11 @@ interface TogetherAIInferenceResult { raw_compute_time: number; result_type: string; }; + choices?: Array<{ + finish_reason: string; + index: number; + text: string; + }>; } /** @@ -247,8 +252,11 @@ export class TogetherAI extends LLM { prompt, options ); - const outputText = response.output.choices[0].text; - return outputText ?? ""; + if (response.output) { + return response.output.choices[0]?.text ?? ""; + } else { + return response.choices?.[0]?.text ?? 
""; + } } async *_streamResponseChunks( diff --git a/libs/langchain-community/src/load/import_map.ts b/libs/langchain-community/src/load/import_map.ts index 624b789eb34a..2f6f5d86660a 100644 --- a/libs/langchain-community/src/load/import_map.ts +++ b/libs/langchain-community/src/load/import_map.ts @@ -68,6 +68,7 @@ export * as caches__cloudflare_kv from "../caches/cloudflare_kv.js"; export * as caches__ioredis from "../caches/ioredis.js"; export * as caches__momento from "../caches/momento.js"; export * as caches__upstash_redis from "../caches/upstash_redis.js"; +export * as caches__vercel_kv from "../caches/vercel_kv.js"; export * as stores__doc__base from "../stores/doc/base.js"; export * as stores__doc__gcs from "../stores/doc/gcs.js"; export * as stores__doc__in_memory from "../stores/doc/in_memory.js"; diff --git a/libs/langchain-community/src/storage/vercel_kv.ts b/libs/langchain-community/src/storage/vercel_kv.ts index 8f520a0d8543..f86a877a786a 100644 --- a/libs/langchain-community/src/storage/vercel_kv.ts +++ b/libs/langchain-community/src/storage/vercel_kv.ts @@ -137,7 +137,8 @@ export class VercelKVStore extends BaseStore { for (const key of batch) { yield this._getDeprefixedKey(key); } - while (cursor !== 0) { + // Backwards compatibility for older @vercel/kv versions + while (String(cursor) !== "0") { [cursor, batch] = await this.client.scan(cursor, { match: pattern, count: this.yieldKeysScanBatchSize, diff --git a/libs/langchain-community/src/vectorstores/pgvector.ts b/libs/langchain-community/src/vectorstores/pgvector.ts index b8c0b924c8cb..f63599836e52 100644 --- a/libs/langchain-community/src/vectorstores/pgvector.ts +++ b/libs/langchain-community/src/vectorstores/pgvector.ts @@ -1,8 +1,12 @@ import pg, { type Pool, type PoolClient, type PoolConfig } from "pg"; -import { VectorStore } from "@langchain/core/vectorstores"; +import { + MaxMarginalRelevanceSearchOptions, + VectorStore, +} from "@langchain/core/vectorstores"; import type { 
EmbeddingsInterface } from "@langchain/core/embeddings"; import { Document } from "@langchain/core/documents"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; +import { maximalMarginalRelevance } from "@langchain/core/utils/math"; type Metadata = Record; @@ -602,19 +606,18 @@ export class PGVectorStore extends VectorStore { } /** - * Method to perform a similarity search in the vector store. It returns - * the `k` most similar documents to the query vector, along with their - * similarity scores. - * + * Method to perform a similarity search in the vector store. It returns the `k` most similar documents to the query text. * @param query - Query vector. * @param k - Number of most similar documents to return. * @param filter - Optional filter to apply to the search. + * @param includeEmbedding Whether to include the embedding vectors in the results. * @returns Promise that resolves with an array of tuples, each containing a `Document` and its similarity score. */ - async similaritySearchVectorWithScore( + private async searchPostgres( query: number[], k: number, - filter?: this["FilterType"] + filter?: this["FilterType"], + includeEmbedding?: boolean ): Promise<[Document, number][]> { const embeddingString = `[${query.join(",")}]`; const _filter: this["FilterType"] = filter ?? {}; @@ -694,12 +697,32 @@ export class PGVectorStore extends VectorStore { metadata: doc[this.metadataColumnName], id: doc[this.idColumnName], }); + if (includeEmbedding) { + document.metadata[this.vectorColumnName] = doc[this.vectorColumnName]; + } results.push([document, doc._distance]); } } return results; } + /** + * Method to perform a similarity search in the vector store. It returns + * the `k` most similar documents to the query vector, along with their + * similarity scores. + * @param query - Query vector. + * @param k - Number of most similar documents to return. + * @param filter - Optional filter to apply to the search. 
+ * @returns Promise that resolves with an array of tuples, each containing a `Document` and its similarity score. + */ + async similaritySearchVectorWithScore( + query: number[], + k: number, + filter?: this["FilterType"] + ): Promise<[Document, number][]> { + return this.searchPostgres(query, k, filter, false); + } + /** * Method to ensure the existence of the table in the database. It creates * the table if it does not already exist. @@ -885,4 +908,50 @@ export class PGVectorStore extends VectorStore { ); } } + + /** + * Return documents selected using the maximal marginal relevance. + * Maximal marginal relevance optimizes for similarity to the query AND + * diversity among selected documents. + * @param query Text to look up documents similar to. + * @param options.k=4 Number of documents to return. + * @param options.fetchK=20 Number of documents to fetch before passing to + * the MMR algorithm. + * @param options.lambda=0.5 Number between 0 and 1 that determines the + * degree of diversity among the results, where 0 corresponds to maximum + * diversity and 1 to minimum diversity. + * @returns List of documents selected by maximal marginal relevance. 
+ */ + async maxMarginalRelevanceSearch( + query: string, + options: MaxMarginalRelevanceSearchOptions + ): Promise { + const { k = 4, fetchK = 20, lambda = 0.5, filter } = options; + const queryEmbedding = await this.embeddings.embedQuery(query); + + const docs = await this.searchPostgres( + queryEmbedding, + fetchK, + filter, + true + ); + + const embeddingList = docs.map((doc) => + JSON.parse(doc[0].metadata[this.vectorColumnName]) + ); + + const mmrIndexes = maximalMarginalRelevance( + queryEmbedding, + embeddingList, + lambda, + k + ); + + const mmrDocs = mmrIndexes.map((index) => { + const doc = docs[index][0]; + delete doc.metadata[this.vectorColumnName]; + return docs[index][0]; + }); + return mmrDocs; + } } diff --git a/libs/langchain-community/src/vectorstores/tests/pgvector/docker-compose.yml b/libs/langchain-community/src/vectorstores/tests/pgvector/docker-compose.yml index 306214d0984e..04f3fb0bcdcd 100644 --- a/libs/langchain-community/src/vectorstores/tests/pgvector/docker-compose.yml +++ b/libs/langchain-community/src/vectorstores/tests/pgvector/docker-compose.yml @@ -1,10 +1,9 @@ # Run this command to start the database: -# docker-compose up --build -version: "3" +# docker compose up services: db: hostname: 127.0.0.1 - image: ankane/pgvector + image: pgvector/pgvector:pg16 ports: - 5432:5432 restart: always @@ -12,5 +11,3 @@ services: - POSTGRES_DB=api - POSTGRES_USER=myuser - POSTGRES_PASSWORD=ChangeMe - volumes: - - ./init.sql:/docker-entrypoint-initdb.d/init.sql diff --git a/libs/langchain-community/src/vectorstores/tests/pgvector/pgvector.int.test.ts b/libs/langchain-community/src/vectorstores/tests/pgvector/pgvector.int.test.ts index 5f01d3012b25..dcf4e1580786 100644 --- a/libs/langchain-community/src/vectorstores/tests/pgvector/pgvector.int.test.ts +++ b/libs/langchain-community/src/vectorstores/tests/pgvector/pgvector.int.test.ts @@ -74,6 +74,29 @@ describe("PGVectorStore", () => { expect(results[0].pageContent).toEqual("Cat drinks milk"); 
}); + test("Test MMR search", async () => { + const documents = [ + { + pageContent: "hello", + metadata: { a: 1 }, + }, + { + pageContent: "foo", + metadata: { a: 2 }, + }, + { pageContent: "bye", metadata: { a: 1 } }, + ]; + await pgvectorVectorStore.addDocuments(documents); + const results = await pgvectorVectorStore.maxMarginalRelevanceSearch( + "hello", + { + k: 4, + } + ); + + expect(results).toHaveLength(3); + }); + test("PGvector can save documents with a list greater than default chunk size", async () => { // Extract the default chunk size and add one. const docsToGenerate = pgvectorVectorStore.chunkSize + 1; @@ -128,8 +151,16 @@ describe("PGVectorStore", () => { expect(result.length).toEqual(2); expect(result).toEqual([ - { pageContent: "Lorem Ipsum", metadata: { a: 100 } }, - { pageContent: "Lorem Ipsum", metadata: { a: 300 } }, + { + id: expect.any(String), + pageContent: "Lorem Ipsum", + metadata: { a: 100 }, + }, + { + id: expect.any(String), + pageContent: "Lorem Ipsum", + metadata: { a: 300 }, + }, ]); const result2 = await pgvectorVectorStore.similaritySearch("hello", 2, { @@ -137,7 +168,11 @@ describe("PGVectorStore", () => { }); expect(result2.length).toEqual(1); expect(result2).toEqual([ - { pageContent: "Lorem Ipsum", metadata: { a: 200 } }, + { + id: expect.any(String), + pageContent: "Lorem Ipsum", + metadata: { a: 200 }, + }, ]); const result3 = await pgvectorVectorStore.similaritySearch("hello", 3); @@ -162,8 +197,16 @@ describe("PGVectorStore", () => { expect(result.length).toEqual(2); expect(result).toEqual([ - { pageContent: "Lorem Ipsum", metadata: { a: ["tag1", "tag2"] } }, - { pageContent: "Lorem Ipsum", metadata: { a: ["tag1"] } }, + { + id: expect.any(String), + pageContent: "Lorem Ipsum", + metadata: { a: ["tag1", "tag2"] }, + }, + { + id: expect.any(String), + pageContent: "Lorem Ipsum", + metadata: { a: ["tag1"] }, + }, ]); const result2 = await pgvectorVectorStore.similaritySearch("hello", 2, { @@ -173,14 +216,28 @@ 
describe("PGVectorStore", () => { }); expect(result2.length).toEqual(2); expect(result2).toEqual([ - { pageContent: "Lorem Ipsum", metadata: { a: ["tag1", "tag2"] } }, - { pageContent: "Lorem Ipsum", metadata: { a: ["tag2"] } }, + { + id: expect.any(String), + pageContent: "Lorem Ipsum", + metadata: { a: ["tag1", "tag2"] }, + }, + { + id: expect.any(String), + pageContent: "Lorem Ipsum", + metadata: { a: ["tag2"] }, + }, ]); const result3 = await pgvectorVectorStore.similaritySearch("hello", 3); expect(result3.length).toEqual(3); - expect(result3).toEqual(documents); + expect(result3).toEqual( + documents.map((doc) => { + // eslint-disable-next-line @typescript-eslint/no-explicit-any, no-param-reassign + (doc as any).id = expect.any(String); + return doc; + }) + ); }); test("PGvector can delete document by id", async () => { @@ -410,8 +467,16 @@ describe("PGVectorStore with collection", () => { expect(result.length).toEqual(2); expect(result).toEqual([ - { pageContent: "Lorem Ipsum", metadata: { a: 100 } }, - { pageContent: "Lorem Ipsum", metadata: { a: 300 } }, + { + id: expect.any(String), + pageContent: "Lorem Ipsum", + metadata: { a: 100 }, + }, + { + id: expect.any(String), + pageContent: "Lorem Ipsum", + metadata: { a: 300 }, + }, ]); const result2 = await pgvectorVectorStore.similaritySearch("hello", 2, { @@ -419,7 +484,11 @@ describe("PGVectorStore with collection", () => { }); expect(result2.length).toEqual(1); expect(result2).toEqual([ - { pageContent: "Lorem Ipsum", metadata: { a: 200 } }, + { + id: expect.any(String), + pageContent: "Lorem Ipsum", + metadata: { a: 200 }, + }, ]); const result3 = await pgvectorVectorStore.similaritySearch("hello", 3); @@ -638,8 +707,16 @@ describe("PGVectorStore with schema", () => { expect(result.length).toEqual(2); expect(result).toEqual([ - { pageContent: "Lorem Ipsum", metadata: { a: 100 } }, - { pageContent: "Lorem Ipsum", metadata: { a: 300 } }, + { + id: expect.any(String), + pageContent: "Lorem Ipsum", + 
metadata: { a: 100 }, + }, + { + id: expect.any(String), + pageContent: "Lorem Ipsum", + metadata: { a: 300 }, + }, ]); const result2 = await pgvectorVectorStore.similaritySearch("hello", 2, { @@ -647,7 +724,11 @@ describe("PGVectorStore with schema", () => { }); expect(result2.length).toEqual(1); expect(result2).toEqual([ - { pageContent: "Lorem Ipsum", metadata: { a: 200 } }, + { + id: expect.any(String), + pageContent: "Lorem Ipsum", + metadata: { a: 200 }, + }, ]); const result3 = await pgvectorVectorStore.similaritySearch("hello", 3); diff --git a/libs/langchain-groq/package.json b/libs/langchain-groq/package.json index 551e3770686e..db8e5ce84899 100644 --- a/libs/langchain-groq/package.json +++ b/libs/langchain-groq/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/groq", - "version": "0.1.2", + "version": "0.1.3", "description": "Groq integration for LangChain.js", "type": "module", "engines": { diff --git a/libs/langchain-groq/src/chat_models.ts b/libs/langchain-groq/src/chat_models.ts index 49bba333c6de..488f36280de4 100644 --- a/libs/langchain-groq/src/chat_models.ts +++ b/libs/langchain-groq/src/chat_models.ts @@ -119,6 +119,34 @@ export interface ChatGroqInput extends BaseChatModelParams { * This limits ensures computational efficiency and resource management. 
*/ maxTokens?: number; + /** + * Override the default base URL for the API + */ + baseUrl?: string; + /** + * The maximum amount of time (in milliseconds) the client will wait for a response + */ + timeout?: number; + /** + * HTTP agent used to manage connections + */ + // eslint-disable-next-line @typescript-eslint/no-explicit-any + httpAgent?: any; + /** + * Custom fetch function implementation + */ + // eslint-disable-next-line @typescript-eslint/no-explicit-any + fetch?: (...args: any) => any; + + /** + * Default headers included with every request + */ + defaultHeaders?: Record; + + /** + * Default query parameters included with every request + */ + defaultQuery?: Record; } type GroqRoleEnum = "system" | "assistant" | "user" | "function"; @@ -687,10 +715,21 @@ export class ChatGroq extends BaseChatModel< `Groq API key not found. Please set the GROQ_API_KEY environment variable or provide the key into "apiKey"` ); } + const defaultHeaders = { + "User-Agent": "langchainjs", + ...(fields?.defaultHeaders ?? {}), + }; this.client = new Groq({ apiKey, dangerouslyAllowBrowser: true, + baseURL: fields?.baseUrl, + timeout: fields?.timeout, + httpAgent: fields?.httpAgent, + fetch: fields?.fetch, + maxRetries: 0, + defaultHeaders, + defaultQuery: fields?.defaultQuery, }); this.apiKey = apiKey; this.temperature = fields?.temperature ?? 
this.temperature; diff --git a/libs/langchain-scripts/src/cli/docs/templates/chat.ipynb b/libs/langchain-scripts/src/cli/docs/templates/chat.ipynb index 303493051839..a700e4312fc9 100644 --- a/libs/langchain-scripts/src/cli/docs/templates/chat.ipynb +++ b/libs/langchain-scripts/src/cli/docs/templates/chat.ipynb @@ -59,8 +59,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/libs/langchain-scripts/src/cli/docs/templates/llms.ipynb b/libs/langchain-scripts/src/cli/docs/templates/llms.ipynb index 5a27e5839758..ce2b97c41614 100644 --- a/libs/langchain-scripts/src/cli/docs/templates/llms.ipynb +++ b/libs/langchain-scripts/src/cli/docs/templates/llms.ipynb @@ -55,8 +55,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/libs/langchain-scripts/src/cli/docs/templates/text_embedding.ipynb b/libs/langchain-scripts/src/cli/docs/templates/text_embedding.ipynb index b5d950b75b37..8140fc1f6894 100644 --- a/libs/langchain-scripts/src/cli/docs/templates/text_embedding.ipynb +++ b/libs/langchain-scripts/src/cli/docs/templates/text_embedding.ipynb @@ -55,8 +55,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```bash\n", - 
"# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "# export LANGSMITH_TRACING=\"true\"\n", + "# export LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/libs/langchain-scripts/src/cli/docs/templates/toolkits.ipynb b/libs/langchain-scripts/src/cli/docs/templates/toolkits.ipynb index 903bc3619cd6..43db0617640b 100644 --- a/libs/langchain-scripts/src/cli/docs/templates/toolkits.ipynb +++ b/libs/langchain-scripts/src/cli/docs/templates/toolkits.ipynb @@ -32,8 +32,8 @@ "If you want to get automated tracing from runs of individual tools, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```typescript\n", - "process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "process.env.LANGSMITH_TRACING=\"true\"\n", + "process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", "```\n", "\n", "### Installation\n", diff --git a/libs/langchain-scripts/src/cli/docs/templates/tools.ipynb b/libs/langchain-scripts/src/cli/docs/templates/tools.ipynb index 0225185d5b6a..3559c528de74 100644 --- a/libs/langchain-scripts/src/cli/docs/templates/tools.ipynb +++ b/libs/langchain-scripts/src/cli/docs/templates/tools.ipynb @@ -69,8 +69,8 @@ "It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:\n", "\n", "```typescript\n", - "process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "process.env.LANGSMITH_TRACING=\"true\"\n", + "process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", "```" ] }, diff --git a/libs/langchain-scripts/src/cli/docs/templates/vectorstores.ipynb b/libs/langchain-scripts/src/cli/docs/templates/vectorstores.ipynb index f3a4714a0f62..ccc4f5a62d31 100644 --- a/libs/langchain-scripts/src/cli/docs/templates/vectorstores.ipynb +++ 
b/libs/langchain-scripts/src/cli/docs/templates/vectorstores.ipynb @@ -86,8 +86,8 @@ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "// process.env.LANGSMITH_TRACING=\"true\"\n", + "// process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", "```" ] }, diff --git a/yarn.lock b/yarn.lock index 6fb66edb4fb8..3ea9be9a3510 100644 --- a/yarn.lock +++ b/yarn.lock @@ -8691,6 +8691,13 @@ __metadata: languageName: node linkType: hard +"@bufbuild/protobuf@npm:^2.0.0": + version: 2.2.3 + resolution: "@bufbuild/protobuf@npm:2.2.3" + checksum: 567ca0497669a8944fe84a9fdfa236e4a91d5879190c0ec0c8727d5220cbc21a85d06a114ac1eb35387fc5cb1dcbb7adc583c4d4f6a2ecb34fbe61dcaa7e7e9b + languageName: node + linkType: hard + "@cerebras/cerebras_cloud_sdk@npm:^1.15.0": version: 1.15.0 resolution: "@cerebras/cerebras_cloud_sdk@npm:1.15.0" @@ -11946,8 +11953,8 @@ __metadata: "@upstash/ratelimit": ^2.0.3 "@upstash/redis": ^1.32.0 "@upstash/vector": ^1.1.1 - "@vercel/kv": ^0.2.3 - "@vercel/postgres": ^0.5.0 + "@vercel/kv": ^3.0.0 + "@vercel/postgres": ^0.10.0 "@writerai/writer-sdk": ^0.40.2 "@xata.io/client": ^0.28.0 "@zilliz/milvus2-sdk-node": ">=2.3.5" @@ -12040,8 +12047,7 @@ __metadata: weaviate-ts-client: ^1.4.0 web-auth-library: ^1.0.3 word-extractor: ^1.0.4 - youtube-transcript: ^1.0.6 - youtubei.js: ^9.1.0 + youtubei.js: ^12.2.0 zod: ^3.22.3 zod-to-json-schema: ^3.22.5 peerDependencies: @@ -12103,8 +12109,8 @@ __metadata: "@upstash/ratelimit": ^1.1.3 || ^2.0.3 "@upstash/redis": ^1.20.6 "@upstash/vector": ^1.1.1 - "@vercel/kv": ^0.2.3 - "@vercel/postgres": ^0.5.0 + "@vercel/kv": "*" + "@vercel/postgres": "*" "@writerai/writer-sdk": ^0.40.2 "@xata.io/client": ^0.28.0 "@zilliz/milvus2-sdk-node": ">=2.3.5" @@ -12170,8 +12176,7 @@ __metadata: 
web-auth-library: ^1.0.3 word-extractor: "*" ws: ^8.14.2 - youtube-transcript: ^1.0.6 - youtubei.js: ^9.1.0 + youtubei.js: "*" peerDependenciesMeta: "@arcjet/redact": optional: true @@ -12413,8 +12418,6 @@ __metadata: optional: true ws: optional: true - youtube-transcript: - optional: true youtubei.js: optional: true languageName: unknown @@ -12603,7 +12606,7 @@ __metadata: languageName: unknown linkType: soft -"@langchain/google-vertexai-web@workspace:*, @langchain/google-vertexai-web@workspace:libs/langchain-google-vertexai-web": +"@langchain/google-vertexai-web@*, @langchain/google-vertexai-web@workspace:*, @langchain/google-vertexai-web@workspace:libs/langchain-google-vertexai-web": version: 0.0.0-use.local resolution: "@langchain/google-vertexai-web@workspace:libs/langchain-google-vertexai-web" dependencies: @@ -13662,15 +13665,6 @@ __metadata: languageName: node linkType: hard -"@neondatabase/serverless@npm:0.6.0": - version: 0.6.0 - resolution: "@neondatabase/serverless@npm:0.6.0" - dependencies: - "@types/pg": 8.6.6 - checksum: 25b81ba6b37e0ac56a746a1219de6107dd74d6f2d093bc23dc33b6fb8f33d00f80cb37eb0648afacd32ab0020b20c1809e3ec6e5c34d23fa21ae5b76d6041332 - languageName: node - linkType: hard - "@neondatabase/serverless@npm:^0.9.1": version: 0.9.1 resolution: "@neondatabase/serverless@npm:0.9.1" @@ -13680,6 +13674,15 @@ __metadata: languageName: node linkType: hard +"@neondatabase/serverless@npm:^0.9.3": + version: 0.9.5 + resolution: "@neondatabase/serverless@npm:0.9.5" + dependencies: + "@types/pg": 8.11.6 + checksum: b53c4b21c6eaf995a12bd84adf2c839022b7eb8b216cb07319a784a6f60965cf9ae497560a2aa767f5f8a407f1a45783f13b9f9e8f5c2078118a6a5ae174fdb2 + languageName: node + linkType: hard + "@next/env@npm:14.0.1": version: 14.0.1 resolution: "@next/env@npm:14.0.1" @@ -19859,6 +19862,17 @@ __metadata: languageName: node linkType: hard +"@types/pg@npm:8.11.6": + version: 8.11.6 + resolution: "@types/pg@npm:8.11.6" + dependencies: + "@types/node": "*" + pg-protocol: 
"*" + pg-types: ^4.0.1 + checksum: 231f7e5bfe8b4d14cca398d24cd55f4f14f582f815b62059e6f3ee74108cf92089fbd946568ebc35fa402f238ed9c8a8c1e10e7084e83e4ca3aff75957243014 + languageName: node + linkType: hard + "@types/pg@npm:8.6.6": version: 8.6.6 resolution: "@types/pg@npm:8.6.6" @@ -20829,15 +20843,6 @@ __metadata: languageName: node linkType: hard -"@upstash/redis@npm:1.22.0": - version: 1.22.0 - resolution: "@upstash/redis@npm:1.22.0" - dependencies: - isomorphic-fetch: ^3.0.0 - checksum: 894b7d4e318c55fdcbf0f322efd9ace94a37f1b8f9d7cb2cdf8b8b4859814379665c7e00feef3565578455d98492710d14ed4a2b043ec412b5d640a4d5a2dd55 - languageName: node - linkType: hard - "@upstash/redis@npm:^1.28.3": version: 1.31.1 resolution: "@upstash/redis@npm:1.31.1" @@ -20856,6 +20861,15 @@ __metadata: languageName: node linkType: hard +"@upstash/redis@npm:^1.34.0": + version: 1.34.3 + resolution: "@upstash/redis@npm:1.34.3" + dependencies: + crypto-js: ^4.2.0 + checksum: 4c738eb82e3d8906999345379ff0c96913627cdf96ec54b6880fa8fb5616331a92228b889c076b4a93bee4bb31324f5afa68aa927fb751d308fc83464de68e79 + languageName: node + linkType: hard + "@upstash/vector@npm:^1.1.1": version: 1.1.1 resolution: "@upstash/vector@npm:1.1.1" @@ -20863,12 +20877,12 @@ __metadata: languageName: node linkType: hard -"@vercel/kv@npm:^0.2.3": - version: 0.2.3 - resolution: "@vercel/kv@npm:0.2.3" +"@vercel/kv@npm:^3.0.0": + version: 3.0.0 + resolution: "@vercel/kv@npm:3.0.0" dependencies: - "@upstash/redis": 1.22.0 - checksum: 3a2b1e3a0ebf605b69341b449bd81edde94f9d00393cc6971759b950c90cff782e1e4fa5daf672f4769aa8301e00f7b7fdec07cdd98d40530652d83c043125ed + "@upstash/redis": ^1.34.0 + checksum: 5bebab15c770e32409c03054dbd58e3e5a4d77d1a813c91e6613a9382a96b391878b67a1e2fbe8f7e197899a2dc589212ee6f6b11ab272798954e63816c6eba1 languageName: node linkType: hard @@ -20883,15 +20897,14 @@ __metadata: languageName: node linkType: hard -"@vercel/postgres@npm:^0.5.0": - version: 0.5.0 - resolution: "@vercel/postgres@npm:0.5.0" 
+"@vercel/postgres@npm:^0.10.0": + version: 0.10.0 + resolution: "@vercel/postgres@npm:0.10.0" dependencies: - "@neondatabase/serverless": 0.6.0 - bufferutil: 4.0.7 - utf-8-validate: 6.0.3 - ws: 8.14.2 - checksum: 9fe0f496753a481d06566e7b846956c2129af081080f6618a4f9f8efb90ae53cc2aa1b3598a95ecc04f612b0ffdfcfffd20921fd1187fdc3db99b35d24b73a3f + "@neondatabase/serverless": ^0.9.3 + bufferutil: ^4.0.8 + ws: ^8.17.1 + checksum: 45c29baa73bed4b07a5c06ae283647d43c89d843ebd3cb4864b59ec4217ef5d24832e569cd08d61f8b7875522fc207bb364547ea7e9423dd37521fba24ccec20 languageName: node linkType: hard @@ -22816,13 +22829,13 @@ __metadata: languageName: node linkType: hard -"bufferutil@npm:4.0.7": - version: 4.0.7 - resolution: "bufferutil@npm:4.0.7" +"bufferutil@npm:^4.0.8": + version: 4.0.9 + resolution: "bufferutil@npm:4.0.9" dependencies: node-gyp: latest node-gyp-build: ^4.3.0 - checksum: f75aa87e3d1b99b87a95f60a855e63f70af07b57fb8443e75a2ddfef2e47788d130fdd46e3a78fd7e0c10176082b26dfbed970c5b8632e1cc299cafa0e93ce45 + checksum: 51ce9ee19bc4b72c2eb9f9a231dd95e786ca5a00a6bdfcae83f1d5cd8169301c79245ce96913066a5a1bbe45c44e95bc5a1761a18798b835585c1a05af65b209 languageName: node linkType: hard @@ -23116,13 +23129,6 @@ __metadata: languageName: node linkType: hard -"centra@npm:^2.6.0": - version: 2.6.0 - resolution: "centra@npm:2.6.0" - checksum: 3b4d44762bceb9e20f7e45d01ffb9e462523cf8a0186f6710c08863f0455bceabfbcb754d6b01ea095c3bdee09c4ebef912669dc2b391a9af400e9ba7e398bc5 - languageName: node - linkType: hard - "chalk@npm:5.2.0, chalk@npm:^5.0.0, chalk@npm:^5.2.0": version: 5.2.0 resolution: "chalk@npm:5.2.0" @@ -27822,7 +27828,7 @@ __metadata: "@typescript-eslint/parser": ^5.51.0 "@upstash/redis": ^1.32.0 "@upstash/vector": ^1.1.1 - "@vercel/kv": ^0.2.3 + "@vercel/kv": ^3.0.0 "@xata.io/client": ^0.28.0 "@zilliz/milvus2-sdk-node": ^2.3.5 axios: ^0.26.0 @@ -32870,12 +32876,12 @@ __metadata: languageName: node linkType: hard -"jintr@npm:^1.1.0": - version: 1.1.0 - resolution: 
"jintr@npm:1.1.0" +"jintr@npm:^3.2.0": + version: 3.2.0 + resolution: "jintr@npm:3.2.0" dependencies: acorn: ^8.8.0 - checksum: b61269ff80a46c71e837e893a4754fc2d0a941e3d577dc6307f0e67cebebf81e66f646c86bf6159fe7d851d829595d7a9e9e26392b9ede7b6b39d9664f1d090d + checksum: 8f526719fd77d6f7cd52c47c06c86573cb37a15e22ce8129a228ff605d7ea3d662d7c8ef37cad7b4df767f53ca11418ffa49ad4aa8776f62d94362aba8317ff3 languageName: node linkType: hard @@ -33423,6 +33429,7 @@ __metadata: "@langchain/core": "workspace:*" "@langchain/google-genai": "*" "@langchain/google-vertexai": "*" + "@langchain/google-vertexai-web": "*" "@langchain/groq": "*" "@langchain/mistralai": "*" "@langchain/ollama": "*" @@ -33482,6 +33489,7 @@ __metadata: "@langchain/core": ">=0.2.21 <0.4.0" "@langchain/google-genai": "*" "@langchain/google-vertexai": "*" + "@langchain/google-vertexai-web": "*" "@langchain/groq": "*" "@langchain/mistralai": "*" "@langchain/ollama": "*" @@ -33503,6 +33511,8 @@ __metadata: optional: true "@langchain/google-vertexai": optional: true + "@langchain/google-vertexai-web": + optional: true "@langchain/groq": optional: true "@langchain/mistralai": @@ -37251,15 +37261,6 @@ __metadata: languageName: node linkType: hard -"phin@npm:^3.5.0": - version: 3.7.0 - resolution: "phin@npm:3.7.0" - dependencies: - centra: ^2.6.0 - checksum: b0a35e943615c40a3ccd7d6a2dd062568258e6b36dceed3150d13d28cad906e9028e756ad6efe66963b43937879e8a3593f986d17aac968d42982b4e8702e539 - languageName: node - linkType: hard - "pickleparser@npm:^0.2.1": version: 0.2.1 resolution: "pickleparser@npm:0.2.1" @@ -43581,16 +43582,6 @@ __metadata: languageName: node linkType: hard -"utf-8-validate@npm:6.0.3": - version: 6.0.3 - resolution: "utf-8-validate@npm:6.0.3" - dependencies: - node-gyp: latest - node-gyp-build: ^4.3.0 - checksum: 5e21383c81ff7469c1912119ca69d07202d944c73ddd8a54b84dddcc546b939054e5101c78c294e494d206fe93bd43428adc635a0660816b3ec9c8ec89286ac4 - languageName: node - linkType: hard - 
"util-deprecate@npm:^1.0.1, util-deprecate@npm:^1.0.2, util-deprecate@npm:~1.0.1": version: 1.0.2 resolution: "util-deprecate@npm:1.0.2" @@ -44829,23 +44820,15 @@ __metadata: languageName: node linkType: hard -"youtube-transcript@npm:^1.0.6": - version: 1.0.6 - resolution: "youtube-transcript@npm:1.0.6" - dependencies: - phin: ^3.5.0 - checksum: 7ca6a608834d2eb43d2d353ad58bb3fa86663e2f5730146a768c5c3ac423911680451a38c57f827aa7af8fb7df78a4ce3702019d988d87d9ed266f9d81aeb833 - languageName: node - linkType: hard - -"youtubei.js@npm:^9.1.0": - version: 9.1.0 - resolution: "youtubei.js@npm:9.1.0" +"youtubei.js@npm:^12.2.0": + version: 12.2.0 + resolution: "youtubei.js@npm:12.2.0" dependencies: - jintr: ^1.1.0 + "@bufbuild/protobuf": ^2.0.0 + jintr: ^3.2.0 tslib: ^2.5.0 undici: ^5.19.1 - checksum: 7a537d79435c362c3d4f0e101f85edca6b34c584b9cafeee28c4214fdcdcbb6b2ebba2571175e21a984cc5d66d0fe673d761f400dd232ecb16803bce878cb41d + checksum: 4c89a019c6b94363328e8d0d35b8d8266de1ee3db963a39b655bdaa15e4d899a107876ead53b7a1268837b9a756fecaf53be0b399545a7fe290c6da303010c8f languageName: node linkType: hard