From 42f7891f7aa570a28e1c483dc302ea51d447611b Mon Sep 17 00:00:00 2001
From: HavenDV
Date: Wed, 18 Sep 2024 01:29:16 +0400
Subject: [PATCH] feat: Updated to Providers 0.15.2.

---
 README.md                                     |  4 +--
 .../Controllers/AnthropicSampleController.cs  |  4 +--
 .../Controllers/OpenAiSampleController.cs     |  1 +
 examples/LangChain.Samples.Azure/Program.cs   |  3 +-
 .../LangChain.Samples.LocalRAG.csproj         |  2 +-
 .../LangChain.Samples.LocalRAG/Program.cs     |  8 ++---
 examples/LangChain.Samples.OpenAI/Program.cs  |  3 +-
 .../LangChain.Samples.Serve.OpenAI.csproj     |  2 +-
 .../LangChain.Samples.Serve.csproj            |  2 +-
 examples/LangChain.Samples.Serve/Program.cs   |  8 ++---
 src/Core/src/Chains/LLM/LLMChain.cs           |  8 ++---
 .../src/Chains/StackableChains/LLMChain.cs    |  4 +--
 src/Core/src/Memory/MessageFormatter.cs       |  4 +--
 .../ConversationalRetrievalChainTests.cs      | 24 +++++++++------
 src/Directory.Packages.props                  | 30 +++++++++----------
 .../ServiceCollectionExtensions.Anthropic.cs  |  2 +-
 src/Meta/test/ReadmeTests.cs                  |  4 +--
 src/Meta/test/WikiTests.AgentWithOllama.cs    |  5 +---
 ....CheckingInternetSpeedWithCrewAndOllama.cs |  5 +---
 ...kiTests.GettingStartedWithAmazonBedrock.cs | 17 ++++++-----
 ...eGenerationWithOllamaAndStableDiffusion.cs |  6 +---
 .../test/WikiTests.RagWithOpenAiOllama.cs     |  7 +----
 src/Serve/OpenAI/ServeExtensions.cs           |  4 +--
 23 files changed, 72 insertions(+), 85 deletions(-)

diff --git a/README.md b/README.md
index 58fdbdeb..8c0908f5 100644
--- a/README.md
+++ b/README.md
@@ -63,7 +63,7 @@ var answer = await llm.GenerateAsync(
     Question: {question}
     Helpful Answer:
-    """, cancellationToken: CancellationToken.None).ConfigureAwait(false);
+    """);

 Console.WriteLine($"LLM answer: {answer}"); // The cloaked figure.
@@ -80,7 +80,7 @@ var chain =
     | CombineDocuments(outputKey: "context") // combine documents together and put them into context
     | Template(promptTemplate) // replace context and question in the prompt with their values
     | LLM(llm.UseConsoleForDebug()); // send the result to the language model
-var chainAnswer = await chain.RunAsync("text", CancellationToken.None); // get chain result
+var chainAnswer = await chain.RunAsync("text"); // get chain result

 Console.WriteLine("Chain Answer:"+ chainAnswer); // print the result
diff --git a/examples/LangChain.Samples.AspNet/Controllers/AnthropicSampleController.cs b/examples/LangChain.Samples.AspNet/Controllers/AnthropicSampleController.cs
index 112c1c69..3329cb9b 100644
--- a/examples/LangChain.Samples.AspNet/Controllers/AnthropicSampleController.cs
+++ b/examples/LangChain.Samples.AspNet/Controllers/AnthropicSampleController.cs
@@ -8,11 +8,11 @@ namespace LangChain.Samples.AspNet.Controllers;
 [Route("[controller]")]
 public class AnthropicSampleController : ControllerBase
 {
-    private readonly AnthropicModel _anthropicModel;
+    private readonly AnthropicChatModel _anthropicModel;
     private readonly ILogger _logger;

     public AnthropicSampleController(
-        AnthropicModel anthropicModel,
+        AnthropicChatModel anthropicModel,
         ILogger logger)
     {
         _anthropicModel = anthropicModel;
diff --git a/examples/LangChain.Samples.AspNet/Controllers/OpenAiSampleController.cs b/examples/LangChain.Samples.AspNet/Controllers/OpenAiSampleController.cs
index 0f40c88c..c3bd7515 100644
--- a/examples/LangChain.Samples.AspNet/Controllers/OpenAiSampleController.cs
+++ b/examples/LangChain.Samples.AspNet/Controllers/OpenAiSampleController.cs
@@ -1,3 +1,4 @@
+using LangChain.Providers;
 using LangChain.Providers.OpenAI;
 using Microsoft.AspNetCore.Mvc;
 using OpenAI;
diff --git a/examples/LangChain.Samples.Azure/Program.cs b/examples/LangChain.Samples.Azure/Program.cs
index 38af7f3d..9deb8f98 100644
--- a/examples/LangChain.Samples.Azure/Program.cs
+++ b/examples/LangChain.Samples.Azure/Program.cs
@@ -1,4 +1,5 @@
-using LangChain.Providers.Azure;
+using LangChain.Providers;
+using LangChain.Providers.Azure;

 var provider = new AzureOpenAiProvider(apiKey: "AZURE_OPEN_AI_KEY", endpoint: "ENDPOINT");
 var llm = new AzureOpenAiChatModel(provider, id: "DEPLOYMENT_NAME");
diff --git a/examples/LangChain.Samples.LocalRAG/LangChain.Samples.LocalRAG.csproj b/examples/LangChain.Samples.LocalRAG/LangChain.Samples.LocalRAG.csproj
index 80d04a8b..e345802c 100644
--- a/examples/LangChain.Samples.LocalRAG/LangChain.Samples.LocalRAG.csproj
+++ b/examples/LangChain.Samples.LocalRAG/LangChain.Samples.LocalRAG.csproj
@@ -14,7 +14,7 @@
-
+
diff --git a/examples/LangChain.Samples.LocalRAG/Program.cs b/examples/LangChain.Samples.LocalRAG/Program.cs
index 992cda50..e02e92d2 100644
--- a/examples/LangChain.Samples.LocalRAG/Program.cs
+++ b/examples/LangChain.Samples.LocalRAG/Program.cs
@@ -4,11 +4,7 @@ using LangChain.Extensions;
 using Ollama;

-var provider = new OllamaProvider(options: new RequestOptions
-{
-    Stop = ["\n"],
-    Temperature = 0.0f,
-});
+var provider = new OllamaProvider();

 var embeddingModel = new OllamaEmbeddingModel(provider, id: "all-minilm");
 var llm = new OllamaChatModel(provider, id: "llama3");
@@ -36,6 +32,6 @@ Keep the answer as short as possible.
     Question: {question}
     Helpful Answer:
-    """).ConfigureAwait(false);
+    """);

 Console.WriteLine($"LLM answer: {answer}");
\ No newline at end of file
diff --git a/examples/LangChain.Samples.OpenAI/Program.cs b/examples/LangChain.Samples.OpenAI/Program.cs
index c152848d..5b15a16c 100644
--- a/examples/LangChain.Samples.OpenAI/Program.cs
+++ b/examples/LangChain.Samples.OpenAI/Program.cs
@@ -1,4 +1,5 @@
-using LangChain.Providers.OpenAI.Predefined;
+using LangChain.Providers;
+using LangChain.Providers.OpenAI.Predefined;

 var apiKey =
     Environment.GetEnvironmentVariable("OPENAI_API_KEY") ??
diff --git a/examples/LangChain.Samples.Serve.OpenAI/LangChain.Samples.Serve.OpenAI.csproj b/examples/LangChain.Samples.Serve.OpenAI/LangChain.Samples.Serve.OpenAI.csproj
index 74f6de38..8d5f8910 100644
--- a/examples/LangChain.Samples.Serve.OpenAI/LangChain.Samples.Serve.OpenAI.csproj
+++ b/examples/LangChain.Samples.Serve.OpenAI/LangChain.Samples.Serve.OpenAI.csproj
@@ -7,7 +7,7 @@
-
+
diff --git a/examples/LangChain.Samples.Serve/LangChain.Samples.Serve.csproj b/examples/LangChain.Samples.Serve/LangChain.Samples.Serve.csproj
index a3e42c0c..93318e67 100644
--- a/examples/LangChain.Samples.Serve/LangChain.Samples.Serve.csproj
+++ b/examples/LangChain.Samples.Serve/LangChain.Samples.Serve.csproj
@@ -7,7 +7,7 @@
-
+
diff --git a/examples/LangChain.Samples.Serve/Program.cs b/examples/LangChain.Samples.Serve/Program.cs
index 561b64a4..9c0b7cd0 100644
--- a/examples/LangChain.Samples.Serve/Program.cs
+++ b/examples/LangChain.Samples.Serve/Program.cs
@@ -16,12 +16,8 @@ builder.Services.AddLangChainServe();

 // 2. Create a model
-var model = new OllamaChatModel(new OllamaProvider(options: new RequestOptions
-{
-    Temperature = 0,
-    Stop = ["User:"],
-}), "llama3.1");
-
+var provider = new OllamaProvider();
+var model = new OllamaChatModel(provider, id: "llama3.1");

 // 3. Optional. Add custom name generator
 // After initiating conversation, this will generate a name for it
diff --git a/src/Core/src/Chains/LLM/LLMChain.cs b/src/Core/src/Chains/LLM/LLMChain.cs
index 654c9536..84d5a9a7 100644
--- a/src/Core/src/Chains/LLM/LLMChain.cs
+++ b/src/Core/src/Chains/LLM/LLMChain.cs
@@ -126,7 +126,7 @@ protected override async Task CallAsync(IChainValues values, Callb
             }, new ChatSettings
             {
                 StopSequences = stop,
-            }, cancellationToken).ConfigureAwait(false);
+            }, cancellationToken);
             if (Verbose)
             {
                 Console.WriteLine(string.Join("\n\n", response.Messages.Except(chatMessages)));
@@ -136,7 +136,7 @@ protected override async Task CallAsync(IChainValues values, Callb
         var returnDict = new Dictionary();

         var outputKey = string.IsNullOrEmpty(OutputKey) ? "text" : OutputKey;
-        returnDict[outputKey] = response.Messages.Last().Content;
+        returnDict[outputKey] = response.LastMessageContent;

         returnDict.TryAddKeyValues(values.Value);

@@ -176,7 +176,7 @@ private async Task GenerateAsync(
         var (prompts, stop) = await PreparePromptsAsync(inputs, runManager, cancellationToken).ConfigureAwait(false);

         var responseTasks = prompts
-            .Select(prompt => Llm.GenerateAsync(
+            .Select(async prompt => await Llm.GenerateAsync(
                 request: new ChatRequest
                 {
                     Messages = prompt.ToChatMessages(),
@@ -194,7 +194,7 @@ private async Task GenerateAsync(
                 {
                     new()
                     {
-                        Text = response.Messages.Last().Content
+                        Text = response.LastMessageContent
                     }
                 })
             .ToArray();
diff --git a/src/Core/src/Chains/StackableChains/LLMChain.cs b/src/Core/src/Chains/StackableChains/LLMChain.cs
index 0ae267d4..b97dca3d 100644
--- a/src/Core/src/Chains/StackableChains/LLMChain.cs
+++ b/src/Core/src/Chains/StackableChains/LLMChain.cs
@@ -66,8 +66,8 @@ protected override async Task InternalCallAsync(
             }
         }

-        var response = await _llm.GenerateAsync(prompt, settings: _settings, cancellationToken: cancellationToken).ConfigureAwait(false);
-        responseContent = response.Messages.Last().Content;
+        var response = await _llm.GenerateAsync(prompt, settings: _settings, cancellationToken: cancellationToken);
+        responseContent = response.LastMessageContent;
         if (_useCache) SaveCachedAnswer(prompt, responseContent);
         values.Value[OutputKeys[0]] = responseContent;
diff --git a/src/Core/src/Memory/MessageFormatter.cs b/src/Core/src/Memory/MessageFormatter.cs
index baff45f1..1bd77392 100644
--- a/src/Core/src/Memory/MessageFormatter.cs
+++ b/src/Core/src/Memory/MessageFormatter.cs
@@ -24,10 +24,10 @@ private string GetPrefix(MessageRole role)
             case MessageRole.Ai:
                 return AiPrefix;

-            case MessageRole.FunctionCall:
+            case MessageRole.ToolCall:
                 return FunctionCallPrefix;

-            case MessageRole.FunctionResult:
+            case MessageRole.ToolResult:
                 return FunctionResultPrefix;

             case MessageRole.Chat:
diff --git a/src/Core/test/UnitTests/Chains/ConversationalRetrieval/ConversationalRetrievalChainTests.cs b/src/Core/test/UnitTests/Chains/ConversationalRetrieval/ConversationalRetrievalChainTests.cs
index cff0bd67..90fb9072 100644
--- a/src/Core/test/UnitTests/Chains/ConversationalRetrieval/ConversationalRetrievalChainTests.cs
+++ b/src/Core/test/UnitTests/Chains/ConversationalRetrieval/ConversationalRetrievalChainTests.cs
@@ -56,15 +56,7 @@ public async Task Call_Ok()
         var questionGeneratorLlmMock = new Mock();
         questionGeneratorLlmMock
             .Setup(v => v.GenerateAsync(It.IsAny(), It.IsAny(), It.IsAny()))
-            .Returns((_, _, _) =>
-            {
-                return Task.FromResult(new ChatResponse
-                {
-                    Messages = new[] { Message.Ai("Bob's asking what is hist name") },
-                    Usage = Usage.Empty,
-                    UsedSettings = ChatSettings.Default,
-                });
-            });
+            .Returns((_, _, _) => GetChatResponses());

         var llmInput = new LlmChainInput(questionGeneratorLlmMock.Object, prompt);
         var questionGeneratorChain = new LlmChain(llmInput);
@@ -107,6 +99,20 @@ public async Task Call_Ok()
                 It.Is(request => request.Messages.Count == 1),
                 It.IsAny(),
                 It.IsAny()));
+        return;
+
+        // Helper method to create IAsyncEnumerable
+        async IAsyncEnumerable GetChatResponses()
+        {
+            await Task.CompletedTask;
+
+            yield return new ChatResponse
+            {
+                Messages = new[] { Message.Ai("Bob's asking what is hist name") },
+                Usage = Usage.Empty,
+                UsedSettings = ChatSettings.Default,
+            };
+        }
     }

     [Test]
diff --git a/src/Directory.Packages.props b/src/Directory.Packages.props
index e7b6eb3c..493227aa 100644
--- a/src/Directory.Packages.props
+++ b/src/Directory.Packages.props
@@ -4,7 +4,7 @@
-
+
@@ -17,24 +17,24 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
diff --git a/src/Extensions/DependencyInjection/src/ServiceCollectionExtensions.Anthropic.cs b/src/Extensions/DependencyInjection/src/ServiceCollectionExtensions.Anthropic.cs
index 5a27040e..9364e644 100644
--- a/src/Extensions/DependencyInjection/src/ServiceCollectionExtensions.Anthropic.cs
+++ b/src/Extensions/DependencyInjection/src/ServiceCollectionExtensions.Anthropic.cs
@@ -27,7 +27,7 @@ public static IServiceCollection AddAnthropic(
             .AddOptions()
             .BindConfiguration(configSectionPath: AnthropicConfiguration.SectionName);
         _ = services
-            .AddHttpClient();
+            .AddHttpClient();
         _ = services
             .AddScoped(static services => AnthropicProvider.FromConfiguration(
                 configuration: services.GetRequiredService>().Value));
diff --git a/src/Meta/test/ReadmeTests.cs b/src/Meta/test/ReadmeTests.cs
index 60fcef0e..2c78198a 100644
--- a/src/Meta/test/ReadmeTests.cs
+++ b/src/Meta/test/ReadmeTests.cs
@@ -97,7 +97,7 @@ Keep the answer as short as possible.
     Question: {question}
     Helpful Answer:
-    """, cancellationToken: CancellationToken.None).ConfigureAwait(false);
+    """);

 Console.WriteLine($"LLM answer: {answer}"); // The cloaked figure.
@@ -152,7 +152,7 @@ Human will provide you with sentence about pet. You need to answer with pet name
     Answer: Jerry
     Human: {similarDocuments.AsString()}
     Answer:
-    """, cancellationToken: CancellationToken.None).ConfigureAwait(false);
+    """);

 Console.WriteLine($"LLM answer: {petNameResponse}");
 Console.WriteLine($"Total usage: {llm.Usage}");
diff --git a/src/Meta/test/WikiTests.AgentWithOllama.cs b/src/Meta/test/WikiTests.AgentWithOllama.cs
index c24d403b..c175da88 100644
--- a/src/Meta/test/WikiTests.AgentWithOllama.cs
+++ b/src/Meta/test/WikiTests.AgentWithOllama.cs
@@ -23,10 +23,7 @@ public async Task AgentWithOllama()
         //// We will start with basic ollama setup and simple question to the LLM:
         var provider = new OllamaProvider(
Default is "http://localhost:11434/api" - options: new RequestOptions - { - Temperature = 0, - }); + ); var model = new OllamaChatModel(provider, id: "llama3.1").UseConsoleForDebug(); var chain = diff --git a/src/Meta/test/WikiTests.CheckingInternetSpeedWithCrewAndOllama.cs b/src/Meta/test/WikiTests.CheckingInternetSpeedWithCrewAndOllama.cs index d59ff00b..a6b47c58 100644 --- a/src/Meta/test/WikiTests.CheckingInternetSpeedWithCrewAndOllama.cs +++ b/src/Meta/test/WikiTests.CheckingInternetSpeedWithCrewAndOllama.cs @@ -29,10 +29,7 @@ public async Task CheckingInternetSpeedWithCrewAndOllama() var provider = new OllamaProvider( // url: "http://172.16.50.107:11434", // if you have ollama running on different computer/port. Default is "http://localhost:11434/api" - options: new RequestOptions - { - Temperature = 0, - }); + ); var model = new OllamaChatModel(provider, id: "llama3.1").UseConsoleForDebug(); //// ## Making a tool diff --git a/src/Meta/test/WikiTests.GettingStartedWithAmazonBedrock.cs b/src/Meta/test/WikiTests.GettingStartedWithAmazonBedrock.cs index 537b2bbf..a400d94e 100644 --- a/src/Meta/test/WikiTests.GettingStartedWithAmazonBedrock.cs +++ b/src/Meta/test/WikiTests.GettingStartedWithAmazonBedrock.cs @@ -1,4 +1,5 @@ using Amazon; +using LangChain.Providers; using LangChain.Providers.Amazon.Bedrock; using LangChain.Providers.Amazon.Bedrock.Predefined.Anthropic; @@ -94,19 +95,19 @@ public async Task GettingStartedWithAmazonBedrock() UseStreaming = true } }; - - llm.PromptSent += (_, prompt) => Console.WriteLine($"Prompt: {prompt}"); - llm.PartialResponseGenerated += (_, delta) => Console.Write(delta); - llm.CompletedResponseGenerated += (_, prompt) => Console.WriteLine($"Completed response: {prompt}"); - + + llm.RequestSent += (_, request) => Console.WriteLine($"Prompt: {request.Messages.AsHistory()}"); + llm.DeltaReceived += (_, delta) => Console.Write(delta.Content); + llm.ResponseReceived += (_, response) => Console.WriteLine($"Completed response: {response}"); + var prompt = @" you are a comic book writer. you will be given a question and you will answer it. question: who are 10 of the most popular superheros and what are their powers?"; - + string response = await llm.GenerateAsync(prompt); - + Console.WriteLine(response); - + //// In conclusion, by following these steps, you can set up the AWS CLI, //// configure the Amazon Bedrock provider, and start using the supported foundation models in your code. //// With the AWS CLI and Bedrock provider properly configured, diff --git a/src/Meta/test/WikiTests.ImageGenerationWithOllamaAndStableDiffusion.cs b/src/Meta/test/WikiTests.ImageGenerationWithOllamaAndStableDiffusion.cs index 779cf06a..377bf168 100644 --- a/src/Meta/test/WikiTests.ImageGenerationWithOllamaAndStableDiffusion.cs +++ b/src/Meta/test/WikiTests.ImageGenerationWithOllamaAndStableDiffusion.cs @@ -37,11 +37,7 @@ public async Task ImageGenerationWithOllamaAndStableDiffusion() //// ## Ollama model //// We will use latest version of `llama3.1` for our task. If you don't have mistral yet - it will be downloaded. - var provider = new OllamaProvider( - options: new RequestOptions - { - Temperature = 0, - }); + var provider = new OllamaProvider(); var llm = new OllamaChatModel(provider, id: "llama3.1").UseConsoleForDebug(); //// Here we are stopping generation after `\n` symbol appears. Mistral will put a new line(`\n`) symbol after prompt is generated. 
diff --git a/src/Meta/test/WikiTests.RagWithOpenAiOllama.cs b/src/Meta/test/WikiTests.RagWithOpenAiOllama.cs
index cd3a9eb2..f9b54cbd 100644
--- a/src/Meta/test/WikiTests.RagWithOpenAiOllama.cs
+++ b/src/Meta/test/WikiTests.RagWithOpenAiOllama.cs
@@ -67,12 +67,7 @@ public async Task RagWithOpenAiOllama()
         //// This is free, assuming it is running locally--this code assumes it is available at https://localhost:11434.

         // prepare Ollama with mistral model
-        var providerOllama = new OllamaProvider(
-            options: new RequestOptions
-            {
-                Stop = ["\n"],
-                Temperature = 0.0f,
-            });
+        var providerOllama = new OllamaProvider();
         var embeddingModelOllama = new OllamaEmbeddingModel(providerOllama, id: "nomic-embed-text");
         var llmOllama = new OllamaChatModel(providerOllama, id: "llama3.1").UseConsoleForDebug();
diff --git a/src/Serve/OpenAI/ServeExtensions.cs b/src/Serve/OpenAI/ServeExtensions.cs
index 77afab77..ed39e1d5 100644
--- a/src/Serve/OpenAI/ServeExtensions.cs
+++ b/src/Serve/OpenAI/ServeExtensions.cs
@@ -46,7 +46,7 @@ public static WebApplication UseLangChainServeOpenAi(this WebApplication app, Ac
                     _ => throw new NotImplementedException(),
                 }
             }).ToList(),
-        }).ConfigureAwait(false);
+        });

         return Results.Ok(new CreateChatCompletionResponse
         {
@@ -60,7 +60,7 @@ public static WebApplication UseLangChainServeOpenAi(this WebApplication app, Ac
                 {
                     Message = new ChatCompletionResponseMessage
                     {
-                        Content = response.Messages.Last().Content,
+                        Content = response.LastMessageContent,
                         Role = ChatCompletionResponseMessageRole.Assistant,
                     },
                     Index = 0,
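For orientation, a minimal before/after sketch of the Providers 0.15.2 call-site changes this patch applies across the samples. The Ollama provider, the model id, and the LastMessageContent member are taken from the diff above; the using directives and the prompt string are assumptions for illustration, not part of the patch.

using LangChain.Providers;          // assumed namespace for the shared chat abstractions
using LangChain.Providers.Ollama;   // assumed namespace for OllamaProvider/OllamaChatModel

// Before 0.15.2 (the pattern removed throughout this patch):
//   var provider = new OllamaProvider(options: new RequestOptions { Temperature = 0 });
//   var answer   = await llm.GenerateAsync(prompt, cancellationToken: CancellationToken.None).ConfigureAwait(false);
//   var text     = answer.Messages.Last().Content;

// After 0.15.2 (the pattern the updated samples use):
var provider = new OllamaProvider();                      // provider no longer takes RequestOptions
var llm = new OllamaChatModel(provider, id: "llama3.1");  // model id as used in the samples

var response = await llm.GenerateAsync("What is 2 + 2?"); // explicit CancellationToken is optional
Console.WriteLine(response.LastMessageContent);           // replaces response.Messages.Last().Content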