diff --git a/docs/docs/AIAssistant.md b/docs/docs/AIAssistant.md
index 6d100c6..3a79275 100644
--- a/docs/docs/AIAssistant.md
+++ b/docs/docs/AIAssistant.md
@@ -3,14 +3,16 @@ title: AI Assistant
 ---
 
 # AI Assistant
-The AI Assistant in QuickAdd leverages the power of OpenAI's GPT-3 and GPT-4 models to act as your personal AI assistant within Obsidian. It can streamline your workflows by automating routine tasks and providing intellectual support. To use this feature, you need the QuickAdd plugin and an OpenAI API key.
+
+The AI Assistant in QuickAdd leverages the power of Large Language Models (LLMs) to act as your personal AI assistant within Obsidian. It can streamline your workflows by automating routine tasks and providing intellectual support. To use this feature, you need the QuickAdd plugin and access to a provider of your choice.
 
 ## How to Setup the AI Assistant
+
 To set up the AI Assistant, follow these steps:
 
 1. In Obsidian, create a new folder dedicated to AI prompt templates, e.g. `bins/ai_prompts`.
 2. Navigate to QuickAdd settings and locate the "AI Assistant" section. Specify the path to the folder you created in step 1.
-3. In the same section, paste your OpenAI API key into the "OpenAI API Key" field.
+3. In the same section, add a provider to get started. If you are using OpenAI, add your API key to its [provider](#providers) settings; as of v1.8.x, API keys are stored per provider. The video below is from an older version, but the process is similar.
 
 ![AI Assistant Setup](./Images/AI_Assistant_Setup.gif)
 
@@ -31,46 +33,91 @@ Here's an example of how you can set up a prompt template:
 
 You can also use AI Assistant features from within the [API](./QuickAddAPI.md).
 
+## Providers
+
+QuickAdd supports multiple providers for LLMs.
+The only requirement is that they are OpenAI-compatible, which means their API should be compatible with OpenAI's.
+
+Here are a few providers that are known to work with QuickAdd:
+
+- [OpenAI](https://openai.com)
+- [TogetherAI](https://www.together.ai)
+- [Groq](https://groq.com)
+- [Ollama (local)](https://ollama.com)
+
+Paid providers expose their own hosted API, which you can use with QuickAdd. Free, self-hosted options, such as Ollama, are also supported.
+
+By default, QuickAdd will add the OpenAI provider. You can add more providers by clicking the "Add Provider" button in the AI Assistant settings.
+
+Here's a video showcasing adding Groq as a provider:
+
+
+
+### Local LLMs
+
+You can use your own machine to run LLMs. This is useful if you want to keep your data private, or if you want to use a specific model that isn't available in the cloud.
+To use a local LLM, you need to set up a server that can run the model.
+You can then add the server as a provider in QuickAdd.
+
+One such server is [Ollama](https://ollama.com). Ollama is a free, open-source, self-hosted LLM server. You can set up Ollama on your own machine and then use it as a provider in QuickAdd.
+You can find the [quick start documentation here](https://github.com/ollama/ollama/blob/main/README.md#quickstart).
+Ollama binds to the port `11434` ([src](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-can-i-expose-ollama-on-my-network)), so your provider settings would be as follows:
+
+```
+Name: Ollama
+URL: http://localhost:11434/v1
+API Key: (empty)
+```
+
+And that's it! You can now use Ollama as a provider in QuickAdd.
+Make sure you add the model you want to use. [mistral](https://ollama.com/library/mistral) is a great starting point.
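+
+If you haven't downloaded the model yet, pull it with the Ollama CLI first. A minimal sketch, assuming a stock local Ollama install:
+
+```
+ollama pull mistral
+```
+
+Once the model is pulled, add it to your Ollama provider in QuickAdd via "Add Model", and it will appear in the model dropdowns.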
+
 ## AI Assistant Settings
+
 Within the main AI Assistant settings accessible via QuickAdd settings, you can configure the following options:
 
-- OpenAI API Key: The key to interact with OpenAI's models.
-- Prompt Templates Folder: The location where all your prompt templates reside.
-- Default model: The default OpenAI model to be used.
-- Show Assistant: Toggle for status messages.
-- Default System Prompt Template: Sets the behavior of the model.
+- Providers: The LLM providers available to the assistant, along with their API keys and models.
+- Prompt Templates Folder: The location where all your prompt templates reside.
+- Default model: The default model to be used.
+- Show Assistant: Toggle for status messages.
+- Default System Prompt Template: Sets the behavior of the model.
 
 For each individual AI Assistant command in your macros, you can set these options:
 
-- Prompt Template: Determines the prompt template to use.
-- Model: Specifies the OpenAI model to use, overriding the default model.
-- Output Name Variable: Sets the variable name for the AI Assistant's output.
-- System Prompt Template: Determines the models behavior, overriding the default system prompt template.
+- Prompt Template: Determines the prompt template to use.
+- Model: Specifies the model to use, overriding the default model.
+- Output Name Variable: Sets the variable name for the AI Assistant's output.
+- System Prompt Template: Determines the model's behavior, overriding the default system prompt template.
 
 You can also tweak model parameters in advanced settings:
 
-- **temperature:** Allows you to adjust the sampling temperature between 0 and 2. Higher values result in more random outputs, while lower values make the output more focused and deterministic.
-- **top_p:** This parameter relates to nucleus sampling. The model considers only the tokens comprising the top 'p' probability mass. For example, 0.1 means only tokens from the top 10% probability mass are considered.
-- **frequency_penalty:** A parameter ranging between -2.0 and 2.0. Positive values penalize new tokens based on their frequency in the existing text, reducing the model's tendency to repeat the same lines.
-- **presence_penalty:** Also ranging between -2.0 and 2.0, positive values penalize new tokens based on their presence in the existing text, encouraging the model to introduce new topics.
+- **temperature:** Allows you to adjust the sampling temperature between 0 and 2. Higher values result in more random outputs, while lower values make the output more focused and deterministic.
+- **top_p:** This parameter relates to nucleus sampling. The model considers only the tokens comprising the top 'p' probability mass. For example, 0.1 means only tokens from the top 10% probability mass are considered.
+- **frequency_penalty:** A parameter ranging between -2.0 and 2.0. Positive values penalize new tokens based on their frequency in the existing text, reducing the model's tendency to repeat the same lines.
+- **presence_penalty:** Also ranging between -2.0 and 2.0, positive values penalize new tokens based on their presence in the existing text, encouraging the model to introduce new topics.
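+
+These parameters map directly onto the OpenAI-style request body that QuickAdd sends to your provider. A minimal sketch of a request tuned for focused, deterministic output (exact field support depends on the provider):
+
+```
+{
+	"model": "gpt-4",
+	"temperature": 0.2,
+	"top_p": 1,
+	"frequency_penalty": 0,
+	"presence_penalty": 0,
+	"messages": [
+		{ "role": "system", "content": "You are a helpful assistant." },
+		{ "role": "user", "content": "Summarize this note." }
+	]
+}
+```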
 
 ## AI-Powered Workflows
+
 You can create powerful workflows utilizing the AI Assistant. Some examples are:
 
-- **Generating Writing Prompts:** Using links to related notes to generate writing prompts.
-- **Summarizer:** Create summaries of selected text.
-- **Transform Selected:** Transform selected text based on provided instructions.
-- **Flashcard Creator:** Generate flashcards based on selected text.
-- **Get Me Started Writing About…:** Generate points to kickstart your writing on a given topic.
-- **Manual Prompt:** Provide a manual prompt to the AI assistant.
-- **Alternative Viewpoints:** Obtain alternative perspectives and improvements on your draft.
-- **Prompt Chaining:** Chain multiple prompts together, with each prompt using the output of the previous one.
+- **Generating Writing Prompts:** Using links to related notes to generate writing prompts.
+- **Summarizer:** Create summaries of selected text.
+- **Transform Selected:** Transform selected text based on provided instructions.
+- **Flashcard Creator:** Generate flashcards based on selected text.
+- **Get Me Started Writing About…:** Generate points to kickstart your writing on a given topic.
+- **Manual Prompt:** Provide a manual prompt to the AI assistant.
+- **Alternative Viewpoints:** Obtain alternative perspectives and improvements on your draft.
+- **Prompt Chaining:** Chain multiple prompts together, with each prompt using the output of the previous one.
 
 All of these examples, and more, can be found in [Christian's blog post about the AI Assistant](https://bagerbach.com/blog/obsidian-ai).
 
 Please note, using the AI Assistant will incur costs depending on the API usage. Set spending limits on your OpenAI account to avoid unexpected expenses. Play around with different models to find the one that best suits your needs.
 
 ### Example: Summarizer
+
 Here's a simple prompt where you select some text, and then use the assistant with that prompt.
 
 Then it'll spit out an AI-generated summary:
 
@@ -79,4 +126,4 @@ Please summarize the following text. Use only the text itself as material for su
 
 {{value}}
 ```
 
-You can use the getting-started demonstration shown earlier to set this up.
\ No newline at end of file
+You can use the getting-started demonstration shown earlier to set this up.
diff --git a/src/ai/AIAssistant.ts b/src/ai/AIAssistant.ts
index 27a68b5..773af50 100644
--- a/src/ai/AIAssistant.ts
+++ b/src/ai/AIAssistant.ts
@@ -1,22 +1,29 @@
 import GenericSuggester from "src/gui/GenericSuggester/genericSuggester";
-import type { Model } from "./models";
 import { TFile } from "obsidian";
 import { getMarkdownFilesInFolder } from "src/utilityObsidian";
 import invariant from "src/utils/invariant";
 import type { OpenAIModelParameters } from "./OpenAIModelParameters";
 import { settingsStore } from "src/settingsStore";
-import { encodingForModel } from "js-tiktoken";
+import type { TiktokenModel } from "js-tiktoken";
+import { encodingForModel, getEncoding } from "js-tiktoken";
 import { OpenAIRequest } from "./OpenAIRequest";
 import { makeNoticeHandler } from "./makeNoticeHandler";
-import { getModelMaxTokens } from "./getModelMaxTokens";
+import type { Model } from "./Provider";
+import { getModelMaxTokens } from "./aiHelpers";
 
 export const getTokenCount = (text: string, model: Model) => {
 	// gpt-3.5-turbo-16k is a special case - it isn't in the library list yet. Same with gpt-4-1106-preview and gpt-3.5-turbo-1106.
-	let m = model === "gpt-3.5-turbo-16k" ? "gpt-3.5-turbo" : model;
+	let m = model.name === "gpt-3.5-turbo-16k" ? "gpt-3.5-turbo" : model.name;
 	m = m === "gpt-4-1106-preview" ? "gpt-4" : m;
 	m = m === "gpt-3.5-turbo-1106" ? "gpt-3.5-turbo" : m;
"gpt-3.5-turbo" : m; - return encodingForModel(m).encode(text).length; + // kind of hacky, but we'll be using this general heuristic to support non-openai models + try { + return encodingForModel(m as TiktokenModel).encode(text).length; + } catch { + const enc = getEncoding("cl100k_base"); + return enc.encode(text).length; + } }; async function repeatUntilResolved( @@ -379,7 +386,7 @@ export async function ChunkedPrompt( ); const maxChunkTokenSize = - getModelMaxTokens(model) / 2 - systemPromptLength; // temp, need to impl. config + getModelMaxTokens(model.name) / 2 - systemPromptLength; // temp, need to impl. config // Whether we should strictly enforce the chunking rules or we should merge chunks that are too small const shouldMerge = settings.shouldMerge ?? true; // temp, need to impl. config @@ -398,7 +405,10 @@ export async function ChunkedPrompt( if (strSize > maxCombinedChunkSize) { throw new Error( - `The chunk "${chunk.slice(0, 25)}..." is too large to fit in a single prompt.` + `The chunk "${chunk.slice( + 0, + 25 + )}..." is too large to fit in a single prompt.` ); } diff --git a/src/ai/OpenAIRequest.ts b/src/ai/OpenAIRequest.ts index 220fa91..dac73b4 100644 --- a/src/ai/OpenAIRequest.ts +++ b/src/ai/OpenAIRequest.ts @@ -1,10 +1,10 @@ -import type { Model } from "./models"; import { requestUrl } from "obsidian"; import type { OpenAIModelParameters } from "./OpenAIModelParameters"; import { settingsStore } from "src/settingsStore"; import { getTokenCount } from "./AIAssistant"; -import { getModelMaxTokens } from "./getModelMaxTokens"; import { preventCursorChange } from "./preventCursorChange"; +import type { Model } from "./Provider"; +import { getModelProvider } from "./aiHelpers"; type ReqResponse = { id: string; @@ -38,25 +38,31 @@ export function OpenAIRequest( const tokenCount = getTokenCount(prompt, model) + getTokenCount(systemPrompt, model); - const maxTokens = getModelMaxTokens(model); + const { maxTokens } = model; if (tokenCount > maxTokens) { throw new Error( - `The ${model} API has a token limit of ${maxTokens}. Your prompt has ${tokenCount} tokens.` + `The ${model.name} API has a token limit of ${maxTokens}. 
 		);
 	}
 
+	const modelProvider = getModelProvider(model.name);
+
+	if (!modelProvider) {
+		throw new Error(`Model ${model.name} not found with any provider.`);
+	}
+
 	try {
 		const restoreCursor = preventCursorChange();
 
 		const _response = requestUrl({
-			url: `https://api.openai.com/v1/chat/completions`,
+			url: `${modelProvider.endpoint}/chat/completions`,
 			method: "POST",
 			headers: {
 				"Content-Type": "application/json",
 				Authorization: `Bearer ${apiKey}`,
 			},
 			body: JSON.stringify({
-				model,
+				model: model.name,
 				...modelParams,
 				messages: [
 					{ role: "system", content: systemPrompt },
@@ -72,7 +78,7 @@ export function OpenAIRequest(
 	} catch (error) {
 		console.log(error);
 		throw new Error(
-			`Error while making request to OpenAI API: ${
+			`Error while making request to ${modelProvider.name}: ${
 				(error as { message: string }).message
 			}`
 		);
diff --git a/src/ai/Provider.ts b/src/ai/Provider.ts
new file mode 100644
index 0000000..8c9ec74
--- /dev/null
+++ b/src/ai/Provider.ts
@@ -0,0 +1,52 @@
+export interface AIProvider {
+	name: string;
+	endpoint: string;
+	apiKey: string;
+	models: Model[];
+}
+
+export interface Model {
+	name: string;
+	maxTokens: number;
+}
+
+const OpenAIProvider: AIProvider = {
+	name: "OpenAI",
+	endpoint: "https://api.openai.com/v1",
+	apiKey: "",
+	models: [
+		{
+			name: "gpt-3.5-turbo",
+			maxTokens: 4096,
+		},
+		{
+			name: "gpt-3.5-turbo-16k",
+			maxTokens: 16384,
+		},
+		{
+			name: "gpt-3.5-turbo-1106",
+			maxTokens: 16385,
+		},
+		{
+			name: "gpt-4",
+			maxTokens: 8192,
+		},
+		{
+			name: "gpt-4-32k",
+			maxTokens: 32768,
+		},
+		{
+			name: "gpt-4-1106-preview",
+			maxTokens: 128000,
+		},
+		{
+			name: "text-davinci-003",
+			maxTokens: 4096,
+		},
+	],
+};
+
+export const DefaultProviders: AIProvider[] = [OpenAIProvider];
diff --git a/src/ai/aiHelpers.ts b/src/ai/aiHelpers.ts
new file mode 100644
index 0000000..ff22fff
--- /dev/null
+++ b/src/ai/aiHelpers.ts
@@ -0,0 +1,39 @@
+import { settingsStore } from "src/settingsStore";
+
+export function getModelNames() {
+	const aiSettings = settingsStore.getState().ai;
+
+	return aiSettings.providers
+		.flatMap((provider) => provider.models)
+		.map((model) => model.name);
+}
+
+export function getModelByName(model: string) {
+	const aiSettings = settingsStore.getState().ai;
+
+	return aiSettings.providers
+		.flatMap((provider) => provider.models)
+		.find((m) => m.name === model);
+}
+
+export function getModelMaxTokens(model: string) {
+	const modelData = getModelByName(model);
+
+	if (modelData) {
+		return modelData.maxTokens;
+	}
+
+	throw new Error(`Model ${model} not found with any provider.`);
+}
+
+export function getModelProvider(modelName: string) {
+	const aiSettings = settingsStore.getState().ai;
+
+	return aiSettings.providers.find((provider) =>
+		provider.models.some((m) => m.name === modelName)
+	);
+}
diff --git a/src/ai/getModelMaxTokens.ts b/src/ai/getModelMaxTokens.ts
deleted file mode 100644
index 74783d6..0000000
--- a/src/ai/getModelMaxTokens.ts
+++ /dev/null
@@ -1,21 +0,0 @@
-import type { Model } from "./models";
-
-
-export function getModelMaxTokens(model: Model) {
-	switch (model) {
-		case "text-davinci-003":
-			return 4096;
-		case "gpt-3.5-turbo":
-			return 4096;
-		case "gpt-4":
-			return 8192;
-		case "gpt-3.5-turbo-16k":
-			return 16384;
-		case "gpt-3.5-turbo-1106":
-			return 16385;
-		case "gpt-4-1106-preview":
-			return 128000;
-		case "gpt-4-32k":
-			return 32768;
-	}
-}
diff --git a/src/ai/models.ts b/src/ai/models.ts
deleted file mode 100644
index b550f85..0000000
--- a/src/ai/models.ts
+++ /dev/null
@@ -1,4 +0,0 @@
-export const models = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-1106", "gpt-4", "gpt-4-1106-preview", "gpt-4-32k", "text-davinci-003"] as const;
-export const models_and_ask_me = [...models, "Ask me"] as const;
-export type Model = typeof models[number];
-export type Models_And_Ask_Me = typeof models_and_ask_me[number];
diff --git a/src/engine/MacroChoiceEngine.ts b/src/engine/MacroChoiceEngine.ts
index 9dfc4b0..cd550ff 100644
--- a/src/engine/MacroChoiceEngine.ts
+++ b/src/engine/MacroChoiceEngine.ts
@@ -28,8 +28,13 @@ import { waitFor } from "src/utility";
 import type { IAIAssistantCommand } from "src/types/macros/QuickCommands/IAIAssistantCommand";
 import { runAIAssistant } from "src/ai/AIAssistant";
 import { settingsStore } from "src/settingsStore";
-import { models } from "src/ai/models";
 import { CompleteFormatter } from "src/formatters/completeFormatter";
+import {
+	getModelByName,
+	getModelNames,
+	getModelProvider,
+} from "src/ai/aiHelpers";
+import type { Model } from "src/ai/Provider";
 
 export class MacroChoiceEngine extends QuickAddChoiceEngine {
 	public choice: IMacroChoice;
@@ -305,20 +310,35 @@ export class MacroChoiceEngine extends QuickAddChoiceEngine {
 
 		const aiSettings = settingsStore.getState().ai;
 
-		const options = [...models];
-		const model =
+		const options = getModelNames();
+		const modelName: string =
 			command.model === "Ask me"
 				? await GenericSuggester.Suggest(app, options, options)
 				: command.model;
+
+		const model: Model | undefined = getModelByName(modelName);
+
+		if (!model) {
+			throw new Error(`Model ${modelName} not found with any provider.`);
+		}
+
 		const formatter = new CompleteFormatter(
 			app,
 			QuickAdd.instance,
 			this.choiceExecutor
 		);
 
+		const modelProvider = getModelProvider(model.name);
+
+		if (!modelProvider) {
+			throw new Error(
+				`Model ${model.name} not found in the AI providers settings.`
+			);
+		}
+
 		const aiOutputVariables = await runAIAssistant(
 			{
-				apiKey: aiSettings.OpenAIApiKey,
+				apiKey: modelProvider.apiKey,
 				model,
 				outputVariableName: command.outputVariableName,
 				promptTemplate: command.promptTemplate,
diff --git a/src/gui/AIAssistantProvidersModal.ts b/src/gui/AIAssistantProvidersModal.ts
new file mode 100644
index 0000000..8ffc381
--- /dev/null
+++ b/src/gui/AIAssistantProvidersModal.ts
@@ -0,0 +1,280 @@
+/* eslint-disable @typescript-eslint/no-non-null-assertion */
+import type { App } from "obsidian";
+import { ButtonComponent, Modal, Setting } from "obsidian";
+import type { AIProvider } from "src/ai/Provider";
+import { setPasswordOnBlur } from "src/utils/setPasswordOnBlur";
+import GenericInputPrompt from "./GenericInputPrompt/GenericInputPrompt";
+import GenericYesNoPrompt from "./GenericYesNoPrompt/GenericYesNoPrompt";
+import type { IconType } from "src/types/IconType";
+
+export class AIAssistantProvidersModal extends Modal {
+	public waitForClose: Promise<AIProvider[]>;
+
+	private resolvePromise: (settings: AIProvider[]) => void;
+	private rejectPromise: (reason?: unknown) => void;
+
+	private providers: AIProvider[];
+	private selectedProvider: AIProvider | null;
+
+	private _selectedProviderClone: AIProvider | null;
+
+	constructor(providers: AIProvider[], app: App) {
+		super(app);
+
+		this.providers = providers;
+
+		this.waitForClose = new Promise<AIProvider[]>((resolve, reject) => {
+			this.rejectPromise = reject;
+			this.resolvePromise = resolve;
+		});
+
+		this.open();
+		this.display();
+	}
+
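+	/**
+	 * Renders the provider list, or the edit view for the currently
+	 * selected provider. reload() empties the modal content and calls
+	 * this again whenever state changes.
+	 */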
+	private display(): void {
+		const modalName = this.selectedProvider
+			? this.selectedProvider.name
+			: "Providers";
+
+		this.contentEl.createEl("h2", {
+			text: modalName,
+		}).style.textAlign = "center";
+
+		if (this.selectedProvider) {
+			this.addProviderSetting(this.contentEl);
+
+			return;
+		}
+
+		this.addProvidersSetting(this.contentEl);
+	}
+
+	private reload(): void {
+		this.contentEl.empty();
+
+		this.display();
+	}
+
+	addProvidersSetting(container: HTMLElement) {
+		new Setting(container)
+			.setName("Providers")
+			.setDesc("Providers for the AI Assistant")
+			.addButton((button) => {
+				button.setButtonText("Add Provider").onClick(async () => {
+					const providerName = await GenericInputPrompt.Prompt(
+						this.app,
+						"Provider Name"
+					);
+
+					this.providers.push({
+						name: providerName,
+						endpoint: "",
+						apiKey: "",
+						models: [],
+					});
+
+					this.reload();
+				});
+
+				button.setCta();
+			});
+
+		const providersContainer = container.createDiv("providers-container");
+		providersContainer.style.display = "flex";
+		providersContainer.style.flexDirection = "column";
+		providersContainer.style.gap = "10px";
+		providersContainer.style.overflowY = "auto";
+		providersContainer.style.maxHeight = "400px";
+		providersContainer.style.padding = "10px";
+
+		this.providers.forEach((provider, i) => {
+			new Setting(providersContainer)
+				.setName(provider.name)
+				.setDesc(provider.endpoint)
+				.addButton((button) => {
+					button.onClick(async () => {
+						const confirmation = await GenericYesNoPrompt.Prompt(
+							this.app,
+							`Are you sure you want to delete ${provider.name}?`
+						);
+
+						if (!confirmation) {
+							return;
+						}
+
+						this.providers.splice(i, 1);
+						this.reload();
+					});
+
+					button.setWarning();
+					button.setIcon("trash" as IconType);
+				})
+				.addButton((button) => {
+					button.setButtonText("Edit").onClick(() => {
+						this.selectedProvider = provider;
+						this._selectedProviderClone = structuredClone(provider);
+
+						this.reload();
+					});
+				});
+		});
+	}
+
+	addProviderSetting(container: HTMLElement) {
+		this.addNameSetting(container);
+		this.addEndpointSetting(container);
+		this.addApiKeySetting(container);
+
+		this.addProviderModelsSetting(container);
+
+		this.addProviderSettingButtonRow(this.contentEl);
+	}
+
+	addNameSetting(container: HTMLElement) {
+		new Setting(container)
+			.setName("Name")
+			.setDesc("The name of the provider")
+			.addText((text) => {
+				text.setValue(this.selectedProvider!.name).onChange((value) => {
+					this.selectedProvider!.name = value;
+				});
+			});
+	}
+
+	addEndpointSetting(container: HTMLElement) {
+		new Setting(container)
+			.setName("Endpoint")
+			.setDesc("The base URL of the provider's OpenAI-compatible API")
+			.addText((text) => {
+				text.setValue(this.selectedProvider!.endpoint).onChange(
+					(value) => {
+						this.selectedProvider!.endpoint = value;
+					}
+				);
+			});
+	}
+
+	addApiKeySetting(container: HTMLElement) {
+		new Setting(container)
+			.setName("API Key")
+			.setDesc("The API key for this provider")
+			.addText((text) => {
+				setPasswordOnBlur(text.inputEl);
+				text.setValue(this.selectedProvider!.apiKey).onChange(
+					(value) => {
+						this.selectedProvider!.apiKey = value;
+					}
+				);
+			});
+	}
+
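+	/**
+	 * Lists the selected provider's models, each with a delete action, and
+	 * an "Add Model" row that prompts for a model name and max token count.
+	 */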
+	addProviderModelsSetting(container: HTMLElement) {
+		const modelsContainer = container.createDiv("models-container");
+		modelsContainer.style.display = "flex";
+		modelsContainer.style.flexDirection = "column";
+		modelsContainer.style.gap = "10px";
+		modelsContainer.style.overflowY = "auto";
+		modelsContainer.style.maxHeight = "400px";
+		modelsContainer.style.padding = "10px";
+
+		this.selectedProvider!.models.forEach((model, i) => {
+			new Setting(modelsContainer)
+				.setName(model.name)
+				.setDesc(`Max Tokens: ${model.maxTokens}`)
+				.addButton((button) => {
+					button.onClick(async () => {
+						const confirmation = await GenericYesNoPrompt.Prompt(
+							this.app,
+							`Are you sure you want to delete ${model.name}?`
+						);
+
+						if (!confirmation) {
+							return;
+						}
+
+						this.selectedProvider!.models.splice(i, 1);
+						this.reload();
+					});
+
+					button.setWarning();
+					button.setIcon("trash" as IconType);
+				});
+		});
+
+		new Setting(modelsContainer)
+			.setName("Add Model")
+			.addButton((button) => {
+				button.setButtonText("Add Model").onClick(async () => {
+					const modelName = await GenericInputPrompt.Prompt(
+						this.app,
+						"Model Name"
+					);
+
+					const maxTokens = await GenericInputPrompt.Prompt(
+						this.app,
+						"Max Tokens"
+					);
+
+					this.selectedProvider!.models.push({
+						name: modelName,
+						maxTokens: parseInt(maxTokens, 10),
+					});
+
+					this.reload();
+				});
+
+				button.setCta();
+			});
+	}
+
+	addProviderSettingButtonRow(container: HTMLElement) {
+		const buttonRow = container.createDiv("button-row");
+		buttonRow.style.display = "flex";
+		buttonRow.style.justifyContent = "space-between";
+		buttonRow.style.marginTop = "20px";
+
+		const CancelButton = new ButtonComponent(buttonRow);
+		CancelButton.setButtonText("Cancel");
+		CancelButton.setWarning();
+		CancelButton.onClick(() => {
+			if (!this.selectedProvider || !this._selectedProviderClone) return;
+
+			const changesMade = checkObjectDiff(
+				this.selectedProvider,
+				this._selectedProviderClone
+			);
+
+			if (changesMade) {
+				// Revert edits by copying the pristine clone back onto the
+				// live provider object.
+				Object.assign(this.selectedProvider, this._selectedProviderClone);
+			}
+
+			this.selectedProvider = null;
+			this._selectedProviderClone = null;
+
+			this.reload();
+		});
+
+		const SaveButton = new ButtonComponent(buttonRow);
+		SaveButton.setButtonText("Save");
+		SaveButton.setCta();
+		SaveButton.onClick(() => {
+			this.selectedProvider = null;
+			this._selectedProviderClone = null;
+
+			this.reload();
+		});
+	}
+
+	onClose(): void {
+		if (this.selectedProvider) {
+			// go back to main view
+			this.selectedProvider = null;
+			this.reload();
+			this.open();
+		}
+
+		this.resolvePromise(this.providers);
+		super.onClose();
+	}
+}
+
+function checkObjectDiff(obj1: unknown, obj2: unknown) {
+	return JSON.stringify(obj1) !== JSON.stringify(obj2);
+}
diff --git a/src/gui/AIAssistantSettingsModal.ts b/src/gui/AIAssistantSettingsModal.ts
index 4968412..3d29683 100644
--- a/src/gui/AIAssistantSettingsModal.ts
+++ b/src/gui/AIAssistantSettingsModal.ts
@@ -1,11 +1,10 @@
 import { Modal, Setting, TextAreaComponent } from "obsidian";
-import type { Models_And_Ask_Me } from "src/ai/models";
-import { models_and_ask_me } from "src/ai/models";
 import type { QuickAddSettings } from "src/quickAddSettingsTab";
-import { setPasswordOnBlur } from "src/utils/setPasswordOnBlur";
 import { FormatSyntaxSuggester } from "./suggesters/formatSyntaxSuggester";
 import QuickAdd from "src/main";
 import { FormatDisplayFormatter } from "src/formatters/formatDisplayFormatter";
+import { AIAssistantProvidersModal } from "./AIAssistantProvidersModal";
+import { getModelNames } from "src/ai/aiHelpers";
 
 type AIAssistantSettings = QuickAddSettings["ai"];
 
@@ -38,7 +37,7 @@ export class AIAssistantSettingsModal extends Modal {
 			text: "AI Assistant Settings",
 		}).style.textAlign = "center";
 
-		this.addApiKeySetting(this.contentEl);
+		this.addProvidersSetting(this.contentEl);
 		this.addDefaultModelSetting(this.contentEl);
 		this.addPromptTemplateFolderPathSetting(this.contentEl);
 		this.addShowAssistantSetting(this.contentEl);
@@ -52,32 +51,37 @@ export class AIAssistantSettingsModal extends Modal {
 		this.display();
 	}
 
-	addApiKeySetting(container: HTMLElement) {
+	addProvidersSetting(container: HTMLElement) {
 		new Setting(container)
-			.setName("API Key")
-			.setDesc("The API Key for the AI Assistant")
-			.addText((text) => {
-				setPasswordOnBlur(text.inputEl);
-				text.setValue(this.settings.OpenAIApiKey).onChange((value) => {
-					this.settings.OpenAIApiKey = value;
+			.setName("Providers")
+			.setDesc("The providers for the AI Assistant")
+			.addButton((button) => {
+				button.setButtonText("Edit Providers").onClick(() => {
+					void new AIAssistantProvidersModal(
+						this.settings.providers,
+						this.app
+					).waitForClose.then(() => {
+						this.reload();
+					});
 				});
-
-				text.inputEl.placeholder = "sk-...";
 			});
 	}
-	
+
 	addDefaultModelSetting(container: HTMLElement) {
 		new Setting(container)
 			.setName("Default Model")
 			.setDesc("The default model for the AI Assistant")
 			.addDropdown((dropdown) => {
-				for (const model of models_and_ask_me) {
+				const models = getModelNames();
+				for (const model of models) {
 					dropdown.addOption(model, model);
 				}
+
+				// "Ask me" defers the choice of model to run time.
+				dropdown.addOption("Ask me", "Ask me");
+
 				dropdown.setValue(this.settings.defaultModel);
 				dropdown.onChange((value) => {
-					this.settings.defaultModel = value as Models_And_Ask_Me;
+					this.settings.defaultModel = value;
 				});
 			});
 	}
diff --git a/src/gui/MacroGUIs/AIAssistantCommandSettingsModal.ts b/src/gui/MacroGUIs/AIAssistantCommandSettingsModal.ts
index acecb22..11023a6 100644
--- a/src/gui/MacroGUIs/AIAssistantCommandSettingsModal.ts
+++ b/src/gui/MacroGUIs/AIAssistantCommandSettingsModal.ts
@@ -1,6 +1,4 @@
 import { Modal, Setting, TextAreaComponent, debounce } from "obsidian";
-import type { Models_And_Ask_Me } from "src/ai/models";
-import { models_and_ask_me } from "src/ai/models";
 import { FormatSyntaxSuggester } from "./../suggesters/formatSyntaxSuggester";
 import QuickAdd from "src/main";
 import { FormatDisplayFormatter } from "src/formatters/formatDisplayFormatter";
@@ -16,6 +14,7 @@ import {
 	DEFAULT_TOP_P,
 } from "src/ai/OpenAIModelParameters";
 import { getTokenCount } from "src/ai/AIAssistant";
+import { getModelByName, getModelNames } from "src/ai/aiHelpers";
 
 export class AIAssistantCommandSettingsModal extends Modal {
 	public waitForClose: Promise<IAIAssistantCommand>;
@@ -29,7 +28,10 @@ export class AIAssistantCommandSettingsModal extends Modal {
 
 	private get systemPromptTokenLength(): number {
 		if (this.settings.model === "Ask me") return Number.POSITIVE_INFINITY;
-		return getTokenCount(this.settings.systemPrompt, this.settings.model);
+		const model = getModelByName(this.settings.model);
+		if (!model) return Number.POSITIVE_INFINITY;
+
+		return getTokenCount(this.settings.systemPrompt, model);
 	}
 
 	constructor(settings: IAIAssistantCommand) {
@@ -136,13 +138,16 @@ export class AIAssistantCommandSettingsModal extends Modal {
 			.setName("Model")
 			.setDesc("The model the AI Assistant will use")
 			.addDropdown((dropdown) => {
-				for (const model of models_and_ask_me) {
+				const models = getModelNames();
+				for (const model of models) {
 					dropdown.addOption(model, model);
 				}
+
+				dropdown.addOption("Ask me", "Ask me");
+
 				dropdown.setValue(this.settings.model);
 				dropdown.onChange((value) => {
-					this.settings.model = value as Models_And_Ask_Me;
+					this.settings.model = value;
 
 					this.reload();
 				});
diff --git a/src/gui/MacroGUIs/AIAssistantInfiniteCommandSettingsModal.ts b/src/gui/MacroGUIs/AIAssistantInfiniteCommandSettingsModal.ts
index 90592b2..4e6370c 100644
--- a/src/gui/MacroGUIs/AIAssistantInfiniteCommandSettingsModal.ts
+++ b/src/gui/MacroGUIs/AIAssistantInfiniteCommandSettingsModal.ts
@@ -1,6 +1,4 @@
 import { Modal, Setting, TextAreaComponent, debounce } from "obsidian";
-import type { Model } from "src/ai/models";
-import { models_and_ask_me } from "src/ai/models";
 import { FormatSyntaxSuggester } from "./../suggesters/formatSyntaxSuggester";
 import QuickAdd from "src/main";
 import { FormatDisplayFormatter } from "src/formatters/formatDisplayFormatter";
@@ -13,7 +11,7 @@ import {
 	DEFAULT_TOP_P,
 } from "src/ai/OpenAIModelParameters";
 import { getTokenCount } from "src/ai/AIAssistant";
-import { getModelMaxTokens } from "src/ai/getModelMaxTokens";
+import { getModelByName, getModelNames } from "src/ai/aiHelpers";
 
 export class InfiniteAIAssistantCommandSettingsModal extends Modal {
 	public waitForClose: Promise<IInfiniteAIAssistantCommand>;
@@ -25,7 +23,9 @@ export class InfiniteAIAssistantCommandSettingsModal extends Modal {
 	private showAdvancedSettings = false;
 
 	private get systemPromptTokenLength(): number {
-		return getTokenCount(this.settings.systemPrompt, this.settings.model);
+		const model = getModelByName(this.settings.model);
+		if (!model) return Number.POSITIVE_INFINITY;
+		return getTokenCount(this.settings.systemPrompt, model);
 	}
 
 	constructor(settings: IInfiniteAIAssistantCommand) {
@@ -103,13 +103,16 @@ export class InfiniteAIAssistantCommandSettingsModal extends Modal {
 			.setName("Model")
 			.setDesc("The model the AI Assistant will use")
 			.addDropdown((dropdown) => {
-				for (const model of models_and_ask_me) {
+				const models = getModelNames();
+				for (const model of models) {
 					dropdown.addOption(model, model);
 				}
+
+				dropdown.addOption("Ask me", "Ask me");
+
 				dropdown.setValue(this.settings.model);
 				dropdown.onChange((value) => {
-					this.settings.model = value as Model;
+					this.settings.model = value;
 					this.reload();
 				});
@@ -305,9 +308,15 @@ export class InfiniteAIAssistantCommandSettingsModal extends Modal {
 				"The maximum number of tokens in each chunk, calculated as the chunk token size + prompt template token size + system prompt token size. Make sure you leave room for the model to respond to the prompt."
 			)
 			.addSlider((slider) => {
-				const modelMaxTokens = getModelMaxTokens(this.settings.model);
+				const model = getModelByName(this.settings.model);
+
+				if (!model) {
+					throw new Error(
+						`Model ${this.settings.model} not found in settings`
+					);
+				}
 
-				slider.setLimits(1, modelMaxTokens - this.systemPromptTokenLength, 1);
+				slider.setLimits(1, model.maxTokens - this.systemPromptTokenLength, 1);
 				slider.setDynamicTooltip();
 				slider.setValue(this.settings.maxChunkTokens);
diff --git a/src/migrations/addDefaultAIProviders.ts b/src/migrations/addDefaultAIProviders.ts
new file mode 100644
index 0000000..248593d
--- /dev/null
+++ b/src/migrations/addDefaultAIProviders.ts
@@ -0,0 +1,38 @@
+import { DefaultProviders } from "src/ai/Provider";
+import type { Migration } from "./Migrations";
+import { settingsStore } from "src/settingsStore";
+
+const addDefaultAIProviders: Migration = {
+	description: "Add default AI providers to the settings.",
+	// eslint-disable-next-line @typescript-eslint/require-await
+	migrate: async (_) => {
+		const ai = settingsStore.getState().ai;
+
+		// Clone each default so the module-level DefaultProviders objects
+		// aren't mutated when the user's old API key is carried over.
+		const defaultProvidersWithOpenAIKey = DefaultProviders.map(
+			(provider) => {
+				const clone = structuredClone(provider);
+
+				if (
+					clone.name === "OpenAI" &&
+					"OpenAIApiKey" in ai &&
+					typeof ai.OpenAIApiKey === "string"
+				) {
+					clone.apiKey = ai.OpenAIApiKey;
+				}
+
+				return clone;
+			}
+		);
+
+		if ("OpenAIApiKey" in ai) {
+			delete ai.OpenAIApiKey;
+		}
+
+		settingsStore.setState({
+			ai: {
+				...settingsStore.getState().ai,
+				providers: defaultProvidersWithOpenAIKey,
+			},
+		});
+	},
+};
+
+export default addDefaultAIProviders;
diff --git a/src/migrations/migrate.ts b/src/migrations/migrate.ts
index d624b34..2dfee3f 100644
--- a/src/migrations/migrate.ts
+++ b/src/migrations/migrate.ts
@@ -6,13 +6,15 @@ import useQuickAddTemplateFolder from "./useQuickAddTemplateFolder";
 import incrementFileNameSettingMoveToDefaultBehavior from "./incrementFileNameSettingMoveToDefaultBehavior";
 import mutualExclusionInsertAfterAndWriteToBottomOfFile from "./mutualExclusionInsertAfterAndWriteToBottomOfFile";
 import setVersionAfterUpdateModalRelease from "./setVersionAfterUpdateModalRelease";
+import addDefaultAIProviders from "./addDefaultAIProviders";
 
 const migrations: Migrations = {
 	migrateToMacroIDFromEmbeddedMacro,
 	useQuickAddTemplateFolder,
 	incrementFileNameSettingMoveToDefaultBehavior,
 	mutualExclusionInsertAfterAndWriteToBottomOfFile,
-	setVersionAfterUpdateModalRelease
+	setVersionAfterUpdateModalRelease,
+	addDefaultAIProviders,
 };
 
 async function migrate(plugin: QuickAdd): Promise<void> {
diff --git a/src/quickAddApi.ts b/src/quickAddApi.ts
index 9e44303..a4cc7bc 100644
--- a/src/quickAddApi.ts
+++ b/src/quickAddApi.ts
@@ -14,9 +14,13 @@ import { MarkdownView } from "obsidian";
 import GenericWideInputPrompt from "./gui/GenericWideInputPrompt/GenericWideInputPrompt";
 import { ChunkedPrompt, Prompt, getTokenCount } from "./ai/AIAssistant";
 import { settingsStore } from "./settingsStore";
-import { models, type Model } from "./ai/models";
 import type { OpenAIModelParameters } from "./ai/OpenAIModelParameters";
-import { getModelMaxTokens } from "./ai/getModelMaxTokens";
+import type { Model } from "./ai/Provider";
+import {
+	getModelByName,
+	getModelNames,
+	getModelProvider,
+} from "./ai/aiHelpers";
 
 export class QuickAddApi {
 	public static GetApi(
@@ -127,11 +131,19 @@ export class QuickAddApi {
 					choiceExecutor
 				).format;
 
+				const modelProvider = getModelProvider(model.name);
+
+				if (!modelProvider) {
+					throw new Error(
+						`Model '${model.name}' not found in any provider`
+					);
+				}
+
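+				// The matched provider supplies both the endpoint and the
+				// API key used for the completion request below.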
 				const assistantRes = await Prompt(
 					{
 						model,
 						prompt,
-						apiKey: AISettings.OpenAIApiKey,
+						apiKey: modelProvider.apiKey,
 						modelOptions: settings?.modelOptions ?? {},
 						outputVariableName:
 							settings?.variableName ?? "output",
@@ -189,13 +201,28 @@ export class QuickAddApi {
 					choiceExecutor
 				).format;
 
+				const modelProvider = getModelProvider(model.name);
+
+				if (!modelProvider) {
+					throw new Error(
+						`Model '${model.name}' not found in any provider`
+					);
+				}
+
+				if (!modelProvider.apiKey) {
+					throw new Error(
+						`Model '${model.name}' requires an API key`
+					);
+				}
+
 				const assistantRes = await ChunkedPrompt(
 					{
 						model,
 						text,
 						promptTemplate,
 						chunkSeparator: settings?.chunkSeparator ?? /\n/,
-						apiKey: AISettings.OpenAIApiKey,
+						apiKey: modelProvider.apiKey,
 						modelOptions: settings?.modelOptions ?? {},
 						outputVariableName:
 							settings?.variableName ?? "output",
@@ -230,10 +257,16 @@ export class QuickAddApi {
 				return assistantRes;
 			},
 			getModels: () => {
-				return models;
+				return getModelNames();
 			},
-			getMaxTokens: (model: Model) => {
-				return getModelMaxTokens(model);
+			getMaxTokens: (modelName: string) => {
+				const model = getModelByName(modelName);
+
+				if (!model) {
+					throw new Error(`Model ${modelName} not found.`);
+				}
+
+				return model.maxTokens;
 			},
 			countTokens(text: string, model: Model) {
 				return getTokenCount(text, model);
diff --git a/src/quickAddSettingsTab.ts b/src/quickAddSettingsTab.ts
index df58534..0a3b054 100644
--- a/src/quickAddSettingsTab.ts
+++ b/src/quickAddSettingsTab.ts
@@ -6,7 +6,8 @@ import ChoiceView from "./gui/choiceList/ChoiceView.svelte";
 import type { IMacro } from "./types/macros/IMacro";
 import { GenericTextSuggester } from "./gui/suggesters/genericTextSuggester";
 import { settingsStore } from "./settingsStore";
-import type { Models_And_Ask_Me } from "./ai/models";
+import { DefaultProviders, type AIProvider, type Model } from "./ai/Provider";
 
 export interface QuickAddSettings {
 	choices: IChoice[];
@@ -22,11 +23,11 @@ export interface QuickAddSettings {
 	 */
 	disableOnlineFeatures: boolean;
 	ai: {
-		OpenAIApiKey: string;
-		defaultModel: Models_And_Ask_Me;
+		defaultModel: Model["name"] | "Ask me";
 		defaultSystemPrompt: string;
 		promptTemplatesFolderPath: string;
 		showAssistant: boolean;
+		providers: AIProvider[];
 	};
 	migrations: {
 		migrateToMacroIDFromEmbeddedMacro: boolean;
@@ -34,6 +35,7 @@ export interface QuickAddSettings {
 		incrementFileNameSettingMoveToDefaultBehavior: boolean;
 		mutualExclusionInsertAfterAndWriteToBottomOfFile: boolean;
 		setVersionAfterUpdateModalRelease: boolean;
+		addDefaultAIProviders: boolean;
 	};
 }
 
@@ -47,11 +49,11 @@ export const DEFAULT_SETTINGS: QuickAddSettings = {
 	version: "0.0.0",
 	disableOnlineFeatures: true,
 	ai: {
-		OpenAIApiKey: "",
 		defaultModel: "Ask me",
 		defaultSystemPrompt: `As an AI assistant within Obsidian, your primary goal is to help users manage their ideas and knowledge more effectively. Format your responses using Markdown syntax. Please use the [[Obsidian]] link format. You can write aliases for the links by writing [[Obsidian|the alias after the pipe symbol]]. To use mathematical notation, use LaTeX syntax. LaTeX syntax for larger equations should be on separate lines, surrounded with double dollar signs ($$). You can also inline math expressions by wrapping it in $ symbols.
For example, use $$w_{ij}^{\text{new}}:=w_{ij}^{\text{current}}+\eta\cdot\delta_j\cdot x_{ij}$$ on a separate line, but you can write "($\eta$ = learning rate, $\delta_j$ = error term, $x_{ij}$ = input)" inline.`, promptTemplatesFolderPath: "", showAssistant: true, + providers: DefaultProviders, }, migrations: { migrateToMacroIDFromEmbeddedMacro: false, @@ -59,6 +61,7 @@ export const DEFAULT_SETTINGS: QuickAddSettings = { incrementFileNameSettingMoveToDefaultBehavior: false, mutualExclusionInsertAfterAndWriteToBottomOfFile: false, setVersionAfterUpdateModalRelease: false, + addDefaultAIProviders: false, }, }; diff --git a/src/types/macros/QuickCommands/AIAssistantCommand.ts b/src/types/macros/QuickCommands/AIAssistantCommand.ts index 5ca52f0..80fd8b2 100644 --- a/src/types/macros/QuickCommands/AIAssistantCommand.ts +++ b/src/types/macros/QuickCommands/AIAssistantCommand.ts @@ -1,4 +1,3 @@ -import type { Models_And_Ask_Me } from "src/ai/models"; import { Command } from "../Command"; import { CommandType } from "../CommandType"; import type { IAIAssistantCommand } from "./IAIAssistantCommand"; @@ -11,7 +10,7 @@ export class AIAssistantCommand extends Command implements IAIAssistantCommand { name: string; type: CommandType; - model: Models_And_Ask_Me; + model: string; systemPrompt: string; outputVariableName: string; promptTemplate: { diff --git a/src/types/macros/QuickCommands/IAIAssistantCommand.ts b/src/types/macros/QuickCommands/IAIAssistantCommand.ts index 723aa6b..40b3a61 100644 --- a/src/types/macros/QuickCommands/IAIAssistantCommand.ts +++ b/src/types/macros/QuickCommands/IAIAssistantCommand.ts @@ -1,4 +1,3 @@ -import type { Model, Models_And_Ask_Me } from "src/ai/models"; import type { ICommand } from "../ICommand"; import type { OpenAIModelParameters } from "src/ai/OpenAIModelParameters"; @@ -10,7 +9,7 @@ interface IBaseAIAssistantCommand extends ICommand { } export interface IAIAssistantCommand extends IBaseAIAssistantCommand { - model: Models_And_Ask_Me; + model: string; promptTemplate: { enable: boolean; name: string; @@ -18,7 +17,7 @@ export interface IAIAssistantCommand extends IBaseAIAssistantCommand { } export interface IInfiniteAIAssistantCommand extends IBaseAIAssistantCommand { - model: Model; + model: string; resultJoiner: string; chunkSeparator: string; maxChunkTokens: number;