From 9ebeba39d631761316516ce1c949f2fdb320e454 Mon Sep 17 00:00:00 2001 From: Christian Bager Bach Houmann Date: Sun, 3 Mar 2024 16:49:45 +0100 Subject: [PATCH 01/16] add provider abstraction & default providers --- src/ai/Provider.ts | 84 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 84 insertions(+) create mode 100644 src/ai/Provider.ts diff --git a/src/ai/Provider.ts b/src/ai/Provider.ts new file mode 100644 index 0000000..69fdf4c --- /dev/null +++ b/src/ai/Provider.ts @@ -0,0 +1,84 @@ +export interface AIProvider { + name: string; + endpoint: string; + apiKey: string; + models: Model[]; +} + +export interface Model { + name: string; + maxTokens: number; +} + +const OpenAIProvider: AIProvider = { + name: "OpenAI", + endpoint: "https://api.openai.com/v1", + apiKey: "", + models: [ + { + name: "gpt-3.5-turbo", + maxTokens: 4096, + }, + { + name: "gpt-3.5-turbo-16k", + maxTokens: 16384, + }, + { + name: "gpt-3.5-turbo-1106", + maxTokens: 16385, + }, + { + name: "gpt-4", + maxTokens: 8192, + }, + { + name: "gpt-4-32k", + maxTokens: 32768, + }, + { + name: "gpt-4-1106-preview", + maxTokens: 128000, + }, + { + name: "text-davinci-003", + maxTokens: 4096, + }, + ], +}; + +const OllamaLocalAIProvider: AIProvider = { + name: "Ollama Local", + endpoint: "http://localhost:3000/v1", + apiKey: "", + models: [], +}; + +const TogetherXYZAIProvider: AIProvider = { + name: "Together.ai", + endpoint: "https://api.together.xyz/v1", + apiKey: "", + models: [], +}; + +const GroqAPIProvider: AIProvider = { + name: "Groq", + endpoint: "https://api.groq.com/openai/v1", + apiKey: "", + models: [ + { + name: "LLaMA2-70b", + maxTokens: 4096, + }, + { + name: "Mixtral-8x7b", + maxTokens: 32768, + }, + ], +}; + +export const DefaultProviders: AIProvider[] = [ + OpenAIProvider, + OllamaLocalAIProvider, + TogetherXYZAIProvider, + GroqAPIProvider, +]; From 78b936c7e895ccef114e7d171c82a93e3e5d8b5e Mon Sep 17 00:00:00 2001 From: Christian Bager Bach Houmann Date: Sun, 3 Mar 2024 16:50:25 +0100 Subject: [PATCH 02/16] add providers to settings & default providers migration --- src/migrations/addDefaultAIProviders.ts | 29 +++++++++++++++++++++++++ src/migrations/migrate.ts | 4 +++- src/quickAddSettingsTab.ts | 5 +++++ 3 files changed, 37 insertions(+), 1 deletion(-) create mode 100644 src/migrations/addDefaultAIProviders.ts diff --git a/src/migrations/addDefaultAIProviders.ts b/src/migrations/addDefaultAIProviders.ts new file mode 100644 index 0000000..4a5f73b --- /dev/null +++ b/src/migrations/addDefaultAIProviders.ts @@ -0,0 +1,29 @@ +import { DefaultProviders } from "src/ai/Provider"; +import type { Migration } from "./Migrations"; +import { settingsStore } from "src/settingsStore"; + +const addDefaultAIProviders: Migration = { + description: "Add default AI providers to the settings.", + // eslint-disable-next-line @typescript-eslint/require-await + migrate: async (_) => { + const { OpenAIApiKey } = settingsStore.getState().ai; + const defaultProvidersWithOpenAIKey = DefaultProviders.map( + (provider) => { + if (provider.name === "OpenAI") { + provider.apiKey = OpenAIApiKey; + } + + return provider; + } + ); + + settingsStore.setState({ + ai: { + ...settingsStore.getState().ai, + providers: defaultProvidersWithOpenAIKey, + }, + }); + }, +}; + +export default addDefaultAIProviders; diff --git a/src/migrations/migrate.ts b/src/migrations/migrate.ts index d624b34..2dfee3f 100644 --- a/src/migrations/migrate.ts +++ b/src/migrations/migrate.ts @@ -6,13 +6,15 @@ import useQuickAddTemplateFolder from 
"./useQuickAddTemplateFolder"; import incrementFileNameSettingMoveToDefaultBehavior from "./incrementFileNameSettingMoveToDefaultBehavior"; import mutualExclusionInsertAfterAndWriteToBottomOfFile from "./mutualExclusionInsertAfterAndWriteToBottomOfFile"; import setVersionAfterUpdateModalRelease from "./setVersionAfterUpdateModalRelease"; +import addDefaultAIProviders from "./addDefaultAIProviders"; const migrations: Migrations = { migrateToMacroIDFromEmbeddedMacro, useQuickAddTemplateFolder, incrementFileNameSettingMoveToDefaultBehavior, mutualExclusionInsertAfterAndWriteToBottomOfFile, - setVersionAfterUpdateModalRelease + setVersionAfterUpdateModalRelease, + addDefaultAIProviders, }; async function migrate(plugin: QuickAdd): Promise { diff --git a/src/quickAddSettingsTab.ts b/src/quickAddSettingsTab.ts index df58534..c720f14 100644 --- a/src/quickAddSettingsTab.ts +++ b/src/quickAddSettingsTab.ts @@ -7,6 +7,7 @@ import type { IMacro } from "./types/macros/IMacro"; import { GenericTextSuggester } from "./gui/suggesters/genericTextSuggester"; import { settingsStore } from "./settingsStore"; import type { Models_And_Ask_Me } from "./ai/models"; +import { DefaultProviders, type AIProvider } from "./ai/Provider"; export interface QuickAddSettings { choices: IChoice[]; @@ -27,6 +28,7 @@ export interface QuickAddSettings { defaultSystemPrompt: string; promptTemplatesFolderPath: string; showAssistant: boolean; + providers: AIProvider[]; }; migrations: { migrateToMacroIDFromEmbeddedMacro: boolean; @@ -34,6 +36,7 @@ export interface QuickAddSettings { incrementFileNameSettingMoveToDefaultBehavior: boolean; mutualExclusionInsertAfterAndWriteToBottomOfFile: boolean; setVersionAfterUpdateModalRelease: boolean; + addDefaultAIProviders: boolean; }; } @@ -52,6 +55,7 @@ export const DEFAULT_SETTINGS: QuickAddSettings = { defaultSystemPrompt: `As an AI assistant within Obsidian, your primary goal is to help users manage their ideas and knowledge more effectively. Format your responses using Markdown syntax. Please use the [[Obsidian]] link format. You can write aliases for the links by writing [[Obsidian|the alias after the pipe symbol]]. To use mathematical notation, use LaTeX syntax. LaTeX syntax for larger equations should be on separate lines, surrounded with double dollar signs ($$). You can also inline math expressions by wrapping it in $ symbols. 
For example, use $$w_{ij}^{\text{new}}:=w_{ij}^{\text{current}}+\eta\cdot\delta_j\cdot x_{ij}$$ on a separate line, but you can write "($\eta$ = learning rate, $\delta_j$ = error term, $x_{ij}$ = input)" inline.`, promptTemplatesFolderPath: "", showAssistant: true, + providers: DefaultProviders, }, migrations: { migrateToMacroIDFromEmbeddedMacro: false, @@ -59,6 +63,7 @@ export const DEFAULT_SETTINGS: QuickAddSettings = { incrementFileNameSettingMoveToDefaultBehavior: false, mutualExclusionInsertAfterAndWriteToBottomOfFile: false, setVersionAfterUpdateModalRelease: false, + addDefaultAIProviders: false, }, }; From 8007cdd33ffce07dbf71b5288b21575146c88bed Mon Sep 17 00:00:00 2001 From: Christian Bager Bach Houmann Date: Sun, 3 Mar 2024 17:12:58 +0100 Subject: [PATCH 03/16] remove openaikey as part of settings --- src/gui/AIAssistantSettingsModal.ts | 44 ++++++++++++++----------- src/migrations/addDefaultAIProviders.ts | 13 ++++++-- src/quickAddSettingsTab.ts | 6 ++-- 3 files changed, 37 insertions(+), 26 deletions(-) diff --git a/src/gui/AIAssistantSettingsModal.ts b/src/gui/AIAssistantSettingsModal.ts index 4968412..3d29683 100644 --- a/src/gui/AIAssistantSettingsModal.ts +++ b/src/gui/AIAssistantSettingsModal.ts @@ -1,11 +1,10 @@ import { Modal, Setting, TextAreaComponent } from "obsidian"; -import type { Models_And_Ask_Me } from "src/ai/models"; -import { models_and_ask_me } from "src/ai/models"; import type { QuickAddSettings } from "src/quickAddSettingsTab"; -import { setPasswordOnBlur } from "src/utils/setPasswordOnBlur"; import { FormatSyntaxSuggester } from "./suggesters/formatSyntaxSuggester"; import QuickAdd from "src/main"; import { FormatDisplayFormatter } from "src/formatters/formatDisplayFormatter"; +import { AIAssistantProvidersModal } from "./AIAssistantProvidersModal"; +import { getModelNames } from "src/ai/aiHelpers"; type AIAssistantSettings = QuickAddSettings["ai"]; @@ -38,7 +37,7 @@ export class AIAssistantSettingsModal extends Modal { text: "AI Assistant Settings", }).style.textAlign = "center"; - this.addApiKeySetting(this.contentEl); + this.addProvidersSetting(this.contentEl); this.addDefaultModelSetting(this.contentEl); this.addPromptTemplateFolderPathSetting(this.contentEl); this.addShowAssistantSetting(this.contentEl); @@ -52,32 +51,37 @@ export class AIAssistantSettingsModal extends Modal { this.display(); } - addApiKeySetting(container: HTMLElement) { + addProvidersSetting(container: HTMLElement) { new Setting(container) - .setName("API Key") - .setDesc("The API Key for the AI Assistant") - .addText((text) => { - setPasswordOnBlur(text.inputEl); - text.setValue(this.settings.OpenAIApiKey).onChange((value) => { - this.settings.OpenAIApiKey = value; + .setName("Providers") + .setDesc("The providers for the AI Assistant") + .addButton((button) => { + button.setButtonText("Edit Providers").onClick(() => { + void new AIAssistantProvidersModal( + this.settings.providers, + app + ).waitForClose.then(() => { + this.reload(); + }); }); - - text.inputEl.placeholder = "sk-..."; }); } - + addDefaultModelSetting(container: HTMLElement) { new Setting(container) .setName("Default Model") .setDesc("The default model for the AI Assistant") .addDropdown((dropdown) => { - for (const model of models_and_ask_me) { + const models = getModelNames(); + for (const model of models) { dropdown.addOption(model, model); } + dropdown.addOption("Ask me", "Ask me"); + dropdown.setValue(this.settings.defaultModel); dropdown.onChange((value) => { - this.settings.defaultModel = value as 
Models_And_Ask_Me; + this.settings.defaultModel = value; }); }); } @@ -144,8 +148,8 @@ export class AIAssistantSettingsModal extends Modal { )))(); } - onClose(): void { - this.resolvePromise(this.settings); - super.onClose(); - } + onClose(): void { + this.resolvePromise(this.settings); + super.onClose(); + } } diff --git a/src/migrations/addDefaultAIProviders.ts b/src/migrations/addDefaultAIProviders.ts index 4a5f73b..248593d 100644 --- a/src/migrations/addDefaultAIProviders.ts +++ b/src/migrations/addDefaultAIProviders.ts @@ -6,23 +6,32 @@ const addDefaultAIProviders: Migration = { description: "Add default AI providers to the settings.", // eslint-disable-next-line @typescript-eslint/require-await migrate: async (_) => { - const { OpenAIApiKey } = settingsStore.getState().ai; + const ai = settingsStore.getState().ai; + const defaultProvidersWithOpenAIKey = DefaultProviders.map( (provider) => { if (provider.name === "OpenAI") { - provider.apiKey = OpenAIApiKey; + if ("OpenAIApiKey" in ai && typeof ai.OpenAIApiKey === "string") { + provider.apiKey = ai.OpenAIApiKey; + } } return provider; } ); + if ("OpenAIApiKey" in ai) { + delete ai.OpenAIApiKey; + } + settingsStore.setState({ ai: { ...settingsStore.getState().ai, providers: defaultProvidersWithOpenAIKey, }, }); + + }, }; diff --git a/src/quickAddSettingsTab.ts b/src/quickAddSettingsTab.ts index c720f14..0a3b054 100644 --- a/src/quickAddSettingsTab.ts +++ b/src/quickAddSettingsTab.ts @@ -6,7 +6,7 @@ import ChoiceView from "./gui/choiceList/ChoiceView.svelte"; import type { IMacro } from "./types/macros/IMacro"; import { GenericTextSuggester } from "./gui/suggesters/genericTextSuggester"; import { settingsStore } from "./settingsStore"; -import type { Models_And_Ask_Me } from "./ai/models"; +import type { Model } from "./ai/Provider"; import { DefaultProviders, type AIProvider } from "./ai/Provider"; export interface QuickAddSettings { @@ -23,8 +23,7 @@ export interface QuickAddSettings { */ disableOnlineFeatures: boolean; ai: { - OpenAIApiKey: string; - defaultModel: Models_And_Ask_Me; + defaultModel: Model["name"] | "Ask me"; defaultSystemPrompt: string; promptTemplatesFolderPath: string; showAssistant: boolean; @@ -50,7 +49,6 @@ export const DEFAULT_SETTINGS: QuickAddSettings = { version: "0.0.0", disableOnlineFeatures: true, ai: { - OpenAIApiKey: "", defaultModel: "Ask me", defaultSystemPrompt: `As an AI assistant within Obsidian, your primary goal is to help users manage their ideas and knowledge more effectively. Format your responses using Markdown syntax. Please use the [[Obsidian]] link format. You can write aliases for the links by writing [[Obsidian|the alias after the pipe symbol]]. To use mathematical notation, use LaTeX syntax. LaTeX syntax for larger equations should be on separate lines, surrounded with double dollar signs ($$). You can also inline math expressions by wrapping it in $ symbols. 
For example, use $$w_{ij}^{\text{new}}:=w_{ij}^{\text{current}}+\eta\cdot\delta_j\cdot x_{ij}$$ on a separate line, but you can write "($\eta$ = learning rate, $\delta_j$ = error term, $x_{ij}$ = input)" inline.`, promptTemplatesFolderPath: "", From bf171b040412c23958921a45f167764d13dde25a Mon Sep 17 00:00:00 2001 From: Christian Bager Bach Houmann Date: Sun, 3 Mar 2024 17:13:22 +0100 Subject: [PATCH 04/16] create providers modal --- src/gui/AIAssistantProvidersModal.ts | 280 +++++++++++++++++++++++++++ 1 file changed, 280 insertions(+) create mode 100644 src/gui/AIAssistantProvidersModal.ts diff --git a/src/gui/AIAssistantProvidersModal.ts b/src/gui/AIAssistantProvidersModal.ts new file mode 100644 index 0000000..8ffc381 --- /dev/null +++ b/src/gui/AIAssistantProvidersModal.ts @@ -0,0 +1,280 @@ +/* eslint-disable @typescript-eslint/no-non-null-assertion */ +import type { App } from "obsidian"; +import { ButtonComponent, Modal, Setting } from "obsidian"; +import type { AIProvider } from "src/ai/Provider"; +import { setPasswordOnBlur } from "src/utils/setPasswordOnBlur"; +import GenericInputPrompt from "./GenericInputPrompt/GenericInputPrompt"; +import GenericYesNoPrompt from "./GenericYesNoPrompt/GenericYesNoPrompt"; +import type { IconType } from "src/types/IconType"; + +export class AIAssistantProvidersModal extends Modal { + public waitForClose: Promise; + + private resolvePromise: (settings: AIProvider[]) => void; + private rejectPromise: (reason?: unknown) => void; + + private providers: AIProvider[]; + private selectedProvider: AIProvider | null; + + private _selectedProviderClone: AIProvider | null; + + constructor(providers: AIProvider[], app: App) { + super(app); + + this.providers = providers; + + this.waitForClose = new Promise((resolve, reject) => { + this.rejectPromise = reject; + this.resolvePromise = resolve; + }); + + this.open(); + this.display(); + } + + private display(): void { + const modalName = this.selectedProvider + ? 
`${this.selectedProvider.name}` + : "Providers"; + + this.contentEl.createEl("h2", { + text: modalName, + }).style.textAlign = "center"; + + if (this.selectedProvider) { + this.addProviderSetting(this.contentEl); + + return; + } + + this.addProvidersSetting(this.contentEl); + } + + private reload(): void { + this.contentEl.empty(); + + this.display(); + } + + addProvidersSetting(container: HTMLElement) { + new Setting(container) + .setName("Providers") + .setDesc("Providers for the AI Assistant") + .addButton((button) => { + button.setButtonText("Add Provider").onClick(async () => { + const providerName = await GenericInputPrompt.Prompt( + app, + "Provider Name" + ); + + this.providers.push({ + name: providerName, + endpoint: "", + apiKey: "", + models: [], + }); + + this.reload(); + }); + + button.setCta(); + }); + + const providersContainer = container.createDiv("providers-container"); + providersContainer.style.display = "flex"; + providersContainer.style.flexDirection = "column"; + providersContainer.style.gap = "10px"; + providersContainer.style.overflowY = "auto"; + providersContainer.style.maxHeight = "400px"; + providersContainer.style.padding = "10px"; + + this.providers.forEach((provider, i) => { + new Setting(providersContainer) + .setName(provider.name) + .setDesc(provider.endpoint) + .addButton((button) => { + button.onClick(async () => { + const confirmation = await GenericYesNoPrompt.Prompt( + app, + `Are you sure you want to delete ${provider.name}?` + ); + if (!confirmation) { + return; + } + + this.providers.splice(i, 1); + this.reload(); + }); + button.setWarning(); + button.setIcon("trash" as IconType); + }) + .addButton((button) => { + button.setButtonText("Edit").onClick(() => { + this.selectedProvider = provider; + this._selectedProviderClone = structuredClone(provider); + + this.reload(); + }); + }); + }); + } + + addProviderSetting(container: HTMLElement) { + this.addNameSetting(container); + this.addEndpointSetting(container); + this.addApiKeySetting(container); + + this.addProviderModelsSetting(container); + + this.addProviderSettingButtonRow(this.contentEl); + } + + addNameSetting(container: HTMLElement) { + new Setting(container) + .setName("Name") + .setDesc("The name of the provider") + .addText((text) => { + text.setValue(this.selectedProvider!.name).onChange((value) => { + this.selectedProvider!.name = value; + }); + }); + } + + addEndpointSetting(container: HTMLElement) { + new Setting(container) + .setName("Endpoint") + .setDesc("The endpoint for the AI Assistant") + .addText((text) => { + text.setValue(this.selectedProvider!.endpoint).onChange( + (value) => { + this.selectedProvider!.endpoint = value; + } + ); + }); + } + + addApiKeySetting(container: HTMLElement) { + new Setting(container) + .setName("API Key") + .setDesc("The API Key for the AI Assistant") + .addText((text) => { + setPasswordOnBlur(text.inputEl); + text.setValue(this.selectedProvider!.apiKey).onChange( + (value) => { + this.selectedProvider!.apiKey = value; + } + ); + }); + } + + addProviderModelsSetting(container: HTMLElement) { + const modelsContainer = container.createDiv("models-container"); + modelsContainer.style.display = "flex"; + modelsContainer.style.flexDirection = "column"; + modelsContainer.style.gap = "10px"; + modelsContainer.style.overflowY = "auto"; + modelsContainer.style.maxHeight = "400px"; + modelsContainer.style.padding = "10px"; + + this.selectedProvider!.models.forEach((model, i) => { + new Setting(modelsContainer) + .setName(model.name) + .setDesc(`Max Tokens: 
${model.maxTokens}`) + .addButton((button) => { + button.onClick(async () => { + const confirmation = await GenericYesNoPrompt.Prompt( + app, + `Are you sure you want to delete ${model.name}?` + ); + if (!confirmation) { + return; + } + + this.selectedProvider!.models.splice(i, 1); + this.reload(); + }); + button.setWarning(); + button.setIcon("trash" as IconType); + }); + }); + + new Setting(modelsContainer) + .setName("Add Model") + .addButton((button) => { + button.setButtonText("Add Model").onClick(async () => { + const modelName = await GenericInputPrompt.Prompt( + app, + "Model Name" + ); + const maxTokens = await GenericInputPrompt.Prompt( + app, + "Max Tokens" + ); + + this.selectedProvider!.models.push({ + name: modelName, + maxTokens: parseInt(maxTokens), + }); + + this.reload(); + }); + button.setCta(); + }); + } + + addProviderSettingButtonRow(container: HTMLElement) { + const buttonRow = container.createDiv("button-row"); + buttonRow.style.display = "flex"; + buttonRow.style.justifyContent = "space-between"; + buttonRow.style.marginTop = "20px"; + + const CancelButton = new ButtonComponent(buttonRow); + CancelButton.setButtonText("Cancel"); + CancelButton.setWarning(); + CancelButton.onClick(() => { + if (!this.selectedProvider || !this._selectedProviderClone) return; + const noChangesMade = !checkObjectDiff( + this.selectedProvider, + this._selectedProviderClone + ); + + if (noChangesMade) { + this.selectedProvider = null; + this._selectedProviderClone = null; + + this.reload(); + return; + } + + Object.assign(this.selectedProvider, this._selectedProviderClone); + this.selectedProvider = this._selectedProviderClone; + this._selectedProviderClone = null; + + this.close(); + }); + + const SaveButton = new ButtonComponent(buttonRow); + SaveButton.setButtonText("Save"); + SaveButton.setCta(); + SaveButton.onClick(() => { + this.selectedProvider = null; + this.reload(); + }); + } + + onClose(): void { + if (this.selectedProvider) { + // go back to main view + this.selectedProvider = null; + this.reload(); + this.open(); + } + + this.resolvePromise(this.providers); + super.onClose(); + } +} + +function checkObjectDiff(obj1: unknown, obj2: unknown) { + return JSON.stringify(obj1) !== JSON.stringify(obj2); +} From 141b813795af441adc50ca91413d5436ba7a5e0e Mon Sep 17 00:00:00 2001 From: Christian Bager Bach Houmann Date: Sun, 3 Mar 2024 17:19:40 +0100 Subject: [PATCH 05/16] update api to accomodate new typing --- src/ai/AIAssistant.ts | 8 ++-- src/ai/OpenAIRequest.ts | 7 ++- src/ai/aiHelpers.ts | 39 +++++++++++++++ src/ai/getModelMaxTokens.ts | 21 --------- src/ai/models.ts | 4 -- src/engine/MacroChoiceEngine.ts | 14 ++++-- .../AIAssistantCommandSettingsModal.ts | 10 ++-- ...AIAssistantInfiniteCommandSettingsModal.ts | 21 ++++++--- src/quickAddApi.ts | 47 ++++++++++++++++--- 9 files changed, 117 insertions(+), 54 deletions(-) create mode 100644 src/ai/aiHelpers.ts delete mode 100644 src/ai/getModelMaxTokens.ts delete mode 100644 src/ai/models.ts diff --git a/src/ai/AIAssistant.ts b/src/ai/AIAssistant.ts index 27a68b5..4e2ff66 100644 --- a/src/ai/AIAssistant.ts +++ b/src/ai/AIAssistant.ts @@ -1,5 +1,4 @@ import GenericSuggester from "src/gui/GenericSuggester/genericSuggester"; -import type { Model } from "./models"; import { TFile } from "obsidian"; import { getMarkdownFilesInFolder } from "src/utilityObsidian"; import invariant from "src/utils/invariant"; @@ -8,11 +7,12 @@ import { settingsStore } from "src/settingsStore"; import { encodingForModel } from "js-tiktoken"; import { 
OpenAIRequest } from "./OpenAIRequest"; import { makeNoticeHandler } from "./makeNoticeHandler"; -import { getModelMaxTokens } from "./getModelMaxTokens"; +import type { Model } from "./Provider"; +import { getModelMaxTokens } from "./aiHelpers"; export const getTokenCount = (text: string, model: Model) => { // gpt-3.5-turbo-16k is a special case - it isn't in the library list yet. Same with gpt-4-1106-preview and gpt-3.5-turbo-1106. - let m = model === "gpt-3.5-turbo-16k" ? "gpt-3.5-turbo" : model; + let m = model.name === "gpt-3.5-turbo-16k" ? "gpt-3.5-turbo" : model.name; m = m === "gpt-4-1106-preview" ? "gpt-4" : m; m = m === "gpt-3.5-turbo-1106" ? "gpt-3.5-turbo" : m; @@ -379,7 +379,7 @@ export async function ChunkedPrompt( ); const maxChunkTokenSize = - getModelMaxTokens(model) / 2 - systemPromptLength; // temp, need to impl. config + getModelMaxTokens(model.name) / 2 - systemPromptLength; // temp, need to impl. config // Whether we should strictly enforce the chunking rules or we should merge chunks that are too small const shouldMerge = settings.shouldMerge ?? true; // temp, need to impl. config diff --git a/src/ai/OpenAIRequest.ts b/src/ai/OpenAIRequest.ts index 220fa91..5ad531c 100644 --- a/src/ai/OpenAIRequest.ts +++ b/src/ai/OpenAIRequest.ts @@ -1,10 +1,9 @@ -import type { Model } from "./models"; import { requestUrl } from "obsidian"; import type { OpenAIModelParameters } from "./OpenAIModelParameters"; import { settingsStore } from "src/settingsStore"; import { getTokenCount } from "./AIAssistant"; -import { getModelMaxTokens } from "./getModelMaxTokens"; import { preventCursorChange } from "./preventCursorChange"; +import type { Model } from "./Provider"; type ReqResponse = { id: string; @@ -38,11 +37,11 @@ export function OpenAIRequest( const tokenCount = getTokenCount(prompt, model) + getTokenCount(systemPrompt, model); - const maxTokens = getModelMaxTokens(model); + const { maxTokens } = model; if (tokenCount > maxTokens) { throw new Error( - `The ${model} API has a token limit of ${maxTokens}. Your prompt has ${tokenCount} tokens.` + `The ${model.name} API has a token limit of ${maxTokens}. 
Your prompt has ${tokenCount} tokens.` ); } diff --git a/src/ai/aiHelpers.ts b/src/ai/aiHelpers.ts new file mode 100644 index 0000000..ff22fff --- /dev/null +++ b/src/ai/aiHelpers.ts @@ -0,0 +1,39 @@ +import { settingsStore } from "src/settingsStore"; + +export function getModelNames() { + const aiSettings = settingsStore.getState().ai; + + return aiSettings.providers + .flatMap((provider) => provider.models) + .map((model) => model.name); +} + +export function getModelByName(model: string) { + const aiSettings = settingsStore.getState().ai; + + return aiSettings.providers + .flatMap((provider) => provider.models) + .find((m) => m.name === model); +} + +export function getModelMaxTokens(model: string) { + const aiSettings = settingsStore.getState().ai; + + const modelData = aiSettings.providers + .flatMap((provider) => provider.models) + .find((m) => m.name === model); + + if (modelData) { + return modelData.maxTokens; + } + + throw new Error(`Model ${model} not found with any provider.`); +} + +export function getModelProvider(modelName: string) { + const aiSettings = settingsStore.getState().ai; + + return aiSettings.providers.find((provider) => + provider.models.some((m) => m.name === modelName) + ); +} diff --git a/src/ai/getModelMaxTokens.ts b/src/ai/getModelMaxTokens.ts deleted file mode 100644 index 74783d6..0000000 --- a/src/ai/getModelMaxTokens.ts +++ /dev/null @@ -1,21 +0,0 @@ -import type { Model } from "./models"; - - -export function getModelMaxTokens(model: Model) { - switch (model) { - case "text-davinci-003": - return 4096; - case "gpt-3.5-turbo": - return 4096; - case "gpt-4": - return 8192; - case "gpt-3.5-turbo-16k": - return 16384; - case "gpt-3.5-turbo-1106": - return 16385; - case "gpt-4-1106-preview": - return 128000; - case "gpt-4-32k": - return 32768; - } -} diff --git a/src/ai/models.ts b/src/ai/models.ts deleted file mode 100644 index b550f85..0000000 --- a/src/ai/models.ts +++ /dev/null @@ -1,4 +0,0 @@ -export const models = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-1106", "gpt-4", "gpt-4-1106-preview", "gpt-4-32k", "text-davinci-003"] as const; -export const models_and_ask_me = [...models, "Ask me"] as const; -export type Model = typeof models[number]; -export type Models_And_Ask_Me = typeof models_and_ask_me[number]; diff --git a/src/engine/MacroChoiceEngine.ts b/src/engine/MacroChoiceEngine.ts index 9dfc4b0..4f144c2 100644 --- a/src/engine/MacroChoiceEngine.ts +++ b/src/engine/MacroChoiceEngine.ts @@ -28,8 +28,8 @@ import { waitFor } from "src/utility"; import type { IAIAssistantCommand } from "src/types/macros/QuickCommands/IAIAssistantCommand"; import { runAIAssistant } from "src/ai/AIAssistant"; import { settingsStore } from "src/settingsStore"; -import { models } from "src/ai/models"; import { CompleteFormatter } from "src/formatters/completeFormatter"; +import { getModelNames, getModelProvider } from "src/ai/aiHelpers"; export class MacroChoiceEngine extends QuickAddChoiceEngine { public choice: IMacroChoice; @@ -305,7 +305,7 @@ export class MacroChoiceEngine extends QuickAddChoiceEngine { const aiSettings = settingsStore.getState().ai; - const options = [...models]; + const options = [...getModelNames()]; const model = command.model === "Ask me" ? 
await GenericSuggester.Suggest(app, options, options) @@ -316,9 +316,17 @@ export class MacroChoiceEngine extends QuickAddChoiceEngine { this.choiceExecutor ); + const modelProvider = getModelProvider(model); + + if (!modelProvider) { + throw new Error( + `Model ${model} not found in the AI providers settings.` + ); + } + const aiOutputVariables = await runAIAssistant( { - apiKey: aiSettings.OpenAIApiKey, + apiKey: modelProvider.apiKey, model, outputVariableName: command.outputVariableName, promptTemplate: command.promptTemplate, diff --git a/src/gui/MacroGUIs/AIAssistantCommandSettingsModal.ts b/src/gui/MacroGUIs/AIAssistantCommandSettingsModal.ts index acecb22..b85e409 100644 --- a/src/gui/MacroGUIs/AIAssistantCommandSettingsModal.ts +++ b/src/gui/MacroGUIs/AIAssistantCommandSettingsModal.ts @@ -1,6 +1,4 @@ import { Modal, Setting, TextAreaComponent, debounce } from "obsidian"; -import type { Models_And_Ask_Me } from "src/ai/models"; -import { models_and_ask_me } from "src/ai/models"; import { FormatSyntaxSuggester } from "./../suggesters/formatSyntaxSuggester"; import QuickAdd from "src/main"; import { FormatDisplayFormatter } from "src/formatters/formatDisplayFormatter"; @@ -16,6 +14,7 @@ import { DEFAULT_TOP_P, } from "src/ai/OpenAIModelParameters"; import { getTokenCount } from "src/ai/AIAssistant"; +import { getModelNames } from "src/ai/aiHelpers"; export class AIAssistantCommandSettingsModal extends Modal { public waitForClose: Promise; @@ -136,13 +135,16 @@ export class AIAssistantCommandSettingsModal extends Modal { .setName("Model") .setDesc("The model the AI Assistant will use") .addDropdown((dropdown) => { - for (const model of models_and_ask_me) { + const models = getModelNames(); + for (const model of models) { dropdown.addOption(model, model); } + dropdown.addOption("Ask me", "Ask me"); + dropdown.setValue(this.settings.model); dropdown.onChange((value) => { - this.settings.model = value as Models_And_Ask_Me; + this.settings.model = value; this.reload(); }); diff --git a/src/gui/MacroGUIs/AIAssistantInfiniteCommandSettingsModal.ts b/src/gui/MacroGUIs/AIAssistantInfiniteCommandSettingsModal.ts index 90592b2..4c6efdc 100644 --- a/src/gui/MacroGUIs/AIAssistantInfiniteCommandSettingsModal.ts +++ b/src/gui/MacroGUIs/AIAssistantInfiniteCommandSettingsModal.ts @@ -1,6 +1,4 @@ import { Modal, Setting, TextAreaComponent, debounce } from "obsidian"; -import type { Model } from "src/ai/models"; -import { models_and_ask_me } from "src/ai/models"; import { FormatSyntaxSuggester } from "./../suggesters/formatSyntaxSuggester"; import QuickAdd from "src/main"; import { FormatDisplayFormatter } from "src/formatters/formatDisplayFormatter"; @@ -13,7 +11,7 @@ import { DEFAULT_TOP_P, } from "src/ai/OpenAIModelParameters"; import { getTokenCount } from "src/ai/AIAssistant"; -import { getModelMaxTokens } from "src/ai/getModelMaxTokens"; +import { getModelByName, getModelNames } from "src/ai/aiHelpers"; export class InfiniteAIAssistantCommandSettingsModal extends Modal { public waitForClose: Promise; @@ -103,13 +101,16 @@ export class InfiniteAIAssistantCommandSettingsModal extends Modal { .setName("Model") .setDesc("The model the AI Assistant will use") .addDropdown((dropdown) => { - for (const model of models_and_ask_me) { + const models = getModelNames(); + for (const model of models) { dropdown.addOption(model, model); } + dropdown.addOption("Ask me", "Ask me"); + dropdown.setValue(this.settings.model); dropdown.onChange((value) => { - this.settings.model = value as Model; + 
this.settings.model = value; this.reload(); }); @@ -305,9 +306,15 @@ export class InfiniteAIAssistantCommandSettingsModal extends Modal { "The maximum number of tokens in each chunk, calculated as the chunk token size + prompt template token size + system prompt token size. Make sure you leave room for the model to respond to the prompt." ) .addSlider((slider) => { - const modelMaxTokens = getModelMaxTokens(this.settings.model); + const model = getModelByName(this.settings.model); + + if (!model) { + throw new Error( + `Model ${this.settings.model} not found in settings` + ); + } - slider.setLimits(1, modelMaxTokens - this.systemPromptTokenLength, 1); + slider.setLimits(1, model.maxTokens - this.systemPromptTokenLength, 1); slider.setDynamicTooltip(); slider.setValue(this.settings.maxChunkTokens); diff --git a/src/quickAddApi.ts b/src/quickAddApi.ts index 9e44303..a4cc7bc 100644 --- a/src/quickAddApi.ts +++ b/src/quickAddApi.ts @@ -14,9 +14,13 @@ import { MarkdownView } from "obsidian"; import GenericWideInputPrompt from "./gui/GenericWideInputPrompt/GenericWideInputPrompt"; import { ChunkedPrompt, Prompt, getTokenCount } from "./ai/AIAssistant"; import { settingsStore } from "./settingsStore"; -import { models, type Model } from "./ai/models"; import type { OpenAIModelParameters } from "./ai/OpenAIModelParameters"; -import { getModelMaxTokens } from "./ai/getModelMaxTokens"; +import type { Model } from "./ai/Provider"; +import { + getModelByName, + getModelNames, + getModelProvider, +} from "./ai/aiHelpers"; export class QuickAddApi { public static GetApi( @@ -127,11 +131,19 @@ export class QuickAddApi { choiceExecutor ).format; + const modelProvider = getModelProvider(model.name); + + if (!modelProvider) { + throw new Error( + `Model '${model.name}' not found in any provider` + ); + } + const assistantRes = await Prompt( { model, prompt, - apiKey: AISettings.OpenAIApiKey, + apiKey: modelProvider.apiKey, modelOptions: settings?.modelOptions ?? {}, outputVariableName: settings?.variableName ?? "output", @@ -189,13 +201,28 @@ export class QuickAddApi { choiceExecutor ).format; + const modelProvider = getModelProvider(model.name); + + if (!modelProvider) { + throw new Error( + `Model '${model.name}' not found in any provider` + ); + } + + + if (!modelProvider.apiKey) { + throw new Error( + `Model '${model.name}' requires an API key` + ); + } + const assistantRes = await ChunkedPrompt( { model, text, promptTemplate, chunkSeparator: settings?.chunkSeparator ?? /\n/, - apiKey: AISettings.OpenAIApiKey, + apiKey: modelProvider.apiKey, modelOptions: settings?.modelOptions ?? {}, outputVariableName: settings?.variableName ?? 
"output", @@ -230,10 +257,16 @@ export class QuickAddApi { return assistantRes; }, getModels: () => { - return models; + return getModelNames(); }, - getMaxTokens: (model: Model) => { - return getModelMaxTokens(model); + getMaxTokens: (modelName: string) => { + const model = getModelByName(modelName); + + if (!model) { + throw new Error(`Model ${modelName} not found.`); + } + + return model.maxTokens; }, countTokens(text: string, model: Model) { return getTokenCount(text, model); From 5b5aea93c1ed97b648e0eb2f50458cb94a95cacd Mon Sep 17 00:00:00 2001 From: Christian Bager Bach Houmann Date: Sun, 3 Mar 2024 17:28:46 +0100 Subject: [PATCH 06/16] tokenizer 'support' for non openai models --- src/ai/AIAssistant.ts | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/src/ai/AIAssistant.ts b/src/ai/AIAssistant.ts index 4e2ff66..773af50 100644 --- a/src/ai/AIAssistant.ts +++ b/src/ai/AIAssistant.ts @@ -4,7 +4,8 @@ import { getMarkdownFilesInFolder } from "src/utilityObsidian"; import invariant from "src/utils/invariant"; import type { OpenAIModelParameters } from "./OpenAIModelParameters"; import { settingsStore } from "src/settingsStore"; -import { encodingForModel } from "js-tiktoken"; +import type { TiktokenModel} from "js-tiktoken"; +import { encodingForModel, getEncoding } from "js-tiktoken"; import { OpenAIRequest } from "./OpenAIRequest"; import { makeNoticeHandler } from "./makeNoticeHandler"; import type { Model } from "./Provider"; @@ -16,7 +17,13 @@ export const getTokenCount = (text: string, model: Model) => { m = m === "gpt-4-1106-preview" ? "gpt-4" : m; m = m === "gpt-3.5-turbo-1106" ? "gpt-3.5-turbo" : m; - return encodingForModel(m).encode(text).length; + // kind of hacky, but we'll be using this general heuristic to support non-openai models + try { + return encodingForModel(m as TiktokenModel).encode(text).length; + } catch { + const enc = getEncoding("cl100k_base"); + return enc.encode(text).length; + } }; async function repeatUntilResolved( @@ -398,7 +405,10 @@ export async function ChunkedPrompt( if (strSize > maxCombinedChunkSize) { throw new Error( - `The chunk "${chunk.slice(0, 25)}..." is too large to fit in a single prompt.` + `The chunk "${chunk.slice( + 0, + 25 + )}..." 
is too large to fit in a single prompt.` ); } From 8a50eefd8c6990c3c8591aee37be64d91f36c85d Mon Sep 17 00:00:00 2001 From: Christian Bager Bach Houmann Date: Sun, 3 Mar 2024 17:43:18 +0100 Subject: [PATCH 07/16] update groq models --- src/ai/Provider.ts | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/src/ai/Provider.ts b/src/ai/Provider.ts index 69fdf4c..95a4f17 100644 --- a/src/ai/Provider.ts +++ b/src/ai/Provider.ts @@ -46,31 +46,17 @@ const OpenAIProvider: AIProvider = { ], }; -const OllamaLocalAIProvider: AIProvider = { - name: "Ollama Local", - endpoint: "http://localhost:3000/v1", - apiKey: "", - models: [], -}; - -const TogetherXYZAIProvider: AIProvider = { - name: "Together.ai", - endpoint: "https://api.together.xyz/v1", - apiKey: "", - models: [], -}; - const GroqAPIProvider: AIProvider = { name: "Groq", endpoint: "https://api.groq.com/openai/v1", apiKey: "", models: [ { - name: "LLaMA2-70b", + name: "llama2-70b-4096", maxTokens: 4096, }, { - name: "Mixtral-8x7b", + name: "mixtral-8x7b-32768", maxTokens: 32768, }, ], @@ -78,7 +64,5 @@ const GroqAPIProvider: AIProvider = { export const DefaultProviders: AIProvider[] = [ OpenAIProvider, - OllamaLocalAIProvider, - TogetherXYZAIProvider, GroqAPIProvider, ]; From 230c6575908faa625e76cb7d842a1e9a835e15aa Mon Sep 17 00:00:00 2001 From: Christian Bager Bach Houmann Date: Sun, 3 Mar 2024 17:43:29 +0100 Subject: [PATCH 08/16] fix api usage --- src/ai/OpenAIRequest.ts | 15 +++++++++++++-- src/engine/MacroChoiceEngine.ts | 20 ++++++++++++++++---- 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/src/ai/OpenAIRequest.ts b/src/ai/OpenAIRequest.ts index 5ad531c..f30b631 100644 --- a/src/ai/OpenAIRequest.ts +++ b/src/ai/OpenAIRequest.ts @@ -4,6 +4,7 @@ import { settingsStore } from "src/settingsStore"; import { getTokenCount } from "./AIAssistant"; import { preventCursorChange } from "./preventCursorChange"; import type { Model } from "./Provider"; +import { getModelProvider } from "./aiHelpers"; type ReqResponse = { id: string; @@ -45,17 +46,27 @@ export function OpenAIRequest( ); } + const modelProvider = getModelProvider(model.name); + + if (!modelProvider) { + throw new Error(`Model ${model.name} not found with any provider.`); + } + + console.log( + `Making request to ${modelProvider?.name} at ${modelProvider.endpoint} with model ${model.name}` + ); + try { const restoreCursor = preventCursorChange(); const _response = requestUrl({ - url: `https://api.openai.com/v1/chat/completions`, + url: `${modelProvider?.endpoint}/chat/completions`, method: "POST", headers: { "Content-Type": "application/json", Authorization: `Bearer ${apiKey}`, }, body: JSON.stringify({ - model, + model: model.name, ...modelParams, messages: [ { role: "system", content: systemPrompt }, diff --git a/src/engine/MacroChoiceEngine.ts b/src/engine/MacroChoiceEngine.ts index 4f144c2..a4a58e5 100644 --- a/src/engine/MacroChoiceEngine.ts +++ b/src/engine/MacroChoiceEngine.ts @@ -29,7 +29,12 @@ import type { IAIAssistantCommand } from "src/types/macros/QuickCommands/IAIAssi import { runAIAssistant } from "src/ai/AIAssistant"; import { settingsStore } from "src/settingsStore"; import { CompleteFormatter } from "src/formatters/completeFormatter"; -import { getModelNames, getModelProvider } from "src/ai/aiHelpers"; +import { + getModelByName, + getModelNames, + getModelProvider, +} from "src/ai/aiHelpers"; +import { Model } from "src/ai/Provider"; export class MacroChoiceEngine extends QuickAddChoiceEngine { public choice: 
IMacroChoice; @@ -306,21 +311,28 @@ export class MacroChoiceEngine extends QuickAddChoiceEngine { const aiSettings = settingsStore.getState().ai; const options = [...getModelNames()]; - const model = + const modelName: string = command.model === "Ask me" ? await GenericSuggester.Suggest(app, options, options) : command.model; + + const model: Model | undefined = getModelByName(modelName); + + if (!model) { + throw new Error(`Model ${modelName} not found with any provider.`); + } + const formatter = new CompleteFormatter( app, QuickAdd.instance, this.choiceExecutor ); - const modelProvider = getModelProvider(model); + const modelProvider = getModelProvider(model.name); if (!modelProvider) { throw new Error( - `Model ${model} not found in the AI providers settings.` + `Model ${model.name} not found in the AI providers settings.` ); } From 769b730658e78c1e30d67733f23f9a2b0377a0ac Mon Sep 17 00:00:00 2001 From: Christian Bager Bach Houmann Date: Sun, 3 Mar 2024 17:44:51 +0100 Subject: [PATCH 09/16] feat: Local LLM & external provider support for AI Assistant From 65e0aa01663cba1c413b3e865be52a7a8169b282 Mon Sep 17 00:00:00 2001 From: Christian Bager Bach Houmann Date: Sun, 3 Mar 2024 19:39:27 +0100 Subject: [PATCH 10/16] remove groq as default provider --- src/ai/Provider.ts | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/src/ai/Provider.ts b/src/ai/Provider.ts index 95a4f17..8c9ec74 100644 --- a/src/ai/Provider.ts +++ b/src/ai/Provider.ts @@ -46,23 +46,7 @@ const OpenAIProvider: AIProvider = { ], }; -const GroqAPIProvider: AIProvider = { - name: "Groq", - endpoint: "https://api.groq.com/openai/v1", - apiKey: "", - models: [ - { - name: "llama2-70b-4096", - maxTokens: 4096, - }, - { - name: "mixtral-8x7b-32768", - maxTokens: 32768, - }, - ], -}; export const DefaultProviders: AIProvider[] = [ OpenAIProvider, - GroqAPIProvider, ]; From 832f0d7e1ca2be26dc83f5ac5671d32503dea4e1 Mon Sep 17 00:00:00 2001 From: Christian Bager Bach Houmann Date: Sun, 3 Mar 2024 19:39:38 +0100 Subject: [PATCH 11/16] update error msg --- src/ai/OpenAIRequest.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ai/OpenAIRequest.ts b/src/ai/OpenAIRequest.ts index f30b631..e17a6e1 100644 --- a/src/ai/OpenAIRequest.ts +++ b/src/ai/OpenAIRequest.ts @@ -82,7 +82,7 @@ export function OpenAIRequest( } catch (error) { console.log(error); throw new Error( - `Error while making request to OpenAI API: ${ + `Error while making request to ${modelProvider.name}: ${ (error as { message: string }).message }` ); From dc57c692f4eb4c1c625d6f976a08b0ce0d1e67f8 Mon Sep 17 00:00:00 2001 From: Christian Bager Bach Houmann Date: Sun, 3 Mar 2024 19:40:12 +0100 Subject: [PATCH 12/16] docs: how to use local llm & external providers --- docs/docs/AIAssistant.md | 95 ++++++++++++++++++++++++++++++---------- 1 file changed, 71 insertions(+), 24 deletions(-) diff --git a/docs/docs/AIAssistant.md b/docs/docs/AIAssistant.md index 6d100c6..3a79275 100644 --- a/docs/docs/AIAssistant.md +++ b/docs/docs/AIAssistant.md @@ -3,14 +3,16 @@ title: AI Assistant --- # AI Assistant -The AI Assistant in QuickAdd leverages the power of OpenAI's GPT-3 and GPT-4 models to act as your personal AI assistant within Obsidian. It can streamline your workflows by automating routine tasks and providing intellectual support. To use this feature, you need the QuickAdd plugin and an OpenAI API key. + +The AI Assistant in QuickAdd leverages the power of Large Language Models (LLMs) to act as your personal AI assistant within Obsidian. 
It can streamline your workflows by automating routine tasks and providing intellectual support. To use this feature, you need the QuickAdd plugin and a provider you'd like to use.

## How to Setup the AI Assistant
+
To set up the AI Assistant, follow these steps:

1. In Obsidian, create a new folder dedicated to AI prompt templates, e.g. `bins/ai_prompts`.
2. Navigate to QuickAdd settings and locate the "AI Assistant" section. Specify the path to the folder you created in step 1.
-3. In the same section, paste your OpenAI API key into the "OpenAI API Key" field.
+3. In the same section, add a provider to get started. If you are using OpenAI, you will need to add your API key to the settings. As of v1.8.x, you need to enter your API key in the [provider](#providers) settings. The video below is from an older version, but the process is similar.

![AI Assistant Setup](./Images/AI_Assistant_Setup.gif)

@@ -31,46 +33,91 @@ Here's an example of how you can set up a prompt template:

You can also use AI Assistant features from within the [API](./QuickAddAPI.md).

+## Providers
+
+QuickAdd supports multiple providers for LLMs.
+The only requirement is that they are OpenAI-compatible, which means their API should be similar to OpenAI's.
+
+Here are a few providers that are known to work with QuickAdd:
+
+- [OpenAI](https://openai.com)
+- [TogetherAI](https://www.together.ai)
+- [Groq](https://groq.com)
+- [Ollama (local)](https://ollama.com)
+
+Paid providers expose their own API, which you can use with QuickAdd. Free providers, such as Ollama, are also supported.
+
+By default, QuickAdd will add the OpenAI provider. You can add more providers by clicking the "Add Provider" button in the AI Assistant settings.
+
+Here's a video showcasing adding Groq as a provider:
+
+
+### Local LLMs
+
+You can use your own machine to run LLMs. This is useful if you want to keep your data private, or if you want to use a specific model that isn't available on the cloud.
+To use a local LLM, you need to set up a server that can run the model.
+You can then add the server as a provider in QuickAdd.
+
+One such server is [Ollama](https://ollama.com). Ollama is a free, open-source, and self-hosted LLM server. You can set up Ollama on your own machine, and then use it as a provider in QuickAdd.
+You can find the [quick start documentation here](https://github.com/ollama/ollama/blob/main/README.md#quickstart).
+Ollama binds to port `11434` ([src](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-can-i-expose-ollama-on-my-network)), so your provider settings would be as follows:
+
+```
+Name: Ollama
+URL: http://localhost:11434/v1
+Api Key: (empty)
+```
+
+And that's it! You can now use Ollama as a provider in QuickAdd.
+Make sure you add the model you want to use. [mistral](https://ollama.com/library/mistral) is great.
+
## AI Assistant Settings
+
Within the main AI Assistant settings accessible via QuickAdd settings, you can configure the following options:

-- OpenAI API Key: The key to interact with OpenAI's models.
-- Prompt Templates Folder: The location where all your prompt templates reside.
-- Default model: The default OpenAI model to be used.
-- Show Assistant: Toggle for status messages.
-- Default System Prompt Template: Sets the behavior of the model.
+- Providers: The LLM providers (and their API keys) available to the AI Assistant.
+- Prompt Templates Folder: The location where all your prompt templates reside.
+- Default model: The default model to be used.
+- Show Assistant: Toggle for status messages.
+- Default System Prompt Template: Sets the behavior of the model.
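For reference, here is a minimal sketch of how a provider entry is represented internally, using the `AIProvider` and `Model` interfaces this PR introduces in `src/ai/Provider.ts`. It mirrors the Ollama setup from the Local LLMs section above; the `mistral` entry and its `maxTokens` value are illustrative assumptions, so use the context window of the model you actually pull:

```ts
import type { AIProvider } from "src/ai/Provider";

// Hypothetical entry mirroring the "Add Provider" flow described above.
// Ollama does not check the API key, so it is left empty.
const OllamaProvider: AIProvider = {
	name: "Ollama",
	endpoint: "http://localhost:11434/v1",
	apiKey: "",
	models: [
		// maxTokens is an assumption; set it to your model's real context window.
		{ name: "mistral", maxTokens: 8192 },
	],
};
```

Because every provider exposes the same OpenAI-compatible `/chat/completions` route, the request layer only needs an entry's `endpoint`, `apiKey`, and a model name to talk to any of them.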
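Since models are now defined per provider in settings, scripts can no longer rely on a hardcoded model list. A hedged sketch of what this looks like from a user script, assuming the `ai` namespace on the QuickAdd API object passed to macros (the helper names match the reworked `src/quickAddApi.ts`; the namespace and script shape are assumptions):

```ts
// Minimal user-script sketch; not the definitive API surface.
module.exports = async (params) => {
	const { ai } = params.quickAddApi;

	// Model names are now collected from every configured provider.
	const models = ai.getModels();

	// Max token counts come from the provider's model entry,
	// replacing the old hardcoded getModelMaxTokens() table.
	for (const name of models) {
		console.log(name, ai.getMaxTokens(name));
	}
};
```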
For each individual AI Assistant command in your macros, you can set these options:

-- Prompt Template: Determines the prompt template to use.
-- Model: Specifies the OpenAI model to use, overriding the default model.
-- Output Name Variable: Sets the variable name for the AI Assistant’s output.
-- System Prompt Template: Determines the models behavior, overriding the default system prompt template.
+- Prompt Template: Determines the prompt template to use.
+- Model: Specifies the model to use, overriding the default model.
+- Output Name Variable: Sets the variable name for the AI Assistant’s output.
+- System Prompt Template: Determines the model's behavior, overriding the default system prompt template.

You can also tweak model parameters in advanced settings:

-- **temperature:** Allows you to adjust the sampling temperature between 0 and 2. Higher values result in more random outputs, while lower values make the output more focused and deterministic.
-- **top_p:** This parameter relates to nucleus sampling. The model considers only the tokens comprising the top 'p' probability mass. For example, 0.1 means only tokens from the top 10% probability mass are considered.
-- **frequency_penalty:** A parameter ranging between -2.0 and 2.0. Positive values penalize new tokens based on their frequency in the existing text, reducing the model's tendency to repeat the same lines.
-- **presence_penalty:** Also ranging between -2.0 and 2.0, positive values penalize new tokens based on their presence in the existing text, encouraging the model to introduce new topics.
+- **temperature:** Allows you to adjust the sampling temperature between 0 and 2. Higher values result in more random outputs, while lower values make the output more focused and deterministic.
+- **top_p:** This parameter relates to nucleus sampling. The model considers only the tokens comprising the top 'p' probability mass. For example, 0.1 means only tokens from the top 10% probability mass are considered.
+- **frequency_penalty:** A parameter ranging between -2.0 and 2.0. Positive values penalize new tokens based on their frequency in the existing text, reducing the model's tendency to repeat the same lines.
+- **presence_penalty:** Also ranging between -2.0 and 2.0, positive values penalize new tokens based on their presence in the existing text, encouraging the model to introduce new topics.

## AI-Powered Workflows
+
You can create powerful workflows utilizing the AI Assistant. Some examples are:

-- **Generating Writing Prompts:** Using links to related notes to generate writing prompts.
-- **Summarizer:** Create summaries of selected text.
-- **Transform Selected:** Transform selected text based on provided instructions.
-- **Flashcard Creator:** Generate flashcards based on selected text.
-- **Get Me Started Writing About…:** Generate points to kickstart your writing on a given topic.
-- **Manual Prompt:** Provide a manual prompt to the AI assistant.
-- **Alternative Viewpoints:** Obtain alternative perspectives and improvements on your draft.
-- **Prompt Chaining:** Chain multiple prompts together, with each prompt using the output of the previous one.
+- **Generating Writing Prompts:** Using links to related notes to generate writing prompts.
+- **Summarizer:** Create summaries of selected text.
+- **Transform Selected:** Transform selected text based on provided instructions.
+- **Flashcard Creator:** Generate flashcards based on selected text.
+- **Get Me Started Writing About…:** Generate points to kickstart your writing on a given topic. +- **Manual Prompt:** Provide a manual prompt to the AI assistant. +- **Alternative Viewpoints:** Obtain alternative perspectives and improvements on your draft. +- **Prompt Chaining:** Chain multiple prompts together, with each prompt using the output of the previous one. All of these examples, and more, can be found in [Christian's blog post about the AI Assistant](https://bagerbach.com/blog/obsidian-ai). Please note, using the AI Assistant will incur costs depending on the API usage. Set spending limits on your OpenAI account to avoid unexpected expenses. Play around with different models to find the one that best suits your needs. ### Example: Summarizer + Here’s a simple prompt where you select some text, and then use the assistant with that prompt. Then it’ll spit out an AI-generated summary: @@ -79,4 +126,4 @@ Please summarize the following text. Use only the text itself as material for su {{value}} ``` -You can use the getting-started demonstration shown earlier to set this up. \ No newline at end of file +You can use the getting-started demonstration shown earlier to set this up. From 7c7ef293775cdf02b6a1c0766ea994b211aaf853 Mon Sep 17 00:00:00 2001 From: Christian Bager Bach Houmann Date: Sun, 3 Mar 2024 19:40:52 +0100 Subject: [PATCH 13/16] refactor: use model type import --- src/engine/MacroChoiceEngine.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/engine/MacroChoiceEngine.ts b/src/engine/MacroChoiceEngine.ts index a4a58e5..6f6e0e0 100644 --- a/src/engine/MacroChoiceEngine.ts +++ b/src/engine/MacroChoiceEngine.ts @@ -34,7 +34,7 @@ import { getModelNames, getModelProvider, } from "src/ai/aiHelpers"; -import { Model } from "src/ai/Provider"; +import type { Model } from "src/ai/Provider"; export class MacroChoiceEngine extends QuickAddChoiceEngine { public choice: IMacroChoice; From e0f991b8701da7398123473380f1bceb8ae85379 Mon Sep 17 00:00:00 2001 From: Christian Bager Bach Houmann Date: Sun, 3 Mar 2024 19:46:13 +0100 Subject: [PATCH 14/16] refactor: fix types --- src/gui/MacroGUIs/AIAssistantCommandSettingsModal.ts | 7 +++++-- .../MacroGUIs/AIAssistantInfiniteCommandSettingsModal.ts | 4 +++- src/types/macros/QuickCommands/AIAssistantCommand.ts | 3 +-- src/types/macros/QuickCommands/IAIAssistantCommand.ts | 5 ++--- 4 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/gui/MacroGUIs/AIAssistantCommandSettingsModal.ts b/src/gui/MacroGUIs/AIAssistantCommandSettingsModal.ts index b85e409..11023a6 100644 --- a/src/gui/MacroGUIs/AIAssistantCommandSettingsModal.ts +++ b/src/gui/MacroGUIs/AIAssistantCommandSettingsModal.ts @@ -14,7 +14,7 @@ import { DEFAULT_TOP_P, } from "src/ai/OpenAIModelParameters"; import { getTokenCount } from "src/ai/AIAssistant"; -import { getModelNames } from "src/ai/aiHelpers"; +import { getModelByName, getModelNames } from "src/ai/aiHelpers"; export class AIAssistantCommandSettingsModal extends Modal { public waitForClose: Promise; @@ -28,7 +28,10 @@ export class AIAssistantCommandSettingsModal extends Modal { private get systemPromptTokenLength(): number { if (this.settings.model === "Ask me") return Number.POSITIVE_INFINITY; - return getTokenCount(this.settings.systemPrompt, this.settings.model); + const model = getModelByName(this.settings.model); + if (!model) return Number.POSITIVE_INFINITY; + + return getTokenCount(this.settings.systemPrompt, model); } constructor(settings: IAIAssistantCommand) { diff --git 
a/src/gui/MacroGUIs/AIAssistantInfiniteCommandSettingsModal.ts b/src/gui/MacroGUIs/AIAssistantInfiniteCommandSettingsModal.ts index 4c6efdc..4e6370c 100644 --- a/src/gui/MacroGUIs/AIAssistantInfiniteCommandSettingsModal.ts +++ b/src/gui/MacroGUIs/AIAssistantInfiniteCommandSettingsModal.ts @@ -23,7 +23,9 @@ export class InfiniteAIAssistantCommandSettingsModal extends Modal { private showAdvancedSettings = false; private get systemPromptTokenLength(): number { - return getTokenCount(this.settings.systemPrompt, this.settings.model); + const model = getModelByName(this.settings.model); + if (!model) return Number.POSITIVE_INFINITY; + return getTokenCount(this.settings.systemPrompt, model); } constructor(settings: IInfiniteAIAssistantCommand) { diff --git a/src/types/macros/QuickCommands/AIAssistantCommand.ts b/src/types/macros/QuickCommands/AIAssistantCommand.ts index 5ca52f0..80fd8b2 100644 --- a/src/types/macros/QuickCommands/AIAssistantCommand.ts +++ b/src/types/macros/QuickCommands/AIAssistantCommand.ts @@ -1,4 +1,3 @@ -import type { Models_And_Ask_Me } from "src/ai/models"; import { Command } from "../Command"; import { CommandType } from "../CommandType"; import type { IAIAssistantCommand } from "./IAIAssistantCommand"; @@ -11,7 +10,7 @@ export class AIAssistantCommand extends Command implements IAIAssistantCommand { name: string; type: CommandType; - model: Models_And_Ask_Me; + model: string; systemPrompt: string; outputVariableName: string; promptTemplate: { diff --git a/src/types/macros/QuickCommands/IAIAssistantCommand.ts b/src/types/macros/QuickCommands/IAIAssistantCommand.ts index 723aa6b..40b3a61 100644 --- a/src/types/macros/QuickCommands/IAIAssistantCommand.ts +++ b/src/types/macros/QuickCommands/IAIAssistantCommand.ts @@ -1,4 +1,3 @@ -import type { Model, Models_And_Ask_Me } from "src/ai/models"; import type { ICommand } from "../ICommand"; import type { OpenAIModelParameters } from "src/ai/OpenAIModelParameters"; @@ -10,7 +9,7 @@ interface IBaseAIAssistantCommand extends ICommand { } export interface IAIAssistantCommand extends IBaseAIAssistantCommand { - model: Models_And_Ask_Me; + model: string; promptTemplate: { enable: boolean; name: string; @@ -18,7 +17,7 @@ export interface IAIAssistantCommand extends IBaseAIAssistantCommand { } export interface IInfiniteAIAssistantCommand extends IBaseAIAssistantCommand { - model: Model; + model: string; resultJoiner: string; chunkSeparator: string; maxChunkTokens: number; From 3f15d035f6173cdc58b72310d9af893c9e8e4fd1 Mon Sep 17 00:00:00 2001 From: Christian Bager Bach Houmann Date: Sun, 3 Mar 2024 19:51:44 +0100 Subject: [PATCH 15/16] refactor: remove console.log --- src/ai/OpenAIRequest.ts | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/ai/OpenAIRequest.ts b/src/ai/OpenAIRequest.ts index e17a6e1..dac73b4 100644 --- a/src/ai/OpenAIRequest.ts +++ b/src/ai/OpenAIRequest.ts @@ -52,14 +52,10 @@ export function OpenAIRequest( throw new Error(`Model ${model.name} not found with any provider.`); } - console.log( - `Making request to ${modelProvider?.name} at ${modelProvider.endpoint} with model ${model.name}` - ); - try { const restoreCursor = preventCursorChange(); const _response = requestUrl({ - url: `${modelProvider?.endpoint}/chat/completions`, + url: `${modelProvider.endpoint}/chat/completions`, method: "POST", headers: { "Content-Type": "application/json", From 0fc88ae4fe690b02bdefbef99a9ef43f502895e1 Mon Sep 17 00:00:00 2001 From: Christian Bager Bach Houmann Date: Sun, 3 Mar 2024 19:52:44 +0100 Subject: 
[PATCH 16/16] refactor: remove `...`

---
 src/engine/MacroChoiceEngine.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/engine/MacroChoiceEngine.ts b/src/engine/MacroChoiceEngine.ts
index 6f6e0e0..cd550ff 100644
--- a/src/engine/MacroChoiceEngine.ts
+++ b/src/engine/MacroChoiceEngine.ts
@@ -310,7 +310,7 @@ export class MacroChoiceEngine extends QuickAddChoiceEngine {
 		const aiSettings = settingsStore.getState().ai;

-		const options = [...getModelNames()];
+		const options = getModelNames();

 		const modelName: string =
 			command.model === "Ask me"
 				? await GenericSuggester.Suggest(app, options, options)