diff --git a/src/llms/openai.ts b/src/llms/openai.ts
index 5bc222e..d683852 100644
--- a/src/llms/openai.ts
+++ b/src/llms/openai.ts
@@ -188,8 +188,8 @@ const isDoneMessage = (message: any): message is DoneMessage => {
 }
 
 /**
- * Make a request to OpenAI's chat-completions API via the Grafana LLM plugin proxy.
- */
+ * Make a request to OpenAI's chat-completions API via the Grafana LLM plugin proxy.
+ */
 export async function chatCompletions(request: ChatCompletionsRequest): Promise<ChatCompletionsResponse> {
   const response = await getBackendSrv().post('/api/plugins/grafana-llm-app/resources/openai/v1/chat/completions', request, {
     headers: { 'Content-Type': 'application/json' }
@@ -198,11 +198,11 @@ export async function chatCompletions(request: ChatCompletionsRequest): Promise<ChatCompletionsResponse> {
 }
 
 /**
- * Make a streaming request to OpenAI's chat-completions API via the Grafana LLM plugin proxy.
- *
- * A stream of tokens will be returned as an `Observable`. Use rxjs' `scan` if you want
- * to produce a new stream containing the concatenated tokens so far.
- */
+ * Make a streaming request to OpenAI's chat-completions API via the Grafana LLM plugin proxy.
+ *
+ * A stream of tokens will be returned as an `Observable`. Use rxjs' `scan` if you want
+ * to produce a new stream containing the concatenated tokens so far.
+ */
 export function streamChatCompletions(request: ChatCompletionsRequest): Observable<ChatCompletionsResponse<ChatCompletionsChunk>> {
   const channel: LiveChannelAddress = {
     scope: LiveChannelScope.Plugin,
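
For context, a minimal usage sketch of the streaming function touched above, following the rxjs `scan` hint in its JSDoc. This snippet is not part of the diff: the request fields and the chunk shape (`choices[0].delta.content`) are assumptions based on OpenAI's chat-completions conventions, and the import path is hypothetical.

import { map, scan } from 'rxjs';
import { streamChatCompletions } from './openai';

// Start a streaming completion; the Observable emits one chunk per token.
const stream = streamChatCompletions({
  model: 'gpt-3.5-turbo',
  messages: [{ role: 'user', content: 'Summarize this dashboard.' }],
});

stream
  .pipe(
    // Assumed chunk shape: OpenAI-style, with the new token at choices[0].delta.content.
    map((chunk) => chunk.choices[0]?.delta?.content ?? ''),
    // Per the JSDoc hint, rxjs' `scan` concatenates tokens so that each
    // emission downstream is the full message so far, not a single token.
    scan((messageSoFar, token) => messageSoFar + token, '')
  )
  .subscribe((message) => console.log(message));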