Merge pull request #93 from grafana/llms-prettier
sd2k authored Dec 13, 2023
2 parents 52d27cd + 5aa4537 commit eb7a3a7
Showing 3 changed files with 105 additions and 72 deletions.
6 changes: 3 additions & 3 deletions src/llms/constants.ts
@@ -1,6 +1,6 @@
import { logWarning } from "@grafana/runtime";
import { logWarning } from '@grafana/runtime';

import { SemVer } from "semver";
import { SemVer } from 'semver';

export const LLM_PLUGIN_ID = 'grafana-llm-app';
export const LLM_PLUGIN_ROUTE = `/api/plugins/${LLM_PLUGIN_ID}`;
@@ -14,6 +14,6 @@ export function setLLMPluginVersion(version: string) {
try {
LLM_PLUGIN_VERSION = new SemVer(version);
} catch (e) {
logWarning('Failed to parse version of grafana-llm-app; assuming old version is present.')
logWarning('Failed to parse version of grafana-llm-app; assuming old version is present.');
}
}
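
Not part of this commit — a minimal sketch of how the exported helper above behaves, with placeholder version strings:

import { setLLMPluginVersion } from './constants';

// A parseable semver is stored via `LLM_PLUGIN_VERSION = new SemVer(version)` above.
setLLMPluginVersion('0.5.1');

// An unparseable string is caught: logWarning fires and the stored version is left
// unset, so callers fall back to assuming an old plugin version is present.
setLLMPluginVersion('not-a-version');
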
123 changes: 74 additions & 49 deletions src/llms/openai.ts
@@ -8,16 +8,21 @@
* The {@link enabled} function can be used to check if the plugin is enabled and configured.
*/

import { isLiveChannelMessageEvent, LiveChannelAddress, LiveChannelMessageEvent, LiveChannelScope } from "@grafana/data";
import { getBackendSrv, getGrafanaLiveSrv, logDebug, /* logError */ } from "@grafana/runtime";
import {
isLiveChannelMessageEvent,
LiveChannelAddress,
LiveChannelMessageEvent,
LiveChannelScope,
} from '@grafana/data';
import { getBackendSrv, getGrafanaLiveSrv, logDebug /* logError */ } from '@grafana/runtime';

import React, { useEffect, useCallback, useState } from 'react';
import { useAsync } from 'react-use';
import { pipe, Observable, UnaryFunction, Subscription } from "rxjs";
import { filter, map, scan, takeWhile, tap } from "rxjs/operators";
import { pipe, Observable, UnaryFunction, Subscription } from 'rxjs';
import { filter, map, scan, takeWhile, tap } from 'rxjs/operators';

import { LLM_PLUGIN_ID, LLM_PLUGIN_ROUTE, setLLMPluginVersion } from "./constants";
import { HealthCheckResponse, OpenAIHealthDetails } from "./types";
import { LLM_PLUGIN_ID, LLM_PLUGIN_ROUTE, setLLMPluginVersion } from './constants';
import { HealthCheckResponse, OpenAIHealthDetails } from './types';

const OPENAI_CHAT_COMPLETIONS_PATH = 'openai/v1/chat/completions';

@@ -229,17 +234,18 @@ export interface ChatCompletionsChunk {

/** Return true if the message is a 'content' message. */
export function isContentMessage(message: ChatCompletionsDelta): message is ContentMessage {
return 'content' in message
return 'content' in message;
}


/** Return true if the message is a 'done' message. */
export function isDoneMessage(message: ChatCompletionsDelta): message is DoneMessage {
return 'done' in message && message.done != null;
}

/** Return true if the response is an error response. */
export function isErrorResponse<T>(response: ChatCompletionsResponse<T> | ChatCompletionsErrorResponse): response is ChatCompletionsErrorResponse {
export function isErrorResponse<T>(
response: ChatCompletionsResponse<T> | ChatCompletionsErrorResponse
): response is ChatCompletionsErrorResponse {
return 'error' in response;
}

@@ -257,12 +263,17 @@ export function isErrorResponse<T>(response: ChatCompletionsResponse<T> | ChatCo
* // Output:
* // ['Hello', '! ', 'How ', 'are ', 'you', '?']
*/
export function extractContent(): UnaryFunction<Observable<ChatCompletionsResponse<ChatCompletionsChunk>>, Observable<string>> {
export function extractContent(): UnaryFunction<
Observable<ChatCompletionsResponse<ChatCompletionsChunk>>,
Observable<string>
> {
return pipe(
filter((response: ChatCompletionsResponse<ChatCompletionsChunk>) => isContentMessage(response.choices[0].delta)),
// The type assertion is needed here because the type predicate above doesn't seem to propagate.
map((response: ChatCompletionsResponse<ChatCompletionsChunk>) => (response.choices[0].delta as ContentMessage).content),
)
map(
(response: ChatCompletionsResponse<ChatCompletionsChunk>) => (response.choices[0].delta as ContentMessage).content
)
);
}

/**
@@ -279,20 +290,27 @@ export function extractContent(): UnaryFunction<Observable<ChatCompletionsRespon
* // Output:
* // ['Hello', 'Hello! ', 'Hello! How ', 'Hello! How are ', 'Hello! How are you', 'Hello! How are you?']
*/
export function accumulateContent(): UnaryFunction<Observable<ChatCompletionsResponse<ChatCompletionsChunk>>, Observable<string>> {
export function accumulateContent(): UnaryFunction<
Observable<ChatCompletionsResponse<ChatCompletionsChunk>>,
Observable<string>
> {
return pipe(
extractContent(),
scan((acc, curr) => acc + curr, ''),
scan((acc, curr) => acc + curr, '')
);
}

/**
* Make a request to OpenAI's chat-completions API via the Grafana LLM plugin proxy.
*/
export async function chatCompletions(request: ChatCompletionsRequest): Promise<ChatCompletionsResponse> {
const response = await getBackendSrv().post<ChatCompletionsResponse>('/api/plugins/grafana-llm-app/resources/openai/v1/chat/completions', request, {
headers: { 'Content-Type': 'application/json' }
});
const response = await getBackendSrv().post<ChatCompletionsResponse>(
'/api/plugins/grafana-llm-app/resources/openai/v1/chat/completions',
request,
{
headers: { 'Content-Type': 'application/json' },
}
);
return response;
}

@@ -323,7 +341,9 @@ export async function chatCompletions(request: ChatCompletionsRequest): Promise<
* // Output:
* // ['Hello', 'Hello! ', 'Hello! How ', 'Hello! How are ', 'Hello! How are you', 'Hello! How are you?']
*/
export function streamChatCompletions(request: ChatCompletionsRequest): Observable<ChatCompletionsResponse<ChatCompletionsChunk>> {
export function streamChatCompletions(
request: ChatCompletionsRequest
): Observable<ChatCompletionsResponse<ChatCompletionsChunk>> {
const channel: LiveChannelAddress = {
scope: LiveChannelScope.Plugin,
namespace: LLM_PLUGIN_ID,
@@ -332,15 +352,17 @@ export function streamChatCompletions(request: ChatCompletionsRequest): Observab
};
const messages = getGrafanaLiveSrv()
.getStream(channel)
.pipe(filter((event) => isLiveChannelMessageEvent(event))) as Observable<LiveChannelMessageEvent<ChatCompletionsResponse<ChatCompletionsChunk>>>
.pipe(filter((event) => isLiveChannelMessageEvent(event))) as Observable<
LiveChannelMessageEvent<ChatCompletionsResponse<ChatCompletionsChunk>>
>;
return messages.pipe(
tap((event) => {
if (isErrorResponse(event.message)) {
throw new Error(event.message.error);
}
}),
takeWhile((event) => isErrorResponse(event.message) || !isDoneMessage(event.message.choices[0].delta)),
map((event) => event.message),
map((event) => event.message)
);
}

@@ -351,31 +373,37 @@ export const health = async (): Promise<OpenAIHealthDetails> => {
// First check if the plugin is enabled.
try {
const settings = await getBackendSrv().get(`${LLM_PLUGIN_ROUTE}/settings`, undefined, undefined, {
showSuccessAlert: false, showErrorAlert: false,
showSuccessAlert: false,
showErrorAlert: false,
});
if (!settings.enabled) {
return { configured: false, ok: false, error: 'The Grafana LLM plugin is not enabled.' }
return { configured: false, ok: false, error: 'The Grafana LLM plugin is not enabled.' };
}
} catch (e) {
logDebug(String(e));
logDebug('Failed to check if OpenAI is enabled. This is expected if the Grafana LLM plugin is not installed, and the above error can be ignored.');
logDebug(
'Failed to check if OpenAI is enabled. This is expected if the Grafana LLM plugin is not installed, and the above error can be ignored.'
);
loggedWarning = true;
return { configured: false, ok: false, error: 'The Grafana LLM plugin is not installed.' }
return { configured: false, ok: false, error: 'The Grafana LLM plugin is not installed.' };
}

// Run a health check to see if OpenAI is configured on the plugin.
let response: HealthCheckResponse;
try {
response = await getBackendSrv().get(`${LLM_PLUGIN_ROUTE}/health`, undefined, undefined, {
showSuccessAlert: false, showErrorAlert: false,
showSuccessAlert: false,
showErrorAlert: false,
});
} catch (e) {
if (!loggedWarning) {
logDebug(String(e));
logDebug('Failed to check if OpenAI is enabled. This is expected if the Grafana LLM plugin is not installed, and the above error can be ignored.');
logDebug(
'Failed to check if OpenAI is enabled. This is expected if the Grafana LLM plugin is not installed, and the above error can be ignored.'
);
loggedWarning = true;
}
return { configured: false, ok: false, error: 'The Grafana LLM plugin is not installed.' }
return { configured: false, ok: false, error: 'The Grafana LLM plugin is not installed.' };
}

const { details } = response;
@@ -384,17 +412,15 @@ export const health = async (): Promise<OpenAIHealthDetails> => {
setLLMPluginVersion(details.version);
}
if (details?.openAI === undefined) {
return { configured: false, ok: false, error: 'The Grafana LLM plugin is outdated; please update it.' }
return { configured: false, ok: false, error: 'The Grafana LLM plugin is outdated; please update it.' };
}
return typeof details.openAI === 'boolean' ?
{ configured: details.openAI, ok: details.openAI } :
details.openAI;
}
return typeof details.openAI === 'boolean' ? { configured: details.openAI, ok: details.openAI } : details.openAI;
};

export const enabled = async (): Promise<boolean> => {
const healthDetails = await health();
return healthDetails.configured && healthDetails.ok;
}
};

/**
* Enum representing different states for a stream.
@@ -442,7 +468,7 @@ export type OpenAIStreamState = {
stream: Subscription;
}
| undefined;
}
};

/**
* A custom React hook for managing an OpenAI stream that communicates with the provided model.
@@ -463,7 +489,7 @@ export type OpenAIStreamState = {
export function useOpenAIStream(
model = 'gpt-4',
temperature = 1,
notifyError: (title: string, text?: string, traceId?: string) => void = () => {},
notifyError: (title: string, text?: string, traceId?: string) => void = () => {}
): OpenAIStreamState {
// The messages array to send to the LLM.
const [messages, setMessages] = useState<Message[]>([]);
@@ -498,20 +524,19 @@ export function useOpenAIStream(
setError(undefined);
// Stream the completions. Each element is the next stream chunk.
const stream = streamChatCompletions({
model,
temperature,
messages,
})
.pipe(
// Accumulate the stream content into a stream of strings, where each
// element contains the accumulated message so far.
accumulateContent()
// The stream is just a regular Observable, so we can use standard rxjs
// functionality to update state, e.g. recording when the stream
// has completed.
// The operator decision tree on the rxjs website is a useful resource:
// https://rxjs.dev/operator-decision-tree.)
);
model,
temperature,
messages,
}).pipe(
// Accumulate the stream content into a stream of strings, where each
// element contains the accumulated message so far.
accumulateContent()
// The stream is just a regular Observable, so we can use standard rxjs
// functionality to update state, e.g. recording when the stream
// has completed.
// The operator decision tree on the rxjs website is a useful resource:
// https://rxjs.dev/operator-decision-tree.)
);
// Subscribe to the stream and update the state for each returned value.
return {
enabled: isEnabled,
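
Not part of this commit — a minimal usage sketch of the client functions reformatted above, assuming plugin code in the same package importing from './openai'. The model name and prompt text are placeholders, and the response field access is assumed from the request/response shapes defined earlier in this file.

import { accumulateContent, chatCompletions, streamChatCompletions } from './openai';

async function askOnce(): Promise<string> {
  // One-shot request, proxied through the Grafana LLM plugin's resource endpoint.
  const response = await chatCompletions({
    model: 'gpt-3.5-turbo',
    messages: [
      { role: 'system', content: 'You are a helpful assistant.' },
      { role: 'user', content: 'Explain Grafana Live in one sentence.' },
    ],
  });
  // Assumed shape: non-streaming choices carry a full `message`.
  return response.choices[0].message.content;
}

function askStreaming(onUpdate: (text: string) => void) {
  // Streaming request over Grafana Live. accumulateContent() folds each chunk's
  // delta into the text so far, so every emission is the full partial reply.
  return streamChatCompletions({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'Write a haiku about dashboards.' }],
  })
    .pipe(accumulateContent())
    .subscribe({
      next: onUpdate,
      error: (err) => console.error(err),
    });
}

In real plugin code the exported enabled() helper would typically be awaited first, since it reports whether the LLM plugin is installed and configured.
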
48 changes: 28 additions & 20 deletions src/llms/vector.ts
@@ -8,11 +8,11 @@
* The {@link enabled} function can be used to check if the plugin is enabled and configured.
*/

import { getBackendSrv, logDebug } from "@grafana/runtime";
import { LLM_PLUGIN_ROUTE, setLLMPluginVersion } from "./constants";
import { HealthCheckResponse, VectorHealthDetails } from "./types";
import { getBackendSrv, logDebug } from '@grafana/runtime';
import { LLM_PLUGIN_ROUTE, setLLMPluginVersion } from './constants';
import { HealthCheckResponse, VectorHealthDetails } from './types';

interface SearchResultPayload extends Record<string, any> { }
interface SearchResultPayload extends Record<string, any> {}

/**
* A request to search for resources in the vector database.
@@ -28,7 +28,7 @@ export interface SearchRequest {

/**
* Limit the number of results returned to the top `topK` results.
*
*
* Defaults to 10.
**/
topK?: number;
@@ -69,9 +69,13 @@ interface SearchResultResponse<T extends SearchResultPayload> {
* Search for resources in the configured vector database.
*/
export async function search<T extends SearchResultPayload>(request: SearchRequest): Promise<Array<SearchResult<T>>> {
const response = await getBackendSrv().post<SearchResultResponse<T>>('/api/plugins/grafana-llm-app/resources/vector/search', request, {
headers: { 'Content-Type': 'application/json' }
});
const response = await getBackendSrv().post<SearchResultResponse<T>>(
'/api/plugins/grafana-llm-app/resources/vector/search',
request,
{
headers: { 'Content-Type': 'application/json' },
}
);
return response.results;
}

Expand All @@ -82,33 +86,39 @@ export const health = async (): Promise<VectorHealthDetails> => {
// First check if the plugin is enabled.
try {
const settings = await getBackendSrv().get(`${LLM_PLUGIN_ROUTE}/settings`, undefined, undefined, {
showSuccessAlert: false, showErrorAlert: false,
showSuccessAlert: false,
showErrorAlert: false,
});
if (!settings.enabled) {
return { enabled: false, ok: false, error: 'The Grafana LLM plugin is not enabled.' }
return { enabled: false, ok: false, error: 'The Grafana LLM plugin is not enabled.' };
}
} catch (e) {
logDebug(String(e));
logDebug('Failed to check if the vector service is enabled. This is expected if the Grafana LLM plugin is not installed, and the above error can be ignored.');
logDebug(
'Failed to check if the vector service is enabled. This is expected if the Grafana LLM plugin is not installed, and the above error can be ignored.'
);
loggedWarning = true;
return { enabled: false, ok: false, error: 'The Grafana LLM plugin is not installed.' }
return { enabled: false, ok: false, error: 'The Grafana LLM plugin is not installed.' };
}

// Run a health check to see if the vector service is configured on the plugin.
let response: HealthCheckResponse;
try {
response = await getBackendSrv().get(`${LLM_PLUGIN_ROUTE}/health`, undefined, undefined, {
showSuccessAlert: false, showErrorAlert: false,
showSuccessAlert: false,
showErrorAlert: false,
});
} catch (e) {
// We shouldn't really get here if we managed to get the plugin's settings above,
// but catch this just in case.
if (!loggedWarning) {
logDebug(String(e));
logDebug('Failed to check if vector service is enabled. This is expected if the Grafana LLM plugin is not installed, and the above error can be ignored.');
logDebug(
'Failed to check if vector service is enabled. This is expected if the Grafana LLM plugin is not installed, and the above error can be ignored.'
);
loggedWarning = true;
}
return { enabled: false, ok: false, error: 'The Grafana LLM plugin is not installed.' }
return { enabled: false, ok: false, error: 'The Grafana LLM plugin is not installed.' };
}

const { details } = response;
Expand All @@ -117,14 +127,12 @@ export const health = async (): Promise<VectorHealthDetails> => {
setLLMPluginVersion(details.version);
}
if (details?.vector === undefined) {
return { enabled: false, ok: false, error: 'The Grafana LLM plugin is outdated; please update it.' }
return { enabled: false, ok: false, error: 'The Grafana LLM plugin is outdated; please update it.' };
}
return typeof details.vector === 'boolean' ?
{ enabled: details.vector, ok: details.vector } :
details.vector;
return typeof details.vector === 'boolean' ? { enabled: details.vector, ok: details.vector } : details.vector;
};

export const enabled = async (): Promise<boolean> => {
const healthDetails = await health();
return healthDetails.enabled && healthDetails.ok;
}
};
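
Similarly, a minimal sketch of how the vector client might be called (not part of this commit). The collection name, query, and payload shape are placeholders, and the request/result field names (collection, query, payload, score) are assumed from the SearchRequest and SearchResult interfaces in this file, which are only partially visible in the diff.

import { enabled, search } from './vector';

// Hypothetical payload shape stored alongside each vector.
interface DashboardPayload {
  title: string;
  uid: string;
}

async function findRelatedDashboards(query: string) {
  // Bail out early if the vector service is not installed or configured.
  if (!(await enabled())) {
    return [];
  }
  const results = await search<DashboardPayload>({
    collection: 'dashboards',
    query,
    topK: 5, // defaults to 10 when omitted, per the SearchRequest docs above
  });
  // Each result is assumed to carry the stored payload plus a similarity score.
  return results.map((r) => ({ title: r.payload.title, score: r.score }));
}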
