diff --git a/packages/grafana-llm-app/llmclient/go.mod b/packages/grafana-llm-app/llmclient/go.mod
deleted file mode 100644
index a374de4c..00000000
--- a/packages/grafana-llm-app/llmclient/go.mod
+++ /dev/null
@@ -1,5 +0,0 @@
-module github.com/grafana/grafana-llm-app/llmclient
-
-go 1.19
-
-require github.com/sashabaranov/go-openai v1.15.3
diff --git a/packages/grafana-llm-app/llmclient/go.sum b/packages/grafana-llm-app/llmclient/go.sum
deleted file mode 100644
index c5e5edf6..00000000
--- a/packages/grafana-llm-app/llmclient/go.sum
+++ /dev/null
@@ -1,2 +0,0 @@
-github.com/sashabaranov/go-openai v1.15.3 h1:rzoNK9n+Cak+PM6OQ9puxDmFllxfnVea9StlmhglXqA=
-github.com/sashabaranov/go-openai v1.15.3/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg=
diff --git a/packages/grafana-llm-app/llmclient/llmclient.go b/packages/grafana-llm-app/llmclient/llmclient.go
index a68ddd1d..fcd220f4 100644
--- a/packages/grafana-llm-app/llmclient/llmclient.go
+++ b/packages/grafana-llm-app/llmclient/llmclient.go
@@ -35,6 +35,12 @@ type ChatCompletionRequest struct {
 	Model Model `json:"model"`
 }
 
+// AssistantRequest is a request for creating an assistant using an abstract model.
+type AssistantRequest struct {
+	openai.AssistantRequest
+	Model Model `json:"model"`
+}
+
 // OpenAI is an interface for talking to OpenAI via the Grafana LLM app.
 // Requests made using this interface will be routed to the OpenAI backend
 // configured in the Grafana LLM app's settings, with authentication handled
@@ -47,6 +53,20 @@ type OpenAI interface {
 	ChatCompletions(ctx context.Context, req ChatCompletionRequest) (openai.ChatCompletionResponse, error)
 	// ChatCompletionsStream makes a streaming request to the OpenAI Chat Completion API.
 	ChatCompletionsStream(ctx context.Context, req ChatCompletionRequest) (*openai.ChatCompletionStream, error)
+	// CreateAssistant creates an assistant using the given request.
+	CreateAssistant(ctx context.Context, req AssistantRequest) (openai.Assistant, error)
+	// CreateThread creates a new thread.
+	CreateThread(ctx context.Context, req openai.ThreadRequest) (openai.Thread, error)
+	// CreateMessage creates a new message in a thread.
+	CreateMessage(ctx context.Context, threadID string, request openai.MessageRequest) (msg openai.Message, err error)
+	// CreateRun creates a new run in a thread.
+	CreateRun(ctx context.Context, threadID string, request openai.RunRequest) (run openai.Run, err error)
+	// RetrieveRun retrieves a run in a thread.
+	RetrieveRun(ctx context.Context, threadID string, runID string) (run openai.Run, err error)
+	// SubmitToolOutputs submits tool outputs for a run in a thread.
+	SubmitToolOutputs(ctx context.Context, threadID string, runID string, request openai.SubmitToolOutputsRequest) (response openai.Run, err error)
+	// ListMessage lists messages in a thread.
+	ListMessage(ctx context.Context, threadID string, limit *int, order *string, after *string, before *string) (openai.MessagesList, error)
 }
 
 type openAI struct {
@@ -159,3 +179,33 @@ func (o *openAI) ChatCompletionsStream(ctx context.Context, req ChatCompletionRe
 	r.Model = string(req.Model)
 	return o.client.CreateChatCompletionStream(ctx, r)
 }
+
+func (o *openAI) CreateAssistant(ctx context.Context, req AssistantRequest) (openai.Assistant, error) {
+	r := req.AssistantRequest
+	r.Model = string(req.Model)
+	return o.client.CreateAssistant(ctx, r)
+}
+
+func (o *openAI) CreateThread(ctx context.Context, req openai.ThreadRequest) (openai.Thread, error) {
+	return o.client.CreateThread(ctx, req)
+}
+
+func (o *openAI) CreateMessage(ctx context.Context, threadID string, request openai.MessageRequest) (msg openai.Message, err error) {
+	return o.client.CreateMessage(ctx, threadID, request)
+}
+
+func (o *openAI) CreateRun(ctx context.Context, threadID string, request openai.RunRequest) (run openai.Run, err error) {
+	return o.client.CreateRun(ctx, threadID, request)
+}
+
+func (o *openAI) RetrieveRun(ctx context.Context, threadID string, runID string) (run openai.Run, err error) {
+	return o.client.RetrieveRun(ctx, threadID, runID)
+}
+
+func (o *openAI) SubmitToolOutputs(ctx context.Context, threadID string, runID string, request openai.SubmitToolOutputsRequest) (response openai.Run, err error) {
+	return o.client.SubmitToolOutputs(ctx, threadID, runID, request)
+}
+
+func (o *openAI) ListMessage(ctx context.Context, threadID string, limit *int, order *string, after *string, before *string) (openai.MessagesList, error) {
+	return o.client.ListMessage(ctx, threadID, limit, order, after, before)
+}