From f769a074412e13ecb85239bcb1ac1dee600c4fff Mon Sep 17 00:00:00 2001
From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com>
Date: Fri, 16 Aug 2024 16:16:28 +0200
Subject: [PATCH] Fix AgentsCompletionStreamRequest (#132)
* fix agents
* update docs
---
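A minimal sketch of the corrected streaming call after this fix, assuming `MISTRAL_API_KEY` is set in the environment and `my-agent-id` is a placeholder for a real agent ID:

```python
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# The stream request now takes `messages` and `agent_id` instead of the
# FIM-style `model`/`prompt`/`suffix` fields it was generated with before.
res = client.agents.stream(
    agent_id="my-agent-id",  # placeholder: use a real agent ID
    messages=[
        {
            "role": "user",
            "content": "Who is the best French painter? Answer in one short sentence.",
        }
    ],
)

if res is not None:
    for event in res:
        # Each event is one streamed chunk of the completion.
        print(event)
```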
.speakeasy/gen.lock | 19 ++++--
.speakeasy/gen.yaml | 2 +-
.speakeasy/workflow.lock | 33 +++++++++-
docs/models/agentscompletionrequest.md | 2 +-
docs/models/agentscompletionstreamrequest.md | 24 +++----
.../agentscompletionstreamrequestmessages.md | 23 +++++++
...agentscompletionstreamrequesttoolchoice.md | 10 +++
docs/models/chatcompletionchoice.md | 10 +--
docs/models/chatcompletionrequest.md | 4 +-
docs/models/chatcompletionstreamrequest.md | 4 +-
docs/models/tool.md | 8 +--
docs/models/toolcall.md | 10 +--
docs/models/tooltooltypes.md | 8 +++
docs/models/tooltypes.md | 8 +++
docs/sdks/agents/README.md | 39 ++++++-----
docs/sdks/chat/README.md | 12 ++--
docs/sdks/embeddings/README.md | 2 +
docs/sdks/files/README.md | 8 +++
docs/sdks/fim/README.md | 4 ++
docs/sdks/jobs/README.md | 10 +++
docs/sdks/models/README.md | 12 ++++
examples/async_jobs.py | 4 +-
packages/mistralai_azure/.speakeasy/gen.lock | 17 +++--
packages/mistralai_azure/.speakeasy/gen.yaml | 2 +-
.../docs/models/chatcompletionchoice.md | 4 +-
.../docs/models/chatcompletionrequest.md | 2 +-
.../models/chatcompletionstreamrequest.md | 2 +-
packages/mistralai_azure/docs/models/tool.md | 8 +--
.../mistralai_azure/docs/models/toolcall.md | 10 +--
.../docs/models/tooltooltypes.md | 8 +++
.../mistralai_azure/docs/models/tooltypes.md | 8 +++
packages/mistralai_azure/poetry.lock | 8 +--
packages/mistralai_azure/pyproject.toml | 4 +-
packages/mistralai_azure/scripts/publish.sh | 2 +
.../src/mistralai_azure/models/__init__.py | 6 +-
.../models/chatcompletionchoice.py | 7 +-
.../src/mistralai_azure/models/tool.py | 14 ++--
.../src/mistralai_azure/models/toolcall.py | 12 ++--
.../src/mistralai_azure/sdkconfiguration.py | 6 +-
packages/mistralai_gcp/.speakeasy/gen.lock | 17 +++--
packages/mistralai_gcp/.speakeasy/gen.yaml | 2 +-
.../docs/models/chatcompletionchoice.md | 4 +-
.../docs/models/chatcompletionrequest.md | 4 +-
.../models/chatcompletionstreamrequest.md | 4 +-
packages/mistralai_gcp/docs/models/tool.md | 8 +--
.../mistralai_gcp/docs/models/toolcall.md | 10 +--
.../docs/models/tooltooltypes.md | 8 +++
.../mistralai_gcp/docs/models/tooltypes.md | 8 +++
packages/mistralai_gcp/poetry.lock | 8 +--
packages/mistralai_gcp/pyproject.toml | 4 +-
packages/mistralai_gcp/scripts/publish.sh | 2 +
.../mistralai_gcp/src/mistralai_gcp/chat.py | 8 +--
.../src/mistralai_gcp/models/__init__.py | 6 +-
.../models/chatcompletionchoice.py | 7 +-
.../models/chatcompletionrequest.py | 4 +-
.../models/chatcompletionstreamrequest.py | 4 +-
.../src/mistralai_gcp/models/tool.py | 14 ++--
.../src/mistralai_gcp/models/toolcall.py | 12 ++--
.../src/mistralai_gcp/sdkconfiguration.py | 6 +-
pyproject.toml | 2 +-
scripts/publish.sh | 2 +
src/mistralai/agents.py | 62 +++++++++---------
src/mistralai/chat.py | 8 +--
src/mistralai/models/__init__.py | 8 +--
.../models/agentscompletionstreamrequest.py | 64 ++++++++++---------
src/mistralai/models/chatcompletionchoice.py | 7 +-
src/mistralai/models/chatcompletionrequest.py | 4 +-
.../models/chatcompletionstreamrequest.py | 4 +-
src/mistralai/models/tool.py | 14 ++--
src/mistralai/models/toolcall.py | 12 ++--
src/mistralai/sdkconfiguration.py | 6 +-
71 files changed, 454 insertions(+), 255 deletions(-)
create mode 100644 docs/models/agentscompletionstreamrequestmessages.md
create mode 100644 docs/models/agentscompletionstreamrequesttoolchoice.md
create mode 100644 docs/models/tooltooltypes.md
create mode 100644 docs/models/tooltypes.md
create mode 100644 packages/mistralai_azure/docs/models/tooltooltypes.md
create mode 100644 packages/mistralai_azure/docs/models/tooltypes.md
create mode 100644 packages/mistralai_gcp/docs/models/tooltooltypes.md
create mode 100644 packages/mistralai_gcp/docs/models/tooltypes.md
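The regenerated stream request also exposes the `tools`, `tool_choice`, and `response_format` fields; a sketch of passing a function tool on the stream call, where the tool definition and agent ID below are illustrative placeholders rather than anything shipped with this SDK:

```python
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

res = client.agents.stream(
    agent_id="my-agent-id",  # placeholder agent ID
    messages=[{"role": "user", "content": "What is the weather in Paris today?"}],
    tools=[
        {
            "type": "function",  # the only value of the new ToolToolTypes enum
            "function": {
                "name": "get_weather",  # illustrative function, not part of the SDK
                "description": "Look up the current weather for a city.",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            },
        }
    ],
    tool_choice="auto",  # auto / none / any, per AgentsCompletionStreamRequestToolChoice
)

if res is not None:
    for event in res:
        print(event)
```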
diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock
index 0cbace7..bc40f70 100644
--- a/.speakeasy/gen.lock
+++ b/.speakeasy/gen.lock
@@ -1,12 +1,12 @@
lockVersion: 2.0.0
id: 2d045ec7-2ebb-4f4d-ad25-40953b132161
management:
- docChecksum: a7c4268dd1228c969aecfd7cfdb6ca3c
+ docChecksum: c19f5a86b8045af32a46604ee5478061
docVersion: 0.0.2
- speakeasyVersion: 1.357.4
- generationVersion: 2.390.6
- releaseVersion: 1.0.1
- configChecksum: 374a669373f10730cda1eb9a91d59b8b
+ speakeasyVersion: 1.372.0
+ generationVersion: 2.399.0
+ releaseVersion: 1.1.3
+ configChecksum: b9757e45cfabeceebf51f9a514724903
repoURL: https://github.com/mistralai/client-python.git
installationURL: https://github.com/mistralai/client-python.git
published: true
@@ -14,7 +14,7 @@ features:
python:
additionalDependencies: 1.0.0
constsAndDefaults: 1.0.2
- core: 5.3.7
+ core: 5.3.8
defaultEnabledRetries: 0.2.0
envVarSecurityUsage: 0.3.1
examples: 3.0.0
@@ -27,13 +27,14 @@ features:
multipartFileContentType: 1.0.0
nameOverrides: 3.0.0
nullables: 1.0.0
+ openEnums: 1.0.0
responseFormat: 1.0.0
retries: 3.0.0
sdkHooks: 1.0.0
serverEvents: 1.0.2
serverEventsSentinels: 0.1.0
serverIDs: 3.0.0
- unions: 3.0.1
+ unions: 3.0.2
uploadStreams: 1.0.0
generatedFiles:
- src/mistralai/sdkconfiguration.py
@@ -211,6 +212,7 @@ generatedFiles:
- docs/models/chatcompletionchoice.md
- docs/models/assistantmessagerole.md
- docs/models/assistantmessage.md
+ - docs/models/tooltypes.md
- docs/models/toolcall.md
- docs/models/arguments.md
- docs/models/functioncall.md
@@ -219,6 +221,7 @@ generatedFiles:
- docs/models/messages.md
- docs/models/toolchoice.md
- docs/models/chatcompletionrequest.md
+ - docs/models/tooltooltypes.md
- docs/models/tool.md
- docs/models/function.md
- docs/models/responseformats.md
@@ -252,6 +255,8 @@ generatedFiles:
- docs/models/agentscompletionrequesttoolchoice.md
- docs/models/agentscompletionrequest.md
- docs/models/agentscompletionstreamrequeststop.md
+ - docs/models/agentscompletionstreamrequestmessages.md
+ - docs/models/agentscompletionstreamrequesttoolchoice.md
- docs/models/agentscompletionstreamrequest.md
- docs/models/embeddingresponse.md
- docs/models/embeddingresponsedata.md
diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml
index 1d54a9e..90ee2f7 100644
--- a/.speakeasy/gen.yaml
+++ b/.speakeasy/gen.yaml
@@ -12,7 +12,7 @@ generation:
auth:
oAuth2ClientCredentialsEnabled: true
python:
- version: 1.0.1
+ version: 1.1.3
additionalDependencies:
dev:
pytest: ^8.2.2
diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock
index 92bc880..432ad60 100644
--- a/.speakeasy/workflow.lock
+++ b/.speakeasy/workflow.lock
@@ -1,15 +1,42 @@
-speakeasyVersion: 1.357.4
-sources: {}
+speakeasyVersion: 1.372.0
+sources:
+ mistral-azure-source:
+ sourceNamespace: mistral-openapi-azure
+ sourceRevisionDigest: sha256:bc53dba5935490a409045de3c39ccf9e90243a289656dd538a542990aa376cca
+ sourceBlobDigest: sha256:4173c3be19775dd2bdd4ce28bb9ae6655650df75f2b689a44c3362d418d69d49
+ tags:
+ - latest
+ mistral-google-cloud-source:
+ sourceNamespace: mistral-openapi-google-cloud
+ sourceRevisionDigest: sha256:ab52d75474e071db240ed9a5367dc6374867b5c9306d478dcfdf8f7b7d08607f
+ sourceBlobDigest: sha256:d5f9c665861d7fedd5093567d13e1f7f6a12b82137fbbecda4708007b15030ba
+ tags:
+ - latest
+ mistral-openapi:
+ sourceNamespace: mistral-openapi
+ sourceRevisionDigest: sha256:e4d5f5fe40e7f1141006ba40c1d85b743ce5dc2407635ca2e776ba0dfb00a398
+ sourceBlobDigest: sha256:56f1bbe3a050c9505e003bb9790e443084922bff74b072805757076cdb8a136e
+ tags:
+ - latest
targets:
mistralai-azure-sdk:
source: mistral-azure-source
+ sourceNamespace: mistral-openapi-azure
+ sourceRevisionDigest: sha256:bc53dba5935490a409045de3c39ccf9e90243a289656dd538a542990aa376cca
+ sourceBlobDigest: sha256:4173c3be19775dd2bdd4ce28bb9ae6655650df75f2b689a44c3362d418d69d49
outLocation: ./packages/mistralai_azure
mistralai-gcp-sdk:
source: mistral-google-cloud-source
+ sourceNamespace: mistral-openapi-google-cloud
+ sourceRevisionDigest: sha256:ab52d75474e071db240ed9a5367dc6374867b5c9306d478dcfdf8f7b7d08607f
+ sourceBlobDigest: sha256:d5f9c665861d7fedd5093567d13e1f7f6a12b82137fbbecda4708007b15030ba
outLocation: ./packages/mistralai_gcp
mistralai-sdk:
source: mistral-openapi
- outLocation: /github/workspace/repo
+ sourceNamespace: mistral-openapi
+ sourceRevisionDigest: sha256:e4d5f5fe40e7f1141006ba40c1d85b743ce5dc2407635ca2e776ba0dfb00a398
+ sourceBlobDigest: sha256:56f1bbe3a050c9505e003bb9790e443084922bff74b072805757076cdb8a136e
+ outLocation: /Users/gaspard/public-mistral/client-python
workflow:
workflowVersion: 1.0.0
speakeasyVersion: latest
diff --git a/docs/models/agentscompletionrequest.md b/docs/models/agentscompletionrequest.md
index 2d0d672..7f6c428 100644
--- a/docs/models/agentscompletionrequest.md
+++ b/docs/models/agentscompletionrequest.md
@@ -5,7 +5,7 @@
| Field | Type | Required | Description | Example |
| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `messages` | List[[models.AgentsCompletionRequestMessages](../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | { "role": "user", "content": "Who is the best French painter? Answer in one short sentence." } |
+| `messages` | List[[models.AgentsCompletionRequestMessages](../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [ { "role": "user", "content": "Who is the best French painter? Answer in one short sentence." } ] |
| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | |
| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | |
| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | |
diff --git a/docs/models/agentscompletionstreamrequest.md b/docs/models/agentscompletionstreamrequest.md
index c318774..d849a95 100644
--- a/docs/models/agentscompletionstreamrequest.md
+++ b/docs/models/agentscompletionstreamrequest.md
@@ -3,15 +3,15 @@
## Fields
-| Field | Type | Required | Description | Example |
-| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` | codestral-2405 |
-| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def |
-| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | |
-| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | |
-| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | |
-| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | |
-| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | |
-| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | |
-| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | |
-| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b |
\ No newline at end of file
+| Field | Type | Required | Description | Example |
+| ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [ { "role": "user", "content": "Who is the best French painter? Answer in one short sentence." } ] |
+| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | |
+| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | |
+| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | |
+| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | |
+| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | |
+| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | |
+| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | |
+| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | |
+| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | |
\ No newline at end of file
diff --git a/docs/models/agentscompletionstreamrequestmessages.md b/docs/models/agentscompletionstreamrequestmessages.md
new file mode 100644
index 0000000..d8cf99e
--- /dev/null
+++ b/docs/models/agentscompletionstreamrequestmessages.md
@@ -0,0 +1,23 @@
+# AgentsCompletionStreamRequestMessages
+
+
+## Supported Types
+
+### `models.AssistantMessage`
+
+```python
+value: models.AssistantMessage = /* values here */
+```
+
+### `models.ToolMessage`
+
+```python
+value: models.ToolMessage = /* values here */
+```
+
+### `models.UserMessage`
+
+```python
+value: models.UserMessage = /* values here */
+```
+
diff --git a/docs/models/agentscompletionstreamrequesttoolchoice.md b/docs/models/agentscompletionstreamrequesttoolchoice.md
new file mode 100644
index 0000000..e761d1e
--- /dev/null
+++ b/docs/models/agentscompletionstreamrequesttoolchoice.md
@@ -0,0 +1,10 @@
+# AgentsCompletionStreamRequestToolChoice
+
+
+## Values
+
+| Name | Value |
+| ------ | ------ |
+| `AUTO` | auto |
+| `NONE` | none |
+| `ANY` | any |
\ No newline at end of file
diff --git a/docs/models/chatcompletionchoice.md b/docs/models/chatcompletionchoice.md
index c916fc0..d77d286 100644
--- a/docs/models/chatcompletionchoice.md
+++ b/docs/models/chatcompletionchoice.md
@@ -3,8 +3,8 @@
## Fields
-| Field | Type | Required | Description | Example |
-| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ |
-| `index` | *int* | :heavy_check_mark: | N/A | 0 |
-| `finish_reason` | [models.FinishReason](../models/finishreason.md) | :heavy_check_mark: | N/A | stop |
-| `message` | [Optional[models.AssistantMessage]](../models/assistantmessage.md) | :heavy_minus_sign: | N/A | |
\ No newline at end of file
+| Field | Type | Required | Description | Example |
+| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- |
+| `index` | *int* | :heavy_check_mark: | N/A | 0 |
+| `message` | [models.AssistantMessage](../models/assistantmessage.md) | :heavy_check_mark: | N/A | |
+| `finish_reason` | [models.FinishReason](../models/finishreason.md) | :heavy_check_mark: | N/A | stop |
\ No newline at end of file
diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md
index cfb3596..d22efc3 100644
--- a/docs/models/chatcompletionrequest.md
+++ b/docs/models/chatcompletionrequest.md
@@ -5,8 +5,8 @@
| Field | Type | Required | Description | Example |
| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest |
-| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | { "role": "user", "content": "Who is the best French painter? Answer in one short sentence." } |
+| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest |
+| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [ { "role": "user", "content": "Who is the best French painter? Answer in one short sentence." } ] |
| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | |
| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | |
| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | |
diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md
index 8c3a0ba..fd1fc48 100644
--- a/docs/models/chatcompletionstreamrequest.md
+++ b/docs/models/chatcompletionstreamrequest.md
@@ -5,8 +5,8 @@
| Field | Type | Required | Description | Example |
| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest |
-| `messages` | List[[models.ChatCompletionStreamRequestMessages](../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | { "role": "user", "content": "Who is the best French painter? Answer in one short sentence." } |
+| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest |
+| `messages` | List[[models.ChatCompletionStreamRequestMessages](../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [ { "role": "user", "content": "Who is the best French painter? Answer in one short sentence." } ] |
| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | |
| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | |
| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | |
diff --git a/docs/models/tool.md b/docs/models/tool.md
index 291394c..ca624a9 100644
--- a/docs/models/tool.md
+++ b/docs/models/tool.md
@@ -3,7 +3,7 @@
## Fields
-| Field | Type | Required | Description |
-| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- |
-| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A |
-| `type` | *Optional[str]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ |
+| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A |
+| `type` | [Optional[models.ToolToolTypes]](../models/tooltooltypes.md) | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/toolcall.md b/docs/models/toolcall.md
index bd2dc9f..7aca5fc 100644
--- a/docs/models/toolcall.md
+++ b/docs/models/toolcall.md
@@ -3,8 +3,8 @@
## Fields
-| Field | Type | Required | Description |
-| ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ |
-| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A |
-| `id` | *Optional[str]* | :heavy_minus_sign: | N/A |
-| `type` | *Optional[str]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- |
+| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A |
+| `id` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/tooltooltypes.md b/docs/models/tooltooltypes.md
new file mode 100644
index 0000000..e396430
--- /dev/null
+++ b/docs/models/tooltooltypes.md
@@ -0,0 +1,8 @@
+# ToolToolTypes
+
+
+## Values
+
+| Name | Value |
+| ---------- | ---------- |
+| `FUNCTION` | function |
\ No newline at end of file
diff --git a/docs/models/tooltypes.md b/docs/models/tooltypes.md
new file mode 100644
index 0000000..84e4925
--- /dev/null
+++ b/docs/models/tooltypes.md
@@ -0,0 +1,8 @@
+# ToolTypes
+
+
+## Values
+
+| Name | Value |
+| ---------- | ---------- |
+| `FUNCTION` | function |
\ No newline at end of file
diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md
index e3981c1..167f041 100644
--- a/docs/sdks/agents/README.md
+++ b/docs/sdks/agents/README.md
@@ -38,11 +38,13 @@ if res is not None:
```
+
+
### Parameters
| Parameter | Type | Required | Description | Example |
| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `messages` | List[[models.AgentsCompletionRequestMessages](../../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | { "role": "user", "content": "Who is the best French painter? Answer in one short sentence." } |
+| `messages` | List[[models.AgentsCompletionRequestMessages](../../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [ { "role": "user", "content": "Who is the best French painter? Answer in one short sentence." } ] |
| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | |
| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | |
| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | |
@@ -80,7 +82,12 @@ s = Mistral(
)
-res = s.agents.stream(model="codestral-2405", prompt="def", suffix="return a+b")
+res = s.agents.stream(messages=[
+ {
+ "content": "Who is the best French painter? Answer in one short sentence.",
+ "role": "user",
+ },
+], agent_id="")
if res is not None:
for event in res:
@@ -89,21 +96,23 @@ if res is not None:
```
+
+
### Parameters
-| Parameter | Type | Required | Description | Example |
-| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` | codestral-2405 |
-| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def |
-| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | |
-| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | |
-| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | |
-| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | |
-| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | |
-| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | |
-| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | |
-| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b |
-| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | |
+| Parameter | Type | Required | Description | Example |
+| ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [ { "role": "user", "content": "Who is the best French painter? Answer in one short sentence." } ] |
+| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | |
+| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | |
+| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | |
+| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | |
+| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | |
+| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | |
+| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | |
+| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | |
+| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | |
### Response
diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md
index e941104..aaa828e 100644
--- a/docs/sdks/chat/README.md
+++ b/docs/sdks/chat/README.md
@@ -38,12 +38,14 @@ if res is not None:
```
+
+
### Parameters
| Parameter | Type | Required | Description | Example |
| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest |
-| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | { "role": "user", "content": "Who is the best French painter? Answer in one short sentence." } |
+| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest |
+| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [ { "role": "user", "content": "Who is the best French painter? Answer in one short sentence." } ] |
| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | |
| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | |
| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | |
@@ -97,12 +99,14 @@ if res is not None:
```
+
+
### Parameters
| Parameter | Type | Required | Description | Example |
| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest |
-| `messages` | List[[models.ChatCompletionStreamRequestMessages](../../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | { "role": "user", "content": "Who is the best French painter? Answer in one short sentence." } |
+| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest |
+| `messages` | List[[models.ChatCompletionStreamRequestMessages](../../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [ { "role": "user", "content": "Who is the best French painter? Answer in one short sentence." } ] |
| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | |
| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | |
| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | |
diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md
index ee46f9b..2f9f2c7 100644
--- a/docs/sdks/embeddings/README.md
+++ b/docs/sdks/embeddings/README.md
@@ -32,6 +32,8 @@ if res is not None:
```
+
+
### Parameters
| Parameter | Type | Required | Description |
diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md
index 897556f..ec90fd3 100644
--- a/docs/sdks/files/README.md
+++ b/docs/sdks/files/README.md
@@ -42,6 +42,8 @@ if res is not None:
```
+
+
### Parameters
| Parameter | Type | Required | Description |
@@ -82,6 +84,8 @@ if res is not None:
```
+
+
### Parameters
| Parameter | Type | Required | Description |
@@ -121,6 +125,8 @@ if res is not None:
```
+
+
### Parameters
| Parameter | Type | Required | Description |
@@ -161,6 +167,8 @@ if res is not None:
```
+
+
### Parameters
| Parameter | Type | Required | Description |
diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md
index 784b521..ef8b1dc 100644
--- a/docs/sdks/fim/README.md
+++ b/docs/sdks/fim/README.md
@@ -33,6 +33,8 @@ if res is not None:
```
+
+
### Parameters
| Parameter | Type | Required | Description | Example |
@@ -84,6 +86,8 @@ if res is not None:
```
+
+
### Parameters
| Parameter | Type | Required | Description | Example |
diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md
index 3366c73..b0926f6 100644
--- a/docs/sdks/jobs/README.md
+++ b/docs/sdks/jobs/README.md
@@ -32,6 +32,8 @@ if res is not None:
```
+
+
### Parameters
| Parameter | Type | Required | Description |
@@ -80,6 +82,8 @@ if res is not None:
```
+
+
### Parameters
| Parameter | Type | Required | Description |
@@ -127,6 +131,8 @@ if res is not None:
```
+
+
### Parameters
| Parameter | Type | Required | Description |
@@ -167,6 +173,8 @@ if res is not None:
```
+
+
### Parameters
| Parameter | Type | Required | Description |
@@ -207,6 +215,8 @@ if res is not None:
```
+
+
### Parameters
| Parameter | Type | Required | Description |
diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md
index 051aa53..00fca08 100644
--- a/docs/sdks/models/README.md
+++ b/docs/sdks/models/README.md
@@ -37,6 +37,8 @@ if res is not None:
```
+
+
### Parameters
| Parameter | Type | Required | Description |
@@ -77,6 +79,8 @@ if res is not None:
```
+
+
### Parameters
| Parameter | Type | Required | Description | Example |
@@ -118,6 +122,8 @@ if res is not None:
```
+
+
### Parameters
| Parameter | Type | Required | Description | Example |
@@ -159,6 +165,8 @@ if res is not None:
```
+
+
### Parameters
| Parameter | Type | Required | Description | Example |
@@ -201,6 +209,8 @@ if res is not None:
```
+
+
### Parameters
| Parameter | Type | Required | Description | Example |
@@ -241,6 +251,8 @@ if res is not None:
```
+
+
### Parameters
| Parameter | Type | Required | Description | Example |
diff --git a/examples/async_jobs.py b/examples/async_jobs.py
index b1f9e3b..e54c890 100644
--- a/examples/async_jobs.py
+++ b/examples/async_jobs.py
@@ -10,7 +10,9 @@
async def main():
api_key = os.environ["MISTRAL_API_KEY"]
- client = Mistral(api_key=api_key)
+    client = Mistral(
+        api_key=api_key,  # use the key from the MISTRAL_API_KEY environment variable, never a hardcoded value
+    )
# Create new files
with open("examples/file.jsonl", "rb") as f:
diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock
index 7823281..5d2740c 100644
--- a/packages/mistralai_azure/.speakeasy/gen.lock
+++ b/packages/mistralai_azure/.speakeasy/gen.lock
@@ -1,18 +1,18 @@
lockVersion: 2.0.0
id: dc40fa48-2c4d-46ad-ac8b-270749770f34
management:
- docChecksum: ec02d5407fd9354b416518c4b8fa8b95
+ docChecksum: 0f6edfd8ad8df6c49b3d429d1af7b9e2
docVersion: 0.0.2
- speakeasyVersion: 1.357.4
- generationVersion: 2.390.6
- releaseVersion: 1.0.0-rc.4
- configChecksum: ad8d0273f78dacd83fbba33510acd0a5
+ speakeasyVersion: 1.372.0
+ generationVersion: 2.399.0
+ releaseVersion: 1.0.0-rc.7
+ configChecksum: d14083a6bfd01e2d81264338ac4ed619
published: true
features:
python:
additionalDependencies: 1.0.0
constsAndDefaults: 1.0.2
- core: 5.3.7
+ core: 5.3.8
defaultEnabledRetries: 0.2.0
envVarSecurityUsage: 0.3.1
examples: 3.0.0
@@ -23,13 +23,14 @@ features:
globalServerURLs: 3.0.0
nameOverrides: 3.0.0
nullables: 1.0.0
+ openEnums: 1.0.0
responseFormat: 1.0.0
retries: 3.0.0
sdkHooks: 1.0.0
serverEvents: 1.0.2
serverEventsSentinels: 0.1.0
serverIDs: 3.0.0
- unions: 3.0.1
+ unions: 3.0.2
generatedFiles:
- src/mistralai_azure/sdkconfiguration.py
- src/mistralai_azure/chat.py
@@ -92,6 +93,7 @@ generatedFiles:
- docs/models/finishreason.md
- docs/models/completionresponsestreamchoice.md
- docs/models/deltamessage.md
+ - docs/models/tooltypes.md
- docs/models/toolcall.md
- docs/models/arguments.md
- docs/models/functioncall.md
@@ -103,6 +105,7 @@ generatedFiles:
- docs/models/messages.md
- docs/models/toolchoice.md
- docs/models/chatcompletionstreamrequest.md
+ - docs/models/tooltooltypes.md
- docs/models/tool.md
- docs/models/function.md
- docs/models/responseformats.md
diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml
index be4280b..ec9945e 100644
--- a/packages/mistralai_azure/.speakeasy/gen.yaml
+++ b/packages/mistralai_azure/.speakeasy/gen.yaml
@@ -12,7 +12,7 @@ generation:
auth:
oAuth2ClientCredentialsEnabled: true
python:
- version: 1.0.0-rc.4
+ version: 1.0.0-rc.7
additionalDependencies:
dev:
pytest: ^8.2.2
diff --git a/packages/mistralai_azure/docs/models/chatcompletionchoice.md b/packages/mistralai_azure/docs/models/chatcompletionchoice.md
index 6fa839b..deaa0ea 100644
--- a/packages/mistralai_azure/docs/models/chatcompletionchoice.md
+++ b/packages/mistralai_azure/docs/models/chatcompletionchoice.md
@@ -6,5 +6,5 @@
| Field | Type | Required | Description | Example |
| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- |
| `index` | *int* | :heavy_check_mark: | N/A | 0 |
-| `finish_reason` | [models.ChatCompletionChoiceFinishReason](../models/chatcompletionchoicefinishreason.md) | :heavy_check_mark: | N/A | stop |
-| `message` | [Optional[models.AssistantMessage]](../models/assistantmessage.md) | :heavy_minus_sign: | N/A | |
\ No newline at end of file
+| `message` | [models.AssistantMessage](../models/assistantmessage.md) | :heavy_check_mark: | N/A | |
+| `finish_reason` | [models.ChatCompletionChoiceFinishReason](../models/chatcompletionchoicefinishreason.md) | :heavy_check_mark: | N/A | stop |
\ No newline at end of file
diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequest.md b/packages/mistralai_azure/docs/models/chatcompletionrequest.md
index 3df1e28..307b279 100644
--- a/packages/mistralai_azure/docs/models/chatcompletionrequest.md
+++ b/packages/mistralai_azure/docs/models/chatcompletionrequest.md
@@ -5,7 +5,7 @@
| Field | Type | Required | Description | Example |
| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | { "role": "user", "content": "Who is the best French painter? Answer in one short sentence." } |
+| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [ { "role": "user", "content": "Who is the best French painter? Answer in one short sentence." } ] |
| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai |
| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | |
| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | |
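
The corrected example above makes explicit that `messages` is a JSON array, not a single object. As an illustrative sketch (the `UserMessage` helper and import path come from the generated mistralai_azure models; treat the exact names as assumptions rather than part of this patch), the same payload can be built from plain dicts or from the typed models:

```python
from mistralai_azure import models

# `messages` is a list; each entry carries a role and content.
messages_as_dicts = [
    {"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}
]

# Equivalent list built from the generated typed models (role defaults to "user").
messages_as_models = [
    models.UserMessage(content="Who is the best French painter? Answer in one short sentence.")
]
```
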
diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md
index 1fc3470..5ed2e2b 100644
--- a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md
+++ b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md
@@ -5,7 +5,7 @@
| Field | Type | Required | Description | Example |
| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} |
+| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] |
| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai |
| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | |
| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | |
diff --git a/packages/mistralai_azure/docs/models/tool.md b/packages/mistralai_azure/docs/models/tool.md
index 291394c..ca624a9 100644
--- a/packages/mistralai_azure/docs/models/tool.md
+++ b/packages/mistralai_azure/docs/models/tool.md
@@ -3,7 +3,7 @@
## Fields
-| Field | Type | Required | Description |
-| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- |
-| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A |
-| `type` | *Optional[str]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ |
+| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A |
+| `type` | [Optional[models.ToolToolTypes]](../models/tooltooltypes.md) | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/packages/mistralai_azure/docs/models/toolcall.md b/packages/mistralai_azure/docs/models/toolcall.md
index bd2dc9f..7aca5fc 100644
--- a/packages/mistralai_azure/docs/models/toolcall.md
+++ b/packages/mistralai_azure/docs/models/toolcall.md
@@ -3,8 +3,8 @@
## Fields
-| Field | Type | Required | Description |
-| ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ |
-| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A |
-| `id` | *Optional[str]* | :heavy_minus_sign: | N/A |
-| `type` | *Optional[str]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- |
+| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A |
+| `id` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/packages/mistralai_azure/docs/models/tooltooltypes.md b/packages/mistralai_azure/docs/models/tooltooltypes.md
new file mode 100644
index 0000000..e396430
--- /dev/null
+++ b/packages/mistralai_azure/docs/models/tooltooltypes.md
@@ -0,0 +1,8 @@
+# ToolToolTypes
+
+
+## Values
+
+| Name | Value |
+| ---------- | ---------- |
+| `FUNCTION` | function |
\ No newline at end of file
diff --git a/packages/mistralai_azure/docs/models/tooltypes.md b/packages/mistralai_azure/docs/models/tooltypes.md
new file mode 100644
index 0000000..84e4925
--- /dev/null
+++ b/packages/mistralai_azure/docs/models/tooltypes.md
@@ -0,0 +1,8 @@
+# ToolTypes
+
+
+## Values
+
+| Name | Value |
+| ---------- | ---------- |
+| `FUNCTION` | function |
\ No newline at end of file
diff --git a/packages/mistralai_azure/poetry.lock b/packages/mistralai_azure/poetry.lock
index 457c8ec..2e5fecf 100644
--- a/packages/mistralai_azure/poetry.lock
+++ b/packages/mistralai_azure/poetry.lock
@@ -553,13 +553,13 @@ testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"]
[[package]]
name = "python-dateutil"
-version = "2.9.0.post0"
+version = "2.8.2"
description = "Extensions to the standard Python datetime module"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
files = [
- {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"},
- {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"},
+ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
+ {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
]
[package.dependencies]
@@ -649,4 +649,4 @@ typing-extensions = ">=3.7.4"
[metadata]
lock-version = "2.0"
python-versions = "^3.8"
-content-hash = "c6fe50d9865be14321ec4828bf746f43c421f79507e9956b4e45ee6601fd1f0d"
+content-hash = "85499d03f45cd26302b8b267be44478c701581e8a56a3df0907bb38897fdb2e4"
diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml
index 2253ab3..f4cf5b5 100644
--- a/packages/mistralai_azure/pyproject.toml
+++ b/packages/mistralai_azure/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "mistralai_azure"
-version = "1.0.0-rc.4"
+version = "1.0.0-rc.7"
description = "Python Client SDK for the Mistral AI API in Azure."
authors = ["Mistral",]
readme = "README-PYPI.md"
@@ -21,7 +21,7 @@ eval-type-backport = "^0.2.0"
httpx = "^0.27.0"
jsonpath-python = "^1.0.6"
pydantic = "~2.8.2"
-python-dateutil = "^2.9.0.post0"
+python-dateutil = "2.8.2"
typing-inspect = "^0.9.0"
[tool.poetry.group.dev.dependencies]
diff --git a/packages/mistralai_azure/scripts/publish.sh b/packages/mistralai_azure/scripts/publish.sh
index 1ee7194..ab45b1f 100755
--- a/packages/mistralai_azure/scripts/publish.sh
+++ b/packages/mistralai_azure/scripts/publish.sh
@@ -2,4 +2,6 @@
export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN}
+poetry run python scripts/prepare-readme.py
+
poetry publish --build --skip-existing
diff --git a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py
index a102b13..710fe56 100644
--- a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py
+++ b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py
@@ -18,11 +18,11 @@
from .security import Security, SecurityTypedDict
from .systemmessage import Content, ContentTypedDict, Role, SystemMessage, SystemMessageTypedDict
from .textchunk import TextChunk, TextChunkTypedDict
-from .tool import Tool, ToolTypedDict
-from .toolcall import ToolCall, ToolCallTypedDict
+from .tool import Tool, ToolToolTypes, ToolTypedDict
+from .toolcall import ToolCall, ToolCallTypedDict, ToolTypes
from .toolmessage import ToolMessage, ToolMessageRole, ToolMessageTypedDict
from .usageinfo import UsageInfo, UsageInfoTypedDict
from .usermessage import UserMessage, UserMessageContent, UserMessageContentTypedDict, UserMessageRole, UserMessageTypedDict
from .validationerror import Loc, LocTypedDict, ValidationError, ValidationErrorTypedDict
-__all__ = ["Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestMessages", "ChatCompletionRequestMessagesTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "Role", "SDKError", "Security", "SecurityTypedDict", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict"]
+__all__ = ["Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestMessages", "ChatCompletionRequestMessagesTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "Role", "SDKError", "Security", "SecurityTypedDict", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolToolTypes", "ToolTypedDict", "ToolTypes", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict"]
diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py
index acfd5bb..9199545 100644
--- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py
+++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py
@@ -3,20 +3,19 @@
from __future__ import annotations
from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
from mistralai_azure.types import BaseModel
-from typing import Literal, Optional, TypedDict
-from typing_extensions import NotRequired
+from typing import Literal, TypedDict
ChatCompletionChoiceFinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"]
class ChatCompletionChoiceTypedDict(TypedDict):
index: int
+ message: AssistantMessageTypedDict
finish_reason: ChatCompletionChoiceFinishReason
- message: NotRequired[AssistantMessageTypedDict]
class ChatCompletionChoice(BaseModel):
index: int
+ message: AssistantMessage
finish_reason: ChatCompletionChoiceFinishReason
- message: Optional[AssistantMessage] = None
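
With `message` promoted from `NotRequired`/`Optional` to a required field, a choice always carries an assistant message, so callers no longer need a `None` check before reading it. A minimal sketch (the content string is illustrative):

```python
from mistralai_azure.models import AssistantMessage, ChatCompletionChoice

choice = ChatCompletionChoice(
    index=0,
    message=AssistantMessage(content="Claude Monet is often cited."),
    finish_reason="stop",
)

# `message` is always present now, so this access needs no None guard.
print(choice.message.content)
```
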
diff --git a/packages/mistralai_azure/src/mistralai_azure/models/tool.py b/packages/mistralai_azure/src/mistralai_azure/models/tool.py
index e77c77d..48c5ba8 100644
--- a/packages/mistralai_azure/src/mistralai_azure/models/tool.py
+++ b/packages/mistralai_azure/src/mistralai_azure/models/tool.py
@@ -2,17 +2,21 @@
from __future__ import annotations
from .function import Function, FunctionTypedDict
-from mistralai_azure.types import BaseModel
-import pydantic
-from typing import Final, Optional, TypedDict
-from typing_extensions import Annotated
+from mistralai_azure.types import BaseModel, UnrecognizedStr
+from mistralai_azure.utils import validate_open_enum
+from pydantic.functional_validators import PlainValidator
+from typing import Literal, Optional, TypedDict, Union
+from typing_extensions import Annotated, NotRequired
+ToolToolTypes = Union[Literal["function"], UnrecognizedStr]
+
class ToolTypedDict(TypedDict):
function: FunctionTypedDict
+ type: NotRequired[ToolToolTypes]
class Tool(BaseModel):
function: Function
- TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore
+ type: Annotated[Optional[ToolToolTypes], PlainValidator(validate_open_enum(False))] = "function"
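
Here `type` changes from a pydantic `Final` constant to an open enum that still defaults to `"function"` but validates and serializes like a normal field. A hedged sketch, assuming the generated `Function` model keeps its required `name` and `parameters` fields (the function name below is made up for illustration):

```python
from mistralai_azure.models import Function, Tool

tool = Tool(
    function=Function(
        name="get_weather",  # illustrative function name
        parameters={"type": "object", "properties": {}},
    )
)

print(tool.type)                     # "function" (the default)
print(tool.model_dump(mode="json"))  # "type" round-trips through dump/validate like any other field
```
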
diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py
index f15bee9..578d6ff 100644
--- a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py
+++ b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py
@@ -2,19 +2,23 @@
from __future__ import annotations
from .functioncall import FunctionCall, FunctionCallTypedDict
-from mistralai_azure.types import BaseModel
-import pydantic
-from typing import Final, Optional, TypedDict
+from mistralai_azure.types import BaseModel, UnrecognizedStr
+from mistralai_azure.utils import validate_open_enum
+from pydantic.functional_validators import PlainValidator
+from typing import Literal, Optional, TypedDict, Union
from typing_extensions import Annotated, NotRequired
+ToolTypes = Union[Literal["function"], UnrecognizedStr]
+
class ToolCallTypedDict(TypedDict):
function: FunctionCallTypedDict
id: NotRequired[str]
+ type: NotRequired[ToolTypes]
class ToolCall(BaseModel):
function: FunctionCall
id: Optional[str] = "null"
- TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore
+ type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = "function"
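
`ToolCall.type` gets the same treatment. Because the new `ToolTypes` is an open enum (`Literal["function"]` or `UnrecognizedStr`), a value outside the known set is expected to be preserved by validation rather than rejected; the sketch below assumes the generated `validate_open_enum` helper behaves that way:

```python
from mistralai_azure.models import FunctionCall, ToolCall

call = ToolCall(function=FunctionCall(name="get_weather", arguments="{}"))
print(call.type)  # "function" by default

# An unknown `type` coming back from the API should be kept as-is instead of raising.
raw = ToolCall.model_validate(
    {"function": {"name": "get_weather", "arguments": "{}"}, "type": "code_interpreter"}
)
print(raw.type)  # "code_interpreter"
```
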
diff --git a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py
index b6cff2a..b0a94cc 100644
--- a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py
+++ b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py
@@ -29,9 +29,9 @@ class SDKConfiguration:
server: Optional[str] = ""
language: str = "python"
openapi_doc_version: str = "0.0.2"
- sdk_version: str = "1.0.0-rc.4"
- gen_version: str = "2.390.6"
- user_agent: str = "speakeasy-sdk/python 1.0.0-rc.4 2.390.6 0.0.2 mistralai_azure"
+ sdk_version: str = "1.0.0-rc.7"
+ gen_version: str = "2.399.0"
+ user_agent: str = "speakeasy-sdk/python 1.0.0-rc.7 2.399.0 0.0.2 mistralai_azure"
retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET)
timeout_ms: Optional[int] = None
diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock
index c28b218..1b9cf79 100644
--- a/packages/mistralai_gcp/.speakeasy/gen.lock
+++ b/packages/mistralai_gcp/.speakeasy/gen.lock
@@ -1,18 +1,18 @@
lockVersion: 2.0.0
id: ec60f2d8-7869-45c1-918e-773d41a8cf74
management:
- docChecksum: e2bc44269918d569bbc51b1521c4c29b
+ docChecksum: 4cc6e7c5c5ba15491872c600d4a247ef
docVersion: 0.0.2
- speakeasyVersion: 1.357.4
- generationVersion: 2.390.6
- releaseVersion: 1.0.0-rc.4
- configChecksum: a8248e0ef5bdbc73910c2aae86c3c3b5
+ speakeasyVersion: 1.372.0
+ generationVersion: 2.399.0
+ releaseVersion: 1.0.0-rc.8
+ configChecksum: 1830c00a5e810fe954553fe55fdf9b71
published: true
features:
python:
additionalDependencies: 1.0.0
constsAndDefaults: 1.0.2
- core: 5.3.7
+ core: 5.3.8
defaultEnabledRetries: 0.2.0
envVarSecurityUsage: 0.3.1
examples: 3.0.0
@@ -23,13 +23,14 @@ features:
globalServerURLs: 3.0.0
nameOverrides: 3.0.0
nullables: 1.0.0
+ openEnums: 1.0.0
responseFormat: 1.0.0
retries: 3.0.0
sdkHooks: 1.0.0
serverEvents: 1.0.2
serverEventsSentinels: 0.1.0
serverIDs: 3.0.0
- unions: 3.0.1
+ unions: 3.0.2
generatedFiles:
- src/mistralai_gcp/sdkconfiguration.py
- src/mistralai_gcp/chat.py
@@ -96,6 +97,7 @@ generatedFiles:
- docs/models/finishreason.md
- docs/models/completionresponsestreamchoice.md
- docs/models/deltamessage.md
+ - docs/models/tooltypes.md
- docs/models/toolcall.md
- docs/models/arguments.md
- docs/models/functioncall.md
@@ -107,6 +109,7 @@ generatedFiles:
- docs/models/messages.md
- docs/models/toolchoice.md
- docs/models/chatcompletionstreamrequest.md
+ - docs/models/tooltooltypes.md
- docs/models/tool.md
- docs/models/function.md
- docs/models/responseformats.md
diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml
index 8e6ae7b..7ddc742 100644
--- a/packages/mistralai_gcp/.speakeasy/gen.yaml
+++ b/packages/mistralai_gcp/.speakeasy/gen.yaml
@@ -12,7 +12,7 @@ generation:
auth:
oAuth2ClientCredentialsEnabled: true
python:
- version: 1.0.0-rc.4
+ version: 1.0.0-rc.8
additionalDependencies:
dev:
pytest: ^8.2.2
diff --git a/packages/mistralai_gcp/docs/models/chatcompletionchoice.md b/packages/mistralai_gcp/docs/models/chatcompletionchoice.md
index 6fa839b..deaa0ea 100644
--- a/packages/mistralai_gcp/docs/models/chatcompletionchoice.md
+++ b/packages/mistralai_gcp/docs/models/chatcompletionchoice.md
@@ -6,5 +6,5 @@
| Field | Type | Required | Description | Example |
| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- |
| `index` | *int* | :heavy_check_mark: | N/A | 0 |
-| `finish_reason` | [models.ChatCompletionChoiceFinishReason](../models/chatcompletionchoicefinishreason.md) | :heavy_check_mark: | N/A | stop |
-| `message` | [Optional[models.AssistantMessage]](../models/assistantmessage.md) | :heavy_minus_sign: | N/A | |
\ No newline at end of file
+| `message` | [models.AssistantMessage](../models/assistantmessage.md) | :heavy_check_mark: | N/A | |
+| `finish_reason` | [models.ChatCompletionChoiceFinishReason](../models/chatcompletionchoicefinishreason.md) | :heavy_check_mark: | N/A | stop |
\ No newline at end of file
diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md
index 3e30c64..fb3bfb4 100644
--- a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md
+++ b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md
@@ -5,8 +5,8 @@
| Field | Type | Required | Description | Example |
| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest |
-| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} |
+| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest |
+| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] |
| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | |
| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | |
| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | |
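
For the GCP package, `model` stays required alongside the now list-shaped `messages` example. A hedged end-to-end sketch; the `MistralGoogleCloud` constructor arguments (`region`, `project_id`) and ambient Google credentials are assumptions, not part of this patch:

```python
from mistralai_gcp import MistralGoogleCloud

client = MistralGoogleCloud(region="europe-west4", project_id="<your-gcp-project>")

res = client.chat.complete(
    model="mistral-small-latest",
    messages=[
        {"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}
    ],
)
if res is not None and res.choices:
    print(res.choices[0].message.content)
```
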
diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md
index adc7ff9..d7b7fe9 100644
--- a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md
+++ b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md
@@ -5,8 +5,8 @@
| Field | Type | Required | Description | Example |
| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest |
-| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} |
+| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest |
+| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] |
| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | |
| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | |
| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | |
diff --git a/packages/mistralai_gcp/docs/models/tool.md b/packages/mistralai_gcp/docs/models/tool.md
index 291394c..ca624a9 100644
--- a/packages/mistralai_gcp/docs/models/tool.md
+++ b/packages/mistralai_gcp/docs/models/tool.md
@@ -3,7 +3,7 @@
## Fields
-| Field | Type | Required | Description |
-| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- |
-| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A |
-| `type` | *Optional[str]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ |
+| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A |
+| `type` | [Optional[models.ToolToolTypes]](../models/tooltooltypes.md) | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/packages/mistralai_gcp/docs/models/toolcall.md b/packages/mistralai_gcp/docs/models/toolcall.md
index bd2dc9f..7aca5fc 100644
--- a/packages/mistralai_gcp/docs/models/toolcall.md
+++ b/packages/mistralai_gcp/docs/models/toolcall.md
@@ -3,8 +3,8 @@
## Fields
-| Field | Type | Required | Description |
-| ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ |
-| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A |
-| `id` | *Optional[str]* | :heavy_minus_sign: | N/A |
-| `type` | *Optional[str]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- |
+| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A |
+| `id` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/packages/mistralai_gcp/docs/models/tooltooltypes.md b/packages/mistralai_gcp/docs/models/tooltooltypes.md
new file mode 100644
index 0000000..e396430
--- /dev/null
+++ b/packages/mistralai_gcp/docs/models/tooltooltypes.md
@@ -0,0 +1,8 @@
+# ToolToolTypes
+
+
+## Values
+
+| Name | Value |
+| ---------- | ---------- |
+| `FUNCTION` | function |
\ No newline at end of file
diff --git a/packages/mistralai_gcp/docs/models/tooltypes.md b/packages/mistralai_gcp/docs/models/tooltypes.md
new file mode 100644
index 0000000..84e4925
--- /dev/null
+++ b/packages/mistralai_gcp/docs/models/tooltypes.md
@@ -0,0 +1,8 @@
+# ToolTypes
+
+
+## Values
+
+| Name | Value |
+| ---------- | ---------- |
+| `FUNCTION` | function |
\ No newline at end of file
diff --git a/packages/mistralai_gcp/poetry.lock b/packages/mistralai_gcp/poetry.lock
index befb32f..8a625e2 100644
--- a/packages/mistralai_gcp/poetry.lock
+++ b/packages/mistralai_gcp/poetry.lock
@@ -711,13 +711,13 @@ testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"]
[[package]]
name = "python-dateutil"
-version = "2.9.0.post0"
+version = "2.8.2"
description = "Extensions to the standard Python datetime module"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
files = [
- {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"},
- {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"},
+ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
+ {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
]
[package.dependencies]
@@ -859,4 +859,4 @@ zstd = ["zstandard (>=0.18.0)"]
[metadata]
lock-version = "2.0"
python-versions = "^3.8"
-content-hash = "f7ec8ed73d60233b1bf0450f38af7d51c9dfe088ae0a0b8ee975ba2ae512d817"
+content-hash = "c693a1bfd23435953d0a7305446907287d0d66ba881c76188dca0a9eefc7a1b6"
diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml
index 90afd36..7e7a2a1 100644
--- a/packages/mistralai_gcp/pyproject.toml
+++ b/packages/mistralai_gcp/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "mistralai-gcp"
-version = "1.0.0-rc.4"
+version = "1.0.0-rc.8"
description = "Python Client SDK for the Mistral AI API in GCP."
authors = ["Mistral",]
readme = "README-PYPI.md"
@@ -22,7 +22,7 @@ google-auth = "^2.31.0"
httpx = "^0.27.0"
jsonpath-python = "^1.0.6"
pydantic = "~2.8.2"
-python-dateutil = "^2.9.0.post0"
+python-dateutil = "2.8.2"
requests = "^2.32.3"
typing-inspect = "^0.9.0"
diff --git a/packages/mistralai_gcp/scripts/publish.sh b/packages/mistralai_gcp/scripts/publish.sh
index 1ee7194..ab45b1f 100755
--- a/packages/mistralai_gcp/scripts/publish.sh
+++ b/packages/mistralai_gcp/scripts/publish.sh
@@ -2,4 +2,6 @@
export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN}
+poetry run python scripts/prepare-readme.py
+
poetry publish --build --skip-existing
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/mistralai_gcp/src/mistralai_gcp/chat.py
index d9ad7bc..7a1e7f7 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/chat.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/chat.py
@@ -33,7 +33,7 @@ def stream(
Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
- :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
+ :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
:param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
:param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
:param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -146,7 +146,7 @@ async def stream_async(
Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
- :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
+ :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
:param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
:param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
:param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -257,7 +257,7 @@ def complete(
) -> Optional[models.ChatCompletionResponse]:
r"""Chat Completion
- :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
+ :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
:param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
:param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
:param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -367,7 +367,7 @@ async def complete_async(
) -> Optional[models.ChatCompletionResponse]:
r"""Chat Completion
- :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
+ :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
:param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
:param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
:param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py
index 79fb7c9..7c8c1f4 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py
@@ -21,11 +21,11 @@
from .security import Security, SecurityTypedDict
from .systemmessage import Content, ContentTypedDict, Role, SystemMessage, SystemMessageTypedDict
from .textchunk import TextChunk, TextChunkTypedDict
-from .tool import Tool, ToolTypedDict
-from .toolcall import ToolCall, ToolCallTypedDict
+from .tool import Tool, ToolToolTypes, ToolTypedDict
+from .toolcall import ToolCall, ToolCallTypedDict, ToolTypes
from .toolmessage import ToolMessage, ToolMessageRole, ToolMessageTypedDict
from .usageinfo import UsageInfo, UsageInfoTypedDict
from .usermessage import UserMessage, UserMessageContent, UserMessageContentTypedDict, UserMessageRole, UserMessageTypedDict
from .validationerror import Loc, LocTypedDict, ValidationError, ValidationErrorTypedDict
-__all__ = ["Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestMessages", "ChatCompletionRequestMessagesTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "Role", "SDKError", "Security", "SecurityTypedDict", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict"]
+__all__ = ["Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestMessages", "ChatCompletionRequestMessagesTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "Role", "SDKError", "Security", "SecurityTypedDict", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolToolTypes", "ToolTypedDict", "ToolTypes", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict"]
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py
index d868422..67ff1f5 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py
@@ -3,20 +3,19 @@
from __future__ import annotations
from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
from mistralai_gcp.types import BaseModel
-from typing import Literal, Optional, TypedDict
-from typing_extensions import NotRequired
+from typing import Literal, TypedDict
ChatCompletionChoiceFinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"]
class ChatCompletionChoiceTypedDict(TypedDict):
index: int
+ message: AssistantMessageTypedDict
finish_reason: ChatCompletionChoiceFinishReason
- message: NotRequired[AssistantMessageTypedDict]
class ChatCompletionChoice(BaseModel):
index: int
+ message: AssistantMessage
finish_reason: ChatCompletionChoiceFinishReason
- message: Optional[AssistantMessage] = None
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py
index a105359..45f61e7 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py
@@ -32,7 +32,7 @@
class ChatCompletionRequestTypedDict(TypedDict):
model: Nullable[str]
- r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
+ r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
messages: List[ChatCompletionRequestMessagesTypedDict]
r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
temperature: NotRequired[float]
@@ -56,7 +56,7 @@ class ChatCompletionRequestTypedDict(TypedDict):
class ChatCompletionRequest(BaseModel):
model: Nullable[str]
- r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
+ r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
messages: List[ChatCompletionRequestMessages]
r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
temperature: Optional[float] = 0.7
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py
index ecf8393..a07f71e 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py
@@ -32,7 +32,7 @@
class ChatCompletionStreamRequestTypedDict(TypedDict):
model: Nullable[str]
- r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
+ r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
messages: List[MessagesTypedDict]
r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
temperature: NotRequired[float]
@@ -55,7 +55,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
class ChatCompletionStreamRequest(BaseModel):
model: Nullable[str]
- r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
+ r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
messages: List[Messages]
r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
temperature: Optional[float] = 0.7
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py b/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py
index b4e0645..2e860d9 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py
@@ -2,17 +2,21 @@
from __future__ import annotations
from .function import Function, FunctionTypedDict
-from mistralai_gcp.types import BaseModel
-import pydantic
-from typing import Final, Optional, TypedDict
-from typing_extensions import Annotated
+from mistralai_gcp.types import BaseModel, UnrecognizedStr
+from mistralai_gcp.utils import validate_open_enum
+from pydantic.functional_validators import PlainValidator
+from typing import Literal, Optional, TypedDict, Union
+from typing_extensions import Annotated, NotRequired
+ToolToolTypes = Union[Literal["function"], UnrecognizedStr]
+
class ToolTypedDict(TypedDict):
function: FunctionTypedDict
+ type: NotRequired[ToolToolTypes]
class Tool(BaseModel):
function: Function
- TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore
+ type: Annotated[Optional[ToolToolTypes], PlainValidator(validate_open_enum(False))] = "function"
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py
index 5ea87fd..7f22889 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py
@@ -2,19 +2,23 @@
from __future__ import annotations
from .functioncall import FunctionCall, FunctionCallTypedDict
-from mistralai_gcp.types import BaseModel
-import pydantic
-from typing import Final, Optional, TypedDict
+from mistralai_gcp.types import BaseModel, UnrecognizedStr
+from mistralai_gcp.utils import validate_open_enum
+from pydantic.functional_validators import PlainValidator
+from typing import Literal, Optional, TypedDict, Union
from typing_extensions import Annotated, NotRequired
+ToolTypes = Union[Literal["function"], UnrecognizedStr]
+
class ToolCallTypedDict(TypedDict):
function: FunctionCallTypedDict
id: NotRequired[str]
+ type: NotRequired[ToolTypes]
class ToolCall(BaseModel):
function: FunctionCall
id: Optional[str] = "null"
- TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore
+ type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = "function"
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py
index 94d271b..9b354d3 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py
@@ -29,9 +29,9 @@ class SDKConfiguration:
server: Optional[str] = ""
language: str = "python"
openapi_doc_version: str = "0.0.2"
- sdk_version: str = "1.0.0-rc.4"
- gen_version: str = "2.390.6"
- user_agent: str = "speakeasy-sdk/python 1.0.0-rc.4 2.390.6 0.0.2 mistralai-gcp"
+ sdk_version: str = "1.0.0-rc.8"
+ gen_version: str = "2.399.0"
+ user_agent: str = "speakeasy-sdk/python 1.0.0-rc.8 2.399.0 0.0.2 mistralai-gcp"
retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET)
timeout_ms: Optional[int] = None
diff --git a/pyproject.toml b/pyproject.toml
index 810ec3d..d8b7854 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "mistralai"
-version = "1.0.1"
+version = "1.1.3"
description = "Python Client SDK for the Mistral AI API."
authors = ["Mistral"]
readme = "README.md"
diff --git a/scripts/publish.sh b/scripts/publish.sh
index 1ee7194..ab45b1f 100755
--- a/scripts/publish.sh
+++ b/scripts/publish.sh
@@ -2,4 +2,6 @@
export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN}
+poetry run python scripts/prepare-readme.py
+
poetry publish --build --skip-existing
diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py
index e1c8704..aa29125 100644
--- a/src/mistralai/agents.py
+++ b/src/mistralai/agents.py
@@ -3,7 +3,7 @@
from .basesdk import BaseSDK
from mistralai import models, utils
from mistralai._hooks import HookContext
-from mistralai.types import Nullable, OptionalNullable, UNSET
+from mistralai.types import OptionalNullable, UNSET
from mistralai.utils import eventstreaming, get_security_from_env
from typing import Any, AsyncGenerator, Generator, List, Optional, Union
@@ -221,16 +221,16 @@ async def complete_async(
def stream(
self, *,
- model: Nullable[str],
- prompt: str,
- temperature: Optional[float] = 0.7,
- top_p: Optional[float] = 1,
+ messages: Union[List[models.AgentsCompletionStreamRequestMessages], List[models.AgentsCompletionStreamRequestMessagesTypedDict]],
+ agent_id: str,
max_tokens: OptionalNullable[int] = UNSET,
min_tokens: OptionalNullable[int] = UNSET,
stream: Optional[bool] = True,
stop: Optional[Union[models.AgentsCompletionStreamRequestStop, models.AgentsCompletionStreamRequestStopTypedDict]] = None,
random_seed: OptionalNullable[int] = UNSET,
- suffix: OptionalNullable[str] = UNSET,
+ response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None,
+ tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET,
+ tool_choice: Optional[models.AgentsCompletionStreamRequestToolChoice] = "auto",
retries: OptionalNullable[utils.RetryConfig] = UNSET,
server_url: Optional[str] = None,
timeout_ms: Optional[int] = None,
@@ -239,16 +239,16 @@ def stream(
Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
- :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
- :param prompt: The text/code to complete.
- :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
- :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
+ :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
+ :param agent_id: The ID of the agent to use for this completion.
:param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
:param min_tokens: The minimum number of tokens to generate in the completion.
:param stream:
:param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
:param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
- :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
+ :param response_format:
+ :param tools:
+ :param tool_choice:
:param retries: Override the default retry configuration for this method
:param server_url: Override the default server URL for this method
:param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -262,16 +262,16 @@ def stream(
base_url = server_url
request = models.AgentsCompletionStreamRequest(
- model=model,
- temperature=temperature,
- top_p=top_p,
max_tokens=max_tokens,
min_tokens=min_tokens,
stream=stream,
stop=stop,
random_seed=random_seed,
- prompt=prompt,
- suffix=suffix,
+ messages=utils.get_pydantic_model(messages, List[models.AgentsCompletionStreamRequestMessages]),
+ response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]),
+ tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]),
+ tool_choice=tool_choice,
+ agent_id=agent_id,
)
req = self.build_request(
@@ -328,16 +328,16 @@ def stream(
async def stream_async(
self, *,
- model: Nullable[str],
- prompt: str,
- temperature: Optional[float] = 0.7,
- top_p: Optional[float] = 1,
+ messages: Union[List[models.AgentsCompletionStreamRequestMessages], List[models.AgentsCompletionStreamRequestMessagesTypedDict]],
+ agent_id: str,
max_tokens: OptionalNullable[int] = UNSET,
min_tokens: OptionalNullable[int] = UNSET,
stream: Optional[bool] = True,
stop: Optional[Union[models.AgentsCompletionStreamRequestStop, models.AgentsCompletionStreamRequestStopTypedDict]] = None,
random_seed: OptionalNullable[int] = UNSET,
- suffix: OptionalNullable[str] = UNSET,
+ response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None,
+ tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET,
+ tool_choice: Optional[models.AgentsCompletionStreamRequestToolChoice] = "auto",
retries: OptionalNullable[utils.RetryConfig] = UNSET,
server_url: Optional[str] = None,
timeout_ms: Optional[int] = None,
@@ -346,16 +346,16 @@ async def stream_async(
Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
- :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
- :param prompt: The text/code to complete.
- :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
- :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
+ :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
+ :param agent_id: The ID of the agent to use for this completion.
:param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
:param min_tokens: The minimum number of tokens to generate in the completion.
:param stream:
:param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
:param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
- :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
+ :param response_format:
+ :param tools:
+ :param tool_choice:
:param retries: Override the default retry configuration for this method
:param server_url: Override the default server URL for this method
:param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -369,16 +369,16 @@ async def stream_async(
base_url = server_url
request = models.AgentsCompletionStreamRequest(
- model=model,
- temperature=temperature,
- top_p=top_p,
max_tokens=max_tokens,
min_tokens=min_tokens,
stream=stream,
stop=stop,
random_seed=random_seed,
- prompt=prompt,
- suffix=suffix,
+ messages=utils.get_pydantic_model(messages, List[models.AgentsCompletionStreamRequestMessages]),
+ response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]),
+ tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]),
+ tool_choice=tool_choice,
+ agent_id=agent_id,
)
req = self.build_request(
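For reference, the reworked `Agents.stream()` signature is called roughly as in the sketch below. This is a usage sketch, not part of the patch: the agent id is a placeholder, and the event access (`event.data.choices[0].delta.content`) assumes the same generated `CompletionEvent`/`CompletionChunk` shape the SDK already uses for chat streaming.

```python
import os
from mistralai import Mistral

client = Mistral(api_key=os.getenv("MISTRAL_API_KEY", ""))

# messages/agent_id replace the old model/prompt parameters on stream().
res = client.agents.stream(
    agent_id="ag-your-agent-id",  # placeholder agent id
    messages=[{"role": "user", "content": "Who is the best French painter?"}],
)
for event in res:
    # Each server-sent event wraps a CompletionChunk in event.data.
    print(event.data.choices[0].delta.content or "", end="")
```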
diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py
index 1323be2..e83fc34 100644
--- a/src/mistralai/chat.py
+++ b/src/mistralai/chat.py
@@ -32,7 +32,7 @@ def complete(
) -> Optional[models.ChatCompletionResponse]:
r"""Chat Completion
- :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
+ :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
:param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
:param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
:param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -145,7 +145,7 @@ async def complete_async(
) -> Optional[models.ChatCompletionResponse]:
r"""Chat Completion
- :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
+ :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
:param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
:param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
:param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -260,7 +260,7 @@ def stream(
Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
- :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
+ :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
:param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
:param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
:param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -376,7 +376,7 @@ async def stream_async(
Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
- :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
+ :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
:param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
:param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
:param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
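The chat docstrings above only swap the documentation link for the list-models operation; the call pattern itself is unchanged. A quick sketch, with a placeholder model name, of how that link maps onto the SDK:

```python
import os
from mistralai import Mistral

client = Mistral(api_key=os.getenv("MISTRAL_API_KEY", ""))

# The updated docstring link points at the list-models endpoint,
# exposed by the SDK as client.models.list().
available = client.models.list()  # ModelList of available models

res = client.chat.complete(
    model="mistral-small-latest",  # any model id returned above
    messages=[{"role": "user", "content": "Hello!"}],
)
print(res.choices[0].message.content)
```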
diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py
index f316270..cb21b8f 100644
--- a/src/mistralai/models/__init__.py
+++ b/src/mistralai/models/__init__.py
@@ -1,7 +1,7 @@
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
from .agentscompletionrequest import AgentsCompletionRequest, AgentsCompletionRequestMessages, AgentsCompletionRequestMessagesTypedDict, AgentsCompletionRequestStop, AgentsCompletionRequestStopTypedDict, AgentsCompletionRequestToolChoice, AgentsCompletionRequestTypedDict
-from .agentscompletionstreamrequest import AgentsCompletionStreamRequest, AgentsCompletionStreamRequestStop, AgentsCompletionStreamRequestStopTypedDict, AgentsCompletionStreamRequestTypedDict
+from .agentscompletionstreamrequest import AgentsCompletionStreamRequest, AgentsCompletionStreamRequestMessages, AgentsCompletionStreamRequestMessagesTypedDict, AgentsCompletionStreamRequestStop, AgentsCompletionStreamRequestStopTypedDict, AgentsCompletionStreamRequestToolChoice, AgentsCompletionStreamRequestTypedDict
from .archiveftmodelout import ArchiveFTModelOut, ArchiveFTModelOutTypedDict
from .assistantmessage import AssistantMessage, AssistantMessageRole, AssistantMessageTypedDict
from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict, FinishReason
@@ -64,8 +64,8 @@
from .source import Source
from .systemmessage import Content, ContentTypedDict, Role, SystemMessage, SystemMessageTypedDict
from .textchunk import TextChunk, TextChunkTypedDict
-from .tool import Tool, ToolTypedDict
-from .toolcall import ToolCall, ToolCallTypedDict
+from .tool import Tool, ToolToolTypes, ToolTypedDict
+from .toolcall import ToolCall, ToolCallTypedDict, ToolTypes
from .toolmessage import ToolMessage, ToolMessageRole, ToolMessageTypedDict
from .trainingfile import TrainingFile, TrainingFileTypedDict
from .trainingparameters import TrainingParameters, TrainingParametersTypedDict
@@ -79,4 +79,4 @@
from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict
from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict
-__all__ = ["AgentsCompletionRequest", "AgentsCompletionRequestMessages", "AgentsCompletionRequestMessagesTypedDict", "AgentsCompletionRequestStop", "AgentsCompletionRequestStopTypedDict", "AgentsCompletionRequestToolChoice", "AgentsCompletionRequestTypedDict", "AgentsCompletionStreamRequest", "AgentsCompletionStreamRequestStop", "AgentsCompletionStreamRequestStopTypedDict", "AgentsCompletionStreamRequestTypedDict", "ArchiveFTModelOut", "ArchiveFTModelOutTypedDict", "Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestMessages", "ChatCompletionStreamRequestMessagesTypedDict", "ChatCompletionStreamRequestStop", "ChatCompletionStreamRequestStopTypedDict", "ChatCompletionStreamRequestToolChoice", "ChatCompletionStreamRequestTypedDict", "CheckpointOut", "CheckpointOutTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceFinishReason", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeleteFileOut", "DeleteFileOutTypedDict", "DeleteModelOut", "DeleteModelOutTypedDict", "DeleteModelV1ModelsModelIDDeleteRequest", "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "DetailedJobOut", "DetailedJobOutStatus", "DetailedJobOutTypedDict", "EmbeddingRequest", "EmbeddingRequestTypedDict", "EmbeddingResponse", "EmbeddingResponseData", "EmbeddingResponseDataTypedDict", "EmbeddingResponseTypedDict", "EventOut", "EventOutTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FTModelCapabilitiesOut", "FTModelCapabilitiesOutTypedDict", "FTModelOut", "FTModelOutTypedDict", "File", "FileSchema", "FileSchemaTypedDict", "FileTypedDict", "FilesAPIRoutesDeleteFileRequest", "FilesAPIRoutesDeleteFileRequestTypedDict", "FilesAPIRoutesRetrieveFileRequest", "FilesAPIRoutesRetrieveFileRequestTypedDict", "FilesAPIRoutesUploadFileMultiPartBodyParams", "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", "FineTuneableModel", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "GithubRepositoryIn", "GithubRepositoryInTypedDict", "GithubRepositoryOut", "GithubRepositoryOutTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Inputs", "InputsTypedDict", "JobIn", "JobInTypedDict", "JobMetadataOut", "JobMetadataOutTypedDict", "JobOut", "JobOutTypedDict", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobRequest", "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", 
"JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", "JobsAPIRoutesFineTuningStartFineTuningJobRequest", "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", "JobsOut", "JobsOutTypedDict", "LegacyJobMetadataOut", "LegacyJobMetadataOutTypedDict", "ListFilesOut", "ListFilesOutTypedDict", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "MetricOut", "MetricOutTypedDict", "ModelCapabilities", "ModelCapabilitiesTypedDict", "ModelCard", "ModelCardTypedDict", "ModelList", "ModelListTypedDict", "QueryParamStatus", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "RetrieveFileOut", "RetrieveFileOutTypedDict", "RetrieveModelV1ModelsModelIDGetRequest", "RetrieveModelV1ModelsModelIDGetRequestTypedDict", "Role", "SDKError", "SampleType", "Security", "SecurityTypedDict", "Source", "Status", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "TrainingFile", "TrainingFileTypedDict", "TrainingParameters", "TrainingParametersIn", "TrainingParametersInTypedDict", "TrainingParametersTypedDict", "UnarchiveFTModelOut", "UnarchiveFTModelOutTypedDict", "UpdateFTModelIn", "UpdateFTModelInTypedDict", "UploadFileOut", "UploadFileOutTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict", "WandbIntegration", "WandbIntegrationOut", "WandbIntegrationOutTypedDict", "WandbIntegrationTypedDict"]
+__all__ = ["AgentsCompletionRequest", "AgentsCompletionRequestMessages", "AgentsCompletionRequestMessagesTypedDict", "AgentsCompletionRequestStop", "AgentsCompletionRequestStopTypedDict", "AgentsCompletionRequestToolChoice", "AgentsCompletionRequestTypedDict", "AgentsCompletionStreamRequest", "AgentsCompletionStreamRequestMessages", "AgentsCompletionStreamRequestMessagesTypedDict", "AgentsCompletionStreamRequestStop", "AgentsCompletionStreamRequestStopTypedDict", "AgentsCompletionStreamRequestToolChoice", "AgentsCompletionStreamRequestTypedDict", "ArchiveFTModelOut", "ArchiveFTModelOutTypedDict", "Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestMessages", "ChatCompletionStreamRequestMessagesTypedDict", "ChatCompletionStreamRequestStop", "ChatCompletionStreamRequestStopTypedDict", "ChatCompletionStreamRequestToolChoice", "ChatCompletionStreamRequestTypedDict", "CheckpointOut", "CheckpointOutTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceFinishReason", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeleteFileOut", "DeleteFileOutTypedDict", "DeleteModelOut", "DeleteModelOutTypedDict", "DeleteModelV1ModelsModelIDDeleteRequest", "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "DetailedJobOut", "DetailedJobOutStatus", "DetailedJobOutTypedDict", "EmbeddingRequest", "EmbeddingRequestTypedDict", "EmbeddingResponse", "EmbeddingResponseData", "EmbeddingResponseDataTypedDict", "EmbeddingResponseTypedDict", "EventOut", "EventOutTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FTModelCapabilitiesOut", "FTModelCapabilitiesOutTypedDict", "FTModelOut", "FTModelOutTypedDict", "File", "FileSchema", "FileSchemaTypedDict", "FileTypedDict", "FilesAPIRoutesDeleteFileRequest", "FilesAPIRoutesDeleteFileRequestTypedDict", "FilesAPIRoutesRetrieveFileRequest", "FilesAPIRoutesRetrieveFileRequestTypedDict", "FilesAPIRoutesUploadFileMultiPartBodyParams", "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", "FineTuneableModel", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "GithubRepositoryIn", "GithubRepositoryInTypedDict", "GithubRepositoryOut", "GithubRepositoryOutTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Inputs", "InputsTypedDict", "JobIn", "JobInTypedDict", "JobMetadataOut", "JobMetadataOutTypedDict", "JobOut", "JobOutTypedDict", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobRequest", 
"JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", "JobsAPIRoutesFineTuningStartFineTuningJobRequest", "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", "JobsOut", "JobsOutTypedDict", "LegacyJobMetadataOut", "LegacyJobMetadataOutTypedDict", "ListFilesOut", "ListFilesOutTypedDict", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "MetricOut", "MetricOutTypedDict", "ModelCapabilities", "ModelCapabilitiesTypedDict", "ModelCard", "ModelCardTypedDict", "ModelList", "ModelListTypedDict", "QueryParamStatus", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "RetrieveFileOut", "RetrieveFileOutTypedDict", "RetrieveModelV1ModelsModelIDGetRequest", "RetrieveModelV1ModelsModelIDGetRequestTypedDict", "Role", "SDKError", "SampleType", "Security", "SecurityTypedDict", "Source", "Status", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolToolTypes", "ToolTypedDict", "ToolTypes", "TrainingFile", "TrainingFileTypedDict", "TrainingParameters", "TrainingParametersIn", "TrainingParametersInTypedDict", "TrainingParametersTypedDict", "UnarchiveFTModelOut", "UnarchiveFTModelOutTypedDict", "UpdateFTModelIn", "UpdateFTModelInTypedDict", "UploadFileOut", "UploadFileOutTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict", "WandbIntegration", "WandbIntegrationOut", "WandbIntegrationOutTypedDict", "WandbIntegrationTypedDict"]
diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py
index 91b5bcb..9398081 100644
--- a/src/mistralai/models/agentscompletionstreamrequest.py
+++ b/src/mistralai/models/agentscompletionstreamrequest.py
@@ -1,10 +1,16 @@
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
from __future__ import annotations
+from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .responseformat import ResponseFormat, ResponseFormatTypedDict
+from .tool import Tool, ToolTypedDict
+from .toolmessage import ToolMessage, ToolMessageTypedDict
+from .usermessage import UserMessage, UserMessageTypedDict
from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from pydantic import model_serializer
-from typing import List, Optional, TypedDict, Union
-from typing_extensions import NotRequired
+from mistralai.utils import get_discriminator
+from pydantic import Discriminator, Tag, model_serializer
+from typing import List, Literal, Optional, TypedDict, Union
+from typing_extensions import Annotated, NotRequired
AgentsCompletionStreamRequestStopTypedDict = Union[str, List[str]]
@@ -15,18 +21,19 @@
r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+AgentsCompletionStreamRequestMessagesTypedDict = Union[UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict]
+
+
+AgentsCompletionStreamRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))]
+
+
+AgentsCompletionStreamRequestToolChoice = Literal["auto", "none", "any"]
+
class AgentsCompletionStreamRequestTypedDict(TypedDict):
- model: Nullable[str]
- r"""ID of the model to use. Only compatible for now with:
- - `codestral-2405`
- - `codestral-latest`
- """
- prompt: str
- r"""The text/code to complete."""
- temperature: NotRequired[float]
- r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
- top_p: NotRequired[float]
- r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
+ messages: List[AgentsCompletionStreamRequestMessagesTypedDict]
+ r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
+ agent_id: str
+ r"""The ID of the agent to use for this completion."""
max_tokens: NotRequired[Nullable[int]]
r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
min_tokens: NotRequired[Nullable[int]]
@@ -36,22 +43,16 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict):
r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
random_seed: NotRequired[Nullable[int]]
r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
- suffix: NotRequired[Nullable[str]]
- r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
+ response_format: NotRequired[ResponseFormatTypedDict]
+ tools: NotRequired[Nullable[List[ToolTypedDict]]]
+ tool_choice: NotRequired[AgentsCompletionStreamRequestToolChoice]
class AgentsCompletionStreamRequest(BaseModel):
- model: Nullable[str]
- r"""ID of the model to use. Only compatible for now with:
- - `codestral-2405`
- - `codestral-latest`
- """
- prompt: str
- r"""The text/code to complete."""
- temperature: Optional[float] = 0.7
- r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
- top_p: Optional[float] = 1
- r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
+ messages: List[AgentsCompletionStreamRequestMessages]
+ r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
+ agent_id: str
+ r"""The ID of the agent to use for this completion."""
max_tokens: OptionalNullable[int] = UNSET
r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
min_tokens: OptionalNullable[int] = UNSET
@@ -61,13 +62,14 @@ class AgentsCompletionStreamRequest(BaseModel):
r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
random_seed: OptionalNullable[int] = UNSET
r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
- suffix: OptionalNullable[str] = UNSET
- r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
+ response_format: Optional[ResponseFormat] = None
+ tools: OptionalNullable[List[Tool]] = UNSET
+ tool_choice: Optional[AgentsCompletionStreamRequestToolChoice] = "auto"
@model_serializer(mode="wrap")
def serialize_model(self, handler):
- optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"]
- nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"]
+ optional_fields = ["max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"]
+ nullable_fields = ["max_tokens", "min_tokens", "random_seed", "tools"]
null_default_fields = []
serialized = handler(self)
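Constructing the rebuilt request model directly looks roughly like the sketch below; the agent id is a placeholder, and `messages` is validated against the role-discriminated union (`user`/`assistant`/`tool`) introduced above.

```python
from mistralai import models

req = models.AgentsCompletionStreamRequest(
    agent_id="ag-your-agent-id",  # placeholder
    messages=[models.UserMessage(content="Summarize the latest run.")],
    tool_choice="auto",  # default; Literal["auto", "none", "any"]
)
# The custom serialize_model() shown in the diff drops UNSET optional fields.
print(req.model_dump(by_alias=True))
```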
diff --git a/src/mistralai/models/chatcompletionchoice.py b/src/mistralai/models/chatcompletionchoice.py
index 04d2350..748dbc1 100644
--- a/src/mistralai/models/chatcompletionchoice.py
+++ b/src/mistralai/models/chatcompletionchoice.py
@@ -3,20 +3,19 @@
from __future__ import annotations
from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
from mistralai.types import BaseModel
-from typing import Literal, Optional, TypedDict
-from typing_extensions import NotRequired
+from typing import Literal, TypedDict
FinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"]
class ChatCompletionChoiceTypedDict(TypedDict):
index: int
+ message: AssistantMessageTypedDict
finish_reason: FinishReason
- message: NotRequired[AssistantMessageTypedDict]
class ChatCompletionChoice(BaseModel):
index: int
+ message: AssistantMessage
finish_reason: FinishReason
- message: Optional[AssistantMessage] = None
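With `message` now required on each choice (it was previously optional), building one by hand looks like this sketch:

```python
from mistralai import models

choice = models.ChatCompletionChoice(
    index=0,
    message=models.AssistantMessage(content="Hello!"),  # no longer optional
    finish_reason="stop",
)
```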
diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py
index bf6baf2..e3440b7 100644
--- a/src/mistralai/models/chatcompletionrequest.py
+++ b/src/mistralai/models/chatcompletionrequest.py
@@ -32,7 +32,7 @@
class ChatCompletionRequestTypedDict(TypedDict):
model: Nullable[str]
- r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
+ r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
messages: List[MessagesTypedDict]
r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
temperature: NotRequired[float]
@@ -58,7 +58,7 @@ class ChatCompletionRequestTypedDict(TypedDict):
class ChatCompletionRequest(BaseModel):
model: Nullable[str]
- r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
+ r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
messages: List[Messages]
r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
temperature: Optional[float] = 0.7
diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py
index 9e2ae40..992584d 100644
--- a/src/mistralai/models/chatcompletionstreamrequest.py
+++ b/src/mistralai/models/chatcompletionstreamrequest.py
@@ -32,7 +32,7 @@
class ChatCompletionStreamRequestTypedDict(TypedDict):
model: Nullable[str]
- r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
+ r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
messages: List[ChatCompletionStreamRequestMessagesTypedDict]
r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
temperature: NotRequired[float]
@@ -57,7 +57,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
class ChatCompletionStreamRequest(BaseModel):
model: Nullable[str]
- r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
+ r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
messages: List[ChatCompletionStreamRequestMessages]
r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
temperature: Optional[float] = 0.7
diff --git a/src/mistralai/models/tool.py b/src/mistralai/models/tool.py
index c790e63..3a3ccdf 100644
--- a/src/mistralai/models/tool.py
+++ b/src/mistralai/models/tool.py
@@ -2,17 +2,21 @@
from __future__ import annotations
from .function import Function, FunctionTypedDict
-from mistralai.types import BaseModel
-import pydantic
-from typing import Final, Optional, TypedDict
-from typing_extensions import Annotated
+from mistralai.types import BaseModel, UnrecognizedStr
+from mistralai.utils import validate_open_enum
+from pydantic.functional_validators import PlainValidator
+from typing import Literal, Optional, TypedDict, Union
+from typing_extensions import Annotated, NotRequired
+ToolToolTypes = Union[Literal["function"], UnrecognizedStr]
+
class ToolTypedDict(TypedDict):
function: FunctionTypedDict
+ type: NotRequired[ToolToolTypes]
class Tool(BaseModel):
function: Function
- TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore
+ type: Annotated[Optional[ToolToolTypes], PlainValidator(validate_open_enum(False))] = "function"
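The constant `TYPE` field becomes a settable open enum: `"function"` remains the default, but unrecognized strings pass validation (via `validate_open_enum(False)`) instead of being rejected. A sketch, with a hypothetical function schema:

```python
from mistralai import models

tool = models.Tool(
    function=models.Function(
        name="get_weather",  # hypothetical tool
        parameters={"type": "object", "properties": {"city": {"type": "string"}}},
    ),
)
print(tool.type)  # "function" by default; unknown strings are preserved
```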
diff --git a/src/mistralai/models/toolcall.py b/src/mistralai/models/toolcall.py
index 2afd453..4842aff 100644
--- a/src/mistralai/models/toolcall.py
+++ b/src/mistralai/models/toolcall.py
@@ -2,19 +2,23 @@
from __future__ import annotations
from .functioncall import FunctionCall, FunctionCallTypedDict
-from mistralai.types import BaseModel
-import pydantic
-from typing import Final, Optional, TypedDict
+from mistralai.types import BaseModel, UnrecognizedStr
+from mistralai.utils import validate_open_enum
+from pydantic.functional_validators import PlainValidator
+from typing import Literal, Optional, TypedDict, Union
from typing_extensions import Annotated, NotRequired
+ToolTypes = Union[Literal["function"], UnrecognizedStr]
+
class ToolCallTypedDict(TypedDict):
function: FunctionCallTypedDict
id: NotRequired[str]
+ type: NotRequired[ToolTypes]
class ToolCall(BaseModel):
function: FunctionCall
id: Optional[str] = "null"
- TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore
+ type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = "function"
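`ToolCall` gets the same treatment: `type` is now a real, open-enum field that round-trips from API responses rather than a fixed constant. A short sketch with hypothetical values:

```python
from mistralai import models

call = models.ToolCall(
    id="call_0",  # hypothetical id
    function=models.FunctionCall(name="get_weather", arguments='{"city": "Paris"}'),
)
print(call.type)  # "function" by default; unrecognized values are kept as strings
```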
diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py
index 2a18bab..5baafe9 100644
--- a/src/mistralai/sdkconfiguration.py
+++ b/src/mistralai/sdkconfiguration.py
@@ -29,9 +29,9 @@ class SDKConfiguration:
server: Optional[str] = ""
language: str = "python"
openapi_doc_version: str = "0.0.2"
- sdk_version: str = "1.0.1"
- gen_version: str = "2.390.6"
- user_agent: str = "speakeasy-sdk/python 1.0.1 2.390.6 0.0.2 mistralai"
+ sdk_version: str = "1.1.3"
+ gen_version: str = "2.399.0"
+ user_agent: str = "speakeasy-sdk/python 1.1.3 2.399.0 0.0.2 mistralai"
retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET)
timeout_ms: Optional[int] = None